text stringlengths 11 4.05M |
|---|
package main
import "fmt"
// Test is an empty receiver type used only to group the demo methods below.
type Test struct {
}
// print draws a fixed grid of asterisks: 11 rows of 9 stars each
// (both loop bounds are inclusive), one row per output line.
func (t *Test) print() {
	for row := 0; row <= 10; row++ {
		for col := 0; col <= 8; col++ {
			fmt.Print("*")
		}
		fmt.Print("\n")
	}
}
// print2 draws an (n+1) x (m+1) grid of asterisks (inclusive bounds),
// one row per output line.
func (t *Test) print2(n, m int) {
	row := 0
	for row <= n {
		col := 0
		for col <= m {
			fmt.Print("*")
			col++
		}
		fmt.Print("\n")
		row++
	}
}
// area returns the area of an n-by-m rectangle.
func (t *Test) area(n, m float64) float64 {
	product := n * m
	return product
}
// test prints whether n is even ("偶数") or odd ("奇数").
//
// Fix: the odd branch previously printed "基数" ("cardinal number"),
// a homophone typo for "奇数" ("odd number").
func (t *Test) test(n int) {
	if n%2 == 0 {
		fmt.Print("偶数\n")
	} else {
		fmt.Print("奇数\n")
	}
}
// main exercises the Test demo methods: the fixed grid, the
// parameterized grid, and the area computation.
func main() {
	var demo Test
	demo.print()
	fmt.Print("\n")
	demo.print2(2, 6)
	result := demo.area(2, 6)
	fmt.Print("\n", result)
}
|
package lb_handler
import (
"dynamicpath/src/load_balancer/lb_context"
"dynamicpath/src/load_balancer/lb_util"
"dynamicpath/src/load_balancer/logger"
"fmt"
"net/http"
"time"
"github.com/sirupsen/logrus"
)
// HandlerLog is the logrus entry this package logs through.
var HandlerLog *logrus.Entry

// init wires the package-level logger to the shared logger instance.
func init() {
	// init Pool
	HandlerLog = logger.HandlerLog
}
// Handle runs the load-balancer event loop. It consumes PDU-session
// add/delete requests from lb_context.LBChannel, assigns each new
// session to a path, and replies over the request's HTTP channel.
// All topology mutations happen while holding lb_context.Mtx.
func Handle() {
	for {
		select {
		case msg, ok := <-lb_context.LBChannel:
			// Wait for Data Update
			if ok {
				lb_context.Wg.Wait()
				lb_context.Mtx.Lock()
				switch msg.Event {
				case lb_context.EventPduSessionAdd:
					val := msg.Value.(lb_context.PduSessionRequest)
					// key uniquely identifies the session: "<SUPI>-<PDU session id>"
					sessionKey := fmt.Sprintf("%s-%d", val.Supi, val.SessionInfo.PduSessionId)
					topology := lb_context.LB_Self().Topology
					session := lb_context.PduSessionContext{
						Supi: val.Supi,
						SessionInfo: val.SessionInfo,
					}
					// default choice: wherever round-robin currently points
					bestPathId := topology.RoundRobinCnt
					if lb_context.LB_Self().LoadBalancerType == lb_util.LoadBalancerType_DP {
						// Dynamic-path mode: scan candidates below the
						// shortest-path threshold for remaining bandwidth.
						// NOTE(review): bandwidth is decremented for EVERY
						// candidate with headroom, not just the one finally
						// chosen, and the last such candidate wins — confirm
						// this accounting is intended.
						allOverload := true
						for i := 0; i < lb_util.DP_ShortestPathThreshlod; i++ {
							if lb_util.AvailableBandwidth[i] < 0 {
								continue
							}
							lb_util.AvailableBandwidth[i] -= topology.Granularity
							allOverload = false
							bestPathId = topology.PathListAll[i].Id
						}
						if allOverload {
							// every candidate overloaded: disable DP selection
							// (threshold -2) and fall back to the first path
							lb_util.DP_ShortestPathThreshlod = -2
							bestPathId = topology.PathListAll[0].Id
						}
					} else {
						bestList := topology.PathListBest
						if bestList == nil {
							// Round-Robin if all path overflow
							topology.RoundRobinCnt = (topology.RoundRobinCnt + 1) % len(topology.PathInfos)
						} else {
							// Decide Best Path And Send to SMF
							bestPathId = bestList[0].Id
							lb_util.ModifyPathRemainRate(bestList[0], lb_util.MinusRemainRate, topology.Granularity)
						}
					}
					logger.UtilLog.Debugf("Supi-SessionId: %s, PathId: %d Load: %.2f", sessionKey, bestPathId, *topology.PathInfos[bestPathId].Path.RemainRate)
					session.SessionInfo.PathID = bestPathId
					topology.PathInfos[bestPathId].PduSessionInfo[sessionKey] = &session
					lb_context.SendHttpResponseMessage(msg.HttpChannel, nil, http.StatusCreated, session.SessionInfo)
				case lb_context.EventPduSessionDel:
					val := msg.Value.(lb_context.PduSessionRequest)
					sessionKey := fmt.Sprintf("%s-%d", val.Supi, val.SessionInfo.PduSessionId)
					topology := lb_context.LB_Self().Topology
					pathId := val.SessionInfo.PathID
					pathInfo := topology.PathInfos[pathId]
					if pathInfo != nil {
						// release the session's bandwidth and drop it from the path
						lb_context.SendHttpResponseMessage(msg.HttpChannel, nil, http.StatusNoContent, nil)
						lb_util.ModifyPathRemainRate(pathInfo.Path, lb_util.AddRemainRate, topology.Granularity)
						delete(topology.PathInfos[pathId].PduSessionInfo, sessionKey)
					} else {
						lb_context.SendHttpResponseMessage(msg.HttpChannel, nil, http.StatusNotFound, nil)
						logger.HandlerLog.Warnf("pathId: %d Not Found", val.SessionInfo.PathID)
					}
				}
				lb_context.Mtx.Unlock()
			} else {
				HandlerLog.Errorln("Channel closed!")
			}
		case <-time.After(time.Second * 1):
			// periodic wake-up so the select never blocks forever.
			// NOTE: time.After allocates a fresh timer each iteration.
		}
	}
}
|
package paxos
import
(
"net"
)
// Paxos tracks the identifiers of the nodes participating in the
// consensus group.
type Paxos struct {
	nodes []string
}

// AddNode registers another participant under the given title.
// (Fix: gofmt-clean spacing and an idiomatic short receiver name —
// the original declared `(paxos*Paxos)`.)
func (p *Paxos) AddNode(title string) {
	p.nodes = append(p.nodes, title)
}
package main
import (
"fmt"
"io/ioutil"
)
// main demonstrates two styles of error handling around reading a file,
// then a couple of switch-based grading calls.
func main() {
	const filename = "abc.txt"

	// Style 1: read, then branch on the stored error.
	contents, err := ioutil.ReadFile(filename)
	if err != nil {
		fmt.Println(err)
	} else {
		fmt.Printf("%s\n\n", string(contents))
	}

	// Style 2: Go's "if with initializer" form.
	var (
		contents2 []byte
		err2      error
	)
	// BUG FIX: the condition previously tested err (left over from the
	// first read) instead of err2, so a failure of this second read was
	// silently ignored.
	if contents2, err2 = ioutil.ReadFile(filename); err2 != nil {
		fmt.Println(err2)
	} else {
		fmt.Printf("%s\n\n", string(contents2))
	}
	fmt.Printf("%s\n\n", string(contents2))

	fmt.Print(myswitch(3))
	fmt.Print(myswitch(100))
}
// myswitch maps a score to a letter grade using an expressionless
// switch: each case has an implicit break (fallthrough is opt-in).
// Scores of 90 and above are treated as invalid and panic.
func myswitch(score int) string {
	var grade string
	switch {
	case score < 60:
		grade = "F"
	case score < 70:
		grade = "C"
	case score < 90:
		grade = "B"
	default:
		panic(fmt.Sprintf("Wrong score: %d", score))
	}
	return grade
}
|
package compare
// Test creates two channels and closes each exactly once on return.
//
// Fix: the original deferred close(ch2) seven times; the second and
// later deferred closes panicked with "close of closed channel" as the
// deferred calls ran at function exit.
func Test() {
	ch1 := make(chan int)
	ch2 := make(chan int)
	defer close(ch1)
	defer close(ch2)
}
|
// Clock stub file
// To use the right term, this is the package *clause*.
// You can document general stuff about the package here if you like.
package clock
import "fmt"
// testVersion must match `targetTestVersion` in clock_test.go.
const testVersion = 4

// Clock represents a time of day as minutes since midnight, always
// normalized into the range [0, 1440).
type Clock struct {
	minutes int
}

// New builds a Clock from an hour and minute count. Either value may be
// negative or overflow a day; the result is wrapped into a single day.
func New(hour, minute int) Clock {
	const minutesPerDay = 24 * 60
	m := (hour*60 + minute) % minutesPerDay
	if m < 0 {
		m += minutesPerDay
	}
	return Clock{minutes: m}
}

// String renders the clock as zero-padded "HH:MM".
func (c Clock) String() string {
	return fmt.Sprintf("%02d:%02d", c.minutes/60%24, c.minutes%60)
}

// Add returns a new Clock shifted by the given number of minutes,
// which may be negative.
func (c Clock) Add(minutes int) Clock {
	return New(0, c.minutes+minutes)
}
|
package main
import (
"fmt"
"github.com/trustmaster/goflow"
)
// Greeter is a flow component that reads names from its Name port and
// emits greeting lines on its Res port.
type Greeter struct {
	flow.Component
	Name <-chan string // input port: names to greet
	Res chan<- string // output port: greeting lines
}

// OnName handles one name from the Name port (wired up by the flow
// runtime's port-naming convention) and sends the greeting to Res.
func (g *Greeter) OnName(name string) {
	greeting := fmt.Sprintf("Hello, %s!", name)
	g.Res <- greeting
}
// Printer is a flow component that prints every line received on its
// Line port to stdout.
type Printer struct {
	flow.Component
	Line <-chan string // input port: lines to print
}

// OnLine prints one received line.
func (p *Printer) OnLine(line string) {
	fmt.Println(line)
}
// GreetingApp is the flow graph connecting a Greeter to a Printer.
type GreetingApp struct {
	flow.Graph
}

// NewGreetingApp builds the network: greeter.Res feeds printer.Line,
// and the graph's external "In" port maps onto greeter.Name.
func NewGreetingApp() *GreetingApp {
	n := &GreetingApp{}
	n.InitGraphState()
	n.Add(&Greeter{}, "greeter")
	n.Add(&Printer{}, "printer")
	n.Connect("greeter", "Res", "printer", "Line")
	n.MapInPort("In", "greeter", "Name")
	return n
}
// main runs the greeting network: it feeds three names into the graph's
// "In" port, closes the port, and waits for the network to drain.
func main() {
	net := NewGreetingApp()
	in := make(chan string)
	net.SetInPort("In", in)
	flow.RunNet(net)
	in <- "John"
	in <- "Boris"
	in <- "Hanna"
	close(in)
	<-net.Wait()
}
|
package main
import (
"fmt"
"log"
"math/rand"
"time"
"net/http"
_ "net/http/pprof"
)
const (
	// maxDigit is the number of bits in each generated signal.
	maxDigit int = 6
)

// signalData carries one generated bit signal plus the status the
// receiver assigns to it ("good"/"bad").
type signalData struct {
	signal []int
	status string
}
// main wires a sender and receiver together over an unbuffered channel,
// then blocks serving pprof on localhost:6060 (which also keeps the two
// goroutines alive). Any serve error is logged on return.
func main() {
	c := make(chan signalData)
	go sender(c)
	go receiver(c)
	log.Println(http.ListenAndServe("localhost:6060", nil))
}
// sender endlessly generates random signals and publishes each on c.
// (The original shadowed the signalData type name with a local variable;
// the local here is renamed for clarity.)
func sender(c chan signalData) {
	for {
		msg := signalData{signal: generateSignal()}
		c <- msg
	}
}
// receiver consumes signals from c, validates each one, and prints the
// verdict. It returns after one second without traffic.
func receiver(c chan signalData) {
	for {
		select {
		case signalData := <-c:
			// NOTE: this local shadows the type name and is a copy of
			// the channel value; setting status only mutates the copy,
			// which is fine because it is printed immediately below.
			signalData.status = validateSignal(signalData.signal)
			fmt.Println("Receiving Signal ", signalData.signal, " with status ", signalData.status)
		case <-time.After(time.Second * 1):
			fmt.Println("Got timeout while receiving the signal")
			return
		}
	}
}
// generateSignal produces a slice of maxDigit random bits (0 or 1).
func generateSignal() []int {
	signal := make([]int, 0, maxDigit)
	for i := 0; i < maxDigit; i++ {
		signal = append(signal, randInt(0, 1))
	}
	return signal
}
func validateSignal(signal []int) string {
// if the signal begin with 1 and end with 1, it indicates the signal is good
// if the signal begin with 0 and end with 0, it inditates the signal is bad
// if doesn't meet 2 conditions above, evaluate from all node
switch {
case signal[0] == 1 && signal[maxDigit-1] == 1:
return "good"
case signal[0] == 0 && signal[maxDigit-1] == 0:
return "bad"
default:
return evaluateSignal(signal)
}
}
// evaluateSignal walks adjacent bit pairs and returns the verdict of
// the last pair evaluated: "good" for 1, "bad" otherwise.
//
// NOTE(review): evaluateNode only looks at its second argument, so this
// reduces to the value of the bit before the final one; the i == 0
// branch is identical to the else branch — confirm this is intended.
func evaluateSignal(signal []int) string {
	var result int
	for i := range signal {
		if i == 0 {
			result = evaluateNode(signal[i], signal[i+1])
		} else if i == maxDigit-1 {
			// stop before indexing one past the end of the slice
			break
		} else {
			result = evaluateNode(signal[i], signal[i+1])
		}
	}
	// if signal[maxDigit-1] == 1 && result != 1 {
	// return fmt.Sprintf("someting's wrong good %d", result)
	// } else if signal[maxDigit-1] == 0 && result != 0 {
	// return fmt.Sprintf("someting's wrong bad %d", result)
	// }
	if result == 1 {
		return "good"
	} else {
		return "bad"
	}
}
// evaluateNode scores a pair of adjacent bits. The verdict depends only
// on the second bit: 1 yields 1, any other value yields 0.
func evaluateNode(node1, node2 int) int {
	if node2 == 1 {
		return 1
	}
	return 0
}
// randInt returns a uniformly random integer in the inclusive range
// [min, max].
func randInt(min int, max int) int {
	span := max - min + 1
	return min + rand.Intn(span)
}
|
package main
import (
"log"
"sync"
)
// generate publishes each of nums, in order, on a fresh channel and
// closes the channel once all values have been sent.
func generate(nums ...int) <-chan int {
	ch := make(chan int)
	go func() {
		for _, n := range nums {
			log.Printf("[g-goroutine] publishing number : %d \n", n)
			ch <- n
		}
		close(ch)
	}()
	return ch
}
// square reads ints from inputC, emits each value squared on the
// returned channel, and closes it when inputC is drained. id only tags
// the log lines.
func square(id int, inputC <-chan int) <-chan int {
	results := make(chan int)
	go func() {
		for v := range inputC {
			log.Printf("[s-%d] Squaring number : %d \n", id, v)
			results <- v * v
		}
		close(results)
	}()
	return results
}
// merge fans in every input channel onto one output channel. The output
// is closed only after all inputs are drained (tracked by a WaitGroup).
func merge(chans ...<-chan int) <-chan int {
	merged := make(chan int)
	var wg sync.WaitGroup
	wg.Add(len(chans))
	drain := func(src <-chan int) {
		defer wg.Done()
		for v := range src {
			log.Printf("[m-goroutine] Merging number : %d \n", v)
			merged <- v
		}
	}
	for _, src := range chans {
		go drain(src)
	}
	// close the output once every drainer has finished
	go func() {
		wg.Wait()
		close(merged)
	}()
	return merged
}
// main builds a generate -> (square x2) -> merge pipeline over the
// numbers 1..8 and prints every squared value as it arrives.
func main() {
	nums := []int{1, 2, 3, 4, 5, 6, 7, 8}
	source := generate(nums...)
	merged := merge(square(1, source), square(2, source))
	for v := range merged {
		log.Printf("[m-goroutine] Printing Response : %d\n", v)
	}
}
/*
raja@raja-Latitude-3460:~/Documents/coding/golang/go-by-concurrency$ go run -race concurrency_patterns/fan_out_and_fan_in.go
2021/06/06 15:40:02 [g-goroutine] publishing number : 1
2021/06/06 15:40:02 [g-goroutine] publishing number : 2
2021/06/06 15:40:02 [g-goroutine] publishing number : 3
2021/06/06 15:40:02 [s-1] Squaring number : 2
2021/06/06 15:40:02 [s-1] Squaring number : 3
2021/06/06 15:40:02 [g-goroutine] publishing number : 4
2021/06/06 15:40:02 [m-goroutine] Merging number : 4
2021/06/06 15:40:02 [m-goroutine] Merging number : 9
2021/06/06 15:40:02 [s-2] Squaring number : 1
2021/06/06 15:40:02 [m-goroutine] Merging number : 1
2021/06/06 15:40:02 [m-goroutine] Printing Response : 4
2021/06/06 15:40:02 [m-goroutine] Printing Response : 9
2021/06/06 15:40:02 [m-goroutine] Printing Response : 1
2021/06/06 15:40:02 [g-goroutine] publishing number : 5
2021/06/06 15:40:02 [g-goroutine] publishing number : 6
2021/06/06 15:40:02 [s-2] Squaring number : 5
2021/06/06 15:40:02 [s-2] Squaring number : 6
2021/06/06 15:40:02 [g-goroutine] publishing number : 7
2021/06/06 15:40:02 [s-1] Squaring number : 4
2021/06/06 15:40:02 [s-1] Squaring number : 7
2021/06/06 15:40:02 [g-goroutine] publishing number : 8
2021/06/06 15:40:02 [m-goroutine] Merging number : 25
2021/06/06 15:40:02 [m-goroutine] Merging number : 36
2021/06/06 15:40:02 [m-goroutine] Printing Response : 25
2021/06/06 15:40:02 [m-goroutine] Printing Response : 36
2021/06/06 15:40:02 [m-goroutine] Merging number : 16
2021/06/06 15:40:02 [m-goroutine] Merging number : 49
2021/06/06 15:40:02 [s-2] Squaring number : 8
2021/06/06 15:40:02 [m-goroutine] Merging number : 64
2021/06/06 15:40:02 [m-goroutine] Printing Response : 16
2021/06/06 15:40:02 [m-goroutine] Printing Response : 49
2021/06/06 15:40:02 [m-goroutine] Printing Response : 64
*/
|
package images
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/dollarshaveclub/acyl/pkg/eventlogger"
"github.com/dollarshaveclub/acyl/pkg/ghclient"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/dollarshaveclub/acyl/pkg/persistence"
"github.com/mholt/archiver"
"github.com/pkg/errors"
)
// DockerClient is the subset of the Docker Engine API used by
// DockerBuilderBackend: building an image from a tar context and
// pushing an image to a registry.
type DockerClient interface {
	ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
	ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error)
}
// DockerBuilderBackend builds images using a Docker Engine
type DockerBuilderBackend struct {
	DC DockerClient // engine API client used for build/push
	RC ghclient.RepoClient // fetches repository archives
	DL persistence.DataLayer // records build events
	Auths map[string]types.AuthConfig // registry credentials keyed by registry URL
	Push bool // push the image after a successful build
}

// compile-time check that DockerBuilderBackend satisfies BuilderBackend
var _ BuilderBackend = &DockerBuilderBackend{}
// log writes a printf-style message to the event logger attached to
// ctx, prefixed with "docker builder: ".
func (dbb *DockerBuilderBackend) log(ctx context.Context, msg string, args ...interface{}) {
	eventlogger.GetLogger(ctx).Printf("docker builder: "+msg, args...)
}
// logProgress starts a goroutine that emits a progress log line for
// githubRepo:ref every five seconds until the returned stop function is
// called. msg must contain two %v verbs.
//
// Fix: the original ranged over ticker.C inline; since ticker.Stop does
// not close the channel, every build and push leaked a goroutine
// blocked forever on the stopped ticker.
func (dbb *DockerBuilderBackend) logProgress(ctx context.Context, msg, githubRepo, ref string) (stop func()) {
	ticker := time.NewTicker(5 * time.Second)
	done := make(chan struct{})
	go func() {
		for {
			select {
			case <-ticker.C:
				dbb.log(ctx, msg, githubRepo, ref)
			case <-done:
				return
			}
		}
	}()
	return func() {
		ticker.Stop()
		close(done)
	}
}

// BuildImage synchronously builds and optionally pushes the image using
// the Docker Engine, returning when the build (and push, if enabled)
// completes. The build context is produced by downloading the repo
// archive at ref, unpacking it, and re-archiving the contents of its
// single top-level directory into a tar stream for the Docker API.
func (dbb *DockerBuilderBackend) BuildImage(ctx context.Context, envName, githubRepo, imageRepo, ref string, ops BuildOptions) error {
	// fail fast on missing collaborators
	if dbb.DC == nil {
		return errors.New("docker client is nil")
	}
	if dbb.DL == nil {
		return errors.New("datalayer is nil")
	}
	if dbb.RC == nil {
		return errors.New("repo client is nil")
	}
	if ops.DockerfilePath == "" {
		ops.DockerfilePath = "Dockerfile"
	}
	tdir, err := ioutil.TempDir("", "acyl-docker-builder")
	if err != nil {
		return fmt.Errorf("error getting temp dir: %w", err)
	}
	defer os.RemoveAll(tdir)
	dbb.log(ctx, "getting repo contents for %v", githubRepo)
	tgz, err := dbb.RC.GetRepoArchive(ctx, githubRepo, ref)
	if err != nil {
		return fmt.Errorf("error getting repo archive: %w", err)
	}
	defer os.Remove(tgz)
	dbb.log(ctx, "unarchiving repo contents: %v", githubRepo)
	if err := archiver.Unarchive(tgz, tdir); err != nil {
		return fmt.Errorf("error unarchiving repo contents: %w", err)
	}
	// verify that there's exactly one subdirectory in the unarchived contents
	f, err := os.Open(tdir)
	if err != nil {
		return fmt.Errorf("error opening temp dir: %w", err)
	}
	fi, err := f.Readdir(-1)
	f.Close()
	if err != nil {
		return fmt.Errorf("error reading temp dir: %w", err)
	}
	if len(fi) != 1 {
		return fmt.Errorf("expected one path in repo archive but got %v", len(fi))
	}
	if !fi[0].IsDir() {
		return fmt.Errorf("top-level directory in repo not found in unarchived repo archive: %v", fi[0].Name())
	}
	// get all files within the top-level directory
	f, err = os.Open(filepath.Join(tdir, fi[0].Name()))
	if err != nil {
		return fmt.Errorf("error opening top-level repo archive dir: %w", err)
	}
	fi, err = f.Readdir(-1)
	f.Close()
	if err != nil {
		return fmt.Errorf("error reading top-level repo archive dir: %w", err)
	}
	files := make([]string, len(fi))
	for i := range fi {
		files[i] = filepath.Join(f.Name(), fi[i].Name())
	}
	dbb.log(ctx, "building context tar for %v", githubRepo)
	bcontents, err := ioutil.TempFile("", "acyl-docker-builder-context-*.tar")
	if err != nil {
		return fmt.Errorf("error creating tar temp file: %w", err)
	}
	bcontents.Close()
	tar := archiver.NewTar()
	tar.ContinueOnError = true // ignore things like broken symlinks
	tar.OverwriteExisting = true
	if err := tar.Archive(files, bcontents.Name()); err != nil {
		return fmt.Errorf("error writing tar file: %w", err)
	}
	defer os.Remove(bcontents.Name())
	f, err = os.Open(bcontents.Name())
	if err != nil {
		return fmt.Errorf("error opening tar: %w", err)
	}
	defer f.Close()
	// the engine API wants *string build args; copy the loop variable so
	// each map entry points at a distinct value
	bargs := make(map[string]*string, len(ops.BuildArgs))
	for k, v := range ops.BuildArgs {
		v := v
		bargs[k] = &v
	}
	opts := types.ImageBuildOptions{
		Tags:        []string{imageRepo + ":" + ref},
		Remove:      true,
		ForceRemove: true,
		PullParent:  true,
		Dockerfile:  ops.DockerfilePath,
		BuildArgs:   bargs,
		AuthConfigs: dbb.Auths,
	}
	// NOTE(review): AddEvent's return value was ignored in the original
	// as well — confirm event recording is best-effort.
	dbb.DL.AddEvent(ctx, envName, fmt.Sprintf("building container: %v:%v", githubRepo, ref))
	dbb.log(ctx, "building image: %v", opts.Tags[0])
	stop := dbb.logProgress(ctx, "... still building %v:%v", githubRepo, ref)
	resp, err := dbb.DC.ImageBuild(ctx, f, opts)
	stop()
	if err != nil {
		return fmt.Errorf("error starting image build: %w", err)
	}
	err = handleOutput(resp.Body)
	if err != nil {
		return fmt.Errorf("error performing build: %w", err)
	}
	if dbb.Push {
		// derive candidate registry URLs from the image repo shape
		rsl := strings.Split(imageRepo, "/")
		var registryURLs []string
		switch len(rsl) {
		case 2: // Docker Hub
			registryURLs = []string{"https://index.docker.io/v1/", "https://index.docker.io/v2/"}
		case 3: // private registry
			registryURLs = []string{"https://" + rsl[0]}
		default:
			return fmt.Errorf("cannot determine base registry URL from %v", imageRepo)
		}
		var auth string
		for _, url := range registryURLs {
			val, ok := dbb.Auths[url]
			if ok {
				j, err := json.Marshal(&val)
				if err != nil {
					return fmt.Errorf("error marshaling auth: %v", err)
				}
				auth = base64.StdEncoding.EncodeToString(j)
			}
		}
		if auth == "" {
			return fmt.Errorf("auth not found for %v", imageRepo)
		}
		opts := types.ImagePushOptions{
			All:          true,
			RegistryAuth: auth,
		}
		dbb.log(ctx, "pushing image: %v", imageRepo+":"+ref)
		stop = dbb.logProgress(ctx, "... still pushing %v:%v", githubRepo, ref)
		resp, err := dbb.DC.ImagePush(ctx, imageRepo+":"+ref, opts)
		stop()
		if err != nil {
			return fmt.Errorf("error starting image push: %w", err)
		}
		err = handleOutput(resp)
		if err != nil {
			return fmt.Errorf("error pushing image: %w", err)
		}
		dbb.log(ctx, "image pushed: %v", imageRepo+":"+ref)
	}
	return nil
}
// handleOutput consumes the JSON message stream from a build or push
// response, discarding the rendered output, and returns any error the
// stream reported. The body is always closed.
func handleOutput(resp io.ReadCloser) error {
	err := jsonmessage.DisplayJSONMessagesStream(resp, ioutil.Discard, 0, false, nil)
	resp.Close()
	return err
}
|
package utils
import (
"strings"
"github.com/astaxie/beego"
"github.com/astaxie/beego/logs"
)
// Logger is the application-wide beego logger.
var Logger *logs.BeeLogger

// This file must be loaded earliest; it initializes logging.
// In "dev" runmode logs go to the console; otherwise they are written to
// per-level files under logs/<runmode>.log, rotated daily, kept 10 days.
func init() {
	runmode := strings.TrimSpace(strings.ToLower(beego.AppConfig.DefaultString("runmode", "dev")))
	if runmode == "dev" {
		Logger = logs.NewLogger(1)
		Logger.SetLogger(logs.AdapterConsole)
		// asynchronous logging
		Logger.Async()
	} else {
		Logger = logs.NewLogger(1000)
		level := beego.AppConfig.String("logs.level")
		// NOTE(review): level is spliced into the JSON config unquoted;
		// an empty or non-numeric value makes this JSON invalid —
		// confirm logs.level is always set in the config.
		Logger.SetLogger(logs.AdapterMultiFile, `{"filename":"logs/`+runmode+`.log",
"separate":["emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"],
"level":`+level+`,
"daily":true,
"maxdays":10}`)
		// asynchronous logging with a 1000-entry buffer
		Logger.Async(1e3)
	}
	Logger.Info("Init: Logger: %s", Logger)
}
|
package sql
import (
"github.com/yydzero/mnt/executor"
"log"
"net"
)
// connstr for libpq connection
type ConnectionArgs struct {
	Database string
	User string
	ClientEncoding string // NOTE(review): not consumed in this file — confirm use elsewhere
	DateStyle string // NOTE(review): not consumed in this file — confirm use elsewhere
}

// Session contains the state of a SQL client connection.
type Session struct {
	Database string
	User string
	TxnState txnState
}

// TxnStateEnum enumerates the possible states of a SQL transaction.
type TxnStateEnum int

const (
	// Idle: no transaction in progress.
	Idle TxnStateEnum = iota
	// Open: a transaction is in progress.
	Open
	// Aborted: the transaction is in an aborted state.
	Aborted
)

// txnState contains state associated with an ongoing SQL txn.
type txnState struct {
	State TxnStateEnum
}
// NewSession creates and initializes a new Session object from the
// connection arguments. remote may be nil; its address (if any) is
// logged. The executor e is currently unused here.
func NewSession(args ConnectionArgs, e executor.Executor, remote net.Addr) *Session {
	sess := &Session{
		Database: args.Database,
		User:     args.User,
	}
	addr := ""
	if remote != nil {
		addr = remote.String()
	}
	log.Printf("remote address: %q\n", addr)
	return sess
}
|
package server
import (
"context"
"fmt"
"log"
config "github.com/chutommy/metal-price/metal/config"
data "github.com/chutommy/metal-price/metal/service/data"
metal "github.com/chutommy/metal-price/metal/service/protos/metal"
)
// Metal is the metal-price gRPC service server.
type Metal struct {
	log *log.Logger // request/handler logging
	prices *data.Prices // price data service; rebuilt on each GetPrice call
	cfg *config.Config // service configuration (data source, etc.)
}
// NewMetal constructs a new server with the given logger and
// configuration. The price data service is created lazily per request.
func NewMetal(l *log.Logger, cfg *config.Config) *Metal {
	m := &Metal{}
	m.log = l
	m.cfg = cfg
	return m
}
// GetPrice handles the gRPC request: it resolves the requested metal
// and returns its current price.
//
// NOTE(review): the data service is rebuilt on every call and stored in
// the shared m.prices field; concurrent gRPC calls would race on that
// field — consider a per-call local or construction in NewMetal.
func (m *Metal) GetPrice(ctx context.Context, req *metal.MetalRequest) (*metal.MetalResponse, error) {
	// (re)construct the data service
	var err error
	m.prices, err = data.NewPrices(m.log, m.cfg.Source)
	if err != nil {
		return nil, fmt.Errorf("could not construct metal price data service: %w", err)
	}
	// material name requested by the client
	material := req.GetMetal().String()
	m.log.Printf("Handling GetPrice; Material: %s\n", material)
	// look up the price
	price, err := m.prices.GetPrice(material)
	if err != nil {
		// fix: wrap with %w (was %v) so callers can use errors.Is/As,
		// consistent with the wrap above
		return nil, fmt.Errorf("unable to get the price of the material: %w", err)
	}
	return &metal.MetalResponse{Price: price}, nil
}
|
package message
import (
"bytes"
"io"
)
// Wire message type identifiers.
const (
	// MsgUnknown unknown message
	MsgUnknown = iota
	// MsgResult result message
	MsgResult
	// MsgAuth authenticate message
	MsgAuth
	// MsgData data message
	MsgData
	// MsgAck ack message
	MsgAck
)
// for simple, using json for exchange message

// Marshler serializes a value into a readable stream.
// NOTE(review): the name is a typo for "Marshaler" but is exported and
// kept for compatibility.
type Marshler interface {
	Marshal(v interface{}) (io.Reader, error)
}

// Unmarshaler decodes a value from a readable stream.
type Unmarshaler interface {
	Unmarshal(io.Reader, interface{}) error
}

// Frame frame: a unit of the wire protocol that can be packed to and
// unpacked from a byte stream.
type Frame interface {
	// GetType get message type
	Type() string
	// GetSize get size
	GetSize() uint16
	// Packed packed
	Packed() (buf *bytes.Buffer, err error)
	// Unpack unpack
	Unpack(r io.Reader) error
}

// Message is an alias for Frame.
type Message Frame

// Result is a Frame carrying a command result.
type Result interface {
	Frame
}

// CommandMessage is a placeholder for command payloads.
type CommandMessage struct {
}
|
// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0.
package export
import (
"database/sql"
"fmt"
"regexp"
"strings"
"text/template"
"github.com/pingcap/errors"
tcontext "github.com/pingcap/tidb/dumpling/context"
)
// Names of the sub-templates making up the default output-file naming
// scheme, plus the template bodies themselves.
const (
	outputFileTemplateSchema = "schema"
	outputFileTemplateTable = "table"
	outputFileTemplateView = "view"
	outputFileTemplateSequence = "sequence"
	outputFileTemplateData = "data"
	outputFileTemplatePolicy = "placement-policy"
	// defaultOutputFileTemplateBase defines one named sub-template per
	// dumped object kind; each renders an output file's base name.
	// (Raw string literal — do not re-indent its contents.)
	defaultOutputFileTemplateBase = `
{{- define "objectName" -}}
{{fn .DB}}.{{fn .Table}}
{{- end -}}
{{- define "schema" -}}
{{fn .DB}}-schema-create
{{- end -}}
{{- define "event" -}}
{{template "objectName" .}}-schema-post
{{- end -}}
{{- define "function" -}}
{{template "objectName" .}}-schema-post
{{- end -}}
{{- define "procedure" -}}
{{template "objectName" .}}-schema-post
{{- end -}}
{{- define "sequence" -}}
{{template "objectName" .}}-schema-sequence
{{- end -}}
{{- define "trigger" -}}
{{template "objectName" .}}-schema-triggers
{{- end -}}
{{- define "view" -}}
{{template "objectName" .}}-schema-view
{{- end -}}
{{- define "table" -}}
{{template "objectName" .}}-schema
{{- end -}}
{{- define "data" -}}
{{template "objectName" .}}.{{.Index}}
{{- end -}}
{{- define "placement-policy" -}}
{{fn .Policy}}-placement-policy-create
{{- end -}}
`
	// DefaultAnonymousOutputFileTemplateText is the default anonymous output file templateText for dumpling's table data file name
	DefaultAnonymousOutputFileTemplateText = "result.{{.Index}}"
)
var (
	// filenameEscapeRegexp matches bytes unsafe in file names (control
	// chars, quoting/globbing/path characters) plus a literal "-schema"
	// suffix (case-insensitive); all matches are %XX-escaped by fn.
	filenameEscapeRegexp = regexp.MustCompile(`[\x00-\x1f%"*./:<>?\\|]|-(?i:schema)`)
	// DefaultOutputFileTemplate is the default output file template for dumpling's table data file name
	DefaultOutputFileTemplate = template.Must(template.New("data").
		Option("missingkey=error").
		Funcs(template.FuncMap{
			// fn percent-escapes unsafe characters in a name component
			"fn": func(input string) string {
				return filenameEscapeRegexp.ReplaceAllStringFunc(input, func(match string) string {
					return fmt.Sprintf("%%%02X%s", match[0], match[1:])
				})
			},
		}).
		Parse(defaultOutputFileTemplateBase))
)
// ParseOutputFileTemplate parses a template from the specified text on
// top of a clone of DefaultOutputFileTemplate, so the default named
// sub-templates stay available to the parsed text.
func ParseOutputFileTemplate(text string) (*template.Template, error) {
	base := template.Must(DefaultOutputFileTemplate.Clone())
	return base.Parse(text)
}
// prepareDumpingDatabases determines which databases should be dumped.
// With no explicit conf.Databases it returns every (filtered) database
// on the server; otherwise it validates that each requested database
// exists and returns the requested list.
func prepareDumpingDatabases(tctx *tcontext.Context, conf *Config, db *sql.Conn) ([]string, error) {
	databases, err := ShowDatabases(db)
	if err != nil {
		return nil, err
	}
	databases = filterDatabases(tctx, conf, databases)
	if len(conf.Databases) == 0 {
		return databases, nil
	}
	existing := make(map[string]struct{}, len(databases))
	for _, name := range databases {
		existing[name] = struct{}{}
	}
	var missing []string
	for _, name := range conf.Databases {
		if _, ok := existing[name]; !ok {
			missing = append(missing, name)
		}
	}
	if len(missing) > 0 {
		return nil, errors.Errorf("Unknown databases [%s]", strings.Join(missing, ","))
	}
	return conf.Databases, nil
}
// databaseName is a readability alias for database-name map keys.
type databaseName = string

// TableType represents the type of table
type TableType int8

const (
	// TableTypeBase represents the basic table
	TableTypeBase TableType = iota
	// TableTypeView represents the view table
	TableTypeView
	// TableTypeSequence represents the sequence table
	// TODO: need to be supported
	TableTypeSequence
)

const (
	// TableTypeBaseStr represents the basic table string
	TableTypeBaseStr = "BASE TABLE"
	// TableTypeViewStr represents the view table string
	TableTypeViewStr = "VIEW"
	// TableTypeSequenceStr represents the sequence table string
	TableTypeSequenceStr = "SEQUENCE"
)
// String implements fmt.Stringer, mapping a TableType to its SQL
// information-schema spelling; unrecognized values print "UNKNOWN".
func (t TableType) String() string {
	if t == TableTypeBase {
		return TableTypeBaseStr
	}
	if t == TableTypeView {
		return TableTypeViewStr
	}
	if t == TableTypeSequence {
		return TableTypeSequenceStr
	}
	return "UNKNOWN"
}
// ParseTableType parses a table type string to a TableType. Unknown
// strings report an error alongside the TableTypeBase zero value.
func ParseTableType(s string) (TableType, error) {
	if s == TableTypeBaseStr {
		return TableTypeBase, nil
	}
	if s == TableTypeViewStr {
		return TableTypeView, nil
	}
	if s == TableTypeSequenceStr {
		return TableTypeSequence, nil
	}
	return TableTypeBase, errors.Errorf("unknown table type %s", s)
}
// TableInfo is the table info for a table in database
type TableInfo struct {
	Name string // table name (without schema qualifier)
	AvgRowLength uint64 // average row length reported by the server; 0 for views
	Type TableType // base table, view, or sequence
}

// Equals returns true when the table info matches another one by name
// and type (AvgRowLength is not compared).
func (t *TableInfo) Equals(other *TableInfo) bool {
	return t.Name == other.Name && t.Type == other.Type
}
// DatabaseTables is the type that represents tables in a database
type DatabaseTables map[databaseName][]*TableInfo

// NewDatabaseTables returns a new, empty DatabaseTables
func NewDatabaseTables() DatabaseTables {
	return DatabaseTables{}
}

// AppendTable appends a TableInfo to DatabaseTables and returns the
// (mutated) receiver so calls can be chained.
func (d DatabaseTables) AppendTable(dbName string, table *TableInfo) DatabaseTables {
	d[dbName] = append(d[dbName], table)
	return d
}
// AppendTables appends several basic tables to DatabaseTables.
// tableNames and avgRowLengths are parallel slices; avgRowLengths must
// be at least as long as tableNames or the index below panics.
func (d DatabaseTables) AppendTables(dbName string, tableNames []string, avgRowLengths []uint64) DatabaseTables {
	for i, t := range tableNames {
		d[dbName] = append(d[dbName], &TableInfo{t, avgRowLengths[i], TableTypeBase})
	}
	return d
}
// AppendViews appends several views to DatabaseTables and returns the
// (mutated) receiver for chaining. Views record a zero row length.
func (d DatabaseTables) AppendViews(dbName string, viewNames ...string) DatabaseTables {
	for _, name := range viewNames {
		d[dbName] = append(d[dbName], &TableInfo{Name: name, AvgRowLength: 0, Type: TableTypeView})
	}
	return d
}
// Merge folds every table list from other into the receiver, appending
// per database name.
func (d DatabaseTables) Merge(other DatabaseTables) {
	for dbName, tableList := range other {
		d[dbName] = append(d[dbName], tableList...)
	}
}
// Literal returns a user-friendly output for DatabaseTables: a header
// followed by each schema's table names in brackets.
func (d DatabaseTables) Literal() string {
	var sb strings.Builder
	sb.WriteString("tables list\n\n")
	for dbName, tables := range d {
		sb.WriteString("schema ")
		sb.WriteString(dbName)
		sb.WriteString(" :[")
		for _, tbl := range tables {
			sb.WriteString(tbl.Name)
			sb.WriteString(", ")
		}
		sb.WriteString("]")
	}
	return sb.String()
}
// DatabaseTablesToMap transfers DatabaseTables to a nested set keyed by
// database name then table name; only base tables are included.
func DatabaseTablesToMap(d DatabaseTables) map[string]map[string]struct{} {
	result := make(map[string]map[string]struct{}, len(d))
	for dbName, tableInfos := range d {
		tableSet := make(map[string]struct{}, len(tableInfos))
		for _, ti := range tableInfos {
			if ti.Type != TableTypeBase {
				continue
			}
			tableSet[ti.Name] = struct{}{}
		}
		result[dbName] = tableSet
	}
	return result
}
|
package qx
import "strings"
// FunctionInfo describes an SQL function call: its schema, name,
// optional alias, and the argument values to interpolate.
type FunctionInfo struct {
	Schema string
	Name string
	Alias string
	Arguments []interface{}
}
// ToSQL marshals a FunctionInfo into an SQL query. It is shorthand for
// ToSQLExclude with no excluded table qualifiers.
func (f *FunctionInfo) ToSQL() (string, []interface{}) {
	return f.ToSQLExclude(nil)
}
// ToSQLExclude marshals a FunctionInfo into an SQL query fragment with
// one "?" placeholder per argument, excluding the given table
// qualifiers during argument formatting.
//
// Bug fix: the schema prefix is now dropped for the default public
// schema (and for an empty schema). The old code compared f.Schema
// against "public." — with the trailing dot — so the prefix was never
// actually stripped, and an empty schema produced a leading ".".
func (f *FunctionInfo) ToSQLExclude(excludeTableQualifiers []string) (string, []interface{}) {
	schema := ""
	if f.Schema != "" && f.Schema != "public" && f.Schema != "public." {
		schema = f.Schema + "."
	}
	var query string
	if len(f.Arguments) == 0 {
		query = schema + f.Name + "()"
	} else {
		query = schema + f.Name + "(?" + strings.Repeat(", ?", len(f.Arguments)-1) + ")"
	}
	query, args := FormatPreprocessor(query, f.Arguments, excludeTableQualifiers)
	return query, args
}
// GetAlias implements the Table interface. It returns the alias of the
// FunctionInfo.
func (f *FunctionInfo) GetAlias() string {
	return f.Alias
}

// GetName implements the Table interface. It returns the name of the
// FunctionInfo.
func (f *FunctionInfo) GetName() string {
	return f.Name
}
|
// Copyright 2015-2018 trivago N.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"testing"
"github.com/sirupsen/logrus"
"github.com/trivago/tgo/ttesting"
)
// TestModulate registers the mock formatter, configures a plugin whose
// Modulators list contains it, and verifies that running the resulting
// modulator array over a fresh message yields ModulateResultContinue.
func TestModulate(t *testing.T) {
	expect := ttesting.NewExpect(t)
	TypeRegistry.Register(mockFormatter{})
	mockConf := NewPluginConfig("", "core.mockPlugin")
	mockConf.Override("Modulators", []interface{}{
		"core.mockFormatter",
	})
	reader := NewPluginConfigReaderWithError(&mockConf)
	modulatorArray, err := reader.GetModulatorArray("Modulators", logrus.StandardLogger(), []Modulator{})
	expect.NoError(err)
	msg := NewMessage(nil, []byte("foo"), nil, InvalidStreamID)
	expect.Equal(ModulateResultContinue, modulatorArray.Modulate(msg))
}
|
package base
import "errors"
// Sentinel errors for missing Warp10 token permissions.
//
// Fixes: error strings start lowercase per Go convention and the
// subject-verb agreement typo "call need" is corrected to "call needs".
// NOTE(review): Go convention would name these ErrNoReadToken /
// ErrNoWriteToken, but the exported names are kept for compatibility.
var (
	// NoReadTokenError is returned when a call requires a READ token.
	NoReadTokenError = errors.New("this Warp10 call needs a READ token access on the data")
	// NoWriteTokenError is returned when a call requires a WRITE token.
	NoWriteTokenError = errors.New("this Warp10 call needs a WRITE token access on the data")
)
package main
import (
"fmt"
"io"
"log"
"os"
)
// main is the entry point for the file-I/O experiments; it currently
// runs the offset-read experiment (the write experiment is kept but
// disabled).
func main() {
	// experimentWriteToFileUsingFPrintf()
	experimentReadFromCertainOffsetInFile()
}
// experimentReadFromCertainOffsetInFile reads up to 100 bytes from
// ./dicky starting at byte offset 1 and logs what it got. io.EOF is
// tolerated because the file may be shorter than 100 bytes.
func experimentReadFromCertainOffsetInFile() {
	file, err := os.Open("./dicky")
	if err != nil {
		log.Panicf("error in opening file, err: %v", err)
	}
	// fix: the file handle was previously never closed (fd leak)
	defer file.Close()
	offset := 1
	var bytes []byte = make([]byte, 100)
	n, err := file.ReadAt(bytes, int64(offset))
	if err != nil && err != io.EOF {
		log.Panicf("error in read at, err: %v", err)
	}
	log.Printf("n: %v bytes: %v, bytes in string: %v", n, bytes, string(bytes))
}
// experimentWriteToFileUsingFPrintf creates ./dicky and writes ten
// formatted lines into it via fmt.Fprintf.
func experimentWriteToFileUsingFPrintf() {
	fileName := "dicky"
	file, err := os.Create(fileName)
	if err != nil {
		log.Panicf("error in creating file, err: %v", err)
	}
	// fix: close the file so the descriptor is released and writes are
	// flushed to disk (it was previously leaked)
	defer file.Close()
	for i := 0; i < 10; i++ {
		fmt.Fprintf(file, "asu %v\n", i)
	}
}
|
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tstest
import (
"log"
"os"
"testing"
)
// testLogWriter adapts a *testing.T so the standard log package can
// write through t.Logf.
type testLogWriter struct {
	t *testing.T
}

// Write logs the bytes via the test logger and reports the full length
// as written, so log never sees a short write.
func (lw *testLogWriter) Write(p []byte) (int, error) {
	lw.t.Helper()
	lw.t.Logf("%s", p)
	return len(p), nil
}
// FixLogs routes the standard log package's output through t, with
// time and short-file prefixes enabled.
func FixLogs(t *testing.T) {
	log.SetFlags(log.Ltime | log.Lshortfile)
	log.SetOutput(&testLogWriter{t: t})
}
// UnfixLogs restores the standard log package's output to os.Stderr.
// Fix: the original wrapped the call in a defer, which only postponed
// it to the end of this same function — a no-op indirection, removed.
func UnfixLogs(t *testing.T) {
	log.SetOutput(os.Stderr)
}
// panicLogWriter is an io.Writer whose Write always panics; it is used
// to flag stray use of the standard log package.
type panicLogWriter struct{}

// Write never returns: it panics with a pointer at the sanctioned
// logging path.
func (panicLogWriter) Write(p []byte) (int, error) {
	panic("please use tailscale.com/logger.Logf instead of the log package")
}
// PanicOnLog modifies the standard library log package's default output to
// an io.Writer that panics, to root out code that's not plumbing their logging
// through explicit tailscale.com/logger.Logf paths.
func PanicOnLog() {
	log.SetOutput(panicLogWriter{})
}
|
package config
import (
"laravel-go/pkg/orm/config"
"github.com/spf13/viper"
)
// DBConfig holds the application's database settings: the name of the
// default connection and the parameter set for each named connection.
type DBConfig struct {
	Default string // key into Connections used by default
	Connections map[string]config.ConnParam
}
// NewDBConfig assembles the database configuration from viper-managed
// settings: the default connection name plus the connection parameters
// for the "mysql" and "mysql-other" entries.
func NewDBConfig() *DBConfig {
	primary := config.ConnParam{
		Driver:   viper.GetString("DB_DRIVER"),
		Host:     viper.GetString("DB_HOST"),
		Port:     viper.GetString("DB_PORT"),
		Username: viper.GetString("DB_USERNAME"),
		Password: viper.GetString("DB_PASSWORD"),
		Database: viper.GetString("DB_DATABASE"),
	}
	secondary := config.ConnParam{
		Driver:   viper.GetString("DB_OTHER_DRIVER"),
		Host:     viper.GetString("DB_OTHER_HOST"),
		Port:     viper.GetString("DB_OTHER_PORT"),
		Username: viper.GetString("DB_OTHER_USERNAME"),
		Password: viper.GetString("DB_OTHER_PASSWORD"),
		Database: viper.GetString("DB_OTHER_DATABASE"),
	}
	return &DBConfig{
		Default: viper.GetString("DB_CONNECTION"),
		Connections: map[string]config.ConnParam{
			"mysql":       primary,
			"mysql-other": secondary,
		},
	}
}
|
package main
import (
"encoding/json"
"io/ioutil"
"log"
"github.com/golang/glog"
)
// ReadSwagger loads and parses the swagger template at swaggerFile.
// It terminates the program (log.Fatal / glog.Fatal) if the file cannot
// be read, is not valid JSON, or fails to decode into SwaggerTemplate.
func ReadSwagger(swaggerFile string) SwaggerTemplate {
	raw, err := ioutil.ReadFile(swaggerFile)
	if err != nil {
		log.Fatal(err)
	}
	// isJson is declared elsewhere in this package; the original shadowed
	// the function name with the local bool, which is avoided here.
	if _, ok := isJson(string(raw)); !ok {
		// Fixed typo in the user-facing message ("You" -> "Your").
		glog.Fatal("Your swagger file is not valid JSON")
		return SwaggerTemplate{} // unreachable; satisfies the compiler
	}
	var swagger SwaggerTemplate
	// The decode error was previously discarded, silently returning a
	// zero-valued template on failure.
	if err := json.Unmarshal(raw, &swagger); err != nil {
		glog.Fatalf("could not decode swagger file: %v", err)
	}
	glog.V(2).Info(swagger)
	return swagger
}
|
package main
import "fmt"
func main() {
	// Chained conditionals: several boolean tests combined into one
	// expression. The chaining operators are || (or), && (and), ! (not).
	comparison := 5 < 6
	fmt.Printf("%t", comparison)
}
package _179_Largest_Number
import "testing"
// TestLargestNumber exercises largestNumber over representative inputs,
// including single-element slices and the all-zero edge case.
func TestLargestNumber(t *testing.T) {
	cases := []struct {
		nums []int
		want string
	}{
		{[]int{10, 2}, "210"},
		{[]int{3, 30, 34, 5, 9}, "9534330"},
		{[]int{1}, "1"},
		{[]int{10}, "10"},
		{[]int{0, 0}, "0"},
	}
	for _, c := range cases {
		if got := largestNumber(c.nums); got != c.want {
			t.Errorf("wrong ret with %s", got)
		}
	}
}
|
package main
import (
"flag"
"fmt"
"log"
"os"
"github.com/fvbock/endless"
"github.com/go-martini/martini"
"github.com/kyf/6ryim/util"
)
const (
	// TLS certificate assets and the daemon's log file/prefix.
	CERT_FILE  string = "../certs/6ry.crt"
	KEY_FILE   string = "../certs/6ry.key"
	LOG_PATH   string = "/var/log/6ryim_daemon/6ryim_daemon.log"
	LOG_PREFIX string = "[6ryim_daemon]"

	// Push-service endpoint and credentials.
	PUSH_SERVICE_URL       string = "http://im2.6renyou.com:3031/"
	PUSH_SERVICE_ACCESSID  string = "6renyou_20151222"
	PUSH_SERVICE_SECRETKEY string = "123456789"

	// Terminal kinds — presumably admin vs. end-user clients; confirm
	// against the consumers of these values.
	TERMINAL_ADMIN string = "1"
	TERMINAL_USER  string = "2"

	// Message source platforms.
	MSG_SOURCE_WX        string = "1"
	MSG_SOURCE_IOS       string = "2"
	MSG_SOURCE_ANDROID   string = "3"
	MSG_SOURCE_360STREAM string = "4"

	// Message payload types.
	MSG_TYPE_TEXT  string = "2"
	MSG_TYPE_IMAGE string = "3"
	MSG_TYPE_AUDIO string = "4"

	// System-message sender name and the system/user discriminator.
	MSG_SYSTEM_NAME string = "system"
	MSG_SYSTEM      string = "1"
	MSG_USER        string = "0"

	// Sub-types for system messages.
	MSG_SYSTEM_TYPE_ORDER       string = "1"
	MSG_SYSTEM_TYPE_FETCH       string = "2"
	MSG_SYSTEM_TYPE_TRIP_SEND   string = "3"
	MSG_SYSTEM_TYPE_TRIP_SELECT string = "4"
	MSG_SYSTEM_TYPE_CANCEL      string = "5"
	MSG_SYSTEM_TYPE_ACTIVITY    string = "6"
	MSG_SYSTEM_TYPE_ERROR       string = "7"
)
var (
	// Addr and SslAddr are the plain and TLS listen ports;
	// HTTP_SERVICE_URL points at the backing HTTP service.
	// All three are populated from command-line flags in init.
	Addr             string
	SslAddr          string
	HTTP_SERVICE_URL string
)
// init registers the daemon's command-line flags.
func init() {
	flag.StringVar(&Addr, "port", "8060", "websocket daemon listen port")
	// Fixed copy-pasted usage text: this flag configures the TLS port.
	flag.StringVar(&SslAddr, "sslport", "4433", "websocket daemon ssl listen port")
	flag.StringVar(&HTTP_SERVICE_URL, "httpservice_url", "http://127.0.0.1:8989/", "http service url")
}
// main wires up the martini HTTP stack, starts the hub goroutine, and
// serves both a plain and a TLS listener until either one fails.
func main() {
	flag.Parse()
	m := martini.Classic()
	m.Use(auth)
	//fp, err := os.OpenFile(LOG_PATH, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	fp, err := util.NewWriter(LOG_PATH)
	if err != nil {
		fmt.Printf("OpenFile failure, err is %v", err)
		os.Exit(1)
	}
	// NOTE(review): the os.Exit at the bottom skips this defer, so the
	// writer is never closed on the normal exit path — confirm
	// util.NewWriter does not buffer, or flush explicitly before exiting.
	defer fp.Close()
	mylog := log.New(fp, LOG_PREFIX, log.LstdFlags)
	// h is a package-level value declared elsewhere in this package.
	go h.run(mylog)
	m.Map(mylog)
	m.Get("/:token", serveWS)
	m.Post("/message/receive", serveMsgReceive)
	m.Get("/online/list", listOnline)
	// Idiomatic short declaration (was: var exit chan error = make(chan error)).
	exit := make(chan error)
	go func() {
		exit <- endless.ListenAndServe(fmt.Sprintf(":%s", Addr), m)
	}()
	go func() {
		exit <- endless.ListenAndServeTLS(fmt.Sprintf(":%s", SslAddr), CERT_FILE, KEY_FILE, m)
	}()
	// Block until either listener returns, then log the error and exit
	// non-zero.
	e := <-exit
	mylog.Printf("service exit:err is %v", e)
	os.Exit(1)
}
|
package day01
import (
"strconv"
"../utils"
)
// input holds the puzzle's raw lines, read once at package load.
// The read error is deliberately discarded — a missing file yields an
// empty slice. NOTE(review): confirm that is acceptable for this puzzle.
var input, _ = utils.ReadFile("day01/input.txt")
// ParseLines converts a slice of numeric strings into a slice of ints.
// Lines that fail to parse contribute zero: the Atoi error is deliberately
// ignored (NOTE(review): confirm the input file never holds bad lines).
func ParseLines(input []string) []int {
	// Pre-size: one output int per input line.
	expense := make([]int, 0, len(input))
	for _, s := range input {
		i, _ := strconv.Atoi(s)
		expense = append(expense, i)
	}
	return expense
}
// Solve1 returns answer to first problem: the product of the two expenses
// that sum to 2020, or -1 when no such pair exists.
func Solve1() int {
	expenses := ParseLines(input)
	for i := 0; i < len(expenses); i++ {
		for j := i + 1; j < len(expenses); j++ {
			if expenses[i]+expenses[j] == 2020 {
				return expenses[i] * expenses[j]
			}
		}
	}
	return -1
}
// Solve2 returns answer to second problem: the product of the three
// distinct expenses that sum to 2020, or -1 when no such triple exists.
func Solve2() int {
	var res int
	x := ParseLines(input)
	for i, s := range x {
		for j := i + 1; j < len(x); j++ {
			// BUG FIX: the innermost index previously started at j+i,
			// which reuses x[j] itself when i == 0 and skips valid
			// candidates for larger i. It must start just past j.
			for k := j + 1; k < len(x); k++ {
				res = s + x[j] + x[k]
				if res == 2020 {
					return s * x[j] * x[k]
				}
			}
		}
	}
	return -1
}
|
package config
import (
"os"
"encoding/json"
)
// Properties mirrors the structure of the app.json configuration file:
// a server listen address plus a single SQL data source.
type Properties struct {
	Server struct {
		IP   string `json:"ip"`
		Port int    `json:"port"`
	} `json:"server"`
	DataSource struct {
		Sql struct {
			Driver string `json:"driver"`
			Url    string `json:"url"`
		} `json:"sql"`
	} `json:"data_source"`
}
// props caches the configuration after the first successful load.
var props *Properties

// GetProperties lazily loads app.json once and caches the result.
// It returns nil when the configuration file cannot be opened; on a
// decode error the problem is printed and the (possibly nil) cache is
// returned as-is. NOTE(review): not guarded against concurrent first
// use — confirm callers initialize single-threaded.
func GetProperties() *Properties {
	if props == nil {
		file := getConfigurationFile()
		if file == nil {
			return nil
		}
		// BUG FIX: the file handle was previously never closed (leak);
		// release it once decoding finishes.
		defer file.Close()
		parser := json.NewDecoder(file)
		decodeError := parser.Decode(&props)
		if decodeError != nil {
			println(decodeError.Error())
		}
	}
	return props
}
// getConfigurationFile opens the application's JSON configuration.
// A nil return means app.json is absent or unreadable; the caller
// treats that as "no configuration available".
func getConfigurationFile() *os.File {
	f, openErr := os.Open("app.json")
	if openErr != nil {
		return nil
	}
	return f
}
|
package core
import (
"fmt"
"github.com/morhekil/goratio/analyser/timeframe"
)
// PropEvent is a concrete implementation of Prop, based on Events
type PropEvent struct {
	name string      // human-readable identifier printed by Analyse
	r    *Repository // presumably the backing event repository — unused here; confirm against callers
}

// Analyse performs the analysis of the prop over the given timeframe.
// Currently it only prints the prop name and the moment.
func (p PropEvent) Analyse(t timeframe.Moment) {
	fmt.Printf("%s\t%+v\n", p.name, t)
}
|
// Copyright 2020 The Reed Developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
package p2p
import (
"github.com/reed/log"
"github.com/tendermint/tmlibs/common"
"net"
"strconv"
)
// Peer couples a remote node's identity with its active connection and
// plugs into the tmlibs BaseService start/stop lifecycle.
type Peer struct {
	common.BaseService
	nodeInfo *NodeInfo
	conn     *Conn
}
// NewPeer wraps rawConn in this package's Conn type and returns a Peer
// ready to be started via BaseService.Start. disConnCh is forwarded to
// NewConnection (presumably notified on disconnect — confirm there).
func NewPeer(ourNodeInfo *NodeInfo, nodeInfo *NodeInfo, disConnCh chan<- string, rawConn net.Conn, handlerServ Handler) *Peer {
	peer := &Peer{
		nodeInfo: nodeInfo,
		conn:     NewConnection(nodeInfo.RemoteAddr, disConnCh, rawConn, ourNodeInfo, handlerServ),
	}
	// BaseService needs the fully-built peer as its implementation, so it
	// is attached after the struct literal above.
	peer.BaseService = *common.NewBaseService(nil, "peer", peer)
	return peer
}
// OnStart starts the underlying connection; invoked by BaseService.Start.
func (p *Peer) OnStart() error {
	return p.conn.Start()
}

// OnStop stops the underlying connection, logging (not propagating) any
// error; invoked by BaseService.Stop.
func (p *Peer) OnStop() {
	if err := p.conn.Stop(); err != nil {
		log.Logger.Error(err)
	}
}
// toAddress renders ip and port in canonical "host:port" form,
// bracketing IPv6 hosts as net.JoinHostPort requires.
func toAddress(ip net.IP, port uint16) string {
	portText := strconv.Itoa(int(port))
	return net.JoinHostPort(ip.String(), portText)
}
|
package main
import (
auth "github.com/anraku/echo-sample/auth"
handler "github.com/anraku/echo-sample/handler"
log "github.com/anraku/echo-sample/log"
"github.com/labstack/echo"
"github.com/labstack/echo/middleware"
)
// main configures the Echo web server — request logging (console and a
// file under /var/log/echo-sample), token auth, a Server header, and
// panic recovery — then serves on :1323 until Start returns.
func main() {
	// Echo instance
	e := echo.New()
	// Middleware
	// NOTE(review): Pre() registers middleware that runs before routing;
	// loggers and auth are all registered as pre-middleware here while
	// only Recover uses Use() — confirm this ordering is intended.
	e.Pre(middleware.Logger())
	e.Pre(middleware.LoggerWithConfig(log.OutputAppLog("/var/log/echo-sample/api.log")))
	e.Pre(auth.AuthToken())
	e.Pre(auth.ServerHeader())
	e.Use(middleware.Recover())
	// Routes
	e.GET("/json", handler.SendJSON)
	// Start server
	e.Logger.Fatal(e.Start(":1323"))
}
|
package cmd
import (
"os"
"path"
"strings"
"testing"
"github.com/instructure-bridge/muss/config"
)
// helpers

// testbin is the absolute path to the repository's testdata/bin
// directory, resolved once at package load relative to the working dir.
var testbin string

func init() {
	cwd, err := os.Getwd()
	if err != nil {
		panic("Failed to get current dir: " + err.Error())
	}
	testbin = path.Join(cwd, "..", "testdata", "bin")
}
// newTestConfig builds a ProjectConfig from cfgMap, aborting the test on
// any construction error.
func newTestConfig(t *testing.T, cfgMap map[string]interface{}) *config.ProjectConfig {
	cfg, err := config.NewConfigFromMap(cfgMap)
	if err != nil {
		t.Fatalf("unexpected config error: %s", err)
	}
	return cfg
}
// runTestCommand executes the root command with args against cfg (or an
// empty config when cfg is nil) and returns the captured stdout, stderr,
// and the execution error.
func runTestCommand(cfg *config.ProjectConfig, args []string) (string, string, error) {
	var stdout, stderr strings.Builder
	if cfg == nil {
		cfg, _ = config.NewConfigFromMap(nil)
	}
	cmd := NewRootCommand(cfg)
	cmd.SetOut(&stdout)
	cmd.SetErr(&stderr)
	cmd.SetArgs(args)
	// Don't write config files.
	// Clearing PreRunE on the resolved subcommand skips its setup hook,
	// which would otherwise write files during tests.
	sub, _, _ := cmd.Find(args)
	sub.PreRunE = nil
	err := cmd.Execute()
	return stdout.String(), stderr.String(), err
}
func withTestPath(t *testing.T, f func(*testing.T)) {
path := os.Getenv("PATH")
os.Setenv("PATH", strings.Join([]string{testbin, path}, string(os.PathListSeparator)))
defer os.Setenv("PATH", path)
t.Run("with test path", f)
}
|
package slices
import (
"context"
"sync"
)
// AllAsync returns true if f returns true for all elements in slice.
//
// This is an asynchronous function. It will spawn as many goroutines as you specify
// in the `workers` argument. Set it to zero to spawn a new goroutine for each item.
func AllAsync[S ~[]T, T any](items S, workers int, f func(el T) bool) bool {
	// Vacuously true for an empty slice.
	if len(items) == 0 {
		return true
	}
	wg := sync.WaitGroup{}
	// worker drains indices from jobs; on the first element for which f
	// reports false it pushes a single `false` into result and exits early.
	worker := func(jobs <-chan int, result chan<- bool, ctx context.Context) {
		defer wg.Done()
		for {
			select {
			case <-ctx.Done():
				// Cancellation: the caller is already returning.
				return
			case index, ok := <-jobs:
				if !ok {
					return
				}
				if !f(items[index]) {
					result <- false
					return
				}
			}
		}
	}
	ctx, cancel := context.WithCancel(context.Background())
	// when we're returning the result, cancel all workers
	defer cancel()
	// calculate workers count
	if workers <= 0 || workers > len(items) {
		workers = len(items)
	}
	// run workers
	// jobs is buffered for every index so the scheduling loop below never
	// blocks; result is buffered one slot per worker so an early-exiting
	// worker never blocks on its send.
	jobs := make(chan int, len(items))
	result := make(chan bool, workers)
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go worker(jobs, result, ctx)
	}
	// close the result channel when all workers have done
	go func() {
		wg.Wait()
		close(result)
	}()
	// schedule the jobs: indices to check
	for i := 0; i < len(items); i++ {
		jobs <- i
	}
	close(jobs)
	// Receiving any value means some worker saw a failing element; a clean
	// close (loop body never entered) means every element passed.
	for range result {
		return false
	}
	return true
}
// AnyAsync returns true if f returns true for any element from slice
//
// This is an asynchronous function. It will spawn as many goroutines as you specify
// in the `workers` argument. Set it to zero to spawn a new goroutine for each item.
func AnyAsync[S ~[]T, T any](items S, workers int, f func(el T) bool) bool {
	// An empty slice has no matching element.
	if len(items) == 0 {
		return false
	}
	wg := sync.WaitGroup{}
	// worker drains indices from jobs; on the first element for which f
	// reports true it pushes a single `true` into result and exits early.
	worker := func(jobs <-chan int, result chan<- bool, ctx context.Context) {
		defer wg.Done()
		for {
			select {
			case <-ctx.Done():
				// Cancellation: the caller is already returning.
				return
			case index, ok := <-jobs:
				if !ok {
					return
				}
				if f(items[index]) {
					result <- true
					return
				}
			}
		}
	}
	ctx, cancel := context.WithCancel(context.Background())
	// when we're returning the result, cancel all workers
	defer cancel()
	// calculate workers count
	if workers <= 0 || workers > len(items) {
		workers = len(items)
	}
	// run workers
	// jobs is buffered for every index so the scheduling loop below never
	// blocks; result is buffered one slot per worker so an early-exiting
	// worker never blocks on its send.
	jobs := make(chan int, len(items))
	result := make(chan bool, workers)
	wg.Add(workers)
	for i := 0; i < workers; i++ {
		go worker(jobs, result, ctx)
	}
	// close the result channel when all workers have done
	go func() {
		wg.Wait()
		close(result)
	}()
	// schedule the jobs: indices to check
	for i := 0; i < len(items); i++ {
		jobs <- i
	}
	close(jobs)
	// Receiving any value means some worker found a match; a clean close
	// (loop body never entered) means no element satisfied f.
	for range result {
		return true
	}
	return false
}
// EachAsync calls f for every element from slice.
//
// This is an asynchronous function. It will spawn as many goroutines as you
// specify in the `workers` argument. Set it to zero to spawn a new goroutine
// for each item. EachAsync returns only after every call to f has finished.
func EachAsync[S ~[]T, T any](items S, workers int, f func(el T)) {
	if workers <= 0 || workers > len(items) {
		workers = len(items)
	}
	var wg sync.WaitGroup
	// Buffer every index up front so the producer loop below never blocks.
	queue := make(chan int, len(items))
	wg.Add(workers)
	for w := 0; w < workers; w++ {
		go func() {
			defer wg.Done()
			for idx := range queue {
				f(items[idx])
			}
		}()
	}
	for idx := range items {
		queue <- idx
	}
	close(queue)
	wg.Wait()
}
// FilterAsync returns the elements of items for which f returns true.
//
// This is an asynchronous function. It will spawn as many goroutines as you
// specify in the `workers` argument. Set it to zero to spawn a new goroutine
// for each item.
//
// The resulting items have the same order as in the input slice.
func FilterAsync[S ~[]T, T any](items S, workers int, f func(el T) bool) S {
	// keep[i] records f's verdict for items[i]; each index is written by
	// exactly one worker, so no locking is needed.
	keep := make([]bool, len(items))
	if workers <= 0 || workers > len(items) {
		workers = len(items)
	}
	var wg sync.WaitGroup
	queue := make(chan int, len(items))
	wg.Add(workers)
	for w := 0; w < workers; w++ {
		go func() {
			defer wg.Done()
			for idx := range queue {
				keep[idx] = f(items[idx])
			}
		}()
	}
	for idx := range items {
		queue <- idx
	}
	close(queue)
	wg.Wait()
	// Assemble the result sequentially so input order is preserved.
	filtered := make([]T, 0, len(items))
	for idx, el := range items {
		if keep[idx] {
			filtered = append(filtered, el)
		}
	}
	return filtered
}
// MapAsync applies f to every element of items and returns the results.
//
// This is an asynchronous function. It will spawn as many goroutines as you
// specify in the `workers` argument. Set it to zero to spawn a new goroutine
// for each item.
//
// The result items have the same order as in the input slice.
func MapAsync[S ~[]T, T any, G any](items S, workers int, f func(el T) G) []G {
	// mapped[i] is written by exactly one worker, so no locking is needed.
	mapped := make([]G, len(items))
	if workers <= 0 || workers > len(items) {
		workers = len(items)
	}
	var wg sync.WaitGroup
	queue := make(chan int, len(items))
	wg.Add(workers)
	for w := 0; w < workers; w++ {
		go func() {
			defer wg.Done()
			for idx := range queue {
				mapped[idx] = f(items[idx])
			}
		}()
	}
	for idx := range items {
		queue <- idx
	}
	close(queue)
	wg.Wait()
	return mapped
}
// ReduceAsync reduces slice to a single value with f.
//
// This is an asynchronous function. It will spawn as many goroutines as you specify
// in the `workers` argument. Set it to zero to spawn a new goroutine for each item.
//
// The function is guaranteed to be called with neighbored items. However, it may be called
// out of order. The results are collected into a new slice which is reduced again, until
// only one item remains. You can think about it as a pyramid. On each iteration,
// 2 elements are taken and merged together until only one remains.
//
// An example for sum:
//
// ```
// 1 2 3 4 5
// 3 7 5
// 10 5
// 15
// ```
//
func ReduceAsync[S ~[]T, T any](items S, workers int, f func(left T, right T) T) T {
	// The zero value of T for an empty input.
	if len(items) == 0 {
		var tmp T
		return tmp
	}
	// Work on a private copy so the caller's slice is never mutated.
	// BUG FIX: this used to be make([]T, len(items)) followed by
	// append(state, items...), which prepended len(items) zero values
	// and corrupted the reduction result.
	state := make([]T, len(items))
	copy(state, items)
	wg := sync.WaitGroup{}
	// worker merges the neighboring pair (state[index], state[index+1]).
	worker := func(jobs <-chan int, result chan<- T) {
		for index := range jobs {
			result <- f(state[index], state[index+1])
		}
		wg.Done()
	}
	for len(state) > 1 {
		// calculate workers count
		if workers <= 0 || workers > len(state) {
			workers = len(state)
		}
		// run workers
		jobs := make(chan int, len(state))
		wg.Add(workers)
		result := make(chan T)
		for i := 0; i < workers; i++ {
			go worker(jobs, result)
		}
		// close the result channel once all workers of this round are done
		go func() {
			wg.Wait()
			close(result)
		}()
		// schedule pairs of neighboring indices
		for i := 0; i < len(state)-1; i += 2 {
			jobs <- i
		}
		close(jobs)
		// collect the merged values into the next, roughly halved state
		newState := make([]T, 0, len(state)/2+len(state)%2)
		for el := range result {
			newState = append(newState, el)
		}
		if len(state)%2 == 1 {
			// an odd trailing element carries over to the next round unchanged
			newState = append(newState, state[len(state)-1])
		}
		// put new state as current state after all
		state = newState
	}
	return state[0]
}
|
package pathfileops
import (
"os"
"strings"
"testing"
)
// TestFileOpenConfig_CopyIn_01 verifies CopyIn duplicates a populated
// config: the copy must yield the same composite file-open code.
// NOTE(review): unlike sibling tests, the GetCompositeFileOpenCode error
// branches here fall through without `return` — confirm intentional.
func TestFileOpenConfig_CopyIn_01(t *testing.T) {
	expectedFOpenCode := os.O_WRONLY | os.O_APPEND | os.O_TRUNC
	fOpCfg1, err := FileOpenConfig{}.New(FOpenType.TypeWriteOnly(),
		FOpenMode.ModeAppend(), FOpenMode.ModeTruncate())
	if err != nil {
		t.Errorf("Error returned by fOpCfg1.New().\n"+
			"Error='%v'\n", err.Error())
		return
	}
	actualFOpenCode, err := fOpCfg1.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by fOpCfg1.GetCompositeFileOpenCode().\n"+
			"Error='%v'\n", err.Error())
	}
	if expectedFOpenCode != actualFOpenCode {
		t.Errorf("Error: Expected File Open Code='%v'.\n"+
			"Instead, actual File Open Code='%v'\n",
			expectedFOpenCode, actualFOpenCode)
	}
	fOpCfg2 := FileOpenConfig{}
	fOpCfg2.CopyIn(&fOpCfg1)
	actualFOpenCode2, err := fOpCfg2.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by fOpCfg2.GetCompositeFileOpenCode().\n"+
			"Error='%v'\n", err.Error())
	}
	if expectedFOpenCode != actualFOpenCode2 {
		t.Errorf("Error: Expected File Open Code #2 ='%v'.\n"+
			"Instead, actual File Open Code='%v'\n",
			expectedFOpenCode, actualFOpenCode2)
	}
}

// TestFileOpenConfig_CopyIn_02 verifies CopyIn between two zero-value
// configs leaves them Equal.
func TestFileOpenConfig_CopyIn_02(t *testing.T) {
	fOpStatus1 := FileOpenConfig{}
	fOpStatus2 := FileOpenConfig{}
	fOpStatus2.CopyIn(&fOpStatus1)
	if !fOpStatus1.Equal(&fOpStatus2) {
		t.Error("Error: Expected fOpStatus1==fOpStatus2.\n" +
			"HOWEVER, THEY ARE NOT EQUAL!\n")
	}
}
// TestFileOpenConfig_CopyOut_01 verifies CopyOut preserves the composite
// open code for a ReadWrite + ModeNone config.
func TestFileOpenConfig_CopyOut_01(t *testing.T) {
	expectedFOpenCode := os.O_RDWR
	fOpStatus1, err := FileOpenConfig{}.New(FOpenType.TypeReadWrite(),
		FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by fOpStatus1.New().\n"+
			"Error='%v'\n", err.Error())
		return
	}
	actualFOpenCode, err := fOpStatus1.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by fOpStatus1.GetCompositeFileOpenCode().\n"+
			"Error='%v'\n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode {
		t.Errorf("Error: Expected File Open Code='%v'.\n"+
			"Instead, actual File Open Code='%v' \n",
			expectedFOpenCode, actualFOpenCode)
	}
	fOpStatus2 := fOpStatus1.CopyOut()
	actualFOpenCode2, err := fOpStatus2.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by fOpStatus2.GetCompositeFileOpenCode().\n"+
			"Error='%v'\n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode2 {
		t.Errorf("Error: Expected File Open Code #2 ='%v'.\n"+
			"Instead, actual File Open Code='%v'\n",
			expectedFOpenCode, actualFOpenCode2)
	}
}

// TestFileOpenConfig_CopyOut_02 repeats the CopyOut check for a
// ReadWrite + ModeNone config (duplicate of _01 in substance).
func TestFileOpenConfig_CopyOut_02(t *testing.T) {
	expectedFOpenCode := os.O_RDWR
	fOpStatus1, err := FileOpenConfig{}.New(FOpenType.TypeReadWrite(),
		FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by fOpStatus1.New().\n"+
			"Error='%v'\n", err.Error())
		return
	}
	actualFOpenCode, err := fOpStatus1.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by fOpStatus1.GetCompositeFileOpenCode().\n"+
			"Error='%v'\n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode {
		t.Errorf("Error: Expected File Open Code='%v'.\n"+
			"Instead, actual File Open Code='%v'\n",
			expectedFOpenCode, actualFOpenCode)
	}
	fOpStatus2 := fOpStatus1.CopyOut()
	actualFOpenCode2, err := fOpStatus2.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by fOpStatus2.GetCompositeFileOpenCode().\n"+
			"Error='%v'\n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode2 {
		t.Errorf("Error: Expected File Open Code #2 ='%v'.\n"+
			"Instead, actual File Open Code='%v'\n",
			expectedFOpenCode, actualFOpenCode2)
	}
}

// TestFileOpenConfig_CopyOut_03 verifies CopyOut when the config was
// built from a file-open type alone (no explicit modes).
func TestFileOpenConfig_CopyOut_03(t *testing.T) {
	expectedFOpenCode := os.O_RDWR
	fOpStatus1, err := FileOpenConfig{}.New(FOpenType.TypeReadWrite())
	if err != nil {
		t.Errorf("Error returned by fOpStatus1.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	actualFOpenCode, err := fOpStatus1.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by fOpStatus1.GetCompositeFileOpenCode().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode {
		t.Errorf("Error: Expected File Open Code='%v'.\n"+
			"Instead, actual File Open Code='%v' \n",
			expectedFOpenCode, actualFOpenCode)
	}
	fOpStatus2 := fOpStatus1.CopyOut()
	actualFOpenCode2, err := fOpStatus2.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by fOpStatus2.GetCompositeFileOpenCode().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode2 {
		t.Errorf("Error: Expected File Open Code #2 ='%v'.\n"+
			"Instead, actual File Open Code='%v' \n",
			expectedFOpenCode, actualFOpenCode2)
	}
}

// TestFileOpenConfig_CopyOut_04 verifies CopyOut still works after the
// internal mode slice is reset to an empty (non-nil) slice.
func TestFileOpenConfig_CopyOut_04(t *testing.T) {
	expectedFOpenCode := os.O_RDWR
	fOpStatus1, err := FileOpenConfig{}.New(FOpenType.TypeReadWrite())
	if err != nil {
		t.Errorf("Error returned by fOpStatus1.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	actualFOpenCode, err := fOpStatus1.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by fOpStatus1.GetCompositeFileOpenCode().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode {
		t.Errorf("Error: Expected File Open Code='%v'.\n"+
			"Instead, actual File Open Code='%v' \n",
			expectedFOpenCode, actualFOpenCode)
	}
	// Exercise the empty-slice internal state before copying.
	fOpStatus1.fileOpenModes = make([]FileOpenMode, 0)
	fOpStatus2 := fOpStatus1.CopyOut()
	actualFOpenCode2, err := fOpStatus2.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by fOpStatus2.GetCompositeFileOpenCode().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode2 {
		t.Errorf("Error: Expected File Open Code #2 ='%v'.\n"+
			"Instead, actual File Open Code='%v' \n",
			expectedFOpenCode, actualFOpenCode2)
	}
}

// TestFileOpenConfig_CopyOut_05 verifies CopyOut still works after the
// internal mode slice is set to nil.
func TestFileOpenConfig_CopyOut_05(t *testing.T) {
	expectedFOpenCode := os.O_RDWR
	fOpStatus1, err := FileOpenConfig{}.New(FOpenType.TypeReadWrite())
	if err != nil {
		t.Errorf("Error returned by fOpStatus1.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	actualFOpenCode, err := fOpStatus1.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by fOpStatus1.GetCompositeFileOpenCode().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode {
		t.Errorf("Error: Expected File Open Code='%v'. Instead, actual File Open Code='%v' \n",
			expectedFOpenCode, actualFOpenCode)
	}
	// Exercise the nil internal state before copying.
	fOpStatus1.fileOpenModes = nil
	fOpStatus2 := fOpStatus1.CopyOut()
	actualFOpenCode2, err := fOpStatus2.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by fOpStatus2.GetCompositeFileOpenCode().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode2 {
		t.Errorf("Error: Expected File Open Code #2 ='%v'.\n"+
			"Instead, actual File Open Code='%v' \n",
			expectedFOpenCode, actualFOpenCode2)
	}
}
// TestFileOpenConfig_Equal_01 verifies Equal is symmetric for a config
// and its CopyOut.
func TestFileOpenConfig_Equal_01(t *testing.T) {
	fOpStatus1, err :=
		FileOpenConfig{}.New(FOpenType.TypeWriteOnly(), FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	fOpStatus2 := fOpStatus1.CopyOut()
	if !fOpStatus2.Equal(&fOpStatus1) {
		t.Error("Expected fOpStatus2==fOpStatus1.\n" +
			"WRONG: They are NOT Equal!\n")
	}
	if !fOpStatus1.Equal(&fOpStatus2) {
		t.Error("Expected fOpStatus1==fOpStatus2.\n" +
			"WRONG: They are NOT Equal!\n")
	}
}

// TestFileOpenConfig_Equal_02 verifies configs with different modes
// compare unequal (both directions).
func TestFileOpenConfig_Equal_02(t *testing.T) {
	fOpStatus1, err :=
		FileOpenConfig{}.New(FOpenType.TypeWriteOnly(), FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by fOpStatus1=FileOpenConfig{}.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	fOpStatus2, err := FileOpenConfig{}.New(
		FOpenType.TypeWriteOnly(), FOpenMode.ModeAppend(), FOpenMode.ModeExclusive())
	if err != nil {
		t.Errorf("Error returned by fOpStatus2=FileOpenConfig{}.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	if fOpStatus2.Equal(&fOpStatus1) {
		t.Error("Expected fOpStatus2!=fOpStatus1.\n" +
			"WRONG: They ARE Equal!\n")
	}
	if fOpStatus1.Equal(&fOpStatus2) {
		t.Error("Expected fOpStatus1!=fOpStatus2.\nWRONG: They ARE Equal!\n")
	}
}

// TestFileOpenConfig_Equal_03 verifies two zero-value configs are Equal.
func TestFileOpenConfig_Equal_03(t *testing.T) {
	fOpStatus1 := FileOpenConfig{}
	fOpStatus2 := FileOpenConfig{}
	if !fOpStatus2.Equal(&fOpStatus1) {
		t.Error("Expected fOpStatus2==fOpStatus1.\nWRONG: They are NOT Equal!\n")
	}
	if !fOpStatus1.Equal(&fOpStatus2) {
		t.Error("Expected fOpStatus ==fOpStatus2.\nWRONG: They are NOT Equal!\n")
	}
}

// TestFileOpenConfig_Equal_04 verifies an initialized config and a
// zero-value config compare unequal.
func TestFileOpenConfig_Equal_04(t *testing.T) {
	fOpStatus1, err :=
		FileOpenConfig{}.New(FOpenType.TypeWriteOnly(), FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by fOpStatus1=FileOpenConfig{}.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	fOpStatus2 := FileOpenConfig{}
	if fOpStatus2.Equal(&fOpStatus1) {
		t.Error("Expected fOpStatus2!=fOpStatus1.\nWRONG: They ARE Equal!\n")
	}
	if fOpStatus1.Equal(&fOpStatus2) {
		t.Error("Expected fOpStatus ==fOpStatus2.\nWRONG: They ARE Equal!\n")
	}
}

// TestFileOpenConfig_Equal_05 mirrors _04 with the initialized config on
// the other side of the comparison.
func TestFileOpenConfig_Equal_05(t *testing.T) {
	fOpStatus1 := FileOpenConfig{}
	fOpStatus2, err :=
		FileOpenConfig{}.New(FOpenType.TypeWriteOnly(), FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by fOpStatus2=FileOpenConfig{}.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	if fOpStatus2.Equal(&fOpStatus1) {
		t.Error("Expected fOpStatus2!=fOpStatus1.\nWRONG: They ARE Equal!\n")
	}
	if fOpStatus1.Equal(&fOpStatus2) {
		t.Error("Expected fOpStatus ==fOpStatus2.\nWRONG: They ARE Equal!\n")
	}
}

// TestFileOpenConfig_Equal_06 verifies configs that differ only by open
// type compare unequal. NOTE(review): the first error branch lacks a
// `return`, unlike its siblings — confirm intentional.
func TestFileOpenConfig_Equal_06(t *testing.T) {
	fOpStatus1, err :=
		FileOpenConfig{}.New(FOpenType.TypeReadWrite(), FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by fOpStatus1=FileOpenConfig{}.New().\n"+
			"Error='%v'\n", err.Error())
	}
	fOpStatus2, err :=
		FileOpenConfig{}.New(FOpenType.TypeWriteOnly(), FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by fOpStatus2=FileOpenConfig{}.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	if fOpStatus2.Equal(&fOpStatus1) {
		t.Error("Expected fOpStatus2!=fOpStatus1.\nWRONG: They ARE Equal!\n")
	}
	if fOpStatus1.Equal(&fOpStatus2) {
		t.Error("Expected fOpStatus ==fOpStatus2.\nWRONG: They ARE Equal!\n")
	}
}

// TestFileOpenConfig_Equal_07 verifies configs with the same type but
// disjoint mode sets compare unequal.
func TestFileOpenConfig_Equal_07(t *testing.T) {
	fOpStatus1, err :=
		FileOpenConfig{}.New(
			FOpenType.TypeReadWrite(),
			FOpenMode.ModeAppend(),
			FOpenMode.ModeTruncate())
	if err != nil {
		t.Errorf("Error returned by fOpStatus1=FileOpenConfig{}.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	fOpStatus2, err :=
		FileOpenConfig{}.New(
			FOpenType.TypeReadWrite(),
			FOpenMode.ModeCreate(),
			FOpenMode.ModeExclusive())
	if err != nil {
		t.Errorf("Error returned by fOpStatus2=FileOpenConfig{}.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	if fOpStatus2.Equal(&fOpStatus1) {
		t.Error("Expected fOpStatus2!=fOpStatus1.\nWRONG: They ARE Equal!\n")
	}
	if fOpStatus1.Equal(&fOpStatus2) {
		t.Error("Expected fOpStatus ==fOpStatus2.\nWRONG: They ARE Equal!\n")
	}
}
// TestFileOpenConfig_New_01 verifies New composes WriteOnly+Append+Trunc
// into the matching os.O_* bitmask.
func TestFileOpenConfig_New_01(t *testing.T) {
	expectedFOpenCode := os.O_WRONLY | os.O_APPEND | os.O_TRUNC
	fOpStatus, err := FileOpenConfig{}.New(FOpenType.TypeWriteOnly(),
		FOpenMode.ModeAppend(), FOpenMode.ModeTruncate())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	actualFOpenCode, err := fOpStatus.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.GetCompositeFileOpenCode().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode {
		t.Errorf("Error: Expected File Open Code='%v'.\n"+
			"Instead, actual File Open Code='%v' \n",
			expectedFOpenCode, actualFOpenCode)
	}
}

// TestFileOpenConfig_New_02 verifies New rejects an invalid open type.
func TestFileOpenConfig_New_02(t *testing.T) {
	fOpenType := FileOpenType(-99)
	_, err := FileOpenConfig{}.New(fOpenType, FOpenMode.ModeCreate())
	if err == nil {
		t.Error("Expected Error returned by FileOpenConfig{}.New() " +
			"because of an invalid File Open Type.\n" +
			"However, NO ERROR WAS RETURNED! \n")
	}
}

// TestFileOpenConfig_New_03 verifies New with a type and no modes yields
// the bare type's open code.
func TestFileOpenConfig_New_03(t *testing.T) {
	expectedFOpenCode := os.O_RDWR
	fOpStatus, err := FileOpenConfig{}.New(FOpenType.TypeReadWrite())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	actualFOpenCode, err := fOpStatus.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.GetCompositeFileOpenCode().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode {
		t.Errorf("Error: Expected File Open Code='%v'.\n"+
			"Instead, actual File Open Code='%v' \n",
			expectedFOpenCode, actualFOpenCode)
	}
}

// TestFileOpenConfig_New_04 verifies ModeNone contributes nothing to the
// composite open code.
func TestFileOpenConfig_New_04(t *testing.T) {
	expectedFOpenCode := os.O_RDWR
	fOpStatus, err := FileOpenConfig{}.New(FOpenType.TypeReadWrite(), FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	actualFOpenCode, err := fOpStatus.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.GetCompositeFileOpenCode().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode {
		t.Errorf("Error: Expected File Open Code='%v'.\n"+
			"Instead, actual File Open Code='%v' \n",
			expectedFOpenCode, actualFOpenCode)
	}
}

// TestFileOpenConfig_New_05 verifies New rejects an invalid open mode.
func TestFileOpenConfig_New_05(t *testing.T) {
	fOpenMode := FileOpenMode(-99)
	_, err := FileOpenConfig{}.New(FOpenType.TypeReadWrite(), fOpenMode)
	if err == nil {
		t.Error("Expected an error return from FileOpenConfig{}.New()\n" +
			"because the File Open Mode was invalid.\n" +
			"HOWEVER, NO ERROR WAS RETURNED! \n")
	}
}

// TestFileOpenConfig_New_06 verifies the composite code survives the
// internal mode slice being nilled out after construction.
func TestFileOpenConfig_New_06(t *testing.T) {
	expectedFOpenCode := os.O_RDWR
	fOpStatus, err := FileOpenConfig{}.New(FOpenType.TypeReadWrite(), FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	fOpStatus.fileOpenModes = nil
	actualFOpenCode, err := fOpStatus.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.GetCompositeFileOpenCode().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode {
		t.Errorf("Error: Expected File Open Code='%v'.\n"+
			"Instead, actual File Open Code='%v' \n",
			expectedFOpenCode, actualFOpenCode)
	}
}
// TestFileOpenConfig_GetCompositeFileOpenCode_01 verifies the getter
// errors on an uninitialized config.
func TestFileOpenConfig_GetCompositeFileOpenCode_01(t *testing.T) {
	fOpCfg := FileOpenConfig{}
	_, err := fOpCfg.GetCompositeFileOpenCode()
	if err == nil {
		t.Error("Expected error return from fOpCfg.GetCompositeFileOpenCode()\n" +
			"because 'fOpCfg' was NOT initialized.\n" +
			"HOWEVER, NO ERROR WAS RETURNED!\n")
	}
}

// TestFileOpenConfig_GetCompositeFileOpenCode_02 verifies the getter
// errors when the open type is TypeNone.
func TestFileOpenConfig_GetCompositeFileOpenCode_02(t *testing.T) {
	fOpCfg, err := FileOpenConfig{}.New(FOpenType.TypeNone(), FOpenMode.ModeAppend())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	_, err = fOpCfg.GetCompositeFileOpenCode()
	if err == nil {
		t.Error("Expected error return from fOpCfg.GetCompositeFileOpenCode()\n" +
			"because fOpCfg use TypeNone().\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}

// TestFileOpenConfig_GetCompositeFileOpenCode_03 verifies the getter
// errors when TypeNone is combined with a nil mode slice.
func TestFileOpenConfig_GetCompositeFileOpenCode_03(t *testing.T) {
	fOpCfg, err := FileOpenConfig{}.New(FOpenType.TypeNone(), FOpenMode.ModeAppend())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	fOpCfg.fileOpenModes = nil
	_, err = fOpCfg.GetCompositeFileOpenCode()
	if err == nil {
		t.Error("Expected error return from fOpCfg.GetCompositeFileOpenCode()\n" +
			"because fOpCfg.fileOpenModes == nil.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}

// TestFileOpenConfig_GetCompositeFileOpenCode_04 verifies the getter
// errors when the stored open type has been made invalid.
func TestFileOpenConfig_GetCompositeFileOpenCode_04(t *testing.T) {
	fOpCfg, err := FileOpenConfig{}.New(FOpenType.TypeNone(), FOpenMode.ModeAppend())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	fOpCfg.fileOpenType = FileOpenType(-99)
	_, err = fOpCfg.GetCompositeFileOpenCode()
	if err == nil {
		t.Error("Expected error return from fOpCfg.GetCompositeFileOpenCode()\n" +
			"because fOpCfg.fileOpenType is invalid.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}

// TestFileOpenConfig_GetCompositeFileOpenCode_05 verifies a valid type
// still yields its code after the mode slice is nilled out.
func TestFileOpenConfig_GetCompositeFileOpenCode_05(t *testing.T) {
	expectedFOpenCode := os.O_RDWR
	fOpStatus, err := FileOpenConfig{}.New(FOpenType.TypeReadWrite(), FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	fOpStatus.fileOpenModes = nil
	actualFOpenCode, err := fOpStatus.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.GetCompositeFileOpenCode().\n"+
			"Error='%v'\n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode {
		t.Errorf("Error: Expected File Open Code='%v'.\n"+
			"Instead, actual File Open Code='%v' \n",
			expectedFOpenCode, actualFOpenCode)
	}
}
func TestFileOpenConfig_GetFileOpenModes_01(t *testing.T) {
fOpCfg, err := FileOpenConfig{}.New(
FOpenType.TypeReadWrite(),
FOpenMode.ModeAppend(),
FOpenMode.ModeCreate(),
FOpenMode.ModeExclusive())
if err != nil {
t.Errorf("Error returned by FileOpenConfig{}.New().\n" +
"Error='%v' \n", err.Error())
return
}
fileOpenModes := fOpCfg.GetFileOpenModes()
if len(fileOpenModes) != 3 {
t.Errorf("Error: Expected fileOpenModes len = 3.\n" +
"Instead, len='%v'\n",
len(fileOpenModes))
}
hasAppend := 0
hasCreate := 0
hasExclusive := 0
for i := 0; i < len(fileOpenModes); i++ {
if fileOpenModes[i] == FOpenMode.ModeAppend() {
hasAppend++
}
if fileOpenModes[i] == FOpenMode.ModeCreate() {
hasCreate++
}
if fileOpenModes[i] == FOpenMode.ModeExclusive() {
hasExclusive++
}
}
if hasAppend != 1 {
t.Errorf("Error: Could not locate correct number of Appends.\n"+
"hasAppend='%v'\n", hasAppend)
}
if hasCreate != 1 {
t.Errorf("Error: Could not locate correct number of Creates.\n"+
"hasCreate='%v'\n", hasCreate)
}
if hasExclusive != 1 {
t.Errorf("Error: Could not locate correct number of Exclusives.\n"+
"hasExclusive='%v'\n", hasExclusive)
}
}
// TestFileOpenConfig_GetFileOpenModes_02 verifies that a configuration
// created with no explicit modes reports exactly one mode, ModeNone.
//
// Fix: the nil check previously fell through without returning, which
// produced a misleading cascade of follow-on failures on a nil slice and
// was inconsistent with sibling tests _03 and _04.
func TestFileOpenConfig_GetFileOpenModes_02(t *testing.T) {
	fOpCfg, err := FileOpenConfig{}.New(
		FOpenType.TypeReadWrite())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\n" +
			"Error='%v' \n", err.Error())
		return
	}
	fileOpenModes := fOpCfg.GetFileOpenModes()
	if fileOpenModes == nil {
		t.Error("Error: Returned fileOpenModes is nil!\n")
		// Stop here: the length/content checks below are meaningless on nil.
		return
	}
	if len(fileOpenModes) == 0 {
		t.Error("Error: Returned fileOpenModes has Zero Length!\n")
		return
	}
	if len(fileOpenModes) != 1 {
		t.Errorf("Error: Returned fileOpenModes Length is NOT '1' !\n"+
			"Length='%v'\n", len(fileOpenModes))
		return
	}
	if fileOpenModes[0] != FOpenMode.ModeNone() {
		t.Error("Error: Expected fileOpenModes[0] == FOpenMode.ModeNone().\n" +
			"It is NOT!\n")
	}
}
// TestFileOpenConfig_GetFileOpenModes_03 verifies that a configuration with
// only a file type reports a single default mode of ModeNone.
func TestFileOpenConfig_GetFileOpenModes_03(t *testing.T) {
	cfg, err := FileOpenConfig{}.New(
		FOpenType.TypeReadWrite())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\nError='%v' \n", err.Error())
		return
	}
	modes := cfg.GetFileOpenModes()
	if modes == nil {
		t.Error("Error: Returned fileOpenModes is nil!\n")
		return
	}
	if len(modes) == 0 {
		t.Error("Error: Returned fileOpenModes has Zero Length!\n")
		return
	}
	if len(modes) != 1 {
		t.Errorf("Error: Returned fileOpenModes Length is NOT '1' !\nLength='%v'\n", len(modes))
	}
	if modes[0] != FOpenMode.ModeNone() {
		t.Error("Error: Expected fileOpenModes[0] == FOpenMode.ModeNone().\nIt is NOT!\n")
	}
}
// TestFileOpenConfig_GetFileOpenModes_04 verifies that GetFileOpenModes
// recovers gracefully after the internal modes slice is forced to nil,
// still returning a single ModeNone entry.
func TestFileOpenConfig_GetFileOpenModes_04(t *testing.T) {
	cfg, err := FileOpenConfig{}.New(
		FOpenType.TypeReadWrite())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\nError='%v' \n", err.Error())
		return
	}
	// Corrupt the internal state on purpose.
	cfg.fileOpenModes = nil
	modes := cfg.GetFileOpenModes()
	if modes == nil {
		t.Error("Error: Returned fileOpenModes is nil!\n")
		return
	}
	if len(modes) == 0 {
		t.Error("Error: Returned fileOpenModes has Zero Length!\n")
		return
	}
	if len(modes) != 1 {
		t.Errorf("Error: Returned fileOpenModes Length is NOT '1' !\nLength='%v'\n", len(modes))
	}
	if modes[0] != FOpenMode.ModeNone() {
		t.Error("Error: Expected fileOpenModes[0] == FOpenMode.ModeNone().\nIt is NOT!\n")
	}
}
// TestFileOpenConfig_GetFileOpenTextString_01 verifies that the narrative
// text contains the names of the configured open type and modes.
//
// Idiom fix: use strings.Contains instead of strings.Index(...) == -1.
func TestFileOpenConfig_GetFileOpenTextString_01(t *testing.T) {
	fOpenCfg, err := FileOpenConfig{}.New(
		FOpenType.TypeReadWrite(),
		FOpenMode.ModeCreate(),
		FOpenMode.ModeExclusive())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\n" +
			"Error='%v' \n", err.Error())
		return
	}
	txt := fOpenCfg.GetFileOpenNarrativeText()
	if !strings.Contains(txt, "ReadWrite") {
		t.Error("Error: Could not locate 'ReadWrite' in FileOpen Text!\n")
	}
	if !strings.Contains(txt, "Create") {
		t.Error("Error: Could not locate 'Create' in FileOpen Text!\n")
	}
	if !strings.Contains(txt, "Exclusive") {
		t.Error("Error: Could not locate 'Exclusive' in FileOpen Text!\n")
	}
}
// TestFileOpenConfig_GetFileOpenTextString_02 verifies that the narrative
// text falls back to 'None' for the mode when the internal modes slice is nil.
//
// Idiom fix: use strings.Contains instead of strings.Index(...) == -1.
func TestFileOpenConfig_GetFileOpenTextString_02(t *testing.T) {
	fOpenCfg, err := FileOpenConfig{}.New(
		FOpenType.TypeReadWrite(),
		FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\n" +
			"Error='%v' \n", err.Error())
		return
	}
	// Corrupt the internal state on purpose.
	fOpenCfg.fileOpenModes = nil
	txt := fOpenCfg.GetFileOpenNarrativeText()
	if !strings.Contains(txt, "ReadWrite") {
		t.Error("Error: Could not locate 'ReadWrite' in FileOpen Text!\n")
	}
	if !strings.Contains(txt, "None") {
		t.Error("Error: Could not locate 'None' in FileOpen Text!\n")
	}
}
// TestFileOpenConfig_GetFileOpenType_01 verifies GetFileOpenType returns
// the ReadWrite type the configuration was created with.
func TestFileOpenConfig_GetFileOpenType_01(t *testing.T) {
	cfg, err := FileOpenConfig{}.New(
		FOpenType.TypeReadWrite(),
		FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\nError='%v' \n", err.Error())
		return
	}
	got := cfg.GetFileOpenType()
	if got != FOpenType.TypeReadWrite() {
		t.Errorf("Error: Expected fOpenType='ReadWrite'.\nInstead, fOpenType='%v'\nstring='%s'\n",
			got, got.String())
	}
}
// TestFileOpenConfig_GetFileOpenType_02 verifies GetFileOpenType still
// reports ReadOnly after the internal modes slice has been nil'ed out.
func TestFileOpenConfig_GetFileOpenType_02(t *testing.T) {
	cfg, err := FileOpenConfig{}.New(
		FOpenType.TypeReadOnly(),
		FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\nError='%v' \n", err.Error())
		return
	}
	// Corrupt the internal state on purpose.
	cfg.fileOpenModes = nil
	got := cfg.GetFileOpenType()
	if got != FOpenType.TypeReadOnly() {
		t.Errorf("Error: Expected fOpenType='ReadOnly'.\nInstead, fOpenType='%v'\nstring='%s'\n",
			got, got.String())
	}
}
// TestFileOpenConfig_IsValid_01 verifies IsValid rejects a zero-value,
// uninitialized FileOpenConfig.
func TestFileOpenConfig_IsValid_01(t *testing.T) {
	cfg := FileOpenConfig{}
	if err := cfg.IsValid(); err == nil {
		t.Error("Expected an error return from fOpCfg.IsValid()\n" +
			"because fOpCfg is uninitialized.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileOpenConfig_IsValid_02 verifies IsValid accepts a properly
// constructed ReadOnly/ModeNone configuration.
func TestFileOpenConfig_IsValid_02(t *testing.T) {
	cfg, err := FileOpenConfig{}.New(
		FOpenType.TypeReadOnly(),
		FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\nError='%v' \n", err.Error())
		return
	}
	if err = cfg.IsValid(); err != nil {
		t.Errorf("Error: Expected no error returned from IsValid().\n"+
			"However, an error was returned!\nError='%v'\n", err.Error())
	}
}
// TestFileOpenConfig_IsValid_03 verifies IsValid rejects the combination
// of File Type 'None' with multiple concrete file modes.
func TestFileOpenConfig_IsValid_03(t *testing.T) {
	cfg, err := FileOpenConfig{}.New(
		FOpenType.TypeNone(),
		FOpenMode.ModeAppend(),
		FOpenMode.ModeCreate(),
		FOpenMode.ModeExclusive())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\nError='%v' \n", err.Error())
		return
	}
	if err = cfg.IsValid(); err == nil {
		t.Error("Expected an error return from fOpCfg.IsValid()\n" +
			"because fOpCfg File Type=None and multiple Modes.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileOpenConfig_IsValid_04 verifies IsValid rejects a configuration
// whose file open type has been forced to an out-of-range value.
func TestFileOpenConfig_IsValid_04(t *testing.T) {
	cfg, err := FileOpenConfig{}.New(
		FOpenType.TypeReadOnly(),
		FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\nError='%v' \n", err.Error())
		return
	}
	// Inject an invalid type value.
	cfg.fileOpenType = FileOpenType(-99)
	if err = cfg.IsValid(); err == nil {
		t.Error("Expected an error return from fOpCfg.IsValid()\n" +
			"because fOpCfg File Type=-99.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileOpenConfig_IsValid_05 verifies IsValid rejects a configuration
// whose first file open mode has been forced to an out-of-range value.
//
// Fix: the failure message previously said "File Type=-99" (copied from
// IsValid_04); this test actually injects an invalid File Open Mode.
func TestFileOpenConfig_IsValid_05(t *testing.T) {
	fOpCfg, err := FileOpenConfig{}.New(
		FOpenType.TypeReadOnly(),
		FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\n" +
			"Error='%v' \n", err.Error())
		return
	}
	// Inject an invalid mode value.
	fOpCfg.fileOpenModes[0] = FileOpenMode(9999999)
	err = fOpCfg.IsValid()
	if err == nil {
		t.Error("Expected an error return from fOpCfg.IsValid()\n" +
			"because fOpCfg contains an invalid File Open Mode.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileOpenConfig_IsValid_06 verifies IsValid tolerates a nil internal
// modes slice on an otherwise valid configuration.
func TestFileOpenConfig_IsValid_06(t *testing.T) {
	cfg, err := FileOpenConfig{}.New(
		FOpenType.TypeReadOnly(),
		FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\nError='%v' \n", err.Error())
		return
	}
	// Corrupt the internal state on purpose.
	cfg.fileOpenModes = nil
	if err = cfg.IsValid(); err != nil {
		t.Errorf("Expected NO ERROR RETURN from IsValid().\n"+
			"However, AN ERROR WAS RETURNED!\nError='%v'\n", err.Error())
	}
}
// TestFileOpenConfig_IsValid_07 verifies IsValid rejects a mode list in
// which one entry has been overwritten with an out-of-range value.
func TestFileOpenConfig_IsValid_07(t *testing.T) {
	cfg, err := FileOpenConfig{}.New(
		FOpenType.TypeReadOnly(),
		FOpenMode.ModeAppend(),
		FOpenMode.ModeCreate(),
		FOpenMode.ModeTruncate())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\nError='%v' \n", err.Error())
		return
	}
	// Inject an invalid mode value.
	cfg.fileOpenModes[0] = FileOpenMode(9999)
	if err = cfg.IsValid(); err == nil {
		t.Error("Expected an error return from fOpCfg.IsValid()\n" +
			"because fOpCfg contained an invalid File Mode.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileOpenConfig_IsValid_08 verifies IsValid rejects a mode list that
// mixes ModeNone with other concrete modes.
func TestFileOpenConfig_IsValid_08(t *testing.T) {
	cfg, err := FileOpenConfig{}.New(
		FOpenType.TypeReadOnly(),
		FOpenMode.ModeAppend(),
		FOpenMode.ModeCreate(),
		FOpenMode.ModeTruncate())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\nError='%v' \n", err.Error())
		return
	}
	// Overwrite one mode with 'None' to create the invalid mixture.
	cfg.fileOpenModes[1] = FOpenMode.ModeNone()
	if err = cfg.IsValid(); err == nil {
		t.Error("Expected an error return from fOpCfg.IsValid()\n" +
			"because fOpCfg has multiple File Modes one of which is 'None'.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileOpenConfig_IsValid_09 verifies IsValid rejects a mode list that
// mixes ModeNone with other concrete modes.
//
// NOTE(review): this test is an exact line-for-line duplicate of
// TestFileOpenConfig_IsValid_09's sibling, TestFileOpenConfig_IsValid_08.
// Consider differentiating it (e.g. overwrite a different index) or
// removing one of the pair.
func TestFileOpenConfig_IsValid_09(t *testing.T) {
	fOpCfg, err := FileOpenConfig{}.New(
		FOpenType.TypeReadOnly(),
		FOpenMode.ModeAppend(),
		FOpenMode.ModeCreate(),
		FOpenMode.ModeTruncate())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\n" +
			"Error='%v' \n", err.Error())
		return
	}
	// Overwrite one mode with 'None' to create the invalid mixture.
	fOpCfg.fileOpenModes[1] = FOpenMode.ModeNone()
	err = fOpCfg.IsValid()
	if err == nil {
		t.Error("Expected an error return from fOpCfg.IsValid()\n" +
			"because fOpCfg has multiple File Modes one of which is 'None'.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileOpenConfig_IsValid_10 verifies IsValid rejects a hand-built
// configuration pairing File Type 'None' with a concrete ModeCreate.
func TestFileOpenConfig_IsValid_10(t *testing.T) {
	// Build the invalid state directly, bypassing New().
	cfg := FileOpenConfig{}
	cfg.fileOpenType = FOpenType.TypeNone()
	cfg.fileOpenModes = []FileOpenMode{FOpenMode.ModeCreate()}
	if err := cfg.IsValid(); err == nil {
		t.Error("Expected an error return from fOpCfg.IsValid()\n" +
			"because fOpCfg has File Type='None' and fileOpenModes = ModeCreate.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileOpenConfig_SetFileOpenType_01 verifies that SetFileOpenType
// upgrades a TypeNone configuration to ReadWrite, reflected in a composite
// open code of os.O_RDWR.
//
// Fixes: the SetFileOpenType failure message previously read
// "SetFileOpenType{}.New()" (copy-paste from the New() check); also added a
// return after a GetCompositeFileOpenCode failure so the final comparison
// does not produce a second, misleading failure.
func TestFileOpenConfig_SetFileOpenType_01(t *testing.T) {
	expectedFOpenCode := os.O_RDWR
	fOpStatus, err := FileOpenConfig{}.New(FOpenType.TypeNone(),
		FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New().\n" +
			"Error='%v' \n", err.Error())
		return
	}
	err = fOpStatus.SetFileOpenType(FOpenType.TypeReadWrite())
	if err != nil {
		t.Errorf("Error returned by fOpStatus.SetFileOpenType(FOpenType.TypeReadWrite()).\n" +
			"Error='%v' \n", err.Error())
	}
	actualFOpenCode, err := fOpStatus.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.GetCompositeFileOpenCode().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode {
		t.Errorf("Error: Expected File Open Code='%v'.\n"+
			"Instead, actual File Open Code='%v' \n",
			expectedFOpenCode, actualFOpenCode)
	}
}
// TestFileOpenConfig_SetFileOpenType_02 verifies that SetFileOpenType and
// SetFileOpenModes can build a valid ReadWrite configuration from a
// zero-value FileOpenConfig.
//
// Fix: the SetFileOpenType failure message previously read
// "SetFileOpenType{}.New()" — a copy-paste from the New() check.
func TestFileOpenConfig_SetFileOpenType_02(t *testing.T) {
	expectedFOpenCode := os.O_RDWR
	fOpStatus := FileOpenConfig{}
	err := fOpStatus.SetFileOpenType(FOpenType.TypeReadWrite())
	if err != nil {
		t.Errorf("Error returned by fOpStatus.SetFileOpenType(FOpenType.TypeReadWrite()).\n" +
			"Error='%v' \n", err.Error())
		return
	}
	err = fOpStatus.SetFileOpenModes(FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by fOpStatus." +
			"SetFileOpenModes(FOpenMode.ModeNone()).\n"+
			"Error='%v' \n", err.Error())
		return
	}
	actualFOpenCode, err := fOpStatus.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.GetCompositeFileOpenCode().\n"+
			"Error='%v' \n", err.Error())
		return
	}
	if expectedFOpenCode != actualFOpenCode {
		t.Errorf("Error: Expected File Open Code='%v'.\n"+
			"Instead, actual File Open Code='%v' \n",
			expectedFOpenCode, actualFOpenCode)
	}
}
// TestFileOpenConfig_SetFileOpenType_03 verifies SetFileOpenType rejects
// an out-of-range file open type value.
func TestFileOpenConfig_SetFileOpenType_03(t *testing.T) {
	cfg := FileOpenConfig{}
	if err := cfg.SetFileOpenType(FileOpenType(-99)); err == nil {
		t.Error("Expected an error return from fOpStatus.\n" +
			"SetFileOpenType(FileOpenType(-99)) because FileType== -99.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
// TestFileOpenConfig_SetFileOpenType_04 verifies that TypeNone can be set
// on a zero-value configuration and is reported back by GetFileOpenType.
//
// Fix: the failure message previously read "SetFileOpenType{}.New()" —
// a copy-paste from other tests' New() checks.
func TestFileOpenConfig_SetFileOpenType_04(t *testing.T) {
	expectedFOpenType := FOpenType.TypeNone()
	fOpCfg := FileOpenConfig{}
	err := fOpCfg.SetFileOpenType(expectedFOpenType)
	if err != nil {
		t.Errorf("Error returned by fOpCfg.SetFileOpenType(expectedFOpenType).\n" +
			"Error='%v' \n", err.Error())
		return
	}
	actualFileOpenType := fOpCfg.GetFileOpenType()
	if expectedFOpenType != actualFileOpenType {
		t.Errorf("Error: Expected File Open Type='%v'.\n"+
			"Instead, actual File Open Type='%v' \n",
			expectedFOpenType.String(), actualFileOpenType.String())
	}
}
// TestFileOpenConfig_SetFileOpenModes_01 verifies that replacing both the
// type and the modes of an initially empty configuration yields the
// expected composite open code (WriteOnly|Append|Create).
func TestFileOpenConfig_SetFileOpenModes_01(t *testing.T) {
	wantCode := os.O_WRONLY | os.O_APPEND | os.O_CREATE
	fOpStatus, err := FileOpenConfig{}.New(FOpenType.TypeNone(),
		FOpenMode.ModeNone())
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.New(FOpenType.TypeNone(),"+
			"FOpenMode.ModeNone()).\nError='%v'\n", err.Error())
		return
	}
	if err = fOpStatus.SetFileOpenType(FOpenType.TypeWriteOnly()); err != nil {
		t.Errorf("Error returned by fOpStatus.SetFileOpenType(FOpenType.TypeWriteOnly()).\nError='%v' \n",
			err.Error())
	}
	if err = fOpStatus.SetFileOpenModes(FOpenMode.ModeAppend(), FOpenMode.ModeCreate()); err != nil {
		t.Errorf("Error returned by fOpStatus.SetFileOpenModes(FOpenMode.ModeAppend(), "+
			"FOpenMode.ModeCreate()).\nError='%v' \n", err.Error())
	}
	gotCode, err := fOpStatus.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.GetCompositeFileOpenCode().\nError='%v' \n",
			err.Error())
	}
	if wantCode != gotCode {
		t.Errorf("Error: Expected File Open Code='%v'.\nInstead, actual File Open Code='%v' \n",
			wantCode, gotCode)
	}
}
// TestFileOpenConfig_SetFileOpenModes_02 verifies SetFileOpenModes works
// on a hand-built configuration whose modes slice starts out nil.
func TestFileOpenConfig_SetFileOpenModes_02(t *testing.T) {
	wantCode := os.O_WRONLY | os.O_APPEND | os.O_CREATE
	// Build the starting state directly, bypassing New().
	fOpStatus := FileOpenConfig{}
	fOpStatus.fileOpenType = FOpenType.TypeWriteOnly()
	fOpStatus.fileOpenModes = nil
	if err := fOpStatus.SetFileOpenModes(FOpenMode.ModeAppend(), FOpenMode.ModeCreate()); err != nil {
		t.Errorf("Error returned by fOpStatus.SetFileOpenModes(FOpenMode.ModeAppend(), "+
			"FOpenMode.ModeCreate()).\nError='%v' \n", err.Error())
	}
	gotCode, err := fOpStatus.GetCompositeFileOpenCode()
	if err != nil {
		t.Errorf("Error returned by FileOpenConfig{}.GetCompositeFileOpenCode().\nError='%v' \n",
			err.Error())
	}
	if wantCode != gotCode {
		t.Errorf("Error: Expected File Open Code='%v'.\nInstead, actual File Open Code='%v' \n",
			wantCode, gotCode)
	}
}
// TestFileOpenConfig_SetFileOpenModes_03 verifies that calling
// SetFileOpenModes with no arguments succeeds, but leaves a configuration
// that IsValid still reports as uninitialized.
func TestFileOpenConfig_SetFileOpenModes_03(t *testing.T) {
	cfg := FileOpenConfig{}
	cfg.fileOpenType = FOpenType.TypeWriteOnly()
	if err := cfg.SetFileOpenModes(); err != nil {
		t.Errorf("Error returned by fOpnCfg.SetFileOpenModes()\nError='%v' \n",
			err.Error())
	}
	if err := cfg.IsValid(); err == nil {
		t.Error("Expected an error return from fOpnCfg.IsValid()\n" +
			"because file modes config shows as uninitialized.\n" +
			"However, NO ERROR WAS RETURNED!\n")
	}
}
|
/*
Copyright 2021 CodeNotary, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"context"
"fmt"
"github.com/codenotary/immudb/pkg/errors"
"io/ioutil"
"log"
"net"
"os"
"os/signal"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"unicode"
pgsqlsrv "github.com/codenotary/immudb/pkg/pgsql/server"
"github.com/codenotary/immudb/pkg/stream"
"github.com/codenotary/immudb/pkg/database"
"github.com/codenotary/immudb/pkg/logger"
"github.com/codenotary/immudb/pkg/signer"
"github.com/codenotary/immudb/cmd/helper"
"github.com/codenotary/immudb/cmd/version"
"github.com/codenotary/immudb/pkg/api/schema"
"github.com/codenotary/immudb/pkg/auth"
"github.com/golang/protobuf/ptypes/empty"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/status"
)
const (
	// KeyPrefixUser — all user keys in the key/value store are prefixed by
	// this key to distinguish them from keys that have other purposes.
	KeyPrefixUser = iota + 1
)
// startedAt records the moment Start() begins serving; presumably consumed
// by the server-uptime metric — confirm against metricFuncServerUptimeCounter.
var startedAt time.Time

// immudbTextLogo is the ASCII-art banner printed to stdout (and optionally
// the log file) during Initialize().
var immudbTextLogo = " _ _ _ \n" +
	"(_) | | | \n" +
	" _ _ __ ___ _ __ ___ _ _ __| | |__ \n" +
	"| | '_ ` _ \\| '_ ` _ \\| | | |/ _` | '_ \\ \n" +
	"| | | | | | | | | | | | |_| | (_| | |_) |\n" +
	"|_|_| |_| |_|_| |_| |_|\\__,_|\\__,_|_.__/ \n"
// Initialize initializes dependencies, sets up multi-database capabilities
// and stats. It must be called before Start(). The setup order matters:
// databases are loaded before the listener, UUID, interceptors and the
// gRPC/pgsql servers are created.
func (s *ImmuServer) Initialize() error {
	// Print banner, version and options to stdout; a write failure here is
	// only logged, not fatal.
	_, err := fmt.Fprintf(os.Stdout, "%s\n%s\n%s\n\n", immudbTextLogo, version.VersionStr(), s.Options)
	logErr(s.Logger, "Error printing immudb config: %v", err)
	if s.Options.Logfile != "" {
		s.Logger.Infof("\n%s\n%s\n%s\n\n", immudbTextLogo, version.VersionStr(), s.Options)
	}
	adminPassword, err := auth.DecodeBase64Password(s.Options.AdminPassword)
	if err != nil {
		return logErr(s.Logger, "%v", err)
	}
	if len(adminPassword) == 0 {
		s.Logger.Errorf(ErrEmptyAdminPassword.Error())
		return ErrEmptyAdminPassword
	}
	dataDir := s.Options.Dir
	// Load order is significant: system db first, then default, then user dbs.
	if err = s.loadSystemDatabase(dataDir, adminPassword); err != nil {
		return logErr(s.Logger, "Unable load system database: %v", err)
	}
	if err = s.loadDefaultDatabase(dataDir); err != nil {
		return logErr(s.Logger, "Unable load default database: %v", err)
	}
	if err = s.loadUserDatabases(dataDir); err != nil {
		return logErr(s.Logger, "Unable load databases: %v", err)
	}
	// Multi-database mode requires authentication to be enabled.
	s.multidbmode = s.mandatoryAuth()
	if !s.Options.GetAuth() && s.multidbmode {
		s.Logger.Infof("Authentication must be on.")
		return fmt.Errorf("auth should be on")
	}
	grpcSrvOpts := []grpc.ServerOption{}
	if s.Options.TLSConfig != nil {
		grpcSrvOpts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(s.Options.TLSConfig))}
	}
	// Optional cryptographic signing of db states.
	if s.Options.SigningKey != "" {
		if signer, err := signer.NewSigner(s.Options.SigningKey); err != nil {
			return logErr(s.Logger, "Unable to configure the cryptographic signer: %v", err)
		} else {
			s.StateSigner = NewStateSigner(signer)
		}
	}
	if s.Options.usingCustomListener {
		s.Logger.Infof("Using custom listener")
		s.listener = s.Options.listener
	} else {
		s.listener, err = net.Listen(s.Options.Network, s.Options.Bind())
		if err != nil {
			return logErr(s.Logger, "Immudb unable to listen: %v", err)
		}
	}
	// NOTE(review): named systemDbRootDir but built from GetDefaultDbName()
	// — confirm which directory getOrSetUUID is supposed to use.
	systemDbRootDir := s.OS.Join(dataDir, s.Options.GetDefaultDbName())
	if s.UUID, err = getOrSetUUID(dataDir, systemDbRootDir); err != nil {
		return logErr(s.Logger, "Unable to get or set uuid: %v", err)
	}
	auth.AuthEnabled = s.Options.GetAuth()
	auth.DevMode = s.Options.DevMode
	auth.UpdateMetrics = func(ctx context.Context) { Metrics.UpdateClientMetrics(ctx) }
	dbSize, _ := s.dbList.GetByIndex(DefaultDbIndex).Size()
	if dbSize <= 0 {
		s.Logger.Infof("Started with an empty database")
	}
	if err = s.setupPidFile(); err != nil {
		return err
	}
	if s.Options.StreamChunkSize < stream.MinChunkSize {
		return errors.New(stream.ErrChunkTooSmall).WithCode(errors.CodInvalidParameterValue)
	}
	//===> !NOTE: See Histograms section here:
	// https://github.com/grpc-ecosystem/go-grpc-prometheus
	// TL;DR:
	// Prometheus histograms are a great way to measure latency distributions of
	// your RPCs. However, since it is bad practice to have metrics of high
	// cardinality the latency monitoring metrics are disabled by default. To
	// enable them the following has to be called during initialization code:
	if !s.Options.NoHistograms {
		grpc_prometheus.EnableHandlingTimeHistogram()
	}
	//<===
	uuidContext := NewUUIDContext(s.UUID)
	uis := []grpc.UnaryServerInterceptor{
		ErrorMapper, // converts errors in gRPC ones. Need to be the first
		uuidContext.UUIDContextSetter,
		grpc_prometheus.UnaryServerInterceptor,
		auth.ServerUnaryInterceptor,
	}
	sss := []grpc.StreamServerInterceptor{
		ErrorMapperStream, // converts errors in gRPC ones. Need to be the first
		uuidContext.UUIDStreamContextSetter,
		grpc_prometheus.StreamServerInterceptor,
		auth.ServerStreamInterceptor,
	}
	grpcSrvOpts = append(
		grpcSrvOpts,
		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(uis...)),
		grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(sss...)),
		grpc.MaxRecvMsgSize(s.Options.MaxRecvMsgSize),
	)
	s.GrpcServer = grpc.NewServer(grpcSrvOpts...)
	schema.RegisterImmuServiceServer(s.GrpcServer, s)
	grpc_prometheus.Register(s.GrpcServer)
	// The pgsql wire-protocol server is always constructed; it is only
	// Initialize'd (and later Serve'd in Start) when the option is enabled.
	s.PgsqlSrv = pgsqlsrv.New(pgsqlsrv.Port(s.Options.PgsqlServerPort), pgsqlsrv.DatabaseList(s.dbList), pgsqlsrv.SysDb(s.sysDb), pgsqlsrv.TlsConfig(s.Options.TLSConfig))
	if s.Options.PgsqlServer {
		if err = s.PgsqlSrv.Initialize(); err != nil {
			return err
		}
	}
	return err
}
// Start starts the immudb server.
// Loads and starts the System DB, default db and user db.
// It blocks on s.quit until Stop() signals shutdown; the metrics and web
// servers (when enabled) are closed on the way out via defers.
func (s *ImmuServer) Start() (err error) {
	// Both mutexes are held while the serving goroutines are spawned and
	// released just before blocking; the goroutines unlock them again only
	// on a fatal serve error.
	s.mux.Lock()
	s.pgsqlMux.Lock()
	if s.Options.MetricsServer {
		if err := s.setUpMetricsServer(); err != nil {
			return err
		}
		defer func() {
			if err := s.metricsServer.Close(); err != nil {
				s.Logger.Errorf("Failed to shutdown metric server: %s", err)
			}
		}()
	}
	s.installShutdownHandler()
	go s.printUsageCallToAction()
	startedAt = time.Now()
	// gRPC serving loop; a serve failure terminates the process.
	go func() {
		if err := s.GrpcServer.Serve(s.listener); err != nil {
			s.mux.Unlock()
			log.Fatal(err)
		}
	}()
	if s.Options.PgsqlServer {
		go func() {
			s.Logger.Infof("pgsl server is running at port %d", s.Options.PgsqlServerPort)
			if err := s.PgsqlSrv.Serve(); err != nil {
				s.pgsqlMux.Unlock()
				log.Fatal(err)
			}
		}()
	}
	if s.Options.WebServer {
		if err := s.setUpWebServer(); err != nil {
			return err
		}
		defer func() {
			if err := s.webServer.Close(); err != nil {
				s.Logger.Errorf("Failed to shutdown web API/console server: %s", err)
			}
		}()
	}
	s.mux.Unlock()
	s.pgsqlMux.Unlock()
	// Block until Stop() (or the shutdown handler) signals termination.
	<-s.quit
	return err
}
// logErr logs err through the supplied logger using formattedMessage when
// err is non-nil, and returns err unchanged in either case, allowing
// `return logErr(...)` call sites.
func logErr(log logger.Logger, formattedMessage string, err error) error {
	if err == nil {
		return nil
	}
	log.Errorf(formattedMessage, err)
	return err
}
// setupPidFile writes the server pidfile when one is configured; it is a
// no-op when Options.Pidfile is empty.
func (s *ImmuServer) setupPidFile() error {
	if s.Options.Pidfile == "" {
		return nil
	}
	pid, err := NewPid(s.Options.Pidfile, s.OS)
	if err != nil {
		return logErr(s.Logger, "Failed to write pidfile: %s", err)
	}
	s.Pid = pid
	return nil
}
// setUpMetricsServer starts the Prometheus metrics HTTP server with a
// one-minute update interval and records it on s.metricsServer.
// It always returns nil; StartMetrics handles its own failures internally.
func (s *ImmuServer) setUpMetricsServer() error {
	s.metricsServer = StartMetrics(
		1*time.Minute,
		s.Options.MetricsBind(),
		s.Logger,
		s.metricFuncServerUptimeCounter,
		s.metricFuncComputeDBSizes,
		s.metricFuncComputeDBEntries,
	)
	return nil
}
// setUpWebServer starts the web API/console server and records it on
// s.webServer; s.webServer is left untouched when startup fails.
func (s *ImmuServer) setUpWebServer() error {
	srv, err := StartWebServer(
		s.Options.WebBind(),
		s.Options.TLSConfig,
		s,
		s.Logger,
	)
	if err != nil {
		return err
	}
	s.webServer = srv
	return nil
}
// printUsageCallToAction prints a colorized hint to stdout telling the user
// how to connect with the immuadmin/immuclient CLIs, and mirrors a plain
// version to the log file when one is configured. The short sleep lets the
// startup banner/log lines land first.
func (s *ImmuServer) printUsageCallToAction() {
	time.Sleep(200 * time.Millisecond)
	immuadminCLI := helper.Blue + "immuadmin" + helper.Green
	immuclientCLI := helper.Blue + "immuclient" + helper.Green
	defaultUsername := helper.Blue + auth.SysAdminUsername + helper.Green
	fmt.Fprintf(os.Stdout,
		"%sYou can now use %s and %s CLIs to login with the %s superadmin user and start using immudb.%s\n",
		helper.Green, immuadminCLI, immuclientCLI, defaultUsername, helper.Reset)
	if s.Options.Logfile != "" {
		s.Logger.Infof(
			"You can now use immuadmin and immuclient CLIs to login with the %s superadmin user and start using immudb.\n",
			auth.SysAdminUsername)
	}
}
// loadSystemDatabase opens the system-admin database, creating it (together
// with the sys-admin user) on first run. It must run before any other
// database is loaded, which is why a non-empty dbList is a programmer error.
func (s *ImmuServer) loadSystemDatabase(dataDir string, adminPassword string) error {
	if s.dbList.Length() != 0 {
		panic("loadSystemDatabase should be called before any other database loading")
	}
	systemDbRootDir := s.OS.Join(dataDir, s.Options.GetSystemAdminDbName())
	// The system db is always synced to disk.
	storeOpts := DefaultStoreOptions().WithSynced(true)
	// NOTE(review): WithDbRootPath is called twice (dataDir, then
	// s.Options.Dir); the second presumably wins — confirm and drop one.
	op := database.DefaultOption().
		WithDbName(s.Options.GetSystemAdminDbName()).
		WithDbRootPath(dataDir).
		WithDbRootPath(s.Options.Dir).
		WithStoreOptions(storeOpts)
	_, sysDbErr := s.OS.Stat(systemDbRootDir)
	if s.OS.IsNotExist(sysDbErr) {
		// First run: create the db and seed the sys-admin user.
		db, err := database.NewDb(op, nil, s.Logger)
		if err != nil {
			return err
		}
		s.sysDb = db
		//sys admin can have an empty array of databases as it has full access
		adminUsername, _, err := s.insertNewUser([]byte(auth.SysAdminUsername), []byte(adminPassword), auth.PermissionSysAdmin, "*", false, "")
		if err != nil {
			return logErr(s.Logger, "%v", err)
		}
		s.Logger.Infof("Admin user %s successfully created", adminUsername)
	} else {
		// Subsequent runs: open the existing db.
		db, err := database.OpenDb(op, nil, s.Logger)
		if err != nil {
			return err
		}
		s.sysDb = db
	}
	return nil
}
// loadDefaultDatabase opens the default database, creating it on first run.
// It must run immediately after loadSystemDatabase, before any user db is
// appended to dbList — hence the panic on a non-empty list.
func (s *ImmuServer) loadDefaultDatabase(dataDir string) error {
	if s.dbList.Length() != 0 {
		panic("loadDefaultDatabase should be called right after loading systemDatabase")
	}
	defaultDbRootDir := s.OS.Join(dataDir, s.Options.GetDefaultDbName())
	// NOTE(review): WithDbRootPath is called twice (dataDir, then
	// s.Options.Dir); the second presumably wins — confirm and drop one.
	op := database.DefaultOption().
		WithDbName(s.Options.GetDefaultDbName()).
		WithDbRootPath(dataDir).
		WithDbRootPath(s.Options.Dir).
		WithStoreOptions(s.Options.StoreOptions)
	_, defaultDbErr := s.OS.Stat(defaultDbRootDir)
	if s.OS.IsNotExist(defaultDbErr) {
		db, err := database.NewDb(op, s.sysDb, s.Logger)
		if err != nil {
			return err
		}
		s.dbList.Append(db)
	} else {
		db, err := database.OpenDb(op, s.sysDb, s.Logger)
		if err != nil {
			return err
		}
		s.dbList.Append(db)
	}
	return nil
}
// loadUserDatabases opens every user-created database found as a first-level
// subdirectory of the data dir, skipping the system and default databases
// (already loaded). Each opened db is appended to s.dbList.
func (s *ImmuServer) loadUserDatabases(dataDir string) error {
	var dirs []string
	//get first level sub directories of data dir
	files, err := ioutil.ReadDir(s.Options.Dir)
	if err != nil {
		return err
	}
	for _, f := range files {
		if !f.IsDir() ||
			f.Name() == s.Options.GetSystemAdminDbName() ||
			f.Name() == s.Options.GetDefaultDbName() {
			continue
		}
		dirs = append(dirs, f.Name())
	}
	//load databases that are inside each directory
	for _, val := range dirs {
		//dbname is the directory name where it is stored
		//path iteration above stores the directories as data/db_name
		// NOTE(review): f.Name() never contains a path separator, so this
		// Split always yields a single element — likely vestigial.
		pathparts := strings.Split(val, string(filepath.Separator))
		dbname := pathparts[len(pathparts)-1]
		// NOTE(review): WithDbRootPath is called twice (dataDir, then
		// s.Options.Dir); the second presumably wins — confirm and drop one.
		op := database.DefaultOption().
			WithDbName(dbname).
			WithDbRootPath(dataDir).
			WithDbRootPath(s.Options.Dir).
			WithStoreOptions(s.Options.StoreOptions)
		db, err := database.OpenDb(op, s.sysDb, s.Logger)
		if err != nil {
			return err
		}
		s.dbList.Append(db)
	}
	return nil
}
// Stop stops the immudb server: it halts the gRPC server (unless a custom
// listener is in use), closes all databases, and finally signals s.quit so
// that Start() can return.
func (s *ImmuServer) Stop() error {
	s.mux.Lock()
	defer s.mux.Unlock()
	s.Logger.Infof("Stopping immudb:\n%v", s.Options)
	// Signal Start()'s <-s.quit last, after everything below has run.
	defer func() { s.quit <- struct{}{} }()
	if !s.Options.usingCustomListener {
		s.GrpcServer.Stop()
		defer func() { s.GrpcServer = nil }()
	}
	return s.CloseDatabases()
}
// CloseDatabases closes all opened databases including the consistency
// checker; Close errors are intentionally ignored (best-effort shutdown).
func (s *ImmuServer) CloseDatabases() error {
	for idx := int64(0); idx < int64(s.dbList.Length()); idx++ {
		s.dbList.GetByIndex(idx).Close()
	}
	if s.sysDb != nil {
		s.sysDb.Close()
	}
	return nil
}
// updateConfigItem rewrites (or appends) a single "key = value" line in the
// server config file.
//
// key is the config key to search for, newOrUpdatedLine is the complete
// replacement line, and unchanged reports whether the current value already
// matches the desired one — in that case an error is returned and the file
// is left untouched. Errors are also returned when no config file is set or
// when reading/writing it fails.
//
// Fix: the value was previously extracted with strings.Split(l, "="), which
// truncates values that themselves contain '=' (e.g. base64 padding); use
// SplitN with a limit of 2 and trim surrounding whitespace so the
// "key = value" form compares the same as "key=value".
func (s *ImmuServer) updateConfigItem(key string, newOrUpdatedLine string, unchanged func(string) bool) error {
	configFilepath := s.Options.Config
	if strings.TrimSpace(configFilepath) == "" {
		return fmt.Errorf("config file does not exist")
	}
	configBytes, err := s.OS.ReadFile(configFilepath)
	if err != nil {
		return fmt.Errorf("error reading config file %s: %v", configFilepath, err)
	}
	configLines := strings.Split(string(configBytes), "\n")
	write := false
	for i, l := range configLines {
		l = strings.TrimSpace(l)
		if strings.HasPrefix(l, key+"=") || strings.HasPrefix(l, key+" =") {
			// Split on the FIRST '=' only; values may legitimately contain '='.
			kv := strings.SplitN(l, "=", 2)
			if unchanged(strings.TrimSpace(kv[1])) {
				return fmt.Errorf("Server config already has %s", newOrUpdatedLine)
			}
			configLines[i] = newOrUpdatedLine
			write = true
			break
		}
	}
	if !write {
		// Key not present yet: append it as a new line.
		configLines = append(configLines, newOrUpdatedLine)
	}
	if err := s.OS.WriteFile(configFilepath, []byte(strings.Join(configLines, "\n")), 0644); err != nil {
		return err
	}
	return nil
}
// UpdateAuthConfig toggles authentication on/off at runtime (req.Kind > 0
// enables it) and persists the new setting to the server config file.
// The in-memory flags are flipped even when persisting fails, in which case
// the returned error says so.
func (s *ImmuServer) UpdateAuthConfig(ctx context.Context, req *schema.AuthConfig) (*empty.Empty, error) {
	// Resolve the caller's db purely as an authorization check.
	_, err := s.getDbIndexFromCtx(ctx, "UpdateAuthConfig")
	if err != nil {
		return nil, err
	}
	e := new(empty.Empty)
	s.Options.WithAuth(req.GetKind() > 0)
	auth.AuthEnabled = s.Options.GetAuth()
	if err := s.updateConfigItem(
		"auth",
		fmt.Sprintf("auth = %t", auth.AuthEnabled),
		func(currValue string) bool {
			b, err := strconv.ParseBool(currValue)
			return err == nil && b == auth.AuthEnabled
		},
	); err != nil {
		return e, fmt.Errorf(
			"auth set to %t, but config file could not be updated: %v",
			auth.AuthEnabled, err)
	}
	return e, nil
}
// UpdateMTLSConfig persists the requested MTLS on/off setting to the server
// config file; a server restart is required for it to take effect.
func (s *ImmuServer) UpdateMTLSConfig(ctx context.Context, req *schema.MTLSConfig) (*empty.Empty, error) {
	// Resolve the caller's db purely as an authorization check.
	_, err := s.getDbIndexFromCtx(ctx, "UpdateMTLSConfig")
	if err != nil {
		return nil, err
	}
	e := new(empty.Empty)
	if err := s.updateConfigItem(
		"mtls",
		fmt.Sprintf("mtls = %t", req.GetEnabled()),
		func(currValue string) bool {
			b, err := strconv.ParseBool(currValue)
			return err == nil && b == req.GetEnabled()
		},
	); err != nil {
		return e, fmt.Errorf("MTLS could not be set to %t: %v", req.GetEnabled(), err)
	}
	// NOTE(review): per the gRPC status package, status.Errorf with codes.OK
	// returns a nil error, so the "restart required" message below is never
	// delivered to the client — confirm whether that is intended.
	return e, status.Errorf(
		codes.OK,
		"MTLS set to %t in server config, but server restart is required for it to take effect.",
		req.GetEnabled())
}
// Health reports the health of the caller's database; when no database can
// be resolved from ctx the default database is checked instead.
func (s *ImmuServer) Health(ctx context.Context, e *empty.Empty) (*schema.HealthResponse, error) {
	ind, _ := s.getDbIndexFromCtx(ctx, "Health")
	if ind >= 0 {
		return s.dbList.GetByIndex(ind).Health(e)
	}
	//probably immuclient hasn't logged in yet
	return s.dbList.GetByIndex(DefaultDbIndex).Health(e)
}
// CurrentState returns the current immutable state of the caller's database,
// annotated with the db name and — when a signing key is configured —
// cryptographically signed.
func (s *ImmuServer) CurrentState(ctx context.Context, e *empty.Empty) (*schema.ImmutableState, error) {
	ind, err := s.getDbIndexFromCtx(ctx, "CurrentState")
	if err != nil {
		return nil, err
	}
	state, err := s.dbList.GetByIndex(ind).CurrentState()
	if err != nil {
		return nil, err
	}
	state.Db = s.dbList.GetByIndex(ind).GetOptions().GetDbName()
	if s.Options.SigningKey != "" {
		err = s.StateSigner.Sign(state)
		if err != nil {
			return nil, err
		}
	}
	return state, nil
}
// Set resolves the caller's database from ctx and stores the supplied
// key/value entries in it.
func (s *ImmuServer) Set(ctx context.Context, kv *schema.SetRequest) (*schema.TxMetadata, error) {
	dbIdx, err := s.getDbIndexFromCtx(ctx, "Set")
	if err != nil {
		return nil, err
	}
	db := s.dbList.GetByIndex(dbIdx)
	return db.Set(kv)
}
// VerifiableSet stores entries in the caller's database and returns a
// verifiable transaction; when a signing key is configured the resulting
// state is signed and the signature attached to the response.
func (s *ImmuServer) VerifiableSet(ctx context.Context, req *schema.VerifiableSetRequest) (*schema.VerifiableTx, error) {
	ind, err := s.getDbIndexFromCtx(ctx, "VerifiableSet")
	if err != nil {
		return nil, err
	}
	vtx, err := s.dbList.GetByIndex(ind).VerifiableSet(req)
	if err != nil {
		return nil, err
	}
	if s.Options.SigningKey != "" {
		// Derive the new state (tx id + accumulated hash) from the proof
		// and sign it.
		md := schema.TxMetadataFrom(vtx.DualProof.TargetTxMetadata)
		alh := md.Alh()
		newState := &schema.ImmutableState{
			Db:     s.dbList.GetByIndex(ind).GetOptions().GetDbName(),
			TxId:   md.ID,
			TxHash: alh[:],
		}
		err = s.StateSigner.Sign(newState)
		if err != nil {
			return nil, err
		}
		vtx.Signature = newState.Signature
	}
	return vtx, nil
}
// Get resolves the caller's database from ctx and fetches the entry for
// the requested key.
func (s *ImmuServer) Get(ctx context.Context, req *schema.KeyRequest) (*schema.Entry, error) {
	dbIdx, err := s.getDbIndexFromCtx(ctx, "Get")
	if err != nil {
		return nil, err
	}
	db := s.dbList.GetByIndex(dbIdx)
	return db.Get(req)
}
// VerifiableGet fetches an entry together with its inclusion proof; when a
// signing key is configured the derived state is signed and the signature
// attached to the response.
func (s *ImmuServer) VerifiableGet(ctx context.Context, req *schema.VerifiableGetRequest) (*schema.VerifiableEntry, error) {
	ind, err := s.getDbIndexFromCtx(ctx, "VerifiableGet")
	if err != nil {
		return nil, err
	}
	vEntry, err := s.dbList.GetByIndex(ind).VerifiableGet(req)
	if err != nil {
		return nil, err
	}
	if s.Options.SigningKey != "" {
		// Derive the new state (tx id + accumulated hash) from the proof
		// and sign it.
		md := schema.TxMetadataFrom(vEntry.VerifiableTx.DualProof.TargetTxMetadata)
		alh := md.Alh()
		newState := &schema.ImmutableState{
			Db:     s.dbList.GetByIndex(ind).GetOptions().GetDbName(),
			TxId:   md.ID,
			TxHash: alh[:],
		}
		err = s.StateSigner.Sign(newState)
		if err != nil {
			return nil, err
		}
		vEntry.VerifiableTx.Signature = newState.Signature
	}
	return vEntry, nil
}
// Scan resolves the caller's database from ctx and runs a key-prefix scan.
func (s *ImmuServer) Scan(ctx context.Context, req *schema.ScanRequest) (*schema.Entries, error) {
	dbIdx, err := s.getDbIndexFromCtx(ctx, "Scan")
	if err != nil {
		return nil, err
	}
	db := s.dbList.GetByIndex(dbIdx)
	return db.Scan(req)
}
// Count is not yet supported: it always returns an error. The previous
// implementation is kept commented out below for when support returns.
func (s *ImmuServer) Count(ctx context.Context, prefix *schema.KeyPrefix) (*schema.EntryCount, error) {
	/*s.Logger.Debugf("count %s", prefix.Prefix)
	ind, err := s.getDbIndexFromCtx(ctx, "Count")
	if err != nil {
		return nil, err
	}
	return s.dbList.GetByIndex(ind).Count(prefix)
	*/
	return nil, errors.New("Functionality not yet supported")
}
// CountAll is not yet supported: it always returns an error. The previous
// implementation is kept commented out below for when support returns.
func (s *ImmuServer) CountAll(ctx context.Context, e *empty.Empty) (*schema.EntryCount, error) {
	/*ind, err := s.getDbIndexFromCtx(ctx, "CountAll")
	s.Logger.Debugf("count all for db index %d", ind)
	if err != nil {
		return nil, err
	}
	return s.dbList.GetByIndex(ind).CountAll()
	*/
	return nil, errors.New("Functionality not yet supported")
}
// TxById resolves the caller's database from ctx and returns the
// transaction with the requested id.
func (s *ImmuServer) TxById(ctx context.Context, req *schema.TxRequest) (*schema.Tx, error) {
	dbIdx, err := s.getDbIndexFromCtx(ctx, "TxByID")
	if err != nil {
		return nil, err
	}
	db := s.dbList.GetByIndex(dbIdx)
	return db.TxByID(req)
}
// VerifiableTxById returns a transaction together with its proof; when a
// signing key is configured the derived state is signed and the signature
// attached to the response.
func (s *ImmuServer) VerifiableTxById(ctx context.Context, req *schema.VerifiableTxRequest) (*schema.VerifiableTx, error) {
	ind, err := s.getDbIndexFromCtx(ctx, "VerifiableTxByID")
	if err != nil {
		return nil, err
	}
	vtx, err := s.dbList.GetByIndex(ind).VerifiableTxByID(req)
	if err != nil {
		return nil, err
	}
	if s.Options.SigningKey != "" {
		// Derive the new state (tx id + accumulated hash) from the proof
		// and sign it.
		md := schema.TxMetadataFrom(vtx.DualProof.TargetTxMetadata)
		alh := md.Alh()
		newState := &schema.ImmutableState{
			Db:     s.dbList.GetByIndex(ind).GetOptions().GetDbName(),
			TxId:   md.ID,
			TxHash: alh[:],
		}
		err = s.StateSigner.Sign(newState)
		if err != nil {
			return nil, err
		}
		vtx.Signature = newState.Signature
	}
	return vtx, nil
}
// TxScan scans transactions in the database selected by the caller's context.
func (s *ImmuServer) TxScan(ctx context.Context, req *schema.TxScanRequest) (*schema.TxList, error) {
	dbIndex, err := s.getDbIndexFromCtx(ctx, "TxScan")
	if err != nil {
		return nil, err
	}
	db := s.dbList.GetByIndex(dbIndex)
	return db.TxScan(req)
}
// History returns the value history of a key from the database selected by
// the caller's context.
func (s *ImmuServer) History(ctx context.Context, req *schema.HistoryRequest) (*schema.Entries, error) {
	dbIndex, err := s.getDbIndexFromCtx(ctx, "History")
	if err != nil {
		return nil, err
	}
	db := s.dbList.GetByIndex(dbIndex)
	return db.History(req)
}
// SetReference creates a reference entry in the database selected by the
// caller's context.
func (s *ImmuServer) SetReference(ctx context.Context, req *schema.ReferenceRequest) (*schema.TxMetadata, error) {
	dbIndex, err := s.getDbIndexFromCtx(ctx, "SetReference")
	if err != nil {
		return nil, err
	}
	db := s.dbList.GetByIndex(dbIndex)
	return db.SetReference(req)
}
// VerifiableSetReference creates a reference entry with a verifiable (dual)
// proof in the database selected by the caller's context; when a signing key
// is configured, the resulting state is signed.
func (s *ImmuServer) VerifiableSetReference(ctx context.Context, req *schema.VerifiableReferenceRequest) (*schema.VerifiableTx, error) {
	// Resolve the target database index from the logged-in user's context.
	ind, err := s.getDbIndexFromCtx(ctx, "VerifiableSetReference")
	if err != nil {
		return nil, err
	}
	vtx, err := s.dbList.GetByIndex(ind).VerifiableSetReference(req)
	if err != nil {
		return nil, err
	}
	if s.Options.SigningKey != "" {
		// Sign the (db name, tx id, accumulated hash) state derived from the
		// dual proof's target transaction metadata.
		md := schema.TxMetadataFrom(vtx.DualProof.TargetTxMetadata)
		alh := md.Alh()
		newState := &schema.ImmutableState{
			Db:     s.dbList.GetByIndex(ind).GetOptions().GetDbName(),
			TxId:   md.ID,
			TxHash: alh[:],
		}
		err = s.StateSigner.Sign(newState)
		if err != nil {
			return nil, err
		}
		vtx.Signature = newState.Signature
	}
	return vtx, nil
}
// ZAdd adds an entry to a sorted set in the database selected by the caller's
// context.
func (s *ImmuServer) ZAdd(ctx context.Context, req *schema.ZAddRequest) (*schema.TxMetadata, error) {
	dbIndex, err := s.getDbIndexFromCtx(ctx, "ZAdd")
	if err != nil {
		return nil, err
	}
	db := s.dbList.GetByIndex(dbIndex)
	return db.ZAdd(req)
}
// ZScan iterates a sorted set in the database selected by the caller's
// context.
func (s *ImmuServer) ZScan(ctx context.Context, req *schema.ZScanRequest) (*schema.ZEntries, error) {
	dbIndex, err := s.getDbIndexFromCtx(ctx, "ZScan")
	if err != nil {
		return nil, err
	}
	db := s.dbList.GetByIndex(dbIndex)
	return db.ZScan(req)
}
// VerifiableZAdd adds an entry to a sorted set with a verifiable (dual) proof
// in the database selected by the caller's context; when a signing key is
// configured, the resulting state is signed.
func (s *ImmuServer) VerifiableZAdd(ctx context.Context, req *schema.VerifiableZAddRequest) (*schema.VerifiableTx, error) {
	// Resolve the target database index from the logged-in user's context.
	ind, err := s.getDbIndexFromCtx(ctx, "VerifiableZAdd")
	if err != nil {
		return nil, err
	}
	vtx, err := s.dbList.GetByIndex(ind).VerifiableZAdd(req)
	if err != nil {
		return nil, err
	}
	if s.Options.SigningKey != "" {
		// Sign the (db name, tx id, accumulated hash) state derived from the
		// dual proof's target transaction metadata.
		md := schema.TxMetadataFrom(vtx.DualProof.TargetTxMetadata)
		alh := md.Alh()
		newState := &schema.ImmutableState{
			Db:     s.dbList.GetByIndex(ind).GetOptions().GetDbName(),
			TxId:   md.ID,
			TxHash: alh[:],
		}
		err = s.StateSigner.Sign(newState)
		if err != nil {
			return nil, err
		}
		vtx.Signature = newState.Signature
	}
	return vtx, nil
}
// installShutdownHandler traps SIGINT/SIGTERM in a background goroutine and
// performs a graceful server shutdown when a signal arrives.
func (s *ImmuServer) installShutdownHandler() {
	// signal.Notify does not block when sending, so the channel must be
	// buffered; with an unbuffered channel a signal delivered before the
	// goroutine is receiving would be dropped (go vet flags this).
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-c
		s.Logger.Infof("Caught SIGTERM")
		if err := s.Stop(); err != nil {
			s.Logger.Errorf("Shutdown error: %v", err)
		}
		s.Logger.Infof("Shutdown completed")
	}()
}
// CreateDatabase creates a new database instance. Only the system admin may
// create databases; the name must be lowercase, not reserved, valid per
// IsAllowedDbName, and not already in use.
func (s *ImmuServer) CreateDatabase(ctx context.Context, newdb *schema.Database) (*empty.Empty, error) {
	s.Logger.Debugf("createdatabase")
	if !s.Options.GetAuth() {
		return nil, fmt.Errorf("this command is available only with authentication on")
	}
	_, user, err := s.getLoggedInUserdataFromCtx(ctx)
	if err != nil {
		return nil, fmt.Errorf("could not get loggedin user data")
	}
	if !user.IsSysAdmin {
		return nil, fmt.Errorf("Logged In user does not have permissions for this operation")
	}
	if newdb.DatabaseName == SystemdbName {
		return nil, fmt.Errorf("this database name is reserved")
	}
	if strings.ToLower(newdb.DatabaseName) != newdb.DatabaseName {
		return nil, fmt.Errorf("provide a lowercase database name")
	}
	newdb.DatabaseName = strings.ToLower(newdb.DatabaseName)
	if err = IsAllowedDbName(newdb.DatabaseName); err != nil {
		return nil, err
	}
	// Check whether a database with this name already exists.
	if s.dbList.GetId(newdb.GetDatabaseName()) >= 0 {
		return nil, fmt.Errorf("database %s already exists", newdb.GetDatabaseName())
	}
	// Fix: the root path was previously set twice with the same value
	// (via a redundant dataDir local); a single call is sufficient.
	op := database.DefaultOption().
		WithDbName(newdb.DatabaseName).
		WithDbRootPath(s.Options.Dir).
		WithStoreOptions(s.Options.StoreOptions)
	db, err := database.NewDb(op, s.sysDb, s.Logger)
	if err != nil {
		s.Logger.Errorf(err.Error())
		return nil, err
	}
	s.dbList.Append(db)
	// More than one database now exists, so multi-db mode is on.
	s.multidbmode = true
	return &empty.Empty{}, nil
}
// DatabaseList returns the databases visible to the requesting user: every
// user database (excluding the system database) for a sysadmin or in
// maintenance mode, otherwise only the databases the user holds permissions
// on.
func (s *ImmuServer) DatabaseList(ctx context.Context, req *empty.Empty) (*schema.DatabaseListResponse, error) {
	s.Logger.Debugf("DatabaseList")
	if !s.Options.GetAuth() {
		return nil, fmt.Errorf("this command is available only with authentication on")
	}
	_, loggedInuser, err := s.getLoggedInUserdataFromCtx(ctx)
	if err != nil {
		return nil, fmt.Errorf("please login")
	}
	resp := &schema.DatabaseListResponse{}
	if loggedInuser.IsSysAdmin || s.Options.GetMaintenance() {
		for i := 0; i < s.dbList.Length(); i++ {
			name := s.dbList.GetByIndex(int64(i)).GetOptions().GetDbName()
			// The system database is never exposed in the listing.
			if name == SystemdbName {
				continue
			}
			resp.Databases = append(resp.Databases, &schema.Database{DatabaseName: name})
		}
		return resp, nil
	}
	for _, perm := range loggedInuser.Permissions {
		resp.Databases = append(resp.Databases, &schema.Database{DatabaseName: perm.Database})
	}
	return resp, nil
}
// UseDatabase selects a database for subsequent requests and returns a token
// scoped to it. Outside maintenance mode the caller must be authenticated and
// either be a sysadmin or hold a permission on the database; in maintenance
// mode an ad-hoc anonymous sysadmin user is used instead.
func (s *ImmuServer) UseDatabase(ctx context.Context, db *schema.Database) (*schema.UseDatabaseReply, error) {
	s.Logger.Debugf("UseDatabase %+v", db)
	user := &auth.User{}
	var err error
	if !s.Options.GetMaintenance() {
		if !s.Options.GetAuth() {
			return nil, fmt.Errorf("this command is available only with authentication on")
		}
		_, user, err = s.getLoggedInUserdataFromCtx(ctx)
		if err != nil {
			// Expired tokens are reported distinctly so clients can re-login.
			if strings.HasPrefix(fmt.Sprintf("%s", err), "token has expired") {
				return nil, status.Error(
					codes.PermissionDenied, err.Error())
			}
			return nil, status.Errorf(codes.Unauthenticated, "Please login")
		}
		if db.DatabaseName == SystemdbName {
			return nil, fmt.Errorf("this database can not be selected")
		}
		//check if this user has permission on this database
		//if sysadmin allow to continue
		if (!user.IsSysAdmin) &&
			(!user.HasPermission(db.DatabaseName, auth.PermissionAdmin)) &&
			(!user.HasPermission(db.DatabaseName, auth.PermissionR)) &&
			(!user.HasPermission(db.DatabaseName, auth.PermissionRW)) {
			return nil, status.Errorf(codes.PermissionDenied,
				"Logged in user does not have permission on this database")
		}
	} else {
		// Maintenance mode: act as an anonymous sysadmin user.
		user.IsSysAdmin = true
		user.Username = ""
		s.addUserToLoginList(user)
	}
	//check if database exists
	dbid := s.dbList.GetId(db.DatabaseName)
	if dbid < 0 {
		return nil, status.Errorf(codes.NotFound, fmt.Sprintf("%s does not exist", db.DatabaseName))
	}
	// The returned token embeds the selected database index.
	token, err := auth.GenerateToken(*user, dbid, s.Options.TokenExpiryTimeMin)
	if err != nil {
		return nil, err
	}
	return &schema.UseDatabaseReply{
		Token: token,
	}, nil
}
// CleanIndex triggers index compaction on the database selected by the
// caller's context.
func (s *ImmuServer) CleanIndex(ctx context.Context, req *empty.Empty) (*empty.Empty, error) {
	if req == nil {
		return nil, ErrIllegalArguments
	}
	dbIndex, err := s.getDbIndexFromCtx(ctx, "CleanIndex")
	if err != nil {
		return nil, err
	}
	compactErr := s.dbList.GetByIndex(dbIndex).CompactIndex()
	return &empty.Empty{}, compactErr
}
// getDbIndexFromCtx checks if user (loggedin from context) has access to methodname.
// returns index of database
func (s *ImmuServer) getDbIndexFromCtx(ctx context.Context, methodname string) (int64, error) {
	//if auth is disabled return index zero (defaultdb) as it is the first database created/loaded
	if !s.Options.auth {
		if !s.multidbmode {
			return DefaultDbIndex, nil
		}
	}
	ind, usr, err := s.getLoggedInUserdataFromCtx(ctx)
	if err != nil {
		// Expired tokens are reported distinctly so clients can re-login.
		if strings.HasPrefix(fmt.Sprintf("%s", err), "token has expired") {
			return 0, status.Error(codes.PermissionDenied, err.Error())
		}
		if s.Options.GetMaintenance() {
			return 0, fmt.Errorf("please select database first")
		}
		return 0, fmt.Errorf("please login first")
	}
	// A negative index means no database has been selected via UseDatabase.
	if ind < 0 {
		return 0, fmt.Errorf("please select a database first")
	}
	// Sysadmins bypass the per-method permission check.
	if usr.IsSysAdmin {
		return ind, nil
	}
	if ok := auth.HasPermissionForMethod(usr.WhichPermission(s.dbList.GetByIndex(ind).GetOptions().GetDbName()), methodname); !ok {
		return 0, fmt.Errorf("you do not have permission for this operation")
	}
	return ind, nil
}
// IsAllowedDbName checks if the provided database name meets the requirements:
// between 1 and 128 characters, composed of lowercase letters and digits.
// Punctuation and symbols are reported with a dedicated error; any other
// character (e.g. uppercase letters or whitespace) is rejected as
// unrecognized, and that error takes precedence.
func IsAllowedDbName(dbName string) error {
	if len(dbName) < 1 || len(dbName) > 128 {
		return fmt.Errorf("database name length outside of limits")
	}
	sawSpecial := false
	for _, r := range dbName {
		if unicode.IsLower(r) || unicode.IsDigit(r) {
			continue
		}
		if unicode.IsPunct(r) || unicode.IsSymbol(r) {
			// Remember it but keep scanning: an unrecognized character later
			// in the name takes precedence over the special-character error.
			sawSpecial = true
			continue
		}
		return fmt.Errorf("unrecognized character in database name")
	}
	if sawSpecial {
		return fmt.Errorf("punctuation marks and symbols are not allowed in database name")
	}
	return nil
}
// mandatoryAuth checks if auth should be mandatory for immudb to start: it
// must be on as soon as any user-created database exists, or any user other
// than the builtin sysadmin has been created.
func (s *ImmuServer) mandatoryAuth() bool {
	// Maintenance mode never forces auth.
	if s.Options.GetMaintenance() {
		return false
	}
	//check if there are user created databases, should be zero for auth to be off
	for i := 0; i < s.dbList.Length(); i++ {
		val := s.dbList.GetByIndex(int64(i))
		if (val.GetOptions().GetDbName() != s.Options.defaultDbName) &&
			(val.GetOptions().GetDbName() != s.Options.systemAdminDbName) {
			return true
		}
	}
	//check if there is only default database
	if (s.dbList.Length() == 1) && (s.dbList.GetByIndex(DefaultDbIndex).GetOptions().GetDbName() == s.Options.defaultDbName) {
		return false
	}
	if s.sysDb != nil {
		//check if there is only sysadmin on systemdb and no other user
		itemList, err := s.sysDb.Scan(&schema.ScanRequest{
			Prefix: []byte{KeyPrefixUser},
		})
		if err != nil {
			// Fail safe: if the users cannot be listed, require auth.
			s.Logger.Errorf("error getting users: %v", err)
			return true
		}
		for _, val := range itemList.Entries {
			// Keys are KeyPrefixUser followed by the username.
			if len(val.Key) > 2 {
				if auth.SysAdminUsername != string(val.Key[1:]) {
					//another user detected
					return true
				}
			}
		}
		//systemdb exists but there are no other users created
		return false
	}
	return true
}
|
package mat
// World holds the renderable scene: point lights, area lights and the list of
// shapes. Objects carries a yaml tag so scenes can be loaded from YAML files.
type World struct {
	Light     []Light
	AreaLight []AreaLight
	Objects   []Shape `yaml:"objects,flow"`
}
// NewDefaultWorld builds the canonical two-sphere test scene: one white point
// light, an outer sphere with a fixed material, and an inner sphere scaled to
// half size.
func NewDefaultWorld() World {
	outer := NewSphere()
	outer.Material = NewMaterial(NewColor(0.8, 1.0, 0.6), 0.1, 0.7, 0.2, 200)
	outer.Label = "OUTER SPHERE"

	inner := NewSphere()
	inner.Label = "INNER SPHERE"
	inner.SetTransform(Scale(0.5, 0.5, 0.5))

	return World{
		Light:   []Light{NewLight(NewPoint(-10, 10, -10), NewColor(1, 1, 1))},
		Objects: []Shape{outer, inner},
	}
}
// NewWorld returns a world with no objects and an empty (non-nil) light slice.
func NewWorld() World {
	w := World{}
	w.Light = []Light{}
	return w
}
|
package cache
import (
"fmt"
"os"
"path"
"time"
"log"
"github.com/go-co-op/gocron"
"github.com/pilillo/igovium/putters"
"github.com/pilillo/igovium/utils"
"xorm.io/xorm"
)
// dbHistoricizerType removes expired cache entries from the database and hands
// them over for archival; engine is lazily created by Init.
type dbHistoricizerType struct {
	engine *xorm.Engine
}
// NewDBHistoricizer returns a historicizer with no database engine attached;
// Init must be called before use.
func NewDBHistoricizer() *dbHistoricizerType {
	h := &dbHistoricizerType{}
	return h
}
// Init lazily creates the xorm engine from the given config. Calling it again
// after a successful initialization is a no-op.
func (h *dbHistoricizerType) Init(cfg *utils.DBCacheConfig) error {
	// A non-nil engine means a previous Init already succeeded.
	if h.engine != nil {
		return nil
	}
	engine, err := xorm.NewEngine(cfg.DriverName, cfg.DataSourceName)
	if err != nil {
		return err
	}
	h.engine = engine
	return nil
}
// GetExpiredAndDelete returns all expired entries and removes them from the
// database. Select and delete run within a single transaction so an entry
// cannot be deleted without also being returned (or vice versa).
func (h *dbHistoricizerType) GetExpiredAndDelete() ([]DBCacheEntry, error) {
	res, err := h.engine.Transaction(func(session *xorm.Session) (interface{}, error) {
		var expired []DBCacheEntry
		now := time.Now().UTC().Unix()
		// Fix: queries must go through the transaction session, not h.engine,
		// otherwise they execute outside the transaction entirely.
		// updated_at is in secs while ttl is in nanosecs (1 s = 1000000000 ns).
		err := session.
			Table(&DBCacheEntry{}).
			Where("updated_at + (ttl / 1000000000) <= ?", now).
			Find(&expired)
		if err != nil {
			return nil, err
		}
		log.Printf("Found %d expired entries in database", len(expired))
		if len(expired) > 0 {
			affected, err := session.
				Where("updated_at + (ttl / 1000000000) <= ?", now).
				Delete(&DBCacheEntry{})
			if err != nil {
				return nil, err
			}
			// Rolling back on a count mismatch keeps select and delete consistent.
			if affected == 0 || int(affected) != len(expired) {
				return nil, fmt.Errorf("Removed %d entries out of %d identified as expired", affected, len(expired))
			}
		}
		return expired, nil
	})
	// Fix: check err rather than res — res is a non-nil interface even when it
	// wraps a nil slice, so the old `res == nil` test was unreliable.
	if err != nil {
		return nil, err
	}
	return res.([]DBCacheEntry), nil
}
// h is the package-level historicizer shared across scheduled runs so the
// xorm engine is created only once.
var h = NewDBHistoricizer()

// HistoricizeDBCache removes expired entries from the DB cache and writes them
// to a temporary, date-partitioned file, optionally shipping that file to a
// remote volume. Intended for scheduled execution; errors are fatal.
func HistoricizeDBCache(config *utils.DBCacheConfig) {
	// init xorm db conn - idempotent.
	// Fix: the Init error was previously ignored; a failed connection would
	// surface later as a nil-engine panic.
	if err := h.Init(config); err != nil {
		log.Fatal(err)
	}
	expired, err := h.GetExpiredAndDelete()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("Found and removed %d expired entries in database", len(expired))
	if len(expired) > 0 {
		// Serialize using the formatter selected in the config.
		formatManager, err := GetFormatter(config.Historicize.Format)
		if err != nil {
			log.Fatal(err)
		}
		// Partition by date using the configured layout.
		now := time.Now().UTC()
		partName := now.Format(config.Historicize.DatePartitioner)
		// Create a tmp folder partition.
		partitionPath := path.Join(config.Historicize.TmpDir, partName)
		// Create all local partition folders, unless they already exist.
		if err = os.MkdirAll(partitionPath, os.ModePerm); err != nil {
			log.Fatal(err)
		}
		// Name the file with the current timestamp (does not matter wrt the partition format).
		filename := fmt.Sprintf("%s.%s", fmt.Sprint(now.Unix()), config.Historicize.Format)
		tmpFilePath := path.Join(partitionPath, filename)
		if err = formatManager.Save(&expired, tmpFilePath); err != nil {
			log.Fatal(err)
		}
		log.Printf("Expired entries written to temporary dir %s as %s", tmpFilePath, filename)
		// Asynchronously put the local file to the target remote volume, if any
		// remote volume is defined.
		go putters.Put(config.Historicize.TmpDir, partName, filename, &config.Historicize.RemoteVolumeConfig)
	}
}
// scheduler drives the periodic historicization jobs in UTC.
var scheduler = gocron.NewScheduler(time.UTC)

// ScheduleHistoricizeDBCache registers HistoricizeDBCache on the cron schedule
// taken from the config and starts the scheduler asynchronously.
// Schedule format:
// [Minute] [hour] [Day_of_the_Month] [Month_of_the_Year] [Day_of_the_Week]
// [0 to 59, or *] [0 to 23, or *] [1 to 31, or *] [1 to 12, or *] [0 to 7, with (0 == 7, sunday), or *]
func ScheduleHistoricizeDBCache(config *utils.DBCacheConfig) error {
	log.Printf("Running historicize schedule %s", config.Historicize.Schedule)
	if _, err := scheduler.Cron(config.Historicize.Schedule).Do(HistoricizeDBCache, config); err != nil {
		return err
	}
	// Start and continue in the background.
	scheduler.StartAsync()
	return nil
}
|
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// constructMaximumBinaryTree recursively builds the maximum binary tree: the
// root holds the largest value, the left subtree is built from the values
// before it and the right subtree from the values after it. An empty slice
// yields a nil tree.
func constructMaximumBinaryTree(nums []int) *TreeNode {
	if len(nums) == 0 {
		return nil
	}
	top := findIndexOfMax(nums)
	node := &TreeNode{Val: nums[top]}
	node.Left = constructMaximumBinaryTree(nums[:top])
	node.Right = constructMaximumBinaryTree(nums[top+1:])
	return node
}
// findIndexOfMax returns the index of the first occurrence of the largest
// value in nums; for an empty slice it returns 0.
func findIndexOfMax(nums []int) int {
	best := 0
	for i := 1; i < len(nums); i++ {
		if nums[i] > nums[best] {
			best = i
		}
	}
	return best
}
|
package main
import (
"fmt"
"net"
"os"
"os/exec"
)
// keeprun starts cmd and restarts it with an equivalent configuration every
// time it exits, forever.
func keeprun(cmd *exec.Cmd) {
	for {
		if err := cmd.Start(); err != nil {
			fmt.Println(err)
		}
		fmt.Println(cmd.Process)
		cmd.Wait()
		// An exec.Cmd cannot be started twice, so build a fresh one.
		// Fix: Cmd.Args already contains the program name at Args[0]; the
		// original passed cmd.Args verbatim as extra arguments, so every
		// restart prepended a duplicate argv[0].
		next := exec.Command(cmd.Path, cmd.Args[1:]...)
		// Fix: carry over the full stdio configuration (the original dropped
		// Stdin on restart).
		next.Stdin = cmd.Stdin
		next.Stdout = cmd.Stdout
		next.Stderr = cmd.Stderr
		next.ExtraFiles = cmd.ExtraFiles
		cmd = next
	}
}
// run builds a command for the afclient script, wires it to the current
// process's stdio, attaches the listener fd as an extra file (fd 3 in the
// child, per exec.Cmd.ExtraFiles), and keeps it running via keeprun.
func run(fd *os.File) {
	// NOTE(review): hard-coded absolute path to a user's home directory.
	path := "/Users/snow/Downloads/afclient.py"
	// NOTE(review): ">>./log" is passed as a literal argument, not a shell
	// redirection — no shell is involved, so the child receives the string
	// ">>./log" as argv[1]; confirm this is intentional.
	cmd := exec.Command(path, ">>./log")
	cmd.Stdin = os.Stdin
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.ExtraFiles = []*os.File{fd}
	keeprun(cmd)
}
// main listens on 127.0.0.1:8000 and hands the listener's file descriptor to
// two concurrent runner loops.
func main() {
	// Fix: all three errors were previously ignored; a failed listen would
	// have caused a nil-pointer panic on netListener.File().
	addr, err := net.ResolveTCPAddr("tcp4", "127.0.0.1:8000")
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	netListener, err := net.ListenTCP("tcp4", addr)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	fd, err := netListener.File()
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
	go run(fd)
	run(fd)
}
|
package redismgr
import (
"common/clog"
"gopkg.in/redis.v4"
)
// RedisClust is the shared cluster client, populated by ConnectRedisCluster.
var RedisClust *redis.ClusterClient

// log is the package-wide logger.
var log = clog.GetLogger()

// GetRedisCluster returns the shared cluster client; it is nil until
// ConnectRedisCluster has been called.
func GetRedisCluster() *redis.ClusterClient {
	return RedisClust
}
// ConnectRedisCluster creates the shared cluster client for the given
// addresses and verifies connectivity with a PING, panicking (via the logger)
// when the cluster is unreachable.
func ConnectRedisCluster(addr []string) {
	opts := &redis.ClusterOptions{Addrs: addr}
	RedisClust = redis.NewClusterClient(opts)
	if pong, err := RedisClust.Ping().Result(); err != nil {
		log.Panicf("Fail to Connect Redis cluster: %v, [%v]", pong, err)
	}
	log.Info("Successfully connect to Redis cluster...")
}
|
package server
import (
"github.com/kardianos/service"
)
// logger receives service lifecycle messages; it is assigned by
// InstallService and UninstallService.
var logger service.Logger

// sol implements the service interface for the sleep-on-lan server.
type sol struct {
}

// Start launches the server in a goroutine and returns immediately, as the
// service framework expects Start not to block.
func (s *sol) Start(srv service.Service) error {
	go StartServer()
	return nil
}

// Stop is a no-op.
func (s *sol) Stop(srv service.Service) error {
	return nil
}
// InstallService installs sleep on lan as a service in the current OS,
// passing the given port as a service argument. Service/logger creation
// failures panic; installation failures are only logged.
func InstallService(port string) {
	conf := &service.Config{
		Name:        "sleeponlan",
		DisplayName: "Sleep On Lan Service",
		Description: "Simple sleep on lan service",
		Arguments:   []string{"--port", port},
	}
	svc, err := service.New(&sol{}, conf)
	if err != nil {
		panic(err)
	}
	if logger, err = svc.Logger(nil); err != nil {
		panic(err)
	}
	if err = svc.Install(); err != nil {
		logger.Error(err)
	}
}
// UninstallService stops and uninstalls the service from the target system.
// Creation failures panic; stop/uninstall failures are only logged so that an
// already-stopped service can still be uninstalled.
func UninstallService() {
	srvConf := &service.Config{
		Name:        "sleeponlan",
		DisplayName: "Sleep On Lan Service",
		Description: "Simple sleep on lan service",
	}
	sleeponlan := &sol{}
	s, err := service.New(sleeponlan, srvConf)
	if err != nil {
		// Fix: this error was previously overwritten unchecked; a failed New
		// left s nil and panicked on the s.Logger call below.
		panic(err)
	}
	logger, err = s.Logger(nil)
	if err != nil {
		panic(err)
	}
	err = s.Stop()
	if err != nil {
		logger.Error(err)
	}
	err = s.Uninstall()
	if err != nil {
		logger.Error(err)
	}
}
|
package types
// Side of an order.
type Side int

const (
	// Buy order
	Buy Side = iota
	// Sell order
	Sell
)

// String returns the string name of the Side; any value other than Buy is
// reported as "Sell".
func (s Side) String() string {
	switch s {
	case Buy:
		return "Buy"
	default:
		return "Sell"
	}
}
// OrderType of an order. Values are iota-based; append new types at the end
// to keep previously serialized values stable.
type OrderType int

const (
	// Limit order
	Limit OrderType = iota
	// Market order
	Market
	// StopLoss order
	StopLoss
	// StopLossLimit order
	StopLossLimit
	// TakeProfit order
	TakeProfit
	// TakeProfitLimit order
	TakeProfitLimit
	// LimitMaker order
	LimitMaker
)

// OrderStatus of an order. Values are iota-based; append new statuses at the
// end to keep previously serialized values stable.
type OrderStatus int

const (
	// StatusNew ...
	StatusNew OrderStatus = iota
	// StatusPartiallyFilled ...
	StatusPartiallyFilled
	// StatusFilled ...
	StatusFilled
	// StatusCanceled ...
	StatusCanceled
	// StatusPendingCancel ...
	StatusPendingCancel
	// StatusRejected ...
	StatusRejected
	// StatusExpired ...
	StatusExpired
)

// TimeInForce of an order. Values are iota-based; append new policies at the
// end to keep previously serialized values stable.
type TimeInForce int

const (
	// GoodTillCancel ...
	GoodTillCancel TimeInForce = iota
	// ImmediateOrCancel ...
	ImmediateOrCancel
	// FillOrCancel ...
	FillOrCancel
)
|
package session
// Manager is the session-store abstraction.
// NOTE(review): the parameter/return semantics are not documented at the
// declaration site — presumably Add stores a value and returns its key/ID,
// and Get resolves a key to its stored value; confirm against the
// implementations before relying on this.
type Manager interface {
	Add(string) (string, error)
	Get(string) (string, error)
}
|
package main
import "fmt"
/*
@Time : 2020/8/12 20:32
@Author : DELL ricemarch@foxmail.com
@tips:
*/
// wordBreak reports whether s can be segmented into dictionary words, using a
// single left-to-right scan with a sliding window bounded by the longest word
// length.
// NOTE(review): unlike the classic DP in wordBreak_2, this scan only advances
// the window start j after maxlen characters and does not examine every split
// point — verify its results against wordBreak_2 before trusting it.
func wordBreak(s string, wordDict []string) bool {
	// set holds the dictionary words; maxlen tracks the longest word length.
	set := make(map[string]bool)
	maxlen := 0
	for _, v := range wordDict {
		set[v] = true
		if len(v) > maxlen {
			maxlen = len(v)
		}
	}
	// dp[p] == 1 marks that a matched word ends at position p+1.
	dp := make([]int, len(s))
	j := 0
	for i := j; i <= len(s); i++ {
		subStr := s[j:i]
		if set[subStr] {
			// NOTE(review): if the dictionary contains the empty string, the
			// first iteration (i == 0) indexes dp[-1] and panics.
			dp[i-1] = 1
			if dp[len(s)-1] == 1 {
				return true
			}
		}
		// After scanning maxlen characters, slide the window start past the
		// first matched end position (if any).
		if i-j == maxlen {
			for p := j; p < i; p++ {
				if dp[p] == 1 {
					j = p + 1
					break
				}
			}
		}
	}
	return false
}
// wordBreak_2 is the classic dynamic-programming solution (after the official
// editorial): dp[i] is true when the prefix s[:i] can be segmented into
// dictionary words, i.e. dp[i] = dp[j] && check(s[j:i]) for some j < i, where
// check reports dictionary membership of the substring.
func wordBreak_2(s string, wordDict []string) bool {
	dict := make(map[string]bool, len(wordDict))
	for _, word := range wordDict {
		dict[word] = true
	}
	canBreak := make([]bool, len(s)+1)
	canBreak[0] = true // the empty prefix is always segmentable
	for end := 1; end <= len(s); end++ {
		for start := 0; start < end; start++ {
			if canBreak[start] && dict[s[start:end]] {
				canBreak[end] = true
				break
			}
		}
	}
	return canBreak[len(s)]
}
// main exercises wordBreak_2 on a handful of cases and prints the results.
func main() {
	// Fix: the original shadowed the predeclared identifier "bool" by using
	// it as a variable name; use descriptive names instead.
	got1 := wordBreak_2("leetcode", []string{"leet", "code"})
	got2 := wordBreak_2("aaaaaaa", []string{"aaaa", "aaa"})
	got3 := wordBreak_2("cars", []string{"car", "ca", "rs"})
	got4 := wordBreak_2("catsandog", []string{"cats", "dog", "sand", "and", "cat"})
	got5 := wordBreak_2("a", []string{"b"})
	got6 := wordBreak_2("bb", []string{"a", "b", "bbb", "bbbb"})
	fmt.Println(got1)
	fmt.Println(got2)
	fmt.Println(got3)
	fmt.Println(got4)
	fmt.Println(got5)
	fmt.Println(got6)
}
|
package main
import (
"sync"
"time"
)
// Time layouts and JSON payload templates for the emitted sensor data.
const (
	timeformat      = "2006-01-02 15:04:05"
	timeformatMetro = time.RFC3339
	// dataFormat is the simple payload: ID, timestamp, sensor type and value.
	dataFormat = `{"ID":"%d", "time":"%s", "type":"%s", "value":"%.2f"}`
	// dataFormatMetro is the extended payload carrying range and alarm fields.
	dataFormatMetro = `{"id":%d,"time":"%s","type":[{"name":"%s","value":%.2f,"range":{"min":%d,"max":%d,"delta":1,"time":%d},"alarms":{"min":false,"max":false,"delta":false}}]}`
)

// storage aggregates all emulated device models.
type storage struct {
	// crysrc — presumably a crypto-backed randomness source (cryptoSource is
	// declared elsewhere in this package); confirm before relying on it.
	crysrc              cryptoSource
	devIrArray          []*devIrModel
	devTemperatureArray []*devTemperatureModel
	devLightArray       []*devLightModel
	devCo2Array         []*devCo2Model
	chanceIr            float64
}
// mqttServer holds the MQTT broker connection settings loaded from JSON.
type mqttServer struct {
	Host         string `json:"host"`
	Port         string `json:"port"`
	Login        string `json:"login"`
	Password     string `json:"password"`
	Topic        string `json:"topic"`
	CommandTopic string `json:"commandtopic"`
}

// irConf configures the emulated IR sensor group.
type irConf struct {
	Enabled  bool   `json:"enabled"`
	Count    int    `json:"count"`
	Interval int    `json:"interval"`
	All      string `json:"all"`
	Chance   int    `json:"chance"`
}

// devConf configures a ranged sensor group (temperature, light or CO2).
type devConf struct {
	Enabled  bool `json:"enabled"`
	Count    int  `json:"count"`
	Interval int  `json:"interval"`
	Range    struct {
		Low  int `json:"low"`
		High int `json:"high"`
	} `json:"range"`
	// Overflowcount has no json tag, so it is never populated from config.
	Overflowcount int
	Chance        int `json:"chance"`
}

//Config configuration emulator (root configuration object).
type Config struct {
	Mqttserver  *mqttServer `json:"mqttserver"`
	Ir          *irConf     `json:"ir"`
	Temperature *devConf    `json:"temperature"`
	Light       *devConf    `json:"light"`
	Co2         *devConf    `json:"co2"`
}
// devData is a single reading: the device id, its value and when it was taken.
type devData struct {
	Time  time.Time `json:"time"`
	ID    int       `json:"id"`
	Value float64   `json:"value"`
}

//DevIrModel device model
// Contains a sync.Mutex, so instances must not be copied — the storage arrays
// hold *devIrModel accordingly.
type devIrModel struct {
	Data       *devData `json:"data"`
	Lock       sync.Mutex
	Correction float64 `json:"correction"`
	Enabled    bool    `json:"enabled"`
}

//DevTemperatureModel device model
// NOTE(review): devTemperatureModel, devLightModel and devCo2Model share an
// identical layout; a single shared type would remove the triplication.
type devTemperatureModel struct {
	Data       *devData `json:"data"`
	Lock       sync.Mutex
	Correction float64 `json:"correction"`
	Enabled    bool    `json:"enabled"`
	GoodDevice bool    `json:"gooddevice"`
	Balance    int     `json:"balance"`
	Chance     int     `json:"chance"`
	Range      struct {
		Low  int `json:"low"`
		High int `json:"high"`
	} `json:"range"`
}

//DevLightModel device model
type devLightModel struct {
	Data       *devData `json:"data"`
	Lock       sync.Mutex
	Correction float64 `json:"correction"`
	Enabled    bool    `json:"enabled"`
	GoodDevice bool    `json:"gooddevice"`
	Balance    int     `json:"balance"`
	Chance     int     `json:"chance"`
	Range      struct {
		Low  int `json:"low"`
		High int `json:"high"`
	} `json:"range"`
}

//DevCo2Model device model
type devCo2Model struct {
	Data       *devData `json:"data"`
	Lock       sync.Mutex
	Correction float64 `json:"correction"`
	Enabled    bool    `json:"enabled"`
	GoodDevice bool    `json:"gooddevice"`
	Balance    int     `json:"balance"`
	Chance     int     `json:"chance"`
	Range      struct {
		Low  int `json:"low"`
		High int `json:"high"`
	} `json:"range"`
}

// controlCommand is a command received over MQTT targeting one device.
type controlCommand struct {
	Type  string `json:"type"`
	ID    int    `json:"id"`
	Cmd   string `json:"cmd"`
	Value string `json:"value"`
}
|
package labels
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Label sets the label `name` on obj to the first non-empty entry in values;
// when every value is empty the label is not set at all.
func Label(obj metav1.Object, name string, values ...string) {
	for _, candidate := range values {
		if candidate == "" {
			continue
		}
		current := obj.GetLabels()
		if current == nil {
			current = map[string]string{}
		}
		current[name] = candidate
		obj.SetLabels(current)
		return
	}
}
// UnLabel removes the label `name` from obj; a nil label map means there is
// nothing to remove.
func UnLabel(obj metav1.Object, name string) {
	labels := obj.GetLabels()
	if labels == nil {
		return
	}
	delete(labels, name)
	// Fix: write the map back. The metav1.Object interface does not guarantee
	// that GetLabels returns the live map, so relying on in-place mutation
	// alone (as the original did) is fragile; SetLabels also mirrors Label's
	// behavior above.
	obj.SetLabels(labels)
}
|
package commands
import (
"fmt"
"strings"
"github.com/go-crypt/crypt"
"github.com/go-crypt/crypt/algorithm"
"github.com/spf13/cobra"
"github.com/authelia/authelia/v4/internal/authentication"
"github.com/authelia/authelia/v4/internal/configuration"
"github.com/authelia/authelia/v4/internal/configuration/schema"
)
// newCryptoHashCmd returns the `crypto hash` parent command with its validate
// and generate subcommands attached.
func newCryptoHashCmd(ctx *CmdCtx) (cmd *cobra.Command) {
	cmd = &cobra.Command{
		Use:               cmdUseHash,
		Short:             cmdAutheliaCryptoHashShort,
		Long:              cmdAutheliaCryptoHashLong,
		Example:           cmdAutheliaCryptoHashExample,
		Args:              cobra.NoArgs,
		DisableAutoGenTag: true,
	}

	cmd.AddCommand(newCryptoHashValidateCmd(ctx), newCryptoHashGenerateCmd(ctx))

	return cmd
}
// newCryptoHashGenerateCmd returns the `crypto hash generate` command, which
// hashes a password using the algorithm selected by flags/config, plus one
// subcommand per supported algorithm.
func newCryptoHashGenerateCmd(ctx *CmdCtx) (cmd *cobra.Command) {
	// Configuration defaults for every supported algorithm, keyed by the
	// file-password configuration prefix; applied before flags/config load.
	defaults := map[string]any{
		prefixFilePassword + ".algorithm":             schema.DefaultPasswordConfig.Algorithm,
		prefixFilePassword + ".argon2.variant":        schema.DefaultPasswordConfig.Argon2.Variant,
		prefixFilePassword + ".argon2.iterations":     schema.DefaultPasswordConfig.Argon2.Iterations,
		prefixFilePassword + ".argon2.memory":         schema.DefaultPasswordConfig.Argon2.Memory,
		prefixFilePassword + ".argon2.parallelism":    schema.DefaultPasswordConfig.Argon2.Parallelism,
		prefixFilePassword + ".argon2.key_length":     schema.DefaultPasswordConfig.Argon2.KeyLength,
		prefixFilePassword + ".argon2.salt_length":    schema.DefaultPasswordConfig.Argon2.SaltLength,
		prefixFilePassword + ".sha2crypt.variant":     schema.DefaultPasswordConfig.SHA2Crypt.Variant,
		prefixFilePassword + ".sha2crypt.iterations":  schema.DefaultPasswordConfig.SHA2Crypt.Iterations,
		prefixFilePassword + ".sha2crypt.salt_length": schema.DefaultPasswordConfig.SHA2Crypt.SaltLength,
		prefixFilePassword + ".pbkdf2.variant":        schema.DefaultPasswordConfig.PBKDF2.Variant,
		prefixFilePassword + ".pbkdf2.iterations":     schema.DefaultPasswordConfig.PBKDF2.Iterations,
		prefixFilePassword + ".pbkdf2.salt_length":    schema.DefaultPasswordConfig.PBKDF2.SaltLength,
		prefixFilePassword + ".bcrypt.variant":        schema.DefaultPasswordConfig.BCrypt.Variant,
		prefixFilePassword + ".bcrypt.cost":           schema.DefaultPasswordConfig.BCrypt.Cost,
		prefixFilePassword + ".scrypt.iterations":     schema.DefaultPasswordConfig.SCrypt.Iterations,
		prefixFilePassword + ".scrypt.block_size":     schema.DefaultPasswordConfig.SCrypt.BlockSize,
		prefixFilePassword + ".scrypt.parallelism":    schema.DefaultPasswordConfig.SCrypt.Parallelism,
		prefixFilePassword + ".scrypt.key_length":     schema.DefaultPasswordConfig.SCrypt.KeyLength,
		prefixFilePassword + ".scrypt.salt_length":    schema.DefaultPasswordConfig.SCrypt.SaltLength,
	}
	cmd = &cobra.Command{
		Use:     cmdUseGenerate,
		Short:   cmdAutheliaCryptoHashGenerateShort,
		Long:    cmdAutheliaCryptoHashGenerateLong,
		Example: cmdAutheliaCryptoHashGenerateExample,
		Args:    cobra.NoArgs,
		// Run order: defaults, flag mapping, config load, password validation.
		PreRunE: ctx.ChainRunE(
			ctx.ConfigSetDefaultsRunE(defaults),
			ctx.CryptoHashGenerateMapFlagsRunE,
			ctx.ConfigLoadRunE,
			ctx.ConfigValidateSectionPasswordRunE,
		),
		RunE:              ctx.CryptoHashGenerateRunE,
		DisableAutoGenTag: true,
	}
	cmdFlagPassword(cmd, true)
	cmdFlagRandomPassword(cmd)
	// One subcommand per supported hashing algorithm.
	for _, use := range []string{cmdUseHashArgon2, cmdUseHashSHA2Crypt, cmdUseHashPBKDF2, cmdUseHashBCrypt, cmdUseHashSCrypt} {
		cmd.AddCommand(newCryptoHashGenerateSubCmd(ctx, use))
	}
	return cmd
}
// newCryptoHashGenerateSubCmd returns the algorithm-specific `crypto hash
// generate <algorithm>` subcommand identified by use, registering the flags
// relevant to that algorithm.
// NOTE(review): the defaults map below duplicates the one in
// newCryptoHashGenerateCmd; consider extracting a shared helper.
func newCryptoHashGenerateSubCmd(ctx *CmdCtx, use string) (cmd *cobra.Command) {
	defaults := map[string]any{
		prefixFilePassword + ".algorithm":             schema.DefaultPasswordConfig.Algorithm,
		prefixFilePassword + ".argon2.variant":        schema.DefaultPasswordConfig.Argon2.Variant,
		prefixFilePassword + ".argon2.iterations":     schema.DefaultPasswordConfig.Argon2.Iterations,
		prefixFilePassword + ".argon2.memory":         schema.DefaultPasswordConfig.Argon2.Memory,
		prefixFilePassword + ".argon2.parallelism":    schema.DefaultPasswordConfig.Argon2.Parallelism,
		prefixFilePassword + ".argon2.key_length":     schema.DefaultPasswordConfig.Argon2.KeyLength,
		prefixFilePassword + ".argon2.salt_length":    schema.DefaultPasswordConfig.Argon2.SaltLength,
		prefixFilePassword + ".sha2crypt.variant":     schema.DefaultPasswordConfig.SHA2Crypt.Variant,
		prefixFilePassword + ".sha2crypt.iterations":  schema.DefaultPasswordConfig.SHA2Crypt.Iterations,
		prefixFilePassword + ".sha2crypt.salt_length": schema.DefaultPasswordConfig.SHA2Crypt.SaltLength,
		prefixFilePassword + ".pbkdf2.variant":        schema.DefaultPasswordConfig.PBKDF2.Variant,
		prefixFilePassword + ".pbkdf2.iterations":     schema.DefaultPasswordConfig.PBKDF2.Iterations,
		prefixFilePassword + ".pbkdf2.salt_length":    schema.DefaultPasswordConfig.PBKDF2.SaltLength,
		prefixFilePassword + ".bcrypt.variant":        schema.DefaultPasswordConfig.BCrypt.Variant,
		prefixFilePassword + ".bcrypt.cost":           schema.DefaultPasswordConfig.BCrypt.Cost,
		prefixFilePassword + ".scrypt.iterations":     schema.DefaultPasswordConfig.SCrypt.Iterations,
		prefixFilePassword + ".scrypt.block_size":     schema.DefaultPasswordConfig.SCrypt.BlockSize,
		prefixFilePassword + ".scrypt.parallelism":    schema.DefaultPasswordConfig.SCrypt.Parallelism,
		prefixFilePassword + ".scrypt.key_length":     schema.DefaultPasswordConfig.SCrypt.KeyLength,
		prefixFilePassword + ".scrypt.salt_length":    schema.DefaultPasswordConfig.SCrypt.SaltLength,
	}
	useFmt := fmtCryptoHashUse(use)
	cmd = &cobra.Command{
		Use:     use,
		Short:   fmt.Sprintf(fmtCmdAutheliaCryptoHashGenerateSubShort, useFmt),
		Long:    fmt.Sprintf(fmtCmdAutheliaCryptoHashGenerateSubLong, useFmt, useFmt),
		Example: fmt.Sprintf(fmtCmdAutheliaCryptoHashGenerateSubExample, use),
		Args:    cobra.NoArgs,
		PersistentPreRunE: ctx.ChainRunE(
			ctx.ConfigSetDefaultsRunE(defaults),
			ctx.CryptoHashGenerateMapFlagsRunE,
			ctx.ConfigLoadRunE,
			ctx.ConfigValidateSectionPasswordRunE,
		),
		RunE:              ctx.CryptoHashGenerateRunE,
		DisableAutoGenTag: true,
	}
	// Register only the flags relevant to the chosen algorithm.
	switch use {
	case cmdUseHashArgon2:
		cmdFlagIterations(cmd, schema.DefaultPasswordConfig.Argon2.Iterations)
		cmdFlagParallelism(cmd, schema.DefaultPasswordConfig.Argon2.Parallelism)
		cmdFlagKeySize(cmd, schema.DefaultPasswordConfig.Argon2.KeyLength)
		cmdFlagSaltSize(cmd, schema.DefaultPasswordConfig.Argon2.SaltLength)
		cmd.Flags().StringP(cmdFlagNameVariant, "v", schema.DefaultPasswordConfig.Argon2.Variant, "variant, options are 'argon2id', 'argon2i', and 'argon2d'")
		cmd.Flags().IntP(cmdFlagNameMemory, "m", schema.DefaultPasswordConfig.Argon2.Memory, "memory in kibibytes")
		cmd.Flags().String(cmdFlagNameProfile, "", "profile to use, options are low-memory and recommended")
	case cmdUseHashSHA2Crypt:
		cmdFlagIterations(cmd, schema.DefaultPasswordConfig.SHA2Crypt.Iterations)
		cmdFlagSaltSize(cmd, schema.DefaultPasswordConfig.SHA2Crypt.SaltLength)
		cmd.Flags().StringP(cmdFlagNameVariant, "v", schema.DefaultPasswordConfig.SHA2Crypt.Variant, "variant, options are sha256 and sha512")
		// NOTE(review): this assigns an empty PreRunE chain for sha2crypt
		// only, apparently overriding the inherited behavior — confirm intent.
		cmd.PreRunE = ctx.ChainRunE()
	case cmdUseHashPBKDF2:
		cmdFlagIterations(cmd, schema.DefaultPasswordConfig.PBKDF2.Iterations)
		cmdFlagSaltSize(cmd, schema.DefaultPasswordConfig.PBKDF2.SaltLength)
		cmd.Flags().StringP(cmdFlagNameVariant, "v", schema.DefaultPasswordConfig.PBKDF2.Variant, "variant, options are 'sha1', 'sha224', 'sha256', 'sha384', and 'sha512'")
	case cmdUseHashBCrypt:
		cmd.Flags().StringP(cmdFlagNameVariant, "v", schema.DefaultPasswordConfig.BCrypt.Variant, "variant, options are 'standard' and 'sha256'")
		cmd.Flags().IntP(cmdFlagNameCost, "i", schema.DefaultPasswordConfig.BCrypt.Cost, "hashing cost")
	case cmdUseHashSCrypt:
		cmdFlagIterations(cmd, schema.DefaultPasswordConfig.SCrypt.Iterations)
		cmdFlagKeySize(cmd, schema.DefaultPasswordConfig.SCrypt.KeyLength)
		cmdFlagSaltSize(cmd, schema.DefaultPasswordConfig.SCrypt.SaltLength)
		cmdFlagParallelism(cmd, schema.DefaultPasswordConfig.SCrypt.Parallelism)
		cmd.Flags().IntP(cmdFlagNameBlockSize, "r", schema.DefaultPasswordConfig.SCrypt.BlockSize, "block size")
	}
	return cmd
}
// newCryptoHashValidateCmd returns the `crypto hash validate` command, which
// checks a password against the digest supplied as its single argument.
func newCryptoHashValidateCmd(ctx *CmdCtx) (cmd *cobra.Command) {
	cmd = &cobra.Command{
		Use:               fmt.Sprintf(cmdUseFmtValidate, cmdUseValidate),
		Short:             cmdAutheliaCryptoHashValidateShort,
		Long:              cmdAutheliaCryptoHashValidateLong,
		Example:           cmdAutheliaCryptoHashValidateExample,
		Args:              cobra.ExactArgs(1),
		RunE:              ctx.CryptoHashValidateRunE,
		DisableAutoGenTag: true,
	}

	cmdFlagPassword(cmd, false)

	return cmd
}
// CryptoHashValidateRunE is the RunE for the authelia crypto hash validate
// command: it obtains the password, checks it against the digest in args[0],
// and prints whether they match.
func (ctx *CmdCtx) CryptoHashValidateRunE(cmd *cobra.Command, args []string) (err error) {
	password, _, err := cmdCryptoHashGetPassword(cmd, args, false, false)
	if err != nil {
		return fmt.Errorf("error occurred trying to obtain the password: %w", err)
	}

	if len(password) == 0 {
		return fmt.Errorf("no password provided")
	}

	valid, err := crypt.CheckPassword(password, args[0])
	if err != nil {
		return fmt.Errorf("error occurred trying to validate the password against the digest: %w", err)
	}

	if valid {
		fmt.Println("The password matches the digest.")
	} else {
		fmt.Println("The password does not match the digest.")
	}

	return nil
}
// CryptoHashGenerateMapFlagsRunE is the RunE which configures the flags map configuration source for the
// authelia crypto hash generate commands.
//
// Each algorithm subcommand maps its CLI flag names onto the matching
// file-configuration keys under the password section, so values given as
// flags are merged into the configuration like any other source.
func (ctx *CmdCtx) CryptoHashGenerateMapFlagsRunE(cmd *cobra.Command, args []string) (err error) {
	var flagsMap map[string]string

	// cmd.Use identifies which algorithm subcommand is executing.
	switch cmd.Use {
	case cmdUseHashArgon2:
		flagsMap = map[string]string{
			cmdFlagNameVariant:     prefixFilePassword + ".argon2.variant",
			cmdFlagNameIterations:  prefixFilePassword + ".argon2.iterations",
			cmdFlagNameMemory:      prefixFilePassword + ".argon2.memory",
			cmdFlagNameParallelism: prefixFilePassword + ".argon2.parallelism",
			cmdFlagNameKeySize:     prefixFilePassword + ".argon2.key_length",
			cmdFlagNameSaltSize:    prefixFilePassword + ".argon2.salt_length",
		}
	case cmdUseHashSHA2Crypt:
		flagsMap = map[string]string{
			cmdFlagNameVariant:    prefixFilePassword + ".sha2crypt.variant",
			cmdFlagNameIterations: prefixFilePassword + ".sha2crypt.iterations",
			cmdFlagNameSaltSize:   prefixFilePassword + ".sha2crypt.salt_length",
		}
	case cmdUseHashPBKDF2:
		flagsMap = map[string]string{
			cmdFlagNameVariant:    prefixFilePassword + ".pbkdf2.variant",
			cmdFlagNameIterations: prefixFilePassword + ".pbkdf2.iterations",
			cmdFlagNameKeySize:    prefixFilePassword + ".pbkdf2.key_length",
			cmdFlagNameSaltSize:   prefixFilePassword + ".pbkdf2.salt_length",
		}
	case cmdUseHashBCrypt:
		flagsMap = map[string]string{
			cmdFlagNameVariant: prefixFilePassword + ".bcrypt.variant",
			cmdFlagNameCost:    prefixFilePassword + ".bcrypt.cost",
		}
	case cmdUseHashSCrypt:
		flagsMap = map[string]string{
			cmdFlagNameIterations:  prefixFilePassword + ".scrypt.iterations",
			cmdFlagNameBlockSize:   prefixFilePassword + ".scrypt.block_size",
			cmdFlagNameParallelism: prefixFilePassword + ".scrypt.parallelism",
			cmdFlagNameKeySize:     prefixFilePassword + ".scrypt.key_length",
			cmdFlagNameSaltSize:    prefixFilePassword + ".scrypt.salt_length",
		}
	}

	// Only register a flag-backed configuration source when this subcommand
	// defined a mapping (the bare `generate` command has none).
	if flagsMap != nil {
		ctx.cconfig.sources = append(ctx.cconfig.sources, configuration.NewCommandLineSourceWithMapping(cmd.Flags(), flagsMap, false, false))
	}

	return nil
}
// CryptoHashGenerateRunE is the RunE for the authelia crypto hash generate commands.
// It resolves a password (flag, prompt, or random), hashes it with the algorithm
// selected by the subcommand, and prints the encoded digest.
func (ctx *CmdCtx) CryptoHashGenerateRunE(cmd *cobra.Command, args []string) (err error) {
	var (
		password string
		random   bool
	)

	if password, random, err = cmdCryptoHashGetPassword(cmd, args, false, true); err != nil {
		return err
	}

	if password == "" {
		return fmt.Errorf("no password provided")
	}

	// Algorithm subcommands are named after the algorithm; the bare `generate`
	// command keeps whatever algorithm is already configured.
	if cmd.Use != cmdUseGenerate {
		ctx.config.AuthenticationBackend.File.Password.Algorithm = cmd.Use
	}

	var hash algorithm.Hash

	if hash, err = authentication.NewFileCryptoHashFromConfig(ctx.config.AuthenticationBackend.File.Password); err != nil {
		return err
	}

	var digest algorithm.Digest

	if digest, err = hash.Hash(password); err != nil {
		return err
	}

	// Surface the generated password when it was random, otherwise the user
	// would have no way to know it.
	if random {
		fmt.Printf("Random Password: %s\n", password)
	}

	fmt.Printf("Digest: %s\n", digest.Encode())

	return nil
}
// cmdCryptoHashGetPassword resolves the password for the crypto hash commands.
// Resolution order: the --random flag (when useRandom), the --password flag,
// the positional arguments joined by spaces (when useArgs), and finally an
// interactive terminal prompt with an optional confirmation prompt.
// It returns the password, whether it was randomly generated, and any error.
func cmdCryptoHashGetPassword(cmd *cobra.Command, args []string, useArgs, useRandom bool) (password string, random bool, err error) {
	if useRandom {
		if random, err = cmd.Flags().GetBool(cmdFlagNameRandom); err != nil {
			return
		}
	}

	switch {
	case random:
		// Generate a password from the configured charset/length flags.
		password, err = flagsGetRandomCharacters(cmd.Flags(), cmdFlagNameRandomLength, cmdFlagNameRandomCharSet, cmdFlagNameCharacters)
		return
	case cmd.Flags().Changed(cmdFlagNamePassword):
		// Explicitly supplied via flag: skip prompting entirely.
		password, err = cmd.Flags().GetString(cmdFlagNamePassword)
		return
	case useArgs && len(args) != 0:
		password, err = strings.Join(args, " "), nil
		return
	}

	// Fall through to the interactive prompt.
	var (
		noConfirm bool
	)

	if password, err = termReadPasswordWithPrompt("Enter Password: ", "password"); err != nil {
		err = fmt.Errorf("failed to read the password from the terminal: %w", err)
		return
	}

	// The validate command never confirms: the digest argument is the source
	// of truth the password is checked against.
	if cmd.Use == fmt.Sprintf(cmdUseFmtValidate, cmdUseValidate) {
		fmt.Println("")
		return
	}

	// Prompt a second time unless --no-confirm was supplied. A flag lookup
	// error is deliberately treated as "confirm not requested".
	if noConfirm, err = cmd.Flags().GetBool(cmdFlagNameNoConfirm); err == nil && !noConfirm {
		var confirm string
		if confirm, err = termReadPasswordWithPrompt("Confirm Password: ", ""); err != nil {
			return
		}

		if password != confirm {
			fmt.Println("")
			err = fmt.Errorf("the password did not match the confirmation password")
			return
		}
	}

	// Blank line to separate the hidden prompt input from subsequent output.
	fmt.Println("")

	return
}
// cmdFlagPassword registers the persistent --password flag and, when noConfirm
// is set, the flag that skips the confirmation prompt.
func cmdFlagPassword(cmd *cobra.Command, noConfirm bool) {
	flags := cmd.PersistentFlags()

	flags.String(cmdFlagNamePassword, "", "manually supply the password rather than using the terminal prompt")

	if noConfirm {
		flags.Bool(cmdFlagNameNoConfirm, false, "skip the password confirmation prompt")
	}
}
// cmdFlagRandomPassword registers the persistent flags that control random
// password generation (enable switch, charset, explicit characters, length).
func cmdFlagRandomPassword(cmd *cobra.Command) {
	flags := cmd.PersistentFlags()

	flags.Bool(cmdFlagNameRandom, false, "uses a randomly generated password")
	flags.String(cmdFlagNameRandomCharSet, cmdFlagValueCharSet, cmdFlagUsageCharset)
	flags.String(cmdFlagNameRandomCharacters, "", cmdFlagUsageCharacters)
	flags.Int(cmdFlagNameRandomLength, 72, cmdFlagUsageLength)
}
// cmdFlagIterations registers the iterations flag (-i) with the given default.
func cmdFlagIterations(cmd *cobra.Command, n int) {
	cmd.Flags().IntP(cmdFlagNameIterations, "i", n, "number of iterations")
}
// cmdFlagKeySize registers the key-size flag (-k) with the given default.
func cmdFlagKeySize(cmd *cobra.Command, n int) {
	cmd.Flags().IntP(cmdFlagNameKeySize, "k", n, "key size in bytes")
}
// cmdFlagSaltSize registers the salt-size flag (-s) with the given default.
func cmdFlagSaltSize(cmd *cobra.Command, n int) {
	cmd.Flags().IntP(cmdFlagNameSaltSize, "s", n, "salt size in bytes")
}
// cmdFlagParallelism registers the parallelism flag (-p) with the given default.
func cmdFlagParallelism(cmd *cobra.Command, n int) {
	cmd.Flags().IntP(cmdFlagNameParallelism, "p", n, "parallelism or threads")
}
|
package ui
import (
"fmt"
"strings"
"github.com/rivo/tview"
)
// Live builds a list widget of the currently live matches reported by the
// backend. Selecting an entry switches the page stack to that match's page.
// Returns an error only when the backend lookup fails.
func (a *App) Live() (*tview.List, error) {
	matches, err := a.be.GetLiveMatches()
	if err != nil {
		return nil, err
	}

	list := tview.NewList()

	for i, m := range matches {
		// Partition hero names by team for the description line.
		var radiantHeroes, direHeroes []string

		for _, p := range m.Players {
			if p.IsRadiant {
				radiantHeroes = append(radiantHeroes, p.Hero)
			} else {
				direHeroes = append(direHeroes, p.Hero)
			}
		}

		name := fmt.Sprintf("%s %d - %d %s", m.RadiantName, m.RadiantScore, m.DireScore, m.DireName)
		desc := fmt.Sprintf("%s net: %d %s", strings.Join(radiantHeroes, ","), m.RadiantLead, strings.Join(direHeroes, ","))
		page := fmt.Sprintf("match/%d", m.ID)

		// BUG FIX: the shortcut was rune(i), which is NUL for the first entry
		// and non-printable control characters afterwards. Use the printable
		// keys '1'..'9' for the first nine entries and no shortcut beyond that
		// (tview treats rune 0 as "no shortcut").
		shortcut := rune(0)
		if i < 9 {
			shortcut = rune('1' + i)
		}

		// page is declared inside the loop body, so each closure captures its
		// own copy regardless of Go version.
		list.AddItem(name, desc, shortcut, func() {
			a.pages.SwitchToPage(page)
		})
	}

	return list, nil
}
|
/*
Description
The sequence of n − 1 consecutive composite numbers (positive integers that are not prime and not equal to 1) lying between two successive prime numbers p and p + n is called a prime gap of length n. For example, ‹24, 25, 26, 27, 28› between 23 and 29 is a prime gap of length 6.
Your mission is to write a program to calculate, for a given positive integer k, the length of the prime gap that contains k. For convenience, the length is considered 0 in case no prime gap contains k.
Input
The input is a sequence of lines each of which contains a single positive integer. Each positive integer is greater than 1 and less than or equal to the 100000th prime number, which is 1299709. The end of the input is indicated by a line containing a single zero.
Output
The output should be composed of lines each of which contains a single non-negative integer. It is the length of the prime gap that contains the corresponding positive integer in the input if it is a composite number, or 0 otherwise. No other characters should occur in the output.
Sample Input
10
11
27
2
492170
0
Sample Output
4
0
6
0
114
Source
Japan 2007
*/
package main
import (
"math/big"
)
// main self-tests primegap against the sample cases from the problem
// statement and against a table of expected gap lengths for n = 2..106.
func main() {
	// Expected gap lengths for n = 2..106 (prefix of OEIS A072680).
	tab := []int64{
		0, 0, 2, 0, 2, 0, 4, 4, 4, 0, 2, 0, 4, 4, 4, 0, 2, 0, 4,
		4, 4, 0, 6, 6, 6, 6, 6, 0, 2, 0, 6, 6, 6, 6, 6, 0, 4, 4,
		4, 0, 2, 0, 4, 4, 4, 0, 6, 6, 6, 6, 6, 0, 6, 6, 6, 6, 6,
		0, 2, 0, 6, 6, 6, 6, 6, 0, 4, 4, 4, 0, 2, 0, 6, 6, 6, 6,
		6, 0, 4, 4, 4, 0, 6, 6, 6, 6, 6, 0, 8, 8, 8, 8, 8, 8, 8,
		0, 4, 4, 4, 0, 2, 0, 4, 4, 4,
	}

	// Sample cases from the problem statement.
	samples := map[int64]int64{
		10:     4,
		11:     0,
		27:     6,
		2:      0,
		492170: 114,
	}
	for in, want := range samples {
		assert(primegap(in) == want)
	}

	for i, want := range tab {
		assert(primegap(int64(i+2)) == want)
	}
}
// assert panics when its condition does not hold.
func assert(x bool) {
	if x {
		return
	}

	panic("assertion failed")
}
// primegap returns the length of the prime gap containing n: the distance
// between the nearest prime below n and the nearest prime above n. It returns
// 0 when n is prime or below 2. See https://oeis.org/A072680.
func primegap(n int64) int64 {
	if n < 2 || isprime(n) {
		return 0
	}

	// n is composite and >= 4, so a prime exists on either side.
	lo, hi := n-1, n+1
	for ; !isprime(lo); lo-- {
	}
	for ; !isprime(hi); hi++ {
	}

	return hi - lo
}
// isprime reports whether n is prime. ProbablyPrime is documented to be 100%
// accurate for inputs below 2^64 (Go 1.8+), which covers every int64, so the
// small round count is not a correctness concern here.
func isprime(n int64) bool {
	return big.NewInt(n).ProbablyPrime(2)
}
|
package model
import "time"
// Gender values stored in User.Sex.
const (
	SexWomen   = "W"
	SexMan     = "M"
	SexUnKnown = "U"
)

// User is the xorm-mapped account record.
//
// NOTE(review): the struct tags use the key `from:`; if these are intended
// for gin form binding the canonical key is `form:` — confirm against the
// callers before renaming, since reflection-based readers look the key up
// verbatim.
type User struct {
	Id     int64  `xorm:"pk autoincr bigint(64)" from:"id" json:"id"`
	Mobile string `xorm:"varchar(20)" from:"mobile" json:"mobile"`
	// BUG FIX: Passwd and Salt carried json:"_", which names the JSON field
	// "_" rather than omitting it; the secrets were only hidden because the
	// two duplicate "_" keys made encoding/json drop both fields. json:"-"
	// expresses the intent explicitly and robustly.
	Passwd   string    `xorm:"varchar(40)" from:"passwd" json:"-"`
	Avatar   string    `xorm:"varchar(150)" from:"avatar" json:"avatar"`
	Sex      string    `xorm:"varchar(2)" from:"sex" json:"sex"`
	Nickname string    `xorm:"varchar(20)" from:"nickname" json:"nickname"`
	Salt     string    `xorm:"varchar(10)" from:"salt" json:"-"`
	Online   int       `xorm:"int(10)" from:"online" json:"online"`
	Token    string    `xorm:"varchar(40)" from:"token" json:"token"`
	Memo     string    `xorm:"varchar(140)" from:"memo" json:"memo"`
	Createat time.Time `xorm:"datetime" from:"createat" json:"createat"`
}
|
/*
* Copyright The Titan Project Contributors.
*/
package forwarder
// Capability describes a single capability of the volume plugin; Scope is the
// only attribute the Docker volume plugin protocol defines here.
type Capability struct {
	Scope string
}

// CreateVolumeRequest is the payload for creating a volume, carrying the
// volume name and free-form driver options.
type CreateVolumeRequest struct {
	Name string
	Opts map[string]interface{}
}

// GetPathResponse returns a volume's mountpoint, with Err set on failure.
type GetPathResponse struct {
	Err        string
	Mountpoint string
}

// GetVolumeResponse returns a single volume, with Err set on failure.
type GetVolumeResponse struct {
	Err    string
	Volume Volume
}

// ListVolumeResponse returns all known volumes, with Err set on failure.
type ListVolumeResponse struct {
	Err     string
	Volumes []Volume
}

// MountVolumeRequest identifies the volume to mount by name plus a caller ID.
type MountVolumeRequest struct {
	Name string
	ID   string
}

// PluginDescription lists the plugin APIs this driver implements.
type PluginDescription struct {
	Implements []string
}

// Volume is the driver's view of a volume: its name, where it is mounted,
// and arbitrary status key/value pairs.
type Volume struct {
	Name       string
	Mountpoint string
	Status     map[string]string
}

// VolumeCapabilities wraps the driver capability for the capabilities response.
type VolumeCapabilities struct {
	Capabilities Capability
}

// VolumeRequest identifies a volume by name.
type VolumeRequest struct {
	Name string
}

// VolumeResponse carries only an error string (empty on success).
type VolumeResponse struct {
	Err string
}
|
package web
import (
"fmt"
"testing"
"time"
"github.com/mgutz/ansi"
_ "github.com/GoAdminGroup/go-admin/adapter/gin"
_ "github.com/GoAdminGroup/go-admin/modules/db/drivers/mysql"
_ "github.com/GoAdminGroup/themes/adminlte"
"github.com/sclevine/agouti"
)
// Testers is the acceptance-suite entry point: it receives the testing handle
// and the browser page wrapper and performs the suite's assertions.
type Testers func(t *testing.T, page *Page)

// ServerStarter boots the application under test; implementations are expected
// to shut down when a value arrives on quit.
type ServerStarter func(quit chan struct{})
// UserAcceptanceTestSuit starts the server under test, launches a ChromeDriver
// browser session, runs the provided testers against it, and tears everything
// down. When local is true the browser stays visible (non-headless) and the
// page/driver are left running so a developer can inspect the final state.
// options, when given, replaces the default Chrome arguments entirely.
func UserAcceptanceTestSuit(t *testing.T, testers Testers, serverStarter ServerStarter, local bool, options ...string) {
	var quit = make(chan struct{})
	// Boot the application under test in the background; it stops when a
	// value is sent on quit at the end of this function.
	go serverStarter(quit)
	if len(options) == 0 {
		// Default Chrome arguments tuned for CI stability.
		options = []string{
			"--user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36",
			"--window-size=1500,900",
			"--incognito",
			"--blink-settings=imagesEnabled=true",
			"--no-default-browser-check",
			"--ignore-ssl-errors=true",
			"--ssl-protocol=any",
			"--no-sandbox",
			"--disable-breakpad",
			"--disable-gpu",
			"--disable-logging",
			"--no-zygote",
			"--allow-running-insecure-content",
		}
		if !local {
			// Headless only on CI; a local run keeps the window visible.
			options = append(options, "--headless")
		}
	}
	driver := agouti.ChromeDriver(
		agouti.ChromeOptions("args", options),
		agouti.Desired(
			agouti.Capabilities{
				"loggingPrefs": map[string]string{
					"performance": "ALL",
				},
				"acceptSslCerts":      true,
				"acceptInsecureCerts": true,
			},
		))
	err := driver.Start()
	if err != nil {
		panic("failed to start driver, error: " + err.Error())
	}
	page, err := driver.NewPage()
	if err != nil {
		panic("failed to open page, error: " + err.Error())
	}
	fmt.Println()
	fmt.Println("============================================")
	printlnWithColor("User Acceptance Testing", "blue")
	fmt.Println("============================================")
	fmt.Println()
	// Run the actual suite against the live page.
	testers(t, &Page{T: t, Page: page, Driver: driver, Quit: quit})
	// Give in-flight browser activity a moment to settle before teardown.
	wait(2)
	if !local {
		// Teardown failures are logged rather than fatal: the suite already ran.
		err = page.CloseWindow()
		if err != nil {
			fmt.Println("failed to close page, error: ", err)
		}
		err = page.Destroy()
		if err != nil {
			fmt.Println("failed to destroy page, error: ", err)
		}
		err = driver.Stop()
		if err != nil {
			fmt.Println("failed to stop driver, error: ", err)
		}
	}
	// NOTE(review): this send blocks until the server goroutine receives it;
	// confirm every ServerStarter implementation reads from quit.
	quit <- struct{}{}
}
// printlnWithColor prints msg wrapped in the given ANSI color.
func printlnWithColor(msg string, color string) {
	colored := ansi.Color(msg, color)
	fmt.Println(colored)
}
// printPart announces the named section of the suite in blue.
func printPart(part string) {
	line := "> " + part
	printlnWithColor(line, colorBlue)
}
// wait sleeps for t seconds.
func wait(t int) {
	d := time.Duration(t) * time.Second
	time.Sleep(d)
}
// basePath is the root address of the locally running admin server.
const basePath = "http://localhost:9033"

// url resolves an admin-panel path; "/" maps to the panel root itself.
func url(suffix string) string {
	if suffix == "/" {
		return basePath + "/admin"
	}

	return basePath + "/admin" + suffix
}
// ANSI color names accepted by printlnWithColor.
const (
	colorBlue  = "blue"
	colorGreen = "green"
)
|
package kong
import (
"io/ioutil"
"net/http"
"strings"
)
// Kong admin API endpoint paths; each carries a leading slash, which is why
// New strips the trailing slash from the base URL.
const (
	nodeInfo = "/"
	status   = "/status"
	// NOTE(review): Kong's admin API lists services under "/services"
	// (plural) — confirm this constant before using it; it is not referenced
	// in the code visible here.
	service = "/service"
)
// Admin is a minimal client for the Kong admin API.
type Admin struct {
	// adminUrl is the base URL with any trailing slash removed.
	adminUrl string
}

// New returns an Admin client for the given base URL. A single trailing slash
// is stripped because the endpoint constants above already carry leading
// slashes. (Idiom: strings.TrimSuffix replaces the manual HasSuffix + slicing.)
func New(adminUrl string) *Admin {
	return &Admin{
		adminUrl: strings.TrimSuffix(adminUrl, "/"),
	}
}
// NodeInfo retrieves node information from the admin API root.
// See https://docs.konghq.com/2.1.x/admin-api/#retrieve-node-information
func (admin *Admin) NodeInfo() (string, error) {
	endpoint := admin.adminUrl + nodeInfo
	return get(endpoint)
}
// GetStatus retrieves the node status from the admin API.
// See https://docs.konghq.com/2.1.x/admin-api/#retrieve-node-status
func (admin *Admin) GetStatus() (string, error) {
	endpoint := admin.adminUrl + status
	return get(endpoint)
}
// get performs a GET request and returns the whole response body as a string.
//
// NOTE(review): the response status code is not inspected, so non-2xx bodies
// are returned as success, and http.Get uses the default client with no
// timeout — confirm whether callers depend on this before tightening.
func get(url string) (string, error) {
	resp, err := http.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	var body []byte
	if body, err = ioutil.ReadAll(resp.Body); err != nil {
		return "", err
	}

	return string(body), nil
}
|
package log
import (
"github.com/feng/future/go-kit/agfun/agfun-server/service"
)
// LoggingMiddleware returns a middleware that wraps an AppService with the
// logging decorator.
func LoggingMiddleware() service.SvcMiddleware {
	return func(next service.AppService) service.AppService {
		return logmw{AppService: next}
	}
}

// logmw decorates an embedded AppService; logging hooks are implemented as
// method overrides on this type.
type logmw struct {
	service.AppService
}
|
package util
import (
"net"
"testing"
)
// TestIPv4ToUint32 checks the big-endian packing of dotted-quad addresses.
// (Fix: gofmt-clean literals; failure output now includes got and want.)
func TestIPv4ToUint32(t *testing.T) {
	tests := []struct {
		ip  net.IP
		res uint32
	}{
		{net.IPv4(0, 0, 0, 0), 0},
		{net.IPv4(1, 1, 1, 1), 16843009},
		{net.IPv4(10, 192, 50, 1), 180367873},
	}

	for _, test := range tests {
		if res := IPv4ToUint32(test.ip); res != test.res {
			t.Errorf("IPv4ToUint32(%v) = %d, want %d", test.ip, res, test.res)
		}
	}
}
func TestUint32ToIPv4(t *testing.T) {
tests := []struct{
n uint32
ip net.IP
} {
{ 0, net.IPv4(0, 0, 0, 0) },
{ 16843009, net.IPv4(1, 1, 1, 1) },
{ 180367873, net.IPv4(10, 192, 50, 1) },
}
for _, test := range tests {
if ip := Uint32ToIPv4(test.n); !test.ip.Equal(ip) {
t.Errorf("uint32ToIPv4: %v\nexpect %v, get %v\n", test, test.ip, ip)
}
}
} |
package ircserver
import (
"testing"
"github.com/robustirc/robustirc/internal/robust"
"gopkg.in/sorcix/irc.v2"
)
// TestServerInvite exercises the INVITE command issued by a services session
// (ChanServ) and the exact server replies the protocol requires.
func TestServerInvite(t *testing.T) {
	i, ids := stdIRCServerWithServices()

	// secure creates #test so there is a channel to invite into.
	i.ProcessMessage(&robust.Message{Session: ids["secure"]}, irc.ParseMessage("JOIN #test"))
	// A valid invite yields 341 (RPL_INVITING), the INVITE relayed to the
	// target, and a NOTICE to the channel.
	mustMatchIrcmsgs(t,
		i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage(":ChanServ INVITE mero #test")),
		[]*irc.Message{
			irc.ParseMessage(":robustirc.net 341 ChanServ mero #test"),
			irc.ParseMessage(":ChanServ!services@services INVITE mero :#test"),
			irc.ParseMessage(":robustirc.net NOTICE #test :ChanServ invited mero into the channel."),
		})
	// Unknown nick -> 401 ERR_NOSUCHNICK.
	mustMatchMsg(t,
		i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage(":ChanServ INVITE moro #test")),
		":robustirc.net 401 ChanServ moro :No such nick/channel")
	i.ProcessMessage(&robust.Message{Session: ids["mero"]}, irc.ParseMessage("JOIN #test"))
	// Inviting someone already present -> 443 ERR_USERONCHANNEL.
	mustMatchMsg(t,
		i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage(":ChanServ INVITE mero #test")),
		":robustirc.net 443 ChanServ mero #test :is already on channel")
	// Unknown channel -> 403 ERR_NOSUCHCHANNEL.
	mustMatchMsg(t,
		i.ProcessMessage(&robust.Message{Session: ids["services"]}, irc.ParseMessage(":ChanServ INVITE mero #toast")),
		":robustirc.net 403 ChanServ #toast :No such channel")
}
|
package model
import (
"github.com/golang/protobuf/ptypes/timestamp"
protobuf "github.com/oojob/protobuf"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// EmailModel stores a profile's email address, its status, and whether it is
// publicly shown.
// NOTE(review): EmailStatus is persisted under the bson key "verified", which
// reads like a boolean — confirm this key against the stored documents.
type EmailModel struct {
	Email       string                     `bson:"email,omitempty"`
	EmailStatus protobuf.Email_EmailStatus `bson:"verified,omitempty"`
	Show        bool                       `bson:"show,omitempty"`
}

// EducationModel stores a profile's education entry and its visibility.
type EducationModel struct {
	Education string `bson:"education,omitempty"`
	Show      bool   `bson:"show,omitempty"`
}

// IdentifierModel carries the schema.org-style identity attributes of a
// profile (name, type, descriptions, headline, slogan).
type IdentifierModel struct {
	Identifier                string `bson:"identifier,omitempty"`
	Name                      string `bson:"name,omitempty"`
	AlternateName             string `bson:"alternate_name,omitempty"`
	Type                      string `bson:"type,omitempty"`
	AdditionalType            string `bson:"additional_type,omitempty"`
	Description               string `bson:"description,omitempty"`
	DisambiguatingDescription string `bson:"disambiguating_description,omitempty"`
	Headline                  string `bson:"headline,omitempty"`
	Slogan                    string `bson:"slogan,omitempty"`
}

// ProfileSecutiryModel holds the credential and verification state of a
// profile.
// NOTE(review): the exported name contains a typo ("Secutiry"); renaming would
// break callers, so it is only flagged here.
type ProfileSecutiryModel struct {
	Password     string `bson:"password,omitempty"`
	PasswordSalt string `bson:"password_salt,omitempty"`
	PasswordHash string `bson:"password_hash,omitempty"`
	Code         string `bson:"code,omitempty"`
	CodeType     string `bson:"code_type,omitempty"`
	AccountType  string `bson:"account_type,omitempty"`
	Verified     bool   `bson:"verified,omitempty"`
}

// AddressModel is a postal address attached to a profile.
type AddressModel struct {
	Country    string `bson:"country,omitempty"`
	Locality   string `bson:"locality,omitempty"`
	Region     string `bson:"region,omitempty"`
	PostalCode int64  `bson:"postal_code,omitempty"`
	Street     string `bson:"street,omitempty"`
}

// MetadataModel groups the lifecycle timestamps of a profile.
// NOTE(review): timestamp.Timestamp is a protobuf message embedded by value;
// protobuf messages are normally handled via pointers (copying them is
// discouraged) — confirm whether *timestamp.Timestamp was intended.
type MetadataModel struct {
	CreatedAt     timestamp.Timestamp `bson:"created_at,omitempty"`
	UpdatedAt     timestamp.Timestamp `bson:"updated_at,omitempty"`
	PublishedDate timestamp.Timestamp `bson:"published_date,omitempty"`
	EndDate       timestamp.Timestamp `bson:"end_date,omitempty"`
	LastActive    timestamp.Timestamp `bson:"last_active,omitempty"`
}

// TokenDetails carries the access/refresh token pair together with their
// UUIDs and expiry times (unix seconds).
type TokenDetails struct {
	AccessToken  string
	RefreshToken string
	AccessUUID   string
	RefreshUUID  string
	AtExpires    int64
	RtExpires    int64
}

// Profile is the top-level MongoDB document for a user profile, composing the
// models above.
type Profile struct {
	ID              primitive.ObjectID   `bson:"_id,omitempty"`
	Identity        IdentifierModel      `bson:"identity,omitempty"`
	GivenName       string               `bson:"given_name,omitempty"`
	MiddleName      string               `bson:"middle_name,omitempty"`
	FamilyName      string               `bson:"family_name,omitempty"`
	Username        string               `bson:"username,omitempty"`
	Email           EmailModel           `bson:"email,omitempty"`
	Gender          string               `bson:"gender,omitempty"`
	Birthdate       timestamp.Timestamp  `bson:"birthdate,omitempty"`
	CurrentPosition string               `bson:"current_position,omitempty"`
	Education       EducationModel       `bson:"education,omitempty"`
	Address         AddressModel         `bson:"address,omitempty"`
	Security        ProfileSecutiryModel `bson:"security,omitempty"`
	Metadata        MetadataModel        `bson:"metadata,omitempty"`
}
|
package rpcserver
import (
"encoding/json"
"errors"
"fmt"
"github.com/incognitochain/incognito-chain/common"
"github.com/incognitochain/incognito-chain/common/base58"
"github.com/incognitochain/incognito-chain/dataaccessobject/statedb"
"github.com/incognitochain/incognito-chain/metadata"
"github.com/incognitochain/incognito-chain/rpcserver/bean"
"github.com/incognitochain/incognito-chain/rpcserver/jsonresult"
"github.com/incognitochain/incognito-chain/rpcserver/rpcservice"
)
// handleGetLiquidationTpExchangeRates returns the liquidation TP exchange
// rates of a custodian at the requested beacon height.
func (httpServer *HttpServer) handleGetLiquidationTpExchangeRates(params interface{}, closeChan <-chan struct{}) (interface{}, *rpcservice.RPCError) {
	arrayParams := common.InterfaceSlice(params)
	if len(arrayParams) < 1 {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("Param array must be at least 1"))
	}

	// The single parameter is a metadata object carrying the lookup fields.
	meta, ok := arrayParams[0].(map[string]interface{})
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata param is invalid"))
	}

	beaconHeight, ok := meta["BeaconHeight"].(float64)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata BeaconHeight is invalid"))
	}

	custodianAddress, ok := meta["CustodianAddress"].(string)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata CustodianAddress is invalid"))
	}

	// Resolve the feature state DB rooted at the requested beacon height.
	featureStateRootHash, err := httpServer.config.BlockChain.GetBeaconFeatureRootHash(httpServer.config.BlockChain.GetBeaconChainDatabase(), uint64(beaconHeight))
	if err != nil {
		return nil, rpcservice.NewRPCError(rpcservice.GetTpExchangeRatesLiquidationError, fmt.Errorf("Can't found FeatureStateRootHash of beacon height %+v, error %+v", beaconHeight, err))
	}

	stateDB, err := statedb.NewWithPrefixTrie(featureStateRootHash, statedb.NewDatabaseAccessWarper(httpServer.config.BlockChain.GetBeaconChainDatabase()))
	if err != nil {
		return nil, rpcservice.NewRPCError(rpcservice.GetTpExchangeRatesLiquidationError, err)
	}

	result, err := httpServer.portal.GetLiquidateTpExchangeRates(stateDB, custodianAddress, uint64(beaconHeight))
	if err != nil {
		return nil, rpcservice.NewRPCError(rpcservice.GetTpExchangeRatesLiquidationError, err)
	}

	return result, nil
}
// handleGetLiquidationTpExchangeRatesByTokenId returns the liquidation TP
// exchange rate of a single portal token for a custodian at a beacon height.
func (httpServer *HttpServer) handleGetLiquidationTpExchangeRatesByTokenId(params interface{}, closeChan <-chan struct{}) (interface{}, *rpcservice.RPCError) {
	arrayParams := common.InterfaceSlice(params)
	if len(arrayParams) == 0 {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("Params should be not empty"))
	}
	// FIX: the original repeated `if len(arrayParams) < 1` here, which is
	// unreachable after the emptiness check above; the dead branch is removed.

	// get meta data from params
	data, ok := arrayParams[0].(map[string]interface{})
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata param is invalid"))
	}

	beaconHeight, ok := data["BeaconHeight"].(float64)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata BeaconHeight is invalid"))
	}

	custodianAddress, ok := data["CustodianAddress"].(string)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata CustodianAddress is invalid"))
	}

	pTokenID, ok := data["TokenID"].(string)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata TokenID is invalid"))
	}

	if !common.IsPortalExchangeRateToken(pTokenID) {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata TokenID is not support"))
	}

	// get feature stateDB from beaconheight
	featureStateRootHash, err := httpServer.config.BlockChain.GetBeaconFeatureRootHash(httpServer.config.BlockChain.GetBeaconChainDatabase(), uint64(beaconHeight))
	if err != nil {
		return nil, rpcservice.NewRPCError(rpcservice.GetTpExchangeRatesLiquidationByTokenIdError, fmt.Errorf("Can't found FeatureStateRootHash of beacon height %+v, error %+v", beaconHeight, err))
	}

	stateDB, err := statedb.NewWithPrefixTrie(featureStateRootHash, statedb.NewDatabaseAccessWarper(httpServer.config.BlockChain.GetBeaconChainDatabase()))
	if err != nil {
		return nil, rpcservice.NewRPCError(rpcservice.GetTpExchangeRatesLiquidationByTokenIdError, err)
	}

	result, err := httpServer.portal.GetLiquidateTpExchangeRatesByToken(stateDB, custodianAddress, pTokenID, uint64(beaconHeight))
	if err != nil {
		return nil, rpcservice.NewRPCError(rpcservice.GetTpExchangeRatesLiquidationByTokenIdError, err)
	}

	return result, nil
}
// handleGetLiquidationExchangeRatesPool returns the liquidation pool state of
// a portal token at the requested beacon height.
func (httpServer *HttpServer) handleGetLiquidationExchangeRatesPool(params interface{}, closeChan <-chan struct{}) (interface{}, *rpcservice.RPCError) {
	arrayParams := common.InterfaceSlice(params)
	if len(arrayParams) == 0 {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("Params should be not empty"))
	}
	// FIX: the original repeated `if len(arrayParams) < 1` here, which is
	// unreachable after the emptiness check above; the dead branch is removed.

	// get meta data from params
	data, ok := arrayParams[0].(map[string]interface{})
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata param is invalid"))
	}

	beaconHeight, ok := data["BeaconHeight"].(float64)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata BeaconHeight is invalid"))
	}

	pTokenID, ok := data["TokenID"].(string)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata TokenID is invalid"))
	}

	if !common.IsPortalExchangeRateToken(pTokenID) {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata TokenID is not support"))
	}

	featureStateRootHash, err := httpServer.config.BlockChain.GetBeaconFeatureRootHash(httpServer.config.BlockChain.GetBeaconChainDatabase(), uint64(beaconHeight))
	if err != nil {
		return nil, rpcservice.NewRPCError(rpcservice.GetExchangeRatesLiquidationPoolError, fmt.Errorf("Can't found FeatureStateRootHash of beacon height %+v, error %+v", beaconHeight, err))
	}

	stateDB, err := statedb.NewWithPrefixTrie(featureStateRootHash, statedb.NewDatabaseAccessWarper(httpServer.config.BlockChain.GetBeaconChainDatabase()))
	if err != nil {
		return nil, rpcservice.NewRPCError(rpcservice.GetExchangeRatesLiquidationPoolError, err)
	}

	result, err := httpServer.portal.GetLiquidateExchangeRatesPool(stateDB, pTokenID)
	if err != nil {
		return nil, rpcservice.NewRPCError(rpcservice.GetExchangeRatesLiquidationPoolError, err)
	}

	return result, nil
}
// handleGetAmountNeededForCustodianDepositLiquidation computes the collateral
// a custodian must deposit to escape liquidation for a given portal token.
func (httpServer *HttpServer) handleGetAmountNeededForCustodianDepositLiquidation(params interface{}, closeChan <-chan struct{}) (interface{}, *rpcservice.RPCError) {
	arrayParams := common.InterfaceSlice(params)
	if len(arrayParams) == 0 {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("Params should be not empty"))
	}
	// FIX: the original repeated `if len(arrayParams) < 1` here, which is
	// unreachable after the emptiness check above; the dead branch is removed.

	// get meta data from params
	data, ok := arrayParams[0].(map[string]interface{})
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata param is invalid"))
	}

	beaconHeight, ok := data["BeaconHeight"].(float64)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata BeaconHeight is invalid"))
	}

	custodianAddress, ok := data["CustodianAddress"].(string)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata CustodianAddress is invalid"))
	}

	isFreeCollateralSelected, ok := data["IsFreeCollateralSelected"].(bool)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata IsFreeCollateralSelected is invalid"))
	}

	pTokenID, ok := data["TokenID"].(string)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata TokenID is invalid"))
	}

	if !common.IsPortalToken(pTokenID) {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata TokenID is not support"))
	}

	featureStateRootHash, err := httpServer.config.BlockChain.GetBeaconFeatureRootHash(httpServer.config.BlockChain.GetBeaconChainDatabase(), uint64(beaconHeight))
	if err != nil {
		return nil, rpcservice.NewRPCError(rpcservice.GetPortalStateError, fmt.Errorf("Can't found FeatureStateRootHash of beacon height %+v, error %+v", beaconHeight, err))
	}

	stateDB, err := statedb.NewWithPrefixTrie(featureStateRootHash, statedb.NewDatabaseAccessWarper(httpServer.config.BlockChain.GetBeaconChainDatabase()))
	if err != nil {
		return nil, rpcservice.NewRPCError(rpcservice.GetAmountNeededForCustodianDepositLiquidationError, err)
	}

	// Portal parameters (rates, percentages) are height-dependent.
	portalParam := httpServer.config.BlockChain.GetPortalParams(uint64(beaconHeight))

	result, err := httpServer.portal.CalculateAmountNeededCustodianDepositLiquidation(stateDB, custodianAddress, pTokenID, isFreeCollateralSelected, portalParam)
	if err != nil {
		return nil, rpcservice.NewRPCError(rpcservice.GetAmountNeededForCustodianDepositLiquidationError, err)
	}

	return result, nil
}
// createRawRedeemLiquidationExchangeRates builds (without broadcasting) a raw
// privacy-custom-token transaction that redeems against the liquidation
// exchange-rates pool. Privacy must be disabled for this metadata type.
func (httpServer *HttpServer) createRawRedeemLiquidationExchangeRates(params interface{}, closeChan <-chan struct{}) (interface{}, *rpcservice.RPCError) {
	arrayParams := common.InterfaceSlice(params)
	if len(arrayParams) == 0 {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("Params should be not empty"))
	}

	// The optional 7th element toggles token privacy, which this metadata
	// type does not permit.
	if len(arrayParams) >= 7 {
		hasPrivacyTokenParam, ok := arrayParams[6].(float64)
		if !ok {
			return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("HasPrivacyToken is invalid"))
		}
		hasPrivacyToken := int(hasPrivacyTokenParam) > 0
		if hasPrivacyToken {
			return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("The privacy mode must be disabled"))
		}
	}

	if len(arrayParams) < 5 {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("Param array must be at least 5"))
	}

	tokenParamsRaw, ok := arrayParams[4].(map[string]interface{})
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("Param metadata is invalid"))
	}

	redeemTokenID, ok := tokenParamsRaw["RedeemTokenID"].(string)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("RedeemTokenID is invalid"))
	}

	redeemAmountParam, ok := tokenParamsRaw["RedeemAmount"].(float64)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("RedeemAmount is invalid"))
	}
	redeemAmount := uint64(redeemAmountParam)

	redeemFeeParam, ok := tokenParamsRaw["RedeemFee"].(float64)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("RedeemFee is invalid"))
	}
	redeemFee := uint64(redeemFeeParam)

	redeemerIncAddressStr, ok := tokenParamsRaw["RedeemerIncAddressStr"].(string)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("RedeemerIncAddressStr is invalid"))
	}

	remoteAddress, ok := tokenParamsRaw["RemoteAddress"].(string)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("RemoteAddress is invalid"))
	}

	// BUG FIX: the constructor's error was previously discarded with `_`,
	// which could let a nil/invalid metadata object into the transaction.
	meta, err := metadata.NewPortalRedeemLiquidateExchangeRates(metadata.PortalRedeemLiquidateExchangeRatesMeta, redeemTokenID, redeemAmount, redeemerIncAddressStr, remoteAddress, redeemFee)
	if err != nil {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, err)
	}

	customTokenTx, rpcErr := httpServer.txService.BuildRawPrivacyCustomTokenTransaction(params, meta)
	if rpcErr != nil {
		Logger.log.Error(rpcErr)
		return nil, rpcErr
	}

	byteArrays, err2 := json.Marshal(customTokenTx)
	if err2 != nil {
		Logger.log.Error(err2)
		return nil, rpcservice.NewRPCError(rpcservice.UnexpectedError, err2)
	}

	result := jsonresult.CreateTransactionResult{
		TxID:            customTokenTx.Hash().String(),
		Base58CheckData: base58.Base58Check{}.Encode(byteArrays, 0x00),
	}
	return result, nil
}
// handleCreateAndSendRedeemLiquidationExchangeRates builds the raw redeem
// transaction and immediately broadcasts it via the raw-send handler.
func (httpServer *HttpServer) handleCreateAndSendRedeemLiquidationExchangeRates(params interface{}, closeChan <-chan struct{}) (interface{}, *rpcservice.RPCError) {
	data, err := httpServer.createRawRedeemLiquidationExchangeRates(params, closeChan)
	if err != nil {
		return nil, rpcservice.NewRPCError(rpcservice.UnexpectedError, err)
	}

	// createRawRedeemLiquidationExchangeRates always returns this concrete
	// type on success.
	tx := data.(jsonresult.CreateTransactionResult)
	newParam := []interface{}{tx.Base58CheckData}

	sendResult, err1 := httpServer.handleSendRawPrivacyCustomTokenTransaction(newParam, closeChan)
	if err1 != nil {
		return nil, rpcservice.NewRPCError(rpcservice.UnexpectedError, err1)
	}

	return sendResult, nil
}
// createLiquidationCustodianDeposit builds a raw (non-privacy) transaction
// carrying a PortalLiquidationCustodianDeposit metadata entry.
//
// Expected params layout (JSON array):
//
//	[0..3] standard raw-tx params consumed by bean.NewCreateRawTxParam
//	[4]    metadata object with keys IncognitoAddress (string),
//	       PTokenId (string), FreeCollateralSelected (bool),
//	       DepositedAmount (number)
//
// Returns a jsonresult.CreateTransactionResult on success.
func (httpServer *HttpServer) createLiquidationCustodianDeposit(params interface{}, closeChan <-chan struct{}) (interface{}, *rpcservice.RPCError) {
	arrayParams := common.InterfaceSlice(params)
	if len(arrayParams) == 0 {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("Params should be not empty"))
	}
	if len(arrayParams) < 5 {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("Param array must be at least 5"))
	}
	// Metadata is carried in the 5th array element.
	data, ok := arrayParams[4].(map[string]interface{})
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata param is invalid"))
	}
	incognitoAddress, ok := data["IncognitoAddress"].(string)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata IncognitoAddress is invalid"))
	}
	pTokenId, ok := data["PTokenId"].(string)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata PTokenId param is invalid"))
	}
	if !common.IsPortalToken(pTokenId) {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata public token is not supported currently"))
	}
	freeCollateralSelected, ok := data["FreeCollateralSelected"].(bool)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata FreeCollateralSelected is invalid"))
	}
	depositedAmountData, ok := data["DepositedAmount"].(float64)
	if !ok {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errors.New("metadata DepositedAmount is invalid"))
	}
	// JSON numbers arrive as float64; truncate to the uint64 amount.
	depositedAmount := uint64(depositedAmountData)
	meta, errMeta := metadata.NewPortalLiquidationCustodianDeposit(
		metadata.PortalLiquidationCustodianDepositMeta,
		incognitoAddress,
		pTokenId,
		depositedAmount,
		freeCollateralSelected,
	)
	if errMeta != nil {
		// Previously this error was discarded with `_`, letting a nil/invalid
		// metadata object flow into tx building; fail fast instead.
		return nil, rpcservice.NewRPCError(rpcservice.UnexpectedError, errMeta)
	}
	// create new param to build raw tx from param interface
	createRawTxParam, errNewParam := bean.NewCreateRawTxParam(params)
	if errNewParam != nil {
		return nil, rpcservice.NewRPCError(rpcservice.RPCInvalidParamsError, errNewParam)
	}
	// HasPrivacyCoin param is always false
	createRawTxParam.HasPrivacyCoin = false
	tx, err1 := httpServer.txService.BuildRawTransaction(createRawTxParam, meta)
	if err1 != nil {
		Logger.log.Error(err1)
		return nil, rpcservice.NewRPCError(rpcservice.UnexpectedError, err1)
	}
	byteArrays, err2 := json.Marshal(tx)
	if err2 != nil {
		// BUG FIX: the original logged err1 (nil at this point) instead of err2.
		Logger.log.Error(err2)
		return nil, rpcservice.NewRPCError(rpcservice.UnexpectedError, err2)
	}
	result := jsonresult.CreateTransactionResult{
		TxID:            tx.Hash().String(),
		Base58CheckData: base58.Base58Check{}.Encode(byteArrays, 0x00),
	}
	return result, nil
}
// handleCreateAndSendLiquidationCustodianDeposit builds a raw liquidation
// custodian deposit transaction and immediately broadcasts it through the
// plain send-raw-transaction endpoint.
func (httpServer *HttpServer) handleCreateAndSendLiquidationCustodianDeposit(params interface{}, closeChan <-chan struct{}) (interface{}, *rpcservice.RPCError) {
	// Step 1: build the raw transaction.
	rawTx, rpcErr := httpServer.createLiquidationCustodianDeposit(params, closeChan)
	if rpcErr != nil {
		return nil, rpcservice.NewRPCError(rpcservice.UnexpectedError, rpcErr)
	}
	// Step 2: forward its base58 payload to the send-raw endpoint.
	txResult := rawTx.(jsonresult.CreateTransactionResult)
	sendParams := []interface{}{txResult.Base58CheckData}
	sendResult, sendErr := httpServer.handleSendRawTransaction(sendParams, closeChan)
	if sendErr != nil {
		return nil, rpcservice.NewRPCError(rpcservice.UnexpectedError, sendErr)
	}
	return sendResult, nil
}
|
package game
import "fmt"
// Monster is a hostile NPC. It embeds Character, so it shares the entity
// fields (position, name, rune) and combat stats with the player.
type Monster struct {
	Character
}
// NewRat returns a rat monster at pos: fast (speed 1.5) but with only
// 500 hitpoints and zero strength.
func NewRat(pos Position) *Monster {
	stats := Character{
		Entity: Entity{
			Position: pos,
			Name:     "Rat",
			Rune:     'R',
		},
		Hitpoints:    500,
		Strength:     0,
		Speed:        1.5,
		ActionPoints: 0.0,
	}
	return &Monster{stats}
}
// NewSpider returns a spider monster at pos: slower than a rat (speed 1.0)
// but tougher, with 1000 hitpoints and zero strength.
func NewSpider(pos Position) *Monster {
	stats := Character{
		Entity: Entity{
			Position: pos,
			Name:     "Spider",
			Rune:     'S',
		},
		Hitpoints:    1000,
		Strength:     0,
		Speed:        1.0,
		ActionPoints: 0.0,
	}
	return &Monster{stats}
}
// Update advances the monster one game tick: it accrues action points by
// its speed, computes an A* path toward the player, then spends one action
// point per step it takes along that path.
//
// Note the loop bound int(m.ActionPoints) is re-evaluated each iteration
// while the body also decrements ActionPoints, so the number of steps
// depends on that interplay — preserved as-is.
func (m *Monster) Update(level *Level) {
	m.ActionPoints += m.Speed
	var (
		playerPos = level.Player.Position
		pos       = level.astar(m.Position, playerPos)
	)
	// Start at index 1: index 0 of the A* path is the monster's current tile.
	// (Original comment had a typo: "Most be > 1".)
	var movIndex = 1
	for i := 0; i < int(m.ActionPoints); i++ {
		if movIndex < len(pos) {
			m.Move(pos[movIndex], level)
			movIndex++
			m.ActionPoints--
		}
	}
}
// Move relocates the monster to pos when that tile holds neither another
// monster nor the player; otherwise the monster attacks the player, and is
// removed from the level if the exchange leaves it dead.
func (m *Monster) Move(pos Position, level *Level) {
	_, occupied := level.Monsters[pos]
	if occupied || pos == level.Player.Position {
		level.AddEvent(fmt.Sprintf("%s Attacks %d Player !", m.Name, m.Strength))
		Attack(m, level.Player)
		if m.Hitpoints <= 0 {
			delete(level.Monsters, m.Position)
		}
		return
	}
	delete(level.Monsters, m.Position)
	level.Monsters[pos] = m
	m.Position = pos
}
|
package metadata_test
import (
"fmt"
"testing"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/provenance-io/provenance/app"
simapp "github.com/provenance-io/provenance/app"
"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types"
sdk "github.com/cosmos/cosmos-sdk/types"
"github.com/stretchr/testify/suite"
testnet "github.com/cosmos/cosmos-sdk/testutil/network"
"github.com/provenance-io/provenance/x/metadata"
"github.com/provenance-io/provenance/x/metadata/types"
"github.com/provenance-io/provenance/x/metadata/types/p8e"
)
// HandlerTestSuite exercises the metadata module message handler against a
// freshly-built simapp for each test.
type HandlerTestSuite struct {
	suite.Suite

	cfg     testnet.Config   // test network config (unused by SetupTest; kept for network-based tests)
	testnet *testnet.Network // optional in-process test network

	app *app.App    // fresh application instance, rebuilt per test
	ctx sdk.Context // base context over a fresh chain state

	pubkey1   cryptotypes.PubKey // first test identity; has an on-chain account
	user1     string             // bech32 form of user1Addr
	user1Addr sdk.AccAddress

	pubkey2   cryptotypes.PubKey // second test identity; NO on-chain account
	user2     string             // bech32 form of user2Addr
	user2Addr sdk.AccAddress

	handler sdk.Handler // metadata module handler under test
}
// SetupTest builds a fresh simapp, a base context, two key pairs, and the
// metadata handler before every test in the suite.
func (s *HandlerTestSuite) SetupTest() {
	s.app = simapp.Setup(false)
	s.ctx = s.app.BaseApp.NewContext(false, tmproto.Header{})

	s.pubkey1 = secp256k1.GenPrivKey().PubKey()
	s.user1Addr = sdk.AccAddress(s.pubkey1.Address())
	s.user1 = s.user1Addr.String()

	s.pubkey2 = secp256k1.GenPrivKey().PubKey()
	s.user2Addr = sdk.AccAddress(s.pubkey2.Address())
	s.user2 = s.user2Addr.String()

	// Only user1 is given an on-chain account; user2 stays unknown to the
	// account keeper, which some cases rely on.
	s.app.AccountKeeper.SetAccount(s.ctx, s.app.AccountKeeper.NewAccountWithAddress(s.ctx, s.user1Addr))

	s.handler = metadata.NewHandler(s.app.MetadataKeeper)
}
// TestHandlerTestSuite wires the suite into the standard `go test` runner.
func TestHandlerTestSuite(t *testing.T) {
	suite.Run(t, new(HandlerTestSuite))
}
// createContractSpec assembles a minimal v39 (p8e) ContractSpec for the
// handler tests, wiring the given input/output/definition specs into a
// single "additionalParties" consideration.
func createContractSpec(inputSpecs []*p8e.DefinitionSpec, outputSpec p8e.OutputSpec, definitionSpec p8e.DefinitionSpec) p8e.ContractSpec {
	consideration := p8e.ConsiderationSpec{
		FuncName:         "additionalParties",
		InputSpecs:       inputSpecs,
		OutputSpec:       &outputSpec,
		ResponsibleParty: 1,
	}
	return p8e.ContractSpec{
		ConsiderationSpecs: []*p8e.ConsiderationSpec{&consideration},
		Definition:         &definitionSpec,
		InputSpecs:         inputSpecs,
		PartiesInvolved:    []p8e.PartyType{p8e.PartyType_PARTY_TYPE_AFFILIATE},
	}
}
// createDefinitionSpec builds a p8e DefinitionSpec for tests, with the given
// name and a resource location composed of classname + reference.
//
// NOTE(review): the defType parameter is never used — Type is hard-coded to 1.
// All current callers pass 1 so behavior matches, but either the parameter
// should be dropped or Type derived from it; confirm intent before reusing
// this helper with a different defType.
func createDefinitionSpec(name string, classname string, reference p8e.ProvenanceReference, defType int) p8e.DefinitionSpec {
	return p8e.DefinitionSpec{
		Name: name,
		ResourceLocation: &p8e.Location{Classname: classname,
			Ref: &reference,
		},
		Type: 1,
	}
}
// TestAddContractSpecMsg drives MsgAddP8EContractSpecRequest through the
// handler with a table of valid/invalid spec + signer combinations.
//
// NOTE(review): this method uses a value receiver (s HandlerTestSuite) while
// SetupTest uses a pointer receiver — testify still runs it correctly, but a
// pointer receiver would be consistent; confirm before changing.
func (s HandlerTestSuite) TestAddContractSpecMsg() {
	// A well-formed definition spec, and one with an empty classname that
	// should fail conversion/validation.
	validDefSpec := createDefinitionSpec("perform_input_checks", "io.provenance.loan.LoanProtos$PartiesList", p8e.ProvenanceReference{Hash: "Adv+huolGTKofYCR0dw5GHm/R7sUWOwF32XR8r8r9kDy4il5U/LApxOWYHb05jhK4+eY4YzRMRiWcxU3Lx0+Mw=="}, 1)
	invalidDefSpec := createDefinitionSpec("perform_action", "", p8e.ProvenanceReference{Hash: "Adv+huolGTKofYCR0dw5GHm/R7sUWOwF32XR8r8r9kDy4il5U/LApxOWYHb05jhK4+eY4YzRMRiWcxU3Lx0+Mw=="}, 1)
	cases := map[string]struct {
		v39CSpec p8e.ContractSpec // legacy-format spec submitted to the handler
		signers  []string         // tx signers; must include the existing owner on update
		wantErr  bool
		errorMsg string // exact error text expected when wantErr is true
	}{
		"should successfully ADD contract spec in from v38 to v40": {
			createContractSpec([]*p8e.DefinitionSpec{&validDefSpec}, p8e.OutputSpec{Spec: &validDefSpec}, validDefSpec),
			[]string{s.user1},
			false,
			"",
		},
		// Same spec submitted again: exercises the update path.
		"should successfully UPDATE contract spec in from v38 to v40": {
			createContractSpec([]*p8e.DefinitionSpec{&validDefSpec}, p8e.OutputSpec{Spec: &validDefSpec}, validDefSpec),
			[]string{s.user1},
			false,
			"",
		},
		// user2 signs but user1 owns the spec from the ADD case above.
		"should fail to add due to invalid signers": {
			createContractSpec([]*p8e.DefinitionSpec{&validDefSpec}, p8e.OutputSpec{Spec: &validDefSpec}, validDefSpec),
			[]string{s.user2},
			true,
			fmt.Sprintf("missing signature from existing owner %s; required for update", s.user1),
		},
		"should fail on converting contract validate basic": {
			createContractSpec([]*p8e.DefinitionSpec{&invalidDefSpec}, p8e.OutputSpec{Spec: &validDefSpec}, validDefSpec),
			[]string{s.user1},
			true,
			"input specification type name cannot be empty",
		},
	}
	for n, tc := range cases {
		tc := tc // capture range variable for the subtest closure
		s.Run(n, func() {
			_, err := s.handler(s.ctx, &types.MsgAddP8EContractSpecRequest{Contractspec: tc.v39CSpec, Signers: tc.signers})
			if tc.wantErr {
				s.Error(err)
				s.Equal(tc.errorMsg, err.Error())
			} else {
				s.NoError(err)
			}
		})
	}
}
|
package stack
// Stack is a fixed-capacity LIFO stack of ints backed by a slice.
type Stack struct {
	size int   // maximum number of elements the stack can hold
	top  int   // index of the next free slot (== current element count)
	data []int // backing storage, allocated once at creation
}
// CreatStack allocates a new stack able to hold size ints.
// (The misspelled name is kept as-is: it is the public API.)
func CreatStack(size int) Stack {
	return Stack{
		size: size,
		data: make([]int, size),
		// top defaults to 0: the stack starts empty.
	}
}
// Push places data on top of the stack and reports whether it fit.
//
// Fixes two defects in the original:
//   - the receiver was a value, so s.top++ mutated a copy and the stack
//     never actually grew (every Push overwrote slot 0);
//   - the guard used top > size, which still allowed a write at
//     s.data[size] (index out of range) when the stack was exactly full.
func (s *Stack) Push(data int) bool {
	if s.top >= s.size {
		return false // stack is full
	}
	s.data[s.top] = data
	s.top++
	return true
}
//出栈
func (s Stack) Pop() int {
s.top--
data := s.data[s.top]
return data
} |
package main
import (
"fmt"
"strconv"
)
// main reads a decimal integer from stdin and prints its binary representation.
func main() {
	var decimal int64
	fmt.Println("Enter decimal number")
	// BUG FIX: the original called fmt.Scanln(decimal), passing the value
	// instead of a pointer, so the input could never be stored (and always
	// converted 0). Scanln needs &decimal.
	if _, err := fmt.Scanln(&decimal); err != nil {
		fmt.Println("Invalid input:", err)
		return
	}
	output := strconv.FormatInt(decimal, 2)
	fmt.Println("Output ", output)
}
package main
import (
"mime/multipart"
"net"
"net/http"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/elitah/utils/aes"
"github.com/elitah/utils/atomic"
"github.com/elitah/utils/bufferpool"
"github.com/elitah/utils/cpu"
"github.com/elitah/utils/exepath"
"github.com/elitah/utils/hash"
"github.com/elitah/utils/hex"
"github.com/elitah/utils/httptools"
"github.com/elitah/utils/logs"
"github.com/elitah/utils/mutex"
"github.com/elitah/utils/number"
"github.com/elitah/utils/platform"
"github.com/elitah/utils/random"
"github.com/elitah/utils/sqlite"
"github.com/elitah/utils/vhost"
"github.com/elitah/utils/wait"
)
// main configures the console logger and runs each utils demo in sequence.
// The commented-out entries (testBufferPool/testVhost/testHttpTools) either
// block on network accept loops or exit the process, so they are opt-in.
func main() {
	logs.SetLogger(logs.AdapterConsole, `{"level":99,"color":true}`)
	logs.EnableFuncCallDepth(true)
	logs.SetLogFuncCallDepth(3)
	logs.Async()
	defer logs.Close()
	logs.Info("hello utils")
	testAES()
	testAtomic()
	testNumber()
	//testBufferPool()
	//testVhost()
	//testHttpTools()
	testWait()
	testExtPath()
	testHex()
	testRandom()
	testPlatform()
	testCPU()
	testMutex()
	testHash()
	testSQLite()
}
// testAES round-trips a plaintext through two AES tools built from the same
// key "123456": the first encrypts, the second decrypts its output, and the
// recovered text is logged.
func testAES() {
	logs.Info("--- hello utils/aes test ----------------------------------------------------------------")
	enc := aes.NewAESTool("123456")
	if nil == enc {
		return
	}
	dec := aes.NewAESTool("123456")
	if nil == dec {
		return
	}
	enc.EncryptInit()
	enc.Write([]byte("exampleplaintext"))
	enc.Encrypt(nil)
	dec.Write(enc.Bytes())
	dec.Decrypt(nil)
	logs.Info(dec.String())
}
// testAtomic runs the same Add/CAS/Load/Swap/Load/Sub/Load sequence against
// each of the four atomic wrapper types (int32, int64, uint32, uintptr-sized),
// logging every intermediate result. The call order is the output, so the
// sequence is kept exactly as-is.
func testAtomic() {
	logs.Info("--- hello utils/atomic test ----------------------------------------------------------------")
	// 32-bit signed counter.
	xs32 := atomic.AInt32(0)
	logs.Info(xs32.Add(1))
	logs.Info(xs32.CAS(1, 0)) // succeeds: value is 1 after Add
	logs.Info(xs32.Load())
	logs.Info(xs32.Swap(1)) // returns the old value
	logs.Info(xs32.Load())
	logs.Info(xs32.Sub(1))
	logs.Info(xs32.Load())
	// 64-bit signed counter.
	xs64 := atomic.AInt64(0)
	logs.Info(xs64.Add(1))
	logs.Info(xs64.CAS(1, 0))
	logs.Info(xs64.Load())
	logs.Info(xs64.Swap(1))
	logs.Info(xs64.Load())
	logs.Info(xs64.Sub(1))
	logs.Info(xs64.Load())
	// 32-bit unsigned counter.
	xu32 := atomic.AUint32(0)
	logs.Info(xu32.Add(1))
	logs.Info(xu32.CAS(1, 0))
	logs.Info(xu32.Load())
	logs.Info(xu32.Swap(1))
	logs.Info(xu32.Load())
	logs.Info(xu32.Sub(1))
	logs.Info(xu32.Load())
	// 64-bit unsigned counter.
	xu64 := atomic.AUint64(0)
	logs.Info(xu64.Add(1))
	logs.Info(xu64.CAS(1, 0))
	logs.Info(xu64.Load())
	logs.Info(xu64.Swap(1))
	logs.Info(xu64.Load())
	logs.Info(xu64.Sub(1))
	logs.Info(xu64.Load())
	// Pointer-sized unsigned counter.
	xptr := atomic.AUintptr(0)
	logs.Info(xptr.Add(1))
	logs.Info(xptr.CAS(1, 0))
	logs.Info(xptr.Load())
	logs.Info(xptr.Swap(1))
	logs.Info(xptr.Load())
	logs.Info(xptr.Sub(1))
	logs.Info(xptr.Load())
}
// testNumber exercises number.IsNumeric on a nil, int, hex-int, and float
// value, then number.ToInt64 on an int, a hex int literal, and a hex string.
// Output order matches the original line-by-line calls.
func testNumber() {
	logs.Info("--- hello utils/number test ----------------------------------------------------------------")
	for _, candidate := range []interface{}{nil, 5, 0x5, 0.1} {
		logs.Info("IsNumber: %v", number.IsNumeric(candidate))
	}
	for _, candidate := range []interface{}{-50, 0x50, "0x50"} {
		if v, err := number.ToInt64(candidate); nil == err {
			logs.Info("ToInt64: %v", v)
		} else {
			logs.Info("ToInt64: error: %v", err)
		}
	}
}
// testBufferPool demonstrates the pooled buffer API: limited reads, tee
// reading into a second buffer, and reference counting. It ends the process
// with os.Exit(-1), which is why main keeps it commented out.
func testBufferPool() {
	logs.Info("--- hello utils/bufferpool test ----------------------------------------------------------------")
	logs.Info("--- bufferpool.Get(): start -----------------------------------------------------------------")
	b := bufferpool.Get()
	logs.Info("--- bufferpool.Get(): done ---------------------------------------------------------------")
	logs.Info("--- bufferpool: test function ReadFromLimited ----------------------------------------------------")
	r := strings.NewReader("some io.Reader stream to be read\n")
	// Read at most 10 bytes per call; the second call continues the stream.
	b.ReadFromLimited(r, 10)
	logs.Info("bufferpool.ReadFromLimited(): %s", b.String())
	b.Reset()
	b.ReadFromLimited(r, 10)
	logs.Info("bufferpool.ReadFromLimited(): %s", b.String())
	logs.Info("--- bufferpool: test TeeReader -------------------------------------------------")
	if _b := bufferpool.Get(); nil != _b {
		r = strings.NewReader("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789")
		b.Reset()
		// _r mirrors up to 10 bytes into _b while b reads up to 20 from it.
		if _r, err := _b.TeeReader(r, 10); nil == err {
			b.ReadFromLimited(_r, 20)
		}
		logs.Info("bufferpool.ReadFromLimited(): b: %s", b.String())
		logs.Info("bufferpool.ReadFromLimited(): _b: %s", _b.String())
	}
	logs.Info("--- bufferpool: test buffer reference count -------------------------------------")
	// Add 6 references, then Free() repeatedly until the buffer is released.
	b.AddRefer(6)
	for i := 0; !b.IsFree(); i++ {
		logs.Info(i, b.Free())
	}
	time.Sleep(time.Second)
	os.Exit(-1)
}
// testVhost listens on :51180 and, for each accepted connection, sniffs the
// HTTP Host header via vhost.HTTP, logs it, then closes both the sniffed and
// underlying connections. Accept failures exit the process, mirroring the
// original behavior.
func testVhost() {
	logs.Info("--- hello utils/vhost test ----------------------------------------------------------------")
	listener, err := net.Listen("tcp", ":51180")
	if nil != err {
		logs.Error(err)
		return
	}
	for {
		conn, aerr := listener.Accept()
		if nil != aerr {
			logs.Error(aerr)
			os.Exit(-1)
		}
		if httpConn, herr := vhost.HTTP(conn); nil == herr {
			logs.Info(httpConn.Host)
			httpConn.Close()
		} else {
			logs.Error(herr)
		}
		conn.Close()
	}
}
// testHttpTools starts a blocking HTTP server on :38082 that demos the
// httptools response helper: redirects, a multipart upload form, a template
// page, and a JS alert that shuts the process down. main keeps this
// commented out because ListenAndServe never returns.
func testHttpTools() {
	logs.Info("--- hello utils/httptools test ----------------------------------------------------------------")
	logs.Info(http.ListenAndServe(":38082", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Obtain the generic handler (debug mode variant shown below):
		// if resp := httptools.NewHttpHandler(r, true); nil != resp {
		// Obtain the generic handler.
		if resp := httptools.NewHttpHandler(r, true); nil != resp {
			// Debug mode toggle.
			//resp.Debug(true)
			// Flush the accumulated response and release the handler on exit.
			defer func() {
				if o := resp.Output(w); "" != o {
					logs.Info(o)
				}
				resp.Release()
			}()
			// Route by path.
			switch resp.GetPath() {
			case "/":
				if resp.HttpOnlyIs("GET") {
					resp.SendHttpRedirect("/test")
				}
				return
			case "/post":
				if resp.HttpOnlyIs("GET", "POST") {
					switch resp.Method {
					case "GET":
						// Render a bare multipart upload form.
						resp.SendHTML(`<form action="/post" method="post" enctype="multipart/form-data">`)
						resp.SendHTML(`<p><input type="file" name="file"></p>`)
						resp.SendHTML(`<p><input type="text" name="name"></p>`)
						resp.SendHTML(`<p><input type="submit" value="submit"></p>`)
						resp.SendHTML(`</form>`)
					case "POST":
						// Log each uploaded part; returning true continues the walk.
						if err := resp.GetUpload(func(part *multipart.Part) bool {
							logs.Info(part)
							return true
						}); nil == err {
							resp.SendHTML(`<h3>ok</h3>`)
						} else {
							logs.Error(err)
						}
					}
				}
				return
			case "/test":
				if resp.HttpOnlyIs("GET") {
					// Render an inline template with a single Path field.
					if err := resp.TemplateWrite([]byte(`<html>
<head>
<title>test</title>
</head>
<body>
<p>hello test, <a href="{{ .Path }}">bye</a></p>
</body>
</html>
`), struct {
						Path string
					}{
						Path: "/bye",
					}, "text/html"); nil != err {
						logs.Error(err)
					}
				}
				return
			case "/bye":
				if resp.HttpOnlyIs("GET") {
					resp.SendJSAlert("提示", "成功", "/")
					// Give the alert time to render, then terminate the demo.
					go func() {
						time.Sleep(3 * time.Second)
						os.Exit(0)
					}()
				}
				return
			}
			//
			resp.NotFound()
			//
			return
		}
		w.WriteHeader(http.StatusInternalServerError)
	})))
}
// testWait demonstrates wait.Signal: it blocks logging a once-per-second
// tick until one of SIGHUP/SIGINT/SIGQUIT/SIGTERM arrives; the notify
// callback returning true lets the wait end.
func testWait() {
	logs.Info("--- hello utils/wait test ----------------------------------------------------------------")
	logs.Info("wait.Signal(): start")
	wait.Signal(
		wait.WithNotify(func(s os.Signal) bool {
			logs.Info(s)
			return true
		}),
		wait.WithSignal(syscall.SIGHUP, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM),
		wait.WithTicket(1, func(t time.Time) {
			logs.Info(t)
		}),
	)
	logs.Info("wait.Signal(): done")
}
// testExtPath logs the running executable's full path and directory.
func testExtPath() {
	logs.Info("--- hello utils/exepath test ----------------------------------------------------------------")
	logs.Info("exepath.GetExePath():\n\t%s\n", exepath.GetExePath())
	logs.Info("exepath.GetExeDir():\n\t%s\n", exepath.GetExeDir())
}
// testHex exercises the hex helpers: number→hex dumps of growing digit
// strings in little- then big-endian order, a negative number at several
// group widths, plain byte-slice encoding, and a seq-aware round trip.
// Every log line and its order reproduce the original verbatim.
func testHex() {
	logs.Info("--- hello utils/hex test ----------------------------------------------------------------")
	data := []byte{0x11, 0x22, 0x33, 0x44, 0x55, 0x66}
	result := hex.EncodeToStringWithSeq(data, ' ')
	// The sample inputs "1", "12", ..., "123456789012345" are prefixes of one
	// constant; labels reproduce the original log captions exactly.
	const digits = "123456789012345"
	labels := []string{"1", "1~2", "1~3", "1~4", "1~5", "1~6", "1~7", "1~8", "1~9", "1~0", "1~01", "1~02", "1~03", "1~04", "1~05"}
	for _, little := range []bool{true, false} {
		order := "be"
		if little {
			order = "le"
		}
		for i, label := range labels {
			logs.Info("hex.EncodeNumberToStringWithSeq("+label+", "+order+"):\n\t%s\n", hex.EncodeNumberToStringWithSeq(digits[:i+1], ' ', little))
		}
	}
	// Negative number with explicit group widths, little- then big-endian.
	for _, little := range []bool{true, false} {
		for _, width := range []int{1, 3, 5, 7} {
			logs.Info("hex.EncodeNumberToStringWithSeq():\n\t%s\n", hex.EncodeNumberToStringWithSeq(-123456789012345, ' ', little, width))
		}
	}
	logs.Info("hex.EncodeToString():\n\t%s\n", hex.EncodeToString(data))
	logs.Info("hex.EncodeToStringWithSeq():\n\t%s\n", result)
	if decoded, err := hex.DecodeStringWithSeq(result); nil == err {
		logs.Info("hex.DecodeStringWithSeq():\n\t%x\n", decoded)
	} else {
		logs.Info("hex.DecodeStringWithSeq():\n\t%v\n", err)
	}
}
// testRandom logs a 64-character random string for every generation mode,
// plus a random UUID.
func testRandom() {
	logs.Info("--- hello utils/random test ----------------------------------------------------------------")
	logs.Info("random.ModeALL(64):\n\t%s\n", random.NewRandomString(random.ModeALL, 64))
	logs.Info("random.ModeNoLower(64):\n\t%s\n", random.NewRandomString(random.ModeNoLower, 64))
	logs.Info("random.ModeNoUpper(64):\n\t%s\n", random.NewRandomString(random.ModeNoUpper, 64))
	logs.Info("random.ModeNoNumber(64):\n\t%s\n", random.NewRandomString(random.ModeNoNumber, 64))
	logs.Info("random.ModeNoLowerNumber(64):\n\t%s\n", random.NewRandomString(random.ModeNoLowerNumber, 64))
	logs.Info("random.ModeNoUpperNumber(64):\n\t%s\n", random.NewRandomString(random.ModeNoUpperNumber, 64))
	logs.Info("random.ModeNoLine(64):\n\t%s\n", random.NewRandomString(random.ModeNoLine, 64))
	logs.Info("random.ModeNoLowerLine(64):\n\t%s\n", random.NewRandomString(random.ModeNoLowerLine, 64))
	logs.Info("random.ModeNoUpperLine(64):\n\t%s\n", random.NewRandomString(random.ModeNoUpperLine, 64))
	logs.Info("random.ModeOnlyLower(64):\n\t%s\n", random.NewRandomString(random.ModeOnlyLower, 64))
	logs.Info("random.ModeOnlyUpper(64):\n\t%s\n", random.NewRandomString(random.ModeOnlyUpper, 64))
	logs.Info("random.ModeOnlyNumber(64):\n\t%s\n", random.NewRandomString(random.ModeOnlyNumber, 64))
	logs.Info("random.ModeHexUpper(64):\n\t%s\n", random.NewRandomString(random.ModeHexUpper, 64))
	logs.Info("random.ModeHexLower(64):\n\t%s\n", random.NewRandomString(random.ModeHexLower, 64))
	logs.Info("random.NewRandomUUID:\n\t%s\n", random.NewRandomUUID())
	logs.Info("--------------------------------------------------------------------------------------------")
}
// testPlatform logs the host platform information.
func testPlatform() {
	// BUG FIX: the original banner read "hello utils/random test" — a
	// copy-paste slip from testRandom; corrected to name the platform test.
	logs.Info("--- hello utils/platform test ----------------------------------------------------------------")
	logs.Info(platform.GetPlatformInfo())
	logs.Info("--------------------------------------------------------------------------------------------")
}
// testCPU samples the CPU idle/total tick counters one second apart and
// logs the busy percentage over that window.
func testCPU() {
	logs.Info("--- hello utils/cpu test ----------------------------------------------------------------")
	prevIdle, prevTotal := cpu.GetCPUTicks()
	time.Sleep(1 * time.Second)
	curIdle, curTotal := cpu.GetCPUTicks()
	var (
		idleDelta  = float64(curIdle - prevIdle)
		totalDelta = float64(curTotal - prevTotal)
		usage      = 100 * (totalDelta - idleDelta) / totalDelta
	)
	logs.Info("CPU usage is %.2f [busy: %.0f, total: %.0f]\n", usage, totalDelta-idleDelta, totalDelta)
	logs.Info("--------------------------------------------------------------------------------------------")
}
// testMutex hammers two mutex flavors (mutex.Mutex and mutex.TMutex) with
// 60 goroutines each — half using TryLock, half blocking Lock — while the
// main goroutine samples the guarded counters for 20 seconds.
//
// NOTE(review): the spawned goroutines loop forever with no stop signal;
// acceptable for a demo that ends with process exit, but they leak if this
// function returns into a longer-lived program.
func testMutex() {
	var r1 mutex.Mutex
	var r2 mutex.TMutex
	var n1 int // incremented only while r1 is held
	var n2 int // incremented only while r2 is held
	logs.Info("--- hello utils/mutex test ----------------------------------------------------------------")
	// 30 TryLock writers on r1, each with its own random 100–999ms period.
	for i := 0; 30 > i; i++ {
		go func() {
			s := time.Duration(random.NewRandomInt(900) + 100)
			for {
				if r1.TryLock() {
					n1++
					r1.Unlock()
				}
				time.Sleep(s * time.Millisecond)
			}
		}()
	}
	// 30 blocking-Lock writers on r1.
	for i := 0; 30 > i; i++ {
		go func() {
			s := time.Duration(random.NewRandomInt(900) + 100)
			for {
				r1.Lock()
				n1++
				r1.Unlock()
				time.Sleep(s * time.Millisecond)
			}
		}()
	}
	// 30 TryLock writers on r2.
	for i := 0; 30 > i; i++ {
		go func() {
			s := time.Duration(random.NewRandomInt(900) + 100)
			for {
				if r2.TryLock() {
					n2++
					r2.Unlock()
				}
				time.Sleep(s * time.Millisecond)
			}
		}()
	}
	// 30 blocking-Lock writers on r2.
	for i := 0; 30 > i; i++ {
		go func() {
			s := time.Duration(random.NewRandomInt(900) + 100)
			for {
				r2.Lock()
				n2++
				r2.Unlock()
				time.Sleep(s * time.Millisecond)
			}
		}()
	}
	// Sample both counters once per second for 20 seconds; reads are also
	// guarded by TryLock so they never block the writers.
	for i := 0; 20 > i; i++ {
		logs.Info("---", i)
		if r1.TryLock() {
			logs.Info(n1)
			r1.Unlock()
		}
		if r2.TryLock() {
			logs.Info(n2)
			r2.Unlock()
		}
		time.Sleep(1 * time.Second)
	}
}
// testHash logs digests of one mixed argument list ("123", "456", 123, 456)
// under every supported algorithm, first with gob encoding of the inputs
// enabled and then disabled. Call order matches the original exactly:
// all HashToBytes, then all HashToString, per gob setting.
func testHash() {
	logs.Info("--- hello utils/hash test ----------------------------------------------------------------")
	algos := []string{"md5", "sha1", "sha256", "sha512"}
	for _, gobFormat := range []bool{true, false} {
		hash.SetGobFormat(gobFormat)
		for _, algo := range algos {
			logs.Info(hash.HashToBytes(algo, "123", "456", 123, 456))
		}
		for _, algo := range algos {
			logs.Info(hash.HashToString(algo, "123", "456", 123, 456))
		}
	}
}
// testSQLite opens a backup-enabled SQLite DB, creates nine tables (the
// trailing `true` on test2/5/7/9 presumably marks them for backup — confirm
// against sqlite.CreateTable), runs one synchronization, then inserts a row
// into every table each 100ms until a termination signal arrives.
func testSQLite() {
	if db := sqlite.NewSQLiteDB(
		sqlite.WithBackup("test.db", 10, 2048, 32),
	); nil != db {
		db.CreateTable("test1", `id INTEGER PRIMARY KEY AUTOINCREMENT,
key INTEGER NOT NULL,
timestamp INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))`)
		db.CreateTable("test2", `id INTEGER PRIMARY KEY AUTOINCREMENT,
key INTEGER NOT NULL,
timestamp INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))`, true)
		db.CreateTable("test3", `id INTEGER PRIMARY KEY AUTOINCREMENT,
key INTEGER NOT NULL,
timestamp INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))`)
		db.CreateTable("test4", `id INTEGER PRIMARY KEY AUTOINCREMENT,
key INTEGER NOT NULL,
timestamp INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))`)
		db.CreateTable("test5", `id INTEGER PRIMARY KEY AUTOINCREMENT,
key INTEGER NOT NULL,
timestamp INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))`, true)
		db.CreateTable("test6", `id INTEGER PRIMARY KEY AUTOINCREMENT,
key INTEGER NOT NULL,
timestamp INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))`)
		db.CreateTable("test7", `id INTEGER PRIMARY KEY AUTOINCREMENT,
key INTEGER NOT NULL,
timestamp INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))`, true)
		db.CreateTable("test8", `id INTEGER PRIMARY KEY AUTOINCREMENT,
key INTEGER NOT NULL,
timestamp INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))`)
		db.CreateTable("test9", `id INTEGER PRIMARY KEY AUTOINCREMENT,
key INTEGER NOT NULL,
timestamp INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))`, true)
		// One synchronous backup pass; logs the number of rows synced.
		if n, err := db.StartBackup(true); nil == err {
			logs.Warn("表同步完成,同步条数为%d", n)
		} else {
			logs.Error(err)
		}
		// Insert loop: runs until SIGHUP/SIGINT/SIGQUIT/SIGTERM, then closes
		// the database cleanly.
		sig := make(chan os.Signal, 1)
		signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM)
		for {
			select {
			case c := <-sig:
				logs.Warn("Signal: ", c, ", Closing!!!")
				db.Close()
				return
			case <-time.After(100 * time.Millisecond):
				//default:
				if conn, err := db.GetConn(true); nil == err {
					conn.Exec("INSERT INTO test1 (key) VALUES (?);", time.Now().Unix())
					conn.Exec("INSERT INTO test2 (key) VALUES (?);", time.Now().Unix())
					conn.Exec("INSERT INTO test3 (key) VALUES (?);", time.Now().Unix())
					conn.Exec("INSERT INTO test4 (key) VALUES (?);", time.Now().Unix())
					conn.Exec("INSERT INTO test5 (key) VALUES (?);", time.Now().Unix())
					conn.Exec("INSERT INTO test6 (key) VALUES (?);", time.Now().Unix())
					conn.Exec("INSERT INTO test7 (key) VALUES (?);", time.Now().Unix())
					conn.Exec("INSERT INTO test8 (key) VALUES (?);", time.Now().Unix())
					conn.Exec("INSERT INTO test9 (key) VALUES (?);", time.Now().Unix())
				} else {
					logs.Error(err)
				}
			}
		}
	}
}
|
package store
import (
"strconv"
"time"
"github.com/go-redis/redis"
"github.com/mcculleydj/currency-trader/exchange/pkg/common"
)
var client *redis.Client
// RedisConnect opens the package-level Redis client against localhost:6379
// and verifies the connection with a PING.
func RedisConnect() error {
	options := redis.Options{Addr: "localhost:6379"}
	client = redis.NewClient(&options)
	_, err := client.Ping().Result()
	return err
}
// RedisClose terminates the connection to Redis.
// It assumes RedisConnect has been called; closing a nil client panics.
func RedisClose() error {
	return client.Close()
}
// Refresh sets the "<source>_live" key to the latest timestamp and replaces
// the source's quote hash map with the latest data.
//
// Currency Layer refreshes every 60 seconds, but to stay within the API call
// budget the cached data is kept for a full hour. (Also fixes the original
// comment typo "refereshes".)
func Refresh(res *common.ResponseBody) error {
	// time.Hour is already a time.Duration; the original wrapped it in a
	// redundant time.Duration(1 * time.Hour) conversion.
	const expiry = time.Hour
	if err := client.Set(res.Source+"_live", res.Timestamp, expiry).Err(); err != nil {
		return err
	}
	return client.HMSet(res.Source, res.CastQuotes()).Err()
}
// Fetch returns the cached timestamp and the parsed exchange-rate map for
// the given currency from Redis.
func Fetch(currency string) (int64, map[string]float64, error) {
	rawTS, err := client.Get(currency + "_live").Result()
	if err != nil {
		return 0, nil, err
	}
	ts, err := strconv.ParseInt(rawTS, 10, 64)
	if err != nil {
		return 0, nil, err
	}
	rawQuotes, err := client.HGetAll(currency).Result()
	if err != nil {
		return 0, nil, err
	}
	quotes := make(map[string]float64, len(rawQuotes))
	for pair, rawRate := range rawQuotes {
		rate, perr := strconv.ParseFloat(rawRate, 64)
		if perr != nil {
			return 0, nil, perr
		}
		quotes[pair] = rate
	}
	return ts, quotes, nil
}
|
package sfen
import (
"context"
"fmt"
"io"
"sort"
"strings"
)
// Surface is a shogi position: the side to move, the 9x9 board (nil cells
// are empty), pieces in hand, and the number of the next move.
type Surface struct {
	phase    Player       // side to move next
	board    [9][9]*Piece // board[rank][file]; nil means empty square
	captured []*Piece     // pieces in hand, kept sorted via pieceSlice
	nextStep int          // 1-based move counter (SFEN's 4th field)
}
// NewSurfaceEmpty returns a board with no pieces, black to move, step 1.
func NewSurfaceEmpty() *Surface {
	s := new(Surface)
	s.phase = Player_BLACK
	s.nextStep = 1
	return s
}
// NewSurface parses an SFEN position string of the form
// "<board> <b|w> <captured> <step>" into a Surface.
// An unrecognized phase token leaves phase at its zero value, matching the
// original's silent fallthrough.
func NewSurface(pos string) (*Surface, error) {
	var sf, pl, cp string
	var step int
	if _, err := fmt.Sscanf(pos, "%s %s %s %d", &sf, &pl, &cp, &step); err != nil {
		return nil, err
	}
	parser := newPosParser(sf)
	b, err := parser.parseBoard()
	if err != nil {
		return nil, err
	}
	// The original declared `var Player Player`, shadowing the Player type
	// with a same-named local; renamed to phase for clarity.
	var phase Player
	switch pl {
	case "b":
		phase = Player_BLACK
	case "w":
		phase = Player_WHITE
	}
	captured := parseCaptured(cp)
	sort.Sort(pieceSlice(captured))
	return &Surface{
		phase:    phase,
		board:    b,
		captured: captured,
		nextStep: step,
	}, nil
}
// NewSurfaceStartpos returns a Surface initialized to the standard shogi
// starting position.
func NewSurfaceStartpos() *Surface {
	surface := new(Surface)
	surface.InitStartpos()
	return surface
}
// InitStartpos resets the receiver to the standard shogi starting position:
// both back ranks, the bishop/rook pair for each side, a full rank of pawns
// each, black to move, move counter 1, and empty hands.
func (s *Surface) InitStartpos() {
	var board [9][9]*Piece
	// White back rank (rank a): L N S G K G S N L from file 9 to 1.
	board[Y_a][X_9] = &Piece{
		Player: Player_WHITE,
		Type:   Piece_KYOU,
	}
	board[Y_a][X_8] = &Piece{
		Player: Player_WHITE,
		Type:   Piece_KEI,
	}
	board[Y_a][X_7] = &Piece{
		Player: Player_WHITE,
		Type:   Piece_GIN,
	}
	board[Y_a][X_6] = &Piece{
		Player: Player_WHITE,
		Type:   Piece_KIN,
	}
	board[Y_a][X_5] = &Piece{
		Player: Player_WHITE,
		Type:   Piece_GYOKU,
	}
	board[Y_a][X_4] = &Piece{
		Player: Player_WHITE,
		Type:   Piece_KIN,
	}
	board[Y_a][X_3] = &Piece{
		Player: Player_WHITE,
		Type:   Piece_GIN,
	}
	board[Y_a][X_2] = &Piece{
		Player: Player_WHITE,
		Type:   Piece_KEI,
	}
	board[Y_a][X_1] = &Piece{
		Player: Player_WHITE,
		Type:   Piece_KYOU,
	}
	// White bishop and rook (rank b).
	board[Y_b][X_2] = &Piece{
		Player: Player_WHITE,
		Type:   Piece_KAKU,
	}
	board[Y_b][X_8] = &Piece{
		Player: Player_WHITE,
		Type:   Piece_HISHA,
	}
	// Black back rank (rank i), mirror of white's.
	board[Y_i][X_9] = &Piece{
		Player: Player_BLACK,
		Type:   Piece_KYOU,
	}
	board[Y_i][X_8] = &Piece{
		Player: Player_BLACK,
		Type:   Piece_KEI,
	}
	board[Y_i][X_7] = &Piece{
		Player: Player_BLACK,
		Type:   Piece_GIN,
	}
	board[Y_i][X_6] = &Piece{
		Player: Player_BLACK,
		Type:   Piece_KIN,
	}
	board[Y_i][X_5] = &Piece{
		Player: Player_BLACK,
		Type:   Piece_GYOKU,
	}
	board[Y_i][X_4] = &Piece{
		Player: Player_BLACK,
		Type:   Piece_KIN,
	}
	board[Y_i][X_3] = &Piece{
		Player: Player_BLACK,
		Type:   Piece_GIN,
	}
	board[Y_i][X_2] = &Piece{
		Player: Player_BLACK,
		Type:   Piece_KEI,
	}
	board[Y_i][X_1] = &Piece{
		Player: Player_BLACK,
		Type:   Piece_KYOU,
	}
	// Black bishop and rook (rank h), mirrored files vs. white's.
	board[Y_h][X_8] = &Piece{
		Player: Player_BLACK,
		Type:   Piece_KAKU,
	}
	board[Y_h][X_2] = &Piece{
		Player: Player_BLACK,
		Type:   Piece_HISHA,
	}
	// Pawns: white on rank c, black on rank g, every file.
	for _, x := range PosXs {
		board[Y_c][x] = &Piece{
			Player: Player_WHITE,
			Type:   Piece_FU,
		}
		board[Y_g][x] = &Piece{
			Player: Player_BLACK,
			Type:   Piece_FU,
		}
	}
	s.board = board
	s.phase = Player_BLACK
	s.nextStep = 1
}
// PrintAA writes an ASCII-art rendering of the board to out: a file
// header row, a separator line, one row per rank (suffixed with the
// rank letter), and finally the list of captured pieces.
//
// Write errors from out are ignored and the method always returns nil;
// the error return is kept for call-site compatibility.
func (s *Surface) PrintAA(out io.Writer) error {
	for _, x := range posX {
		fmt.Fprintf(out, " %c", x)
	}
	fmt.Fprintln(out)
	// Idiomatic form of the original `for _, _ = range posX`.
	for range posX {
		fmt.Fprint(out, "--")
	}
	fmt.Fprintln(out, "--")
	for y, xs := range s.board {
		for _, p := range xs {
			fmt.Fprint(out, p.aa())
		}
		fmt.Fprintf(out, " | %c", posY[y])
		fmt.Fprintln(out)
	}
	fmt.Fprint(out, "captured: ")
	for _, p := range s.captured {
		fmt.Fprintf(out, "%c", p.a())
	}
	fmt.Fprintln(out)
	return nil
}
// printCaptured renders the captured-piece list in SFEN hand notation.
// Runs of equal pieces (captured is expected to be sorted so equal
// pieces are adjacent) collapse to "<count><letter>"; white pieces are
// lowercased. An empty hand is rendered as "-".
func printCaptured(captured []*Piece) string {
	if len(captured) == 0 {
		return "-"
	}
	var cps []string
	// flush appends the token for pc repeated count times. Extracted to
	// remove the duplicated in-loop / after-loop emission logic.
	flush := func(pc *Piece, count int) {
		p := sfenPiece[pc.Type : pc.Type+1]
		if pc.Player == Player_WHITE {
			p = strings.ToLower(p)
		}
		if count != 1 {
			p = fmt.Sprintf("%d%s", count, p)
		}
		cps = append(cps, p)
	}
	count := 1
	var prev *Piece
	for _, cp := range captured {
		switch {
		case prev == nil:
			// First element: nothing to flush yet.
			prev = cp
		case prev.equal(cp):
			count++
		default:
			flush(prev, count)
			count = 1
			prev = cp
		}
	}
	// prev is non-nil here because captured is non-empty.
	flush(prev, count)
	return strings.Join(cps, "")
}
// PrintSFEN writes the surface to w in SFEN form:
// "<board> <phase> <captured> <next-step>".
// Ranks are joined with "/" and runs of empty squares are encoded as a
// digit. Returns any error from the final write.
func (s *Surface) PrintSFEN(w io.Writer) error {
	ranks := make([]string, 0, len(s.board))
	for _, row := range s.board {
		var b strings.Builder
		empties := 0
		for _, piece := range row {
			if piece.isEmpty() {
				empties++
				continue
			}
			if empties > 0 {
				fmt.Fprintf(&b, "%d", empties)
				empties = 0
			}
			b.WriteString(piece.sfen())
		}
		if empties > 0 {
			fmt.Fprintf(&b, "%d", empties)
		}
		ranks = append(ranks, b.String())
	}
	var phase string
	switch s.phase {
	case Player_WHITE:
		phase = "w"
	case Player_BLACK:
		phase = "b"
	}
	_, err := fmt.Fprintf(w,
		"%s %s %s %d",
		strings.Join(ranks, "/"),
		phase,
		printCaptured(s.captured),
		s.nextStep)
	return err
}
// move applies a parsed move m to the surface: either drops a captured
// piece (when m.putted is set) or moves the piece at m.from, capturing
// whatever occupied m.to. It then advances the step counter, flips the
// phase, and re-sorts the captured list.
func (s *Surface) move(m *move) error {
	var p, pp *Piece
	if m.putted != Piece_NULL {
		// Drop: remove a matching piece belonging to the player to move
		// from the captured list (slice-delete keeps the rest in order).
		for i, cp := range s.captured {
			if cp.Player == s.phase && cp.Type == m.putted {
				p = s.captured[i]
				s.captured = append(s.captured[:i], s.captured[i+1:]...)
				break
			}
		}
		if p == nil {
			return fmt.Errorf("captured Piece not found: Piece=%d, Player=%d", m.putted, s.phase)
		}
	} else {
		// Board move: lift the piece off its source square.
		p, s.board[m.from.Y][m.from.X] = s.board[m.from.Y][m.from.X], nil
	}
	// Promotion is sticky: once promoted, a piece stays promoted.
	p.Promoted = p.Promoted || m.promoted
	// Place p on the destination; pp is whatever was there before.
	pp, s.board[m.to.Y][m.to.X] = s.board[m.to.Y][m.to.X], p
	// NOTE(review): pp is nil when the destination square was empty, so
	// isEmpty presumably handles a nil receiver — confirm.
	if !pp.isEmpty() {
		// Captured piece changes hands and loses its promotion.
		pp.Player = pp.Player.flip()
		pp.Promoted = false
		s.captured = append(s.captured, pp)
	}
	s.nextStep++
	s.phase = s.phase.flip()
	sort.Sort(pieceSlice(s.captured))
	return nil
}
// Move parses the given move notation and applies it to the surface.
func (s *Surface) Move(move string) error {
	m, err := newMoveParser(move).parseMove()
	if err != nil {
		return err
	}
	return s.move(m)
}
// SetStep sets the number of the next move.
func (s *Surface) SetStep(step int) {
	s.nextStep = step
}
// SetPlayer sets the player to move next.
func (s *Surface) SetPlayer(player Player) {
	s.phase = player
}
// SetPiece places piece on the board at pos. If pos is empty the piece
// is added to the captured list instead, which is kept sorted.
func (s *Surface) SetPiece(pos *Pos, piece *Piece) {
	if pos.IsEmpty() {
		s.captured = append(s.captured, piece)
		sort.Sort(pieceSlice(s.captured))
		return
	}
	s.board[pos.Y][pos.X] = piece
}
// GetPiece returns the piece at the given board coordinates
// (nil for an empty square).
func (s *Surface) GetPiece(x PosX, y PosY) *Piece {
	return s.board[y][x]
}
// GetCaptured returns the captured-piece list. The returned slice is
// not a copy; callers must not mutate it.
func (s *Surface) GetCaptured() []*Piece {
	return s.captured
}
// Scan invokes f for every board square (including empty ones, with a
// nil *Piece) and then for every captured piece (with a nil *Pos). It
// stops early and returns ctx.Err() if the context is cancelled.
func (s *Surface) Scan(ctx context.Context, f func(*Pos, *Piece)) error {
	// visit calls f unless the context has been cancelled.
	visit := func(pos *Pos, piece *Piece) error {
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			f(pos, piece)
			return nil
		}
	}
	for _, y := range PosYs {
		for _, x := range PosXs {
			if err := visit(&Pos{X: x, Y: y}, s.board[y][x]); err != nil {
				return err
			}
		}
	}
	for _, captured := range s.captured {
		if err := visit(nil, captured); err != nil {
			return err
		}
	}
	return nil
}
|
package analysis
import (
"fmt"
"go/token"
"go/types"
"log"
"regexp"
"strconv"
"strings"
"sync"
"github.com/frk/gosql/internal/config"
"github.com/frk/gosql/internal/typesutil"
"github.com/frk/tagutil"
)
// Keep the log and fmt imports referenced even when no debug prints
// are present in the file.
var _ = log.Println
var _ = fmt.Println
var (
	// NOTE(mkopriva): Identifiers MUST begin with a letter (a-z) or an underscore (_).
	// Subsequent characters in an identifier can be letters, underscores, and digits (0-9).
	// Matches a valid identifier.
	rxIdent = regexp.MustCompile(`^[A-Za-z_]\w*$`)
	// Matches a valid db relation identifier.
	// - Valid format: [schema_name.]relation_name[:alias_name]
	rxRelIdent = regexp.MustCompile(`^(?:[A-Za-z_]\w*\.)?[A-Za-z_]\w*(?:\:[A-Za-z_]\w*)?$`)
	// Matches a valid table column reference.
	// - Valid format: [rel_name_or_alias.]column_name
	rxColIdent = regexp.MustCompile(`^(?:[A-Za-z_]\w*\.)?[A-Za-z_]\w*$`)
	// Matches a few reserved identifiers (case-insensitive).
	rxReserved = regexp.MustCompile(`^(?i:true|false|` +
		`current_date|current_time|current_timestamp|` +
		`current_role|current_schema|current_user|` +
		`localtime|localtimestamp|` +
		`session_user)$`)
	// Matches coalesce or coalesce(<value>) where <value> is expected to
	// be a single value literal.
	rxCoalesce = regexp.MustCompile(`(?i)^coalesce$|^coalesce\((.*)\)$`)
)
// analysis holds the state of the analyzer.
type analysis struct {
	// The configuration under which the analysis runs.
	cfg config.Config
	// The FileSet used to resolve source positions for error reporting.
	fset *token.FileSet
	// The named type under analysis.
	named *types.Named
	// The package path of the type under analysis.
	pkgPath string
	// If the type under analysis is a "filter" type this field will hold
	// the result of the analysis, otherwise it will be nil.
	filter *FilterStruct
	// If the type under analysis is a "query" type this field will hold
	// the result of the analysis, otherwise it will be nil.
	query *QueryStruct
	// Accumulates the analysis result and metadata; returned by Run.
	info *Info
}
// Info holds information related to an analyzed TargetStruct. If the analysis
// returns an error, the collected information will be incomplete.
type Info struct {
	// The FileSet associated with the analyzed TargetStruct.
	FileSet *token.FileSet
	// The package path of the analyzed TargetStruct.
	PkgPath string
	// The type name of the analyzed TargetStruct.
	TypeName string
	// The source position of the TargetStruct's type name.
	TypeNamePos token.Pos
	// FieldMap maintains a map of pointers of arbitrary type that represent
	// the result of analyzed fields, to the fields' related go/types specific
	// information. Intended for error reporting by the backend type-checker.
	FieldMap map[FieldPtr]FieldVar
	// RelSpace maintains a set of *unique* relation names or aliases that map
	// onto their respective RelIdent values that were parsed from struct tags.
	RelSpace map[string]RelIdent
	// The analyzed struct.
	Struct TargetStruct
}
// Run analyzes the given named type which is expected to be a struct type whose name
// prefix matches one of the allowed prefixes. It panics if the named type is not actually
// a struct type or if its name does not start with one of the predefined prefixes.
func Run(fset *token.FileSet, named *types.Named, pos token.Pos, cfg config.Config) (*Info, error) {
	structType, ok := named.Underlying().(*types.Struct)
	if !ok {
		panic(named.Obj().Name() + " must be a struct type.") // this shouldn't happen
	}
	typeName := named.Obj().Name()
	a := &analysis{
		cfg:     cfg,
		fset:    fset,
		named:   named,
		pkgPath: named.Obj().Pkg().Path(),
	}
	a.info = &Info{
		FileSet:     fset,
		PkgPath:     a.pkgPath,
		TypeName:    typeName,
		TypeNamePos: pos,
		FieldMap:    make(map[FieldPtr]FieldVar),
		RelSpace:    make(map[string]RelIdent),
	}
	// "filter"-prefixed type names get filter analysis; every other
	// allowed prefix is handled by the query analysis.
	var (
		target TargetStruct
		err    error
	)
	if strings.HasPrefix(strings.ToLower(typeName), "filter") {
		target, err = analyzeFilterStruct(a, structType)
	} else {
		target, err = analyzeQueryStruct(a, structType)
	}
	if err != nil {
		return nil, err
	}
	a.info.Struct = target
	return a.info, nil
}
// error builds an *anError describing a failure at the given field (or,
// when f is nil, at the target type itself), attaching file position,
// type, and current-relation context for the caller to report.
func (a *analysis) error(code errorCode, f *types.Var, blockName, tagString, tagExpr, tagError string) error {
	e := &anError{Code: code, BlockName: blockName, TagString: tagString, TagExpr: tagExpr, TagError: tagError}
	e.PkgPath = a.named.Obj().Pkg().Path()
	e.TargetName = a.named.Obj().Name()
	// Position points at the offending field when one is given,
	// otherwise at the target type's declaration.
	srcPos := a.named.Obj().Pos()
	if f != nil {
		srcPos = f.Pos()
		e.FieldType = f.Type().String()
		e.FieldTypeKind = analyzeTypeKind(f.Type()).String()
		e.FieldName = f.Name()
	}
	position := a.fset.Position(srcPos)
	e.FileName = position.Filename
	e.FileLine = position.Line
	// Record the relation of whichever struct kind is being analyzed.
	if a.query != nil && a.query.Rel != nil {
		e.RelField = a.query.Rel.FieldName
		e.RelType = a.query.Rel.Type
	} else if a.filter != nil && a.filter.Rel != nil {
		e.RelField = a.filter.Rel.FieldName
		e.RelType = a.filter.Rel.Type
	}
	return e
}
// analyzeFilterStruct runs the analysis of a FilterStruct: it locates
// the single "rel"-tagged field, analyzes its relation type, and then
// processes directive fields and filter-constructor fields. Both a rel
// field and a filter constructor are required.
func analyzeFilterStruct(a *analysis, structType *types.Struct) (*FilterStruct, error) {
	a.filter = new(FilterStruct)
	a.filter.TypeName = a.named.Obj().Name()
	for i := 0; i < structType.NumFields(); i++ {
		fvar := structType.Field(i)
		ftag := structType.Tag(i)
		tag := tagutil.New(ftag)
		// Ensure that there is only one field with the "rel" tag.
		if _, ok := tag["rel"]; ok {
			if a.filter.Rel != nil {
				return nil, a.error(errConflictingRelTag, fvar, "", ftag, "", "")
			}
			rid, ecode := parseRelIdent(tag.First("rel"))
			if ecode > 0 {
				return nil, a.error(ecode, fvar, "", ftag, "", tag.First("rel"))
			} else if ecode, errval := addToRelSpace(a, rid); ecode > 0 {
				// NOTE(mkopriva): Because of the "a.filter.Rel != nil" check above and the
				// fact that FilterXxx types don't accept any other relation-specifying
				// fields, this branch will actually not run, nevertheless it is left here
				// just in case the implementation changes allowing for this error to occur.
				return nil, a.error(ecode, fvar, "", ftag, "", errval)
			}
			a.filter.Rel = new(RelField)
			a.filter.Rel.FieldName = fvar.Name()
			a.filter.Rel.Id = rid
			a.info.FieldMap[a.filter.Rel] = FieldVar{Var: fvar, Tag: ftag}
			if err := analyzeRelType(a, &a.filter.Rel.Type, fvar); err != nil {
				return nil, err
			}
			// Iterator relation types are not allowed in filter structs.
			if a.filter.Rel.Type.IsIter {
				return nil, a.error(errIllegalIteratorField, fvar, "", ftag, "", "")
			}
			continue
		}
		// TODO(mkopriva): allow for embedding a struct with "common feature fields",
		// and make sure to also allow imported and local-unexported struct types.
		if dirname := typesutil.GetDirectiveName(fvar); fvar.Name() == "_" && len(dirname) > 0 {
			// fields with gosql directive types; only "textsearch" is
			// legal in a filter struct.
			if strings.ToLower(dirname) == "textsearch" {
				if err := analyzeTextSearchDirective(a, fvar, ftag); err != nil {
					return nil, err
				}
			} else {
				return nil, a.error(errIllegalQueryField, fvar, "", "", "", "")
			}
		} else {
			// fields with specific names / types
			if typesutil.ImplementsGosqlFilterConstructor(fvar.Type()) {
				if err := analyzeFilterConstructorField(a, fvar, ftag); err != nil {
					return nil, err
				}
			}
		}
	}
	if a.filter.Rel == nil {
		return nil, a.error(errMissingRelField, nil, "", "", "", "") // TODO test
	}
	if a.filter.FilterConstructor == nil {
		return nil, a.error(errMissingFilterConstructor, nil, "", "", "", "") // TODO test
	}
	return a.filter, nil
}
// analyzeQueryStruct runs the analysis of a QueryStruct. The query kind
// is derived from the type name's prefix; the "rel" field is analyzed
// first (it is required), followed by all remaining fields.
func analyzeQueryStruct(a *analysis, structType *types.Struct) (*QueryStruct, error) {
	a.query = new(QueryStruct)
	a.query.TypeName = a.named.Obj().Name()
	// The kind is decided by the first six characters of the lowercased
	// type name (all recognized prefixes are six letters long).
	key := tolower(a.query.TypeName)
	if len(key) > 5 {
		key = key[:6]
	}
	switch key {
	case "insert":
		a.query.Kind = QueryKindInsert
	case "update":
		a.query.Kind = QueryKindUpdate
	case "select":
		a.query.Kind = QueryKindSelect
	case "delete":
		a.query.Kind = QueryKindDelete
	default:
		panic(a.query.TypeName + " struct type has unsupported name prefix.") // this shouldn't happen
	}
	// Find and analyze the "rel" field before anything else, since the
	// other analyzers depend on a.query.Rel being set.
	for i := 0; i < structType.NumFields(); i++ {
		ftag := structType.Tag(i)
		fvar := structType.Field(i)
		tag := tagutil.New(ftag)
		if _, ok := tag["rel"]; ok {
			if err := analyzeQueryStructRelField(a, fvar, ftag, tag.First("rel")); err != nil {
				return nil, err
			}
		}
	}
	if a.query.Rel == nil {
		return nil, a.error(errMissingRelField, nil, "", "", "", "")
	}
	// Analyze the rest of the query struct's fields.
	for i := 0; i < structType.NumFields(); i++ {
		ftag := structType.Tag(i)
		fvar := structType.Field(i)
		tag := tagutil.New(ftag)
		if _, ok := tag["rel"]; ok {
			continue
		}
		if dirname := typesutil.GetDirectiveName(fvar); fvar.Name() == "_" && len(dirname) > 0 {
			// fields with gosql directive types
			if err := analyzeQueryStructDirective(a, fvar, ftag, dirname); err != nil {
				return nil, err
			}
		} else {
			// fields with specific names / types
			if err := analyzeQueryStructField(a, fvar, ftag); err != nil {
				return nil, err
			}
		}
	}
	// TODO(mkopriva): if QueryKind is Select, Update, or Insert, and the analyzed
	// RelType.Fields slice is empty (for Select also check ResultType.Fields), then fail.
	// TODO(mkopriva): allow for embedding a struct with "common feature fields",
	// and make sure to also allow imported and local-unexported struct types.
	//
	// TODO(mkopriva): if QueryKind is Update and the record (single or slice) does not
	// have a primary key AND there's no WhereStruct, no filter, no all directive
	// return an error. That case suggests that all records should be updated
	// however the all directive must be provided explicitly, as a way to
	// ensure the programmer does not, by mistake, declare a query that
	// updates all records in a table.
	return a.query, nil
}
// analyzeQueryStructRelField analyzes the "rel"-tagged field of a query
// struct: it parses and registers the relation identifier, then decides
// the relation's role from the field's (lowercased) name — a plain
// record/iterator field, a "count"/"exists"/"notexists" result field
// (select queries only), or a gosql.Relation directive (delete only).
func analyzeQueryStructRelField(a *analysis, f *types.Var, ftag, reltag string) error {
	if a.query.Rel != nil {
		return a.error(errConflictingRelTag, f, "", ftag, "", "")
	}
	rid, ecode := parseRelIdent(reltag)
	if ecode > 0 {
		return a.error(ecode, f, "", ftag, "", reltag)
	} else if ecode, errval := addToRelSpace(a, rid); ecode > 0 {
		// NOTE(mkopriva): Because of the "a.query.Rel != nil" check above and the
		// fact that the rel field in query types is intentionally analyzed before
		// any other field, this branch will actually not run, nevertheless it is left here
		// just in case the implementation changes allowing for this error to occur.
		return a.error(ecode, f, "", ftag, "", errval)
	}
	a.query.Rel = new(RelField)
	a.query.Rel.FieldName = f.Name()
	a.query.Rel.Id = rid
	// NOTE: the default clause is listed first for readability, but Go
	// runs it only when none of the named cases below match.
	switch fname := strings.ToLower(a.query.Rel.FieldName); {
	default:
		if err := analyzeRelType(a, &a.query.Rel.Type, f); err != nil {
			return err
		}
		if (a.query.Kind == QueryKindInsert || a.query.Kind == QueryKindUpdate) && a.query.Rel.Type.IsIter {
			return a.error(errIllegalQueryField, f, "", ftag, "", "") // TODO test
		}
	case fname == "count" && isIntegerType(f.Type()):
		if a.query.Kind != QueryKindSelect {
			return a.error(errIllegalQueryField, f, "", ftag, "", "")
		}
		a.query.Kind = QueryKindSelectCount
	case fname == "exists" && isBoolType(f.Type()):
		if a.query.Kind != QueryKindSelect {
			return a.error(errIllegalQueryField, f, "", ftag, "", "")
		}
		a.query.Kind = QueryKindSelectExists
	case fname == "notexists" && isBoolType(f.Type()):
		if a.query.Kind != QueryKindSelect {
			return a.error(errIllegalQueryField, f, "", ftag, "", "")
		}
		a.query.Kind = QueryKindSelectNotExists
	case fname == "_" && typesutil.IsDirective("Relation", f.Type()):
		if a.query.Kind != QueryKindDelete {
			return a.error(errIllegalQueryField, f, "", ftag, "", "")
		}
		a.query.Rel.IsDirective = true
	}
	a.info.FieldMap[a.query.Rel] = FieldVar{Var: f, Tag: ftag}
	return nil
}
// analyzeQueryStructDirective dispatches the analysis of a directive
// field (a field with a blank "_" name and a gosql directive type) to
// the analyzer matching the directive's lowercased name. A directive
// with no matching analyzer is an illegal query field.
func analyzeQueryStructDirective(a *analysis, f *types.Var, tag string, dirname string) error {
	// A switch avoids rebuilding the dispatch map on every call.
	switch strings.ToLower(dirname) {
	case "all":
		return analyzeAllDirective(a, f, tag)
	case "default":
		return analyzeDefaultDirective(a, f, tag)
	case "force":
		return analyzeForceDirective(a, f, tag)
	case "optional":
		return analyzeOptionalDirective(a, f, tag)
	case "return":
		return analyzeReturnDirective(a, f, tag)
	case "limit":
		return analyzeLimitFieldOrDirective(a, f, tag)
	case "offset":
		return analyzeOffsetFieldOrDirective(a, f, tag)
	case "orderby":
		return analyzeOrderByDirective(a, f, tag)
	case "override":
		return analyzeOverrideDirective(a, f, tag)
	}
	// illegal directive field
	return a.error(errIllegalQueryField, f, "", "", "", "")
}
// analyzeQueryStructField analyzes a regular (non-rel, non-directive)
// field of a query struct. Fields are matched first by lowercased name;
// if the name is not recognized, a set of recognized field types is
// tried instead. Fields matching neither are silently ignored.
func analyzeQueryStructField(a *analysis, f *types.Var, tag string) error {
	// A switch avoids rebuilding the dispatch map on every call.
	switch tolower(f.Name()) {
	case "where":
		return analyzeWhereStruct(a, f, tag)
	case "join", "from", "using":
		return analyzeJoinStruct(a, f, tag)
	case "onconflict":
		return analyzeOnConflictStruct(a, f, tag)
	case "result":
		return analyzeResultField(a, f, tag)
	case "limit":
		return analyzeLimitFieldOrDirective(a, f, tag)
	case "offset":
		return analyzeOffsetFieldOrDirective(a, f, tag)
	case "rowsaffected":
		return analyzeRowsAffectedField(a, f, tag)
	}
	// if no match by field name, look for specific field types
	if isAccessible(a, f, a.named) {
		switch {
		case isFilterType(f.Type()):
			if err := analyzeFilterField(a, f, tag); err != nil {
				return err
			}
		case isErrorHandler(f.Type()):
			if err := analyzeErrorHandlerField(a, f, tag, false); err != nil {
				return err
			}
		case isErrorInfoHandler(f.Type()):
			if err := analyzeErrorHandlerField(a, f, tag, true); err != nil {
				return err
			}
		case typesutil.IsContext(f.Type()):
			if err := analyzeContextField(a, f, tag); err != nil {
				return err
			}
		}
	}
	return nil
}
// analyzeRelType analyzes the type of a relation field and fills rt.
// Interface and function types are treated as iterators; otherwise the
// type may be a (slice/array/pointer of a) named struct type. Results
// for named types are cached by their fully-qualified type string, so
// repeated relations across query types are analyzed only once.
func analyzeRelType(a *analysis, rt *RelType, field *types.Var) error {
	rt.FieldMap = make(map[FieldPtr]FieldVar)
	defer func() {
		// NOTE(mkopriva): this step is necessary because of the cache.
		//
		// If there were no cache, each call to analyzeRelType would
		// traverse each field of the rt relType and could therefore
		// store the field info directly into a.info.FieldMap.
		//
		// However, because the cache is in place the fields are not traversed
		// for cached relTypes and the a.info.FieldMap is then not populated.
		for k, v := range rt.FieldMap {
			a.info.FieldMap[k] = v
		}
	}()
	ftyp := field.Type()
	cacheKey := ftyp.String()
	named, ok := ftyp.(*types.Named)
	if ok {
		ftyp = named.Underlying()
		cacheKey = named.String()
	}
	// Cache hit: copy the cached result into rt and return (the
	// deferred loop above still syncs its FieldMap into a.info).
	relTypeCache.RLock()
	v := relTypeCache.m[cacheKey]
	relTypeCache.RUnlock()
	if v != nil {
		*rt = *v
		return nil
	}
	// Check whether the relation field's type is an interface or a function,
	// if so, it is then expected to be an iterator, and it is analyzed as such.
	//
	// Failure of the iterator analysis will cause the whole analysis to exit
	// as there's currently no support for non-iterator interfaces nor functions.
	if iface, ok := ftyp.(*types.Interface); ok {
		var isValid bool
		// On success, named is replaced by the struct type the iterator yields.
		if named, isValid = analyzeIteratorInterface(a, rt, iface, named); !isValid {
			return a.error(errBadIterTypeInterface, field, "", "", "", "")
		}
	} else if sig, ok := ftyp.(*types.Signature); ok {
		var isValid bool
		if named, isValid = analyzeIteratorFunction(a, rt, sig); !isValid {
			return a.error(errBadIterTypeFunc, field, "", "", "", "")
		}
	} else {
		// If not an iterator, check for slices, arrays, and pointers.
		if slice, ok := ftyp.(*types.Slice); ok { // allows []T / []*T
			ftyp = slice.Elem()
			rt.IsSlice = true
		} else if array, ok := ftyp.(*types.Array); ok { // allows [N]T / [N]*T
			ftyp = array.Elem()
			rt.IsArray = true
			rt.ArrayLen = array.Len()
		}
		if ptr, ok := ftyp.(*types.Pointer); ok { // allows *T
			ftyp = ptr.Elem()
			rt.IsPointer = true
		}
		// Get the name of the base type, if applicable.
		if rt.IsSlice || rt.IsArray || rt.IsPointer {
			if named, ok = ftyp.(*types.Named); !ok {
				// Fail if the type is a slice, an array, or a pointer
				// while its base type remains unnamed.
				return a.error(errBadRelType, field, "", "", "", "")
			}
		}
	}
	if named != nil {
		pkg := named.Obj().Pkg()
		rt.Base.Name = named.Obj().Name()
		rt.Base.PkgPath = pkg.Path()
		rt.Base.PkgName = pkg.Name()
		rt.Base.PkgLocal = pkg.Name()
		rt.Base.IsImported = isImportedType(a, named)
		rt.IsAfterScanner = typesutil.ImplementsAfterScanner(named)
		ftyp = named.Underlying()
		// NOTE(review): rt is cached before its Fields are populated by
		// analyzeFieldInfoList below; since the cache stores the pointer,
		// the cached entry presumably completes once this call returns —
		// confirm this is intended.
		relTypeCache.Lock()
		relTypeCache.m[cacheKey] = rt
		relTypeCache.Unlock()
	}
	rt.Base.Kind = analyzeTypeKind(ftyp)
	if rt.Base.Kind != TypeKindStruct {
		return a.error(errBadRelType, field, "", "", "", "")
	}
	styp := ftyp.(*types.Struct)
	return analyzeFieldInfoList(a, rt, styp)
}
// analyzeFieldInfoList analyzes the fields of the relation struct type
// styp and appends the resulting FieldInfo values to rt.Fields. Struct
// fields whose `sql` tag starts with ">" are descended into (depth
// first, via an explicit stack) with their tag value used as a column
// prefix for the nested fields.
func analyzeFieldInfoList(a *analysis, rt *RelType, styp *types.Struct) error {
	// The loopstate type holds the state of a loop over a struct's fields.
	type loopstate struct {
		styp     *types.Struct // the struct type whose fields are being analyzed
		typ      *TypeInfo     // info on the struct type; holds the resulting slice of analyzed FieldInfo
		idx      int           // keeps track of the field index
		pfx      string        // column prefix
		selector []*FieldSelectorNode
	}
	// LIFO stack of states used for depth first traversal of struct fields.
	stack := []*loopstate{{styp: styp, typ: &rt.Base}}
stackloop:
	for len(stack) > 0 {
		loop := stack[len(stack)-1]
		for loop.idx < loop.styp.NumFields() {
			ftag := loop.styp.Tag(loop.idx)
			fvar := loop.styp.Field(loop.idx)
			tag := tagutil.New(ftag)
			sqltag := tag.First("sql")
			// Instead of incrementing the index in the for-statement
			// it is done here manually to ensure that it is not skipped
			// when continuing to the outer loop.
			loop.idx++
			// Ignore the field if:
			// - no column name or sql tag was provided
			if sqltag == "" ||
				// - explicitly marked to be ignored
				sqltag == "-" ||
				// - has blank name, i.e. it's practically inaccessible
				fvar.Name() == "_" ||
				// - it's unexported and the field's struct type is imported
				(!fvar.Exported() && loop.typ.IsImported) {
				continue
			}
			f := new(FieldInfo)
			f.Tag = tag
			f.Name = fvar.Name()
			f.IsEmbedded = fvar.Embedded()
			f.IsExported = fvar.Exported()
			rt.FieldMap[f] = FieldVar{Var: fvar, Tag: ftag}
			// Analyze the field's type.
			ftyp := fvar.Type()
			f.Type, ftyp = analyzeTypeInfo(a, ftyp)
			// If the field's type is a struct and the `sql` tag's
			// value starts with the ">" (descend) marker, then it is
			// considered to be a "parent" field element whose child
			// fields need to be analyzed as well.
			if f.Type.Is(TypeKindStruct) && strings.HasPrefix(sqltag, ">") {
				loop2 := new(loopstate)
				loop2.styp = ftyp.(*types.Struct)
				loop2.typ = &f.Type
				loop2.pfx = loop.pfx + strings.TrimPrefix(sqltag, ">")
				// Allocate selector of the appropriate size an copy it.
				loop2.selector = make([]*FieldSelectorNode, len(loop.selector))
				_ = copy(loop2.selector, loop.selector)
				// If the parent node is a pointer to a struct,
				// get the struct type info.
				typ := f.Type
				if typ.Kind == TypeKindPtr {
					typ = *typ.Elem
				}
				node := new(FieldSelectorNode)
				node.Name = f.Name
				node.Tag = f.Tag
				node.IsEmbedded = f.IsEmbedded
				node.IsExported = f.IsExported
				node.TypeName = typ.Name
				node.TypePkgPath = typ.PkgPath
				node.TypePkgName = typ.PkgName
				node.TypePkgLocal = typ.PkgLocal
				node.IsImported = typ.IsImported
				node.IsPointer = (f.Type.Kind == TypeKindPtr)
				node.ReadOnly = tag.HasOption("sql", "ro")
				node.WriteOnly = tag.HasOption("sql", "wo")
				loop2.selector = append(loop2.selector, node)
				stack = append(stack, loop2)
				continue stackloop
			}
			// Resolve the column id.
			cid, ecode, eval := parseColIdent(a, loop.pfx+sqltag)
			if ecode > 0 {
				return a.error(ecode, fvar, "", ftag, "", eval)
			}
			// TODO check the the chan, func, and interface type
			// in association with the write/read?
			// If the field is not a struct to be descended,
			// it is considered to be a "leaf" field and as
			// such the analysis of leaf-specific information
			// needs to be carried out.
			f.ColIdent = cid
			f.Selector = loop.selector
			f.NullEmpty = tag.HasOption("sql", "nullempty")
			f.ReadOnly = tag.HasOption("sql", "ro")
			f.WriteOnly = tag.HasOption("sql", "wo")
			f.UseAdd = tag.HasOption("sql", "add")
			f.UseDefault = tag.HasOption("sql", "default")
			f.UseCoalesce, f.CoalesceValue = parseCoalesceInfo(tag)
			if err := parseFilterColumnKey(a, f); err != nil {
				return err
			}
			// Add the field to the list.
			rt.Fields = append(rt.Fields, f)
			a.info.FieldMap[f] = FieldVar{Var: fvar, Tag: ftag}
		}
		// All fields of the current struct done; pop it off the stack.
		stack = stack[:len(stack)-1]
	}
	return nil
}
// analyzeTypeInfo function analyzes the given type and returns the result. The analysis
// looks only for information of "named types" and in case of slice, array, map, or
// pointer types it will analyze the element type of those types. The second return
// value is the types.Type representation of the base element type of the given type.
func analyzeTypeInfo(a *analysis, tt types.Type) (typ TypeInfo, base types.Type) {
	base = tt
	if named, ok := base.(*types.Named); ok {
		pkg := named.Obj().Pkg()
		typ.Name = named.Obj().Name()
		typ.PkgPath = pkg.Path()
		typ.PkgName = pkg.Name()
		typ.PkgLocal = pkg.Name()
		typ.IsImported = isImportedType(a, named)
		typ.IsScanner = typesutil.ImplementsScanner(named)
		typ.IsValuer = typesutil.ImplementsValuer(named)
		typ.IsJSONMarshaler = typesutil.ImplementsJSONMarshaler(named)
		typ.IsJSONUnmarshaler = typesutil.ImplementsJSONUnmarshaler(named)
		typ.IsXMLMarshaler = typesutil.ImplementsXMLMarshaler(named)
		typ.IsXMLUnmarshaler = typesutil.ImplementsXMLUnmarshaler(named)
		base = named.Underlying()
	}
	typ.Kind = analyzeTypeKind(base)
	var elem TypeInfo // element info
	switch T := base.(type) {
	case *types.Basic:
		typ.IsRune = T.Name() == "rune"
		typ.IsByte = T.Name() == "byte"
	case *types.Slice:
		// For composite types, base propagates from the element so the
		// caller receives the innermost element type.
		elem, base = analyzeTypeInfo(a, T.Elem())
		typ.Elem = &elem
	case *types.Array:
		elem, base = analyzeTypeInfo(a, T.Elem())
		typ.Elem = &elem
		typ.ArrayLen = T.Len()
	case *types.Map:
		// Only the value side propagates as base; the key's base type
		// is analyzed but deliberately discarded.
		key, _ := analyzeTypeInfo(a, T.Key())
		elem, base = analyzeTypeInfo(a, T.Elem())
		typ.Key = &key
		typ.Elem = &elem
	case *types.Pointer:
		elem, base = analyzeTypeInfo(a, T.Elem())
		typ.Elem = &elem
	case *types.Interface:
		typ.IsEmptyInterface = typesutil.IsEmptyInterface(T)
		// If base is an unnamed interface type check at least whether
		// or not it declares, or embeds, one of the relevant methods.
		if typ.Name == "" {
			typ.IsScanner = typesutil.IsScanner(T)
			typ.IsValuer = typesutil.IsValuer(T)
		}
	}
	return typ, base
}
// analyzeIteratorInterface validates an iterator interface type: it
// must declare exactly one explicit, accessible method whose signature
// is a valid iterator function. On success the method's name is stored
// in rt.IterMethod and the named struct type the iterator yields is
// returned.
func analyzeIteratorInterface(a *analysis, rt *RelType, iface *types.Interface, named *types.Named) (out *types.Named, isValid bool) {
	if iface.NumExplicitMethods() != 1 {
		return nil, false
	}
	method := iface.ExplicitMethod(0)
	if !isAccessible(a, method, named) {
		return nil, false
	}
	if out, isValid = analyzeIteratorFunction(a, rt, method.Type().(*types.Signature)); !isValid {
		return nil, false
	}
	rt.IterMethod = method.Name()
	return out, true
}
// analyzeIteratorFunction validates an iterator function signature. It
// must be of the form "func(T) error" or "func(*T) error" where T is a
// named struct type. On success rt is marked as an iterator (and as
// pointer-based for *T) and T's named type is returned.
func analyzeIteratorFunction(a *analysis, rt *RelType, sig *types.Signature) (out *types.Named, isValid bool) {
	// Must take 1 argument and return one value of type error. "func(T) error"
	params, results := sig.Params(), sig.Results()
	if params.Len() != 1 || results.Len() != 1 {
		return nil, false
	}
	if !typesutil.IsError(results.At(0).Type()) {
		return nil, false
	}
	argType := params.At(0).Type()
	if ptr, ok := argType.(*types.Pointer); ok { // allows *T
		rt.IsPointer = true
		argType = ptr.Elem()
	}
	// Make sure that the argument type is a named struct type.
	named, ok := argType.(*types.Named)
	if !ok {
		return nil, false
	}
	if _, ok := named.Underlying().(*types.Struct); !ok {
		return nil, false
	}
	rt.IsIter = true
	return named, true
}
////////////////////////////////////////////////////////////////////////////////
// Where Struct Analysis
//
// analyzeWhereStruct
func analyzeWhereStruct(a *analysis, f *types.Var, tag string) (err error) {
if !a.query.Kind.isSelect() && a.query.Kind != QueryKindUpdate && a.query.Kind != QueryKindDelete {
return a.error(errIllegalQueryField, f, "", tag, "", "")
}
if a.query.Kind == QueryKindUpdate && a.query.Rel.Type.IsSlice {
return a.error(errIllegalSliceUpdateModifier, f, "", tag, "", "")
}
if a.query.All != nil || a.query.Where != nil || a.query.Filter != nil {
return a.error(errConflictingWhere, f, "", tag, "", "")
}
ns, err := typesutil.GetStruct(f)
if err != nil { // fails only if non struct
return a.error(errBadFieldTypeStruct, f, "", tag, "", "")
}
// The loopstate type holds the state of a loop over a struct's fields.
type loopstate struct {
where *WhereStruct
items []WhereItem
ns *typesutil.NamedStruct // the struct type of the WhereStruct
idx int // keeps track of the field index
}
// root holds the reference to the root level search conditions
root := &loopstate{ns: ns}
// LIFO stack of states used for depth first traversal of struct fields.
stack := []*loopstate{root}
stackloop:
for len(stack) > 0 {
loop := stack[len(stack)-1]
for loop.idx < loop.ns.Struct.NumFields() {
fvar := loop.ns.Struct.Field(loop.idx)
ftag := loop.ns.Struct.Tag(loop.idx)
tag := tagutil.New(ftag)
sqltag := tag.First("sql")
// Instead of incrementing the index in the for-statement
// it is done here manually to ensure that it is not skipped
// when continuing to the outer loop.
loop.idx++
if sqltag == "-" || sqltag == "" {
continue
}
// Skip the field if it's unexported and the ns.Struct's
// type is imported. Unless it is one of the directive
// fields that do not require direct access at runtime.
if fvar.Name() != "_" && !isAccessible(a, fvar, ns.Named) {
continue
}
// Analyze the bool operation for any but the first
// item in a WhereStruct. Fail if a value was provided
// but it is not "or" nor "and".
if len(loop.items) > 0 {
item := new(WhereBoolTag)
item.Value = BoolAnd // default to "and"
if val := tolower(tag.First("bool")); len(val) > 0 {
if val == "or" {
item.Value = BoolOr
} else if val != "and" {
return a.error(errBadBoolTagValue, fvar, f.Name(), ftag, "", val)
}
}
loop.items = append(loop.items, item)
}
// Nested wherefields are marked with ">" and should be
// analyzed before any other fields in the current block.
if sqltag == ">" {
ns, err := typesutil.GetStruct(fvar)
if err != nil {
return a.error(errBadFieldTypeStruct, fvar, f.Name(), "", "", "")
}
loop2 := new(loopstate)
loop2.ns = ns
loop2.where = new(WhereStruct)
loop2.where.FieldName = fvar.Name()
loop.items = append(loop.items, loop2.where)
a.info.FieldMap[loop2.where] = FieldVar{Var: fvar, Tag: ftag}
stack = append(stack, loop2)
continue stackloop
}
lhs, op, op2, rhs := parsePredicateExpr(sqltag)
// Analyze directive where item.
if fvar.Name() == "_" {
if !typesutil.IsDirective("Column", fvar.Type()) {
continue
}
// If the expression in a gosql.Column tag's value
// contains a right-hand-side, it is expected to be
// either another column or a value-literal to which
// the main column should be compared.
if len(rhs) > 0 {
cid, ecode, eval := parseColIdent(a, lhs)
if ecode > 0 {
return a.error(ecode, fvar, f.Name(), ftag, sqltag, eval)
}
item := new(WhereColumnDirective)
item.LHSColIdent = cid
item.Predicate = stringToPredicate[op]
item.Quantifier = stringToQuantifier[op2]
if cid, ecode, eval := parseColIdent(a, rhs); ecode > 0 {
if ecode != errBadColIdTagValue {
return a.error(ecode, fvar, f.Name(), ftag, sqltag, eval)
}
// assume literal expression
item.RHSLiteral = rhs
} else {
item.RHSColIdent = cid
}
if item.Predicate.IsUnary() {
return a.error(errIllegalUnaryPredicate, fvar, f.Name(), ftag, sqltag, op)
} else if item.Quantifier > 0 && !item.Predicate.CanQuantify() {
return a.error(errIllegalPredicateQuantifier, fvar, f.Name(), ftag, sqltag, op2)
}
a.info.FieldMap[item] = FieldVar{Var: fvar, Tag: ftag}
loop.items = append(loop.items, item)
continue
}
// Assume column with unary predicate.
cid, ecode, eval := parseColIdent(a, lhs)
if ecode > 0 {
return a.error(ecode, fvar, f.Name(), ftag, sqltag, eval)
}
// If no operator was provided, default to "istrue"
if len(op) == 0 {
op = "istrue"
}
item := new(WhereColumnDirective)
item.LHSColIdent = cid
item.Predicate = stringToPredicate[op]
if !item.Predicate.IsUnary() {
return a.error(errBadDirectiveBooleanExpr, fvar, f.Name(), ftag, "", sqltag)
} else if len(op2) > 0 {
return a.error(errIllegalPredicateQuantifier, fvar, f.Name(), ftag, sqltag, op2)
}
a.info.FieldMap[item] = FieldVar{Var: fvar, Tag: ftag}
loop.items = append(loop.items, item)
continue
}
// Check whether the field is supposed to be used to
// produce a [NOT] BETWEEN [SYMMETRIC] predicate clause.
//
// A valid "between" field MUST be of type struct with
// the number of fields equal to 2, where each of the
// fields is marked with an "x" or a "y" in their `sql`
// tag to indicate their position in the clause.
if strings.Contains(op, "between") {
if len(op2) > 0 {
return a.error(errIllegalPredicateQuantifier, fvar, f.Name(), ftag, sqltag, op2) // TODO test
}
ns, err := typesutil.GetStruct(fvar)
if err != nil {
return a.error(errBadBetweenPredicate, fvar, f.Name(), ftag, "", "")
} else if ns.Struct.NumFields() != 2 {
return a.error(errBadBetweenPredicate, fvar, f.Name(), ftag, "", "")
}
var lower, upper RangeBound
for i := 0; i < 2; i++ {
f := fvar // for access to the parent "between" struct
fvar := ns.Struct.Field(i)
ftag := ns.Struct.Tag(i)
tag := tagutil.New(ftag)
if fvar.Name() == "_" && typesutil.IsDirective("Column", fvar.Type()) {
cid, ecode, eval := parseColIdent(a, tag.First("sql"))
if ecode > 0 {
return a.error(ecode, fvar, f.Name(), ftag, tag.First("sql"), eval)
}
item := new(BetweenColumnDirective)
item.ColIdent = cid
if v := tolower(tag.Second("sql")); v == "x" || v == "lower" {
lower = item
} else if v == "y" || v == "upper" {
upper = item
}
a.info.FieldMap[item] = FieldVar{Var: fvar, Tag: ftag}
} else if isAccessible(a, fvar, ns.Named) {
item := new(BetweenStructField)
item.Name = fvar.Name()
item.Type, _ = analyzeTypeInfo(a, fvar.Type())
if v := tolower(tag.First("sql")); v == "x" || v == "lower" {
lower = item
} else if v == "y" || v == "upper" {
upper = item
}
a.info.FieldMap[item] = FieldVar{Var: fvar, Tag: ftag}
}
}
if lower == nil || upper == nil {
return a.error(errBadBetweenPredicate, fvar, f.Name(), ftag, "", "")
}
cid, ecode, eval := parseColIdent(a, lhs)
if ecode > 0 {
return a.error(ecode, fvar, f.Name(), ftag, sqltag, eval)
}
item := new(WhereBetweenStruct)
item.FieldName = fvar.Name()
item.ColIdent = cid
item.Predicate = stringToPredicate[op]
item.LowerBound = lower
item.UpperBound = upper
a.info.FieldMap[item] = FieldVar{Var: fvar, Tag: ftag}
loop.items = append(loop.items, item)
continue
}
// Analyze field where item.
cid, ecode, eval := parseColIdent(a, lhs)
if ecode > 0 {
return a.error(ecode, fvar, f.Name(), ftag, lhs, eval)
}
// If no predicate was provided default to "="
if len(op) == 0 {
op = "="
}
item := new(WhereStructField)
item.Name = fvar.Name()
item.Type, _ = analyzeTypeInfo(a, fvar.Type())
item.ColIdent = cid
item.Predicate = stringToPredicate[op]
item.Quantifier = stringToQuantifier[op2]
item.FuncName = parseFuncName(tag["sql"][1:])
if item.Predicate.IsUnary() {
return a.error(errIllegalUnaryPredicate, fvar, f.Name(), ftag, sqltag, op)
} else if item.Quantifier > 0 && !item.Predicate.CanQuantify() {
return a.error(errIllegalPredicateQuantifier, fvar, f.Name(), ftag, sqltag, op2)
} else if item.Quantifier > 0 && !item.Type.IsSequence() {
return a.error(errIllegalFieldQuantifier, fvar, f.Name(), ftag, sqltag, op2)
} else if item.Predicate.IsArray() && !item.Type.IsSequence() {
return a.error(errIllegalListPredicate, fvar, f.Name(), ftag, sqltag, op)
}
a.info.FieldMap[item] = FieldVar{Var: fvar, Tag: ftag}
loop.items = append(loop.items, item)
}
if loop.where != nil {
loop.where.Items = loop.items
}
stack = stack[:len(stack)-1]
}
a.query.Where = new(WhereStruct)
a.query.Where.FieldName = f.Name()
a.query.Where.Items = root.items
a.info.FieldMap[a.query.Where] = FieldVar{Var: f, Tag: tag}
// XXX if a.info.TypeName == "DeleteWithUsingJoinBlock1Query" {
// XXX log.Printf("%#v\n", a.query.Where.Items)
// XXX }
return nil
}
////////////////////////////////////////////////////////////////////////////////
// Join Struct Analysis
//
// analyzeJoinStruct analyzes the given field as a "join", "from", or "using"
// struct. The tag argument is used for error reporting.
//
//   - A "join" field is legal only for select queries.
//   - A "from" field is legal only for update queries.
//   - A "using" field is legal only for delete queries.
//
// The field's type MUST be a struct whose relevant fields are all directives
// (blank-identifier fields): either a gosql.Relation directive or one of the
// join-type directives (leftjoin, rightjoin, fulljoin, crossjoin, innerjoin).
func analyzeJoinStruct(a *analysis, f *types.Var, tag string) (err error) {
	fname := tolower(f.Name())
	if fname == "join" && !a.query.Kind.isSelect() {
		return a.error(errIllegalQueryField, f, "", tag, "", "")
	} else if fname == "from" && a.query.Kind != QueryKindUpdate {
		return a.error(errIllegalQueryField, f, "", tag, "", "")
	} else if fname == "using" && a.query.Kind != QueryKindDelete {
		return a.error(errIllegalQueryField, f, "", tag, "", "")
	}
	ns, err := typesutil.GetStruct(f)
	if err != nil {
		return a.error(errBadFieldTypeStruct, f, "", tag, "", "")
	}
	join := new(JoinStruct)
	join.FieldName = f.Name()
	for i := 0; i < ns.Struct.NumFields(); i++ {
		ftag := ns.Struct.Tag(i)
		fvar := ns.Struct.Field(i)
		tag := tagutil.New(ftag)
		sqltag := tag.First("sql")
		// Fields with no `sql` tag, or explicitly opted out with "-", are skipped.
		if sqltag == "-" || sqltag == "" {
			continue
		}
		// In a JoinStruct all fields are expected to be directives
		// with the blank identifier as their name.
		if fvar.Name() != "_" {
			continue
		}
		switch dirName := typesutil.GetDirectiveName(fvar); tolower(dirName) {
		case "relation":
			if err := analyzeJoinStructRelationDirective(a, join, fvar, ftag); err != nil {
				return err
			}
		case "leftjoin", "rightjoin", "fulljoin", "crossjoin", "innerjoin":
			if err := analyzeJoinStructJoinDirective(a, join, dirName, fvar, ftag); err != nil {
				return err
			}
		default:
			return a.error(errIllegalStructDirective, fvar, f.Name(), ftag, "", "")
		}
	}
	a.query.Join = join
	a.info.FieldMap[a.query.Join] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeJoinStructRelationDirective analyzes a gosql.Relation directive found
// inside a "from" or "using" struct and records the parsed relation identifier
// on the given JoinStruct.
func analyzeJoinStructRelationDirective(a *analysis, j *JoinStruct, f *types.Var, ftag string) (err error) {
	// The relation directive is legal only inside "from" and "using" structs.
	switch tolower(j.FieldName) {
	case "from", "using":
		// ok
	default:
		return a.error(errIllegalStructDirective, f, j.FieldName, ftag, "", "")
	}
	// Only a single relation directive is allowed per struct.
	if j.Relation != nil {
		return a.error(errConflictingRelationDirective, f, j.FieldName, ftag, "", "")
	}
	sqlval := tagutil.New(ftag).First("sql")
	rid, ecode := parseRelIdent(sqlval)
	if ecode > 0 {
		return a.error(ecode, f, j.FieldName, ftag, "", sqlval)
	}
	if ecode, errval := addToRelSpace(a, rid); ecode > 0 {
		return a.error(ecode, f, j.FieldName, ftag, "", errval)
	}
	rel := new(RelationDirective)
	rel.RelIdent = rid
	j.Relation = rel
	a.info.FieldMap[rel] = FieldVar{Var: f, Tag: ftag}
	return nil
}
// analyzeJoinStructJoinDirective analyzes the given field as one of the join
// directives (leftjoin, rightjoin, fulljoin, crossjoin, innerjoin) and appends
// the resulting JoinDirective to the given JoinStruct.
//
// The first `sql` tag value is parsed as the identifier of the relation to be
// joined; each remaining tag value is parsed as a list of ";"-separated join
// conditions (see the loop below for how the conditions are combined).
func analyzeJoinStructJoinDirective(a *analysis, j *JoinStruct, dirName string, f *types.Var, ftag string) (err error) {
	tag := tagutil.New(ftag)
	rid, ecode := parseRelIdent(tag.First("sql"))
	if ecode > 0 {
		return a.error(ecode, f, j.FieldName, ftag, "", tag.First("sql"))
	} else if ecode, errval := addToRelSpace(a, rid); ecode > 0 {
		return a.error(ecode, f, j.FieldName, ftag, "", errval)
	}
	dir := new(JoinDirective)
	dir.RelIdent = rid
	dir.JoinType = stringToJoinType[dirName]
	for _, val := range tag["sql"][1:] {
		vals := strings.Split(val, ";")
		for i, val := range vals {
			// ✅ The left-hand side MUST be a valid column identifier.
			// - If the right-hand side IS present, then:
			//   ✅ The right-hand side MUST be a valid column identifier or a literal.
			//   ✅ The op MUST be present and it MUST be a binary predicate.
			//   - If op2 IS present, then:
			//     ✅ The op MUST be quantifiable.
			//     ✅ The op2 MUST be a valid quantifier.
			//   - If op2 IS NOT present, then:
			// - If the right-hand side IS NOT present, then:
			//   ✅ The op MUST be a valid unary_predicate
			//   ✅ The op2 MUST be empty
			lhs, op, op2, rhs := parsePredicateExpr(val)
			cid, ecode, eval := parseColIdent(a, lhs)
			if ecode > 0 {
				return a.error(ecode, f, j.FieldName, ftag, val, eval)
			}
			// NOTE(mkopriva): At the moment a join condition's left-hand-side
			// column MUST always reference a column of the relation being joined,
			// so to avoid confusion make sure that cid has either no qualifier or,
			// if it has one, it matches the alias of the joined table.
			//
			// TODO(mkopriva): Remove this limitation and properly handle the
			// operands regardless of which side they are positioned in.
			if len(cid.Qualifier) > 0 && (len(rid.Alias) > 0 && rid.Alias != cid.Qualifier) ||
				(len(rid.Alias) == 0 && rid.Name != cid.Qualifier) {
				return a.error(errBadJoinConditionLHS, f, j.FieldName, ftag, val, lhs)
			}
			item := new(JoinConditionTagItem)
			item.LHSColIdent = cid
			item.Predicate = stringToPredicate[op]
			item.Quantifier = stringToQuantifier[op2]
			// binary expression?
			if len(rhs) > 0 {
				if cid, ecode, eval := parseColIdent(a, rhs); ecode > 0 {
					// Only errBadColIdTagValue falls through; anything
					// else (e.g. unknown qualifier) is a hard error.
					if ecode != errBadColIdTagValue {
						return a.error(ecode, f, j.FieldName, ftag, val, eval)
					}
					// assume literal expression
					item.RHSLiteral = rhs
				} else {
					item.RHSColIdent = cid
				}
				if item.Predicate.IsUnary() {
					return a.error(errIllegalUnaryPredicate, f, j.FieldName, ftag, val, op)
				} else if item.Quantifier > 0 && !item.Predicate.CanQuantify() {
					return a.error(errIllegalPredicateQuantifier, f, j.FieldName, ftag, val, op2)
				}
			} else { // unary expression?
				// If no operator was provided, default to "istrue"
				if len(op) == 0 {
					item.Predicate = stringToPredicate["istrue"]
				}
				// TODO
				if !item.Predicate.IsUnary() {
					return a.error(errBadDirectiveBooleanExpr, f, j.FieldName, ftag, "", val)
				} else if len(op2) > 0 {
					return a.error(errIllegalPredicateQuantifier, f, j.FieldName, ftag, val, op2)
				}
			}
			// Insert the boolean connective between consecutive condition
			// items: the first item of a new tag value (i == 0) is ANDed
			// with what came before, while ";"-separated items within the
			// same tag value (i > 0) are ORed.
			if len(dir.TagItems) > 0 && i == 0 {
				dir.TagItems = append(dir.TagItems, &JoinBoolTagItem{BoolAnd})
			} else if len(dir.TagItems) > 0 && i > 0 {
				dir.TagItems = append(dir.TagItems, &JoinBoolTagItem{BoolOr})
			}
			dir.TagItems = append(dir.TagItems, item)
		}
	}
	j.Directives = append(j.Directives, dir)
	a.info.FieldMap[dir] = FieldVar{Var: f, Tag: ftag}
	return nil
}
////////////////////////////////////////////////////////////////////////////////
// On Conflict Struct Analysis
//
// analyzeOnConflictStruct analyzes the given field as an "onconflict" struct.
// The structTag argument is used for error reporting.
//
// ✅ The kind of the target query MUST be "insert".
// ✅ The type of the given field MUST be a struct type.
// ✅ The struct type MUST contain exactly 1 "conflict_action" directive.
// ✅ The struct type MUST contain exactly 1 "conflict_target" directive, if it
//
// contains the gosql.Update "conflict_action" directive.
//
// ✅ The struct type MAY contain, at most, 1 "conflict_target" directive, if it
//
// contains the gosql.Ignore "conflict_action" directive.
func analyzeOnConflictStruct(a *analysis, f *types.Var, tag string) (err error) {
	if a.query.Kind != QueryKindInsert {
		return a.error(errIllegalQueryField, f, "", tag, "", "")
	}
	ns, err := typesutil.GetStruct(f)
	if err != nil {
		return a.error(errBadFieldTypeStruct, f, "", tag, "", "")
	}
	onConflict := new(OnConflictStruct)
	onConflict.FieldName = f.Name()
	for i := 0; i < ns.Struct.NumFields(); i++ {
		fvar := ns.Struct.Field(i)
		ftag := ns.Struct.Tag(i)
		// In an OnConflictStruct all fields are expected to be directives
		// with the blank identifier as their name.
		if fvar.Name() != "_" {
			continue
		}
		// Dispatch on directive type: "column", "index", and "constraint"
		// specify the conflict target; "ignore" and "update" the action.
		switch tolower(typesutil.GetDirectiveName(fvar)) {
		case "column":
			if err = analyzeOnConflictColumnDirective(a, onConflict, fvar, ftag); err != nil {
				return err
			}
		case "index":
			if err = analyzeOnConflictIndexDirective(a, onConflict, fvar, ftag); err != nil {
				return err
			}
		case "constraint":
			if err = analyzeOnConflictConstraintDirective(a, onConflict, fvar, ftag); err != nil {
				return err
			}
		case "ignore":
			if err = analyzeOnConflictIgnoreDirective(a, onConflict, fvar, ftag); err != nil {
				return err
			}
		case "update":
			if err = analyzeOnConflictUpdateDirective(a, onConflict, fvar, ftag); err != nil {
				return err
			}
		default:
			return a.error(errIllegalStructDirective, fvar, f.Name(), ftag, "", "")
		}
	}
	// The gosql.Update action requires an explicit conflict target.
	if onConflict.Update != nil && (onConflict.Column == nil && onConflict.Index == nil && onConflict.Constraint == nil) {
		return a.error(errMissingOnConflictTarget, f, "", tag, "", "")
	}
	a.query.OnConflict = onConflict
	a.info.FieldMap[onConflict] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeOnConflictColumnDirective analyzes the given field and its associated
// tag as a "gosql.Column" directive specifying the conflict target columns.
//
// ✅ The given OnConflictStruct MUST NOT have any other "conflict_target" fields set.
// ✅ The tag MUST contain valid column identifiers.
func analyzeOnConflictColumnDirective(a *analysis, oc *OnConflictStruct, f *types.Var, tag string) (err error) {
	// Reject the directive if a conflict target was already specified.
	if oc.Column != nil || oc.Index != nil || oc.Constraint != nil {
		return a.error(errConflictingOnConfictTarget, f, oc.FieldName, tag, "", "")
	}
	sqlvals := tagutil.New(tag)["sql"]
	ids, ecode, eval := parseColIdents(a, sqlvals)
	if ecode > 0 {
		return a.error(ecode, f, oc.FieldName, tag, "", eval)
	}
	col := new(ColumnDirective)
	col.ColIdents = ids
	oc.Column = col
	a.info.FieldMap[col] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeOnConflictIndexDirective analyzes the given field and its associated
// tag as a "gosql.Index" directive specifying the conflict target index.
//
// ✅ The given OnConflictStruct MUST NOT have any other "conflict_target" fields set.
// ✅ The tag MUST contain a valid identifier.
func analyzeOnConflictIndexDirective(a *analysis, oc *OnConflictStruct, f *types.Var, tag string) (err error) {
	// Reject the directive if a conflict target was already specified.
	if oc.Column != nil || oc.Index != nil || oc.Constraint != nil {
		return a.error(errConflictingOnConfictTarget, f, oc.FieldName, tag, "", "")
	}
	idxname := tagutil.New(tag).First("sql")
	if !rxIdent.MatchString(idxname) {
		return a.error(errBadIdentTagValue, f, oc.FieldName, tag, "", "")
	}
	idx := new(IndexDirective)
	idx.Name = idxname
	oc.Index = idx
	a.info.FieldMap[idx] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeOnConflictConstraintDirective analyzes the given field and its associated
// tag as a "gosql.Constraint" directive specifying the conflict target constraint.
//
// ✅ The given OnConflictStruct MUST NOT have any other "conflict_target" fields set.
// ✅ The tag MUST contain a valid identifier.
func analyzeOnConflictConstraintDirective(a *analysis, oc *OnConflictStruct, f *types.Var, tag string) (err error) {
	// Reject the directive if a conflict target was already specified.
	if oc.Column != nil || oc.Index != nil || oc.Constraint != nil {
		return a.error(errConflictingOnConfictTarget, f, oc.FieldName, tag, "", "")
	}
	conname := tagutil.New(tag).First("sql")
	if !rxIdent.MatchString(conname) {
		return a.error(errBadIdentTagValue, f, oc.FieldName, tag, "", "")
	}
	con := new(ConstraintDirective)
	con.Name = conname
	oc.Constraint = con
	a.info.FieldMap[con] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeOnConflictIgnoreDirective analyzes the given field as a "gosql.Ignore" directive.
//
// ✅ The given OnConflictStruct MUST NOT have any other "conflict_action" fields set.
func analyzeOnConflictIgnoreDirective(a *analysis, oc *OnConflictStruct, f *types.Var, tag string) (err error) {
	// A conflict action can be specified at most once.
	if oc.Ignore != nil || oc.Update != nil {
		return a.error(errConflictingOnConfictAction, f, oc.FieldName, tag, "", "")
	}
	ig := new(IgnoreDirective)
	oc.Ignore = ig
	a.info.FieldMap[ig] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeOnConflictUpdateDirective analyzes the given field and its associated
// tag as a "gosql.Update" directive specifying the columns to update on conflict.
//
// ✅ The given OnConflictStruct MUST NOT have any other "conflict_action" fields set.
func analyzeOnConflictUpdateDirective(a *analysis, oc *OnConflictStruct, f *types.Var, tag string) (err error) {
	// A conflict action can be specified at most once.
	if oc.Ignore != nil || oc.Update != nil {
		return a.error(errConflictingOnConfictAction, f, oc.FieldName, tag, "", "")
	}
	sqlvals := tagutil.New(tag)["sql"]
	list, ecode, eval := parseColIdentList(a, sqlvals)
	if ecode > 0 {
		return a.error(ecode, f, oc.FieldName, tag, "", eval)
	}
	upd := new(UpdateDirective)
	upd.ColIdentList = list
	oc.Update = upd
	a.info.FieldMap[upd] = FieldVar{Var: f, Tag: tag}
	return nil
}
////////////////////////////////////////////////////////////////////////////////
// Plain Field Analysis
//
// analyzeLimitFieldOrDirective analyzes the given field, which is expected to
// be either the gosql.Limit directive or a plain integer field. The tag
// argument, if not empty, is expected to hold a positive integer.
func analyzeLimitFieldOrDirective(a *analysis, f *types.Var, tag string) error {
	// LIMIT is legal only for select queries, and only once.
	if !a.query.Kind.isSelect() {
		return a.error(errIllegalQueryField, f, "", tag, "", "")
	}
	if a.query.Limit != nil {
		return a.error(errConflictingFieldOrDirective, f, "", tag, "", "")
	}
	limit := new(LimitField)
	val := tagutil.New(tag).First("sql")
	if name := f.Name(); name == "_" {
		// The directive form requires a value in the tag.
		if len(val) == 0 {
			return a.error(errMissingTagValue, f, "", tag, "", "")
		}
	} else {
		// The plain-field form requires an integer-typed field.
		if !isIntegerType(f.Type()) {
			return a.error(errBadFieldTypeInt, f, "", tag, "", "")
		}
		limit.Name = name
	}
	if len(val) > 0 {
		u64, err := strconv.ParseUint(val, 10, 64)
		if err != nil {
			return a.error(errBadUIntegerTagValue, f, "", tag, "", val)
		}
		limit.Value = u64
	}
	a.query.Limit = limit
	a.info.FieldMap[limit] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeOffsetFieldOrDirective analyzes the given field, which is expected to
// be either the gosql.Offset directive or a plain integer field. The tag
// argument, if not empty, is expected to hold a positive integer.
func analyzeOffsetFieldOrDirective(a *analysis, f *types.Var, tag string) error {
	// OFFSET is legal only for select queries, and only once.
	if !a.query.Kind.isSelect() {
		return a.error(errIllegalQueryField, f, "", tag, "", "")
	}
	if a.query.Offset != nil {
		return a.error(errConflictingFieldOrDirective, f, "", tag, "", "")
	}
	offset := new(OffsetField)
	val := tagutil.New(tag).First("sql")
	if name := f.Name(); name == "_" {
		// The directive form requires a value in the tag.
		if len(val) == 0 {
			return a.error(errMissingTagValue, f, "", tag, "", "")
		}
	} else {
		// The plain-field form requires an integer-typed field.
		if !isIntegerType(f.Type()) {
			return a.error(errBadFieldTypeInt, f, "", tag, "", "")
		}
		offset.Name = name
	}
	if len(val) > 0 {
		u64, err := strconv.ParseUint(val, 10, 64)
		if err != nil {
			return a.error(errBadUIntegerTagValue, f, "", tag, "", val)
		}
		offset.Value = u64
	}
	a.query.Offset = offset
	a.info.FieldMap[offset] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeErrorHandlerField analyzes the given field as the query's error
// handler. The isInfo argument records whether the handler is the "info"
// variant of the handler.
func analyzeErrorHandlerField(a *analysis, f *types.Var, tag string, isInfo bool) error {
	// Only one error handler is allowed per query.
	if a.query.ErrorHandler != nil {
		return a.error(errConflictingFieldOrDirective, f, "", tag, "", "")
	}
	eh := &ErrorHandlerField{Name: f.Name(), IsInfo: isInfo}
	a.query.ErrorHandler = eh
	a.info.FieldMap[eh] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeContextField analyzes the given field as the query's context field.
func analyzeContextField(a *analysis, f *types.Var, tag string) error {
	// Only one context field is allowed per query.
	if a.query.Context != nil {
		return a.error(errConflictingFieldOrDirective, f, "", tag, "", "")
	}
	ctxField := &ContextField{Name: f.Name()}
	a.query.Context = ctxField
	a.info.FieldMap[ctxField] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeFilterField analyzes the given field as the query's filter field.
// Legal only for select, update, and delete queries, and mutually exclusive
// with the gosql.All directive and a "where" struct.
func analyzeFilterField(a *analysis, f *types.Var, tag string) error {
	switch k := a.query.Kind; {
	case !k.isSelect() && k != QueryKindUpdate && k != QueryKindDelete:
		return a.error(errIllegalQueryField, f, "", tag, "", "")
	case k == QueryKindUpdate && a.query.Rel.Type.IsSlice:
		// A slice update already targets specific records.
		return a.error(errIllegalSliceUpdateModifier, f, "", tag, "", "")
	}
	if a.query.All != nil || a.query.Where != nil || a.query.Filter != nil {
		return a.error(errConflictingWhere, f, "", tag, "", "")
	}
	ff := &FilterField{Name: f.Name()}
	a.query.Filter = ff
	a.info.FieldMap[ff] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeFilterConstructorField analyzes the given field as the filter's
// constructor field.
func analyzeFilterConstructorField(a *analysis, f *types.Var, tag string) error {
	// Only one filter constructor is allowed.
	if a.filter.FilterConstructor != nil {
		return a.error(errConflictingFilterConstructor, f, "", tag, "", "")
	}
	fc := &FilterConstructorField{Name: f.Name()}
	a.filter.FilterConstructor = fc
	a.info.FieldMap[fc] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeResultField analyzes the given field as the query's result field.
// Legal only for insert, update, and delete queries, and mutually exclusive
// with the gosql.Return directive and a rows-affected field.
func analyzeResultField(a *analysis, f *types.Var, tag string) error {
	switch a.query.Kind {
	case QueryKindInsert, QueryKindUpdate, QueryKindDelete:
		// ok
	default:
		return a.error(errIllegalQueryField, f, "", tag, "", "")
	}
	if a.query.Return != nil || a.query.Result != nil || a.query.RowsAffected != nil {
		return a.error(errConflictingResultTarget, f, "", tag, "", "")
	}
	result := new(ResultField)
	result.FieldName = f.Name()
	a.query.Result = result
	if err := analyzeRelType(a, &result.Type, f); err != nil {
		return err
	}
	a.info.FieldMap[result] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeRowsAffectedField analyzes the given field as the query's
// rows-affected field. Legal only for insert, update, and delete queries;
// the field's type must be an integer type.
func analyzeRowsAffectedField(a *analysis, f *types.Var, tag string) error {
	switch a.query.Kind {
	case QueryKindInsert, QueryKindUpdate, QueryKindDelete:
		// ok
	default:
		return a.error(errIllegalQueryField, f, "", tag, "", "")
	}
	if a.query.Return != nil || a.query.Result != nil || a.query.RowsAffected != nil {
		return a.error(errConflictingResultTarget, f, "", tag, "", "")
	}
	ftyp := f.Type()
	if !isIntegerType(ftyp) {
		return a.error(errBadFieldTypeInt, f, "", tag, "", "")
	}
	ra := new(RowsAffectedField)
	ra.Name = f.Name()
	ra.TypeKind = analyzeTypeKind(ftyp)
	a.query.RowsAffected = ra
	a.info.FieldMap[ra] = FieldVar{Var: f, Tag: tag}
	return nil
}
////////////////////////////////////////////////////////////////////////////////
// Directive Fields Analysis
//
// analyzeOrderByDirective analyzes the given field as the gosql.OrderBy
// directive; legal only for select queries. Each `sql` tag value is parsed
// as "[-]column[:nullsfirst|:nullslast]", where a leading "-" selects
// descending order.
func analyzeOrderByDirective(a *analysis, f *types.Var, tag string) (err error) {
	if !a.query.Kind.isSelect() {
		return a.error(errIllegalQueryField, f, "", tag, "", "")
	}
	tags := tagutil.New(tag)["sql"]
	if len(tags) == 0 {
		return a.error(errMissingTagColumnList, f, "", tag, "", "")
	}
	var items []OrderByTagItem
	for _, val := range tags {
		val = strings.TrimSpace(val)
		if len(val) == 0 {
			continue
		}
		var item OrderByTagItem
		// A leading '-' selects descending order.
		if val[0] == '-' {
			item.Direction = OrderDesc
			val = val[1:]
		}
		// An optional ":nullsfirst" / ":nullslast" suffix sets the NULLS ordering.
		if i := strings.Index(val, ":"); i > -1 {
			if val[i+1:] == "nullsfirst" {
				item.Nulls = NullsFirst
			} else if val[i+1:] == "nullslast" {
				item.Nulls = NullsLast
			} else {
				return a.error(errBadNullsOrderTagValue, f, "", val, "", val[i+1:])
			}
			val = val[:i]
		}
		// What remains must be a valid column identifier.
		cid, ecode, eval := parseColIdent(a, val)
		if ecode > 0 {
			return a.error(ecode, f, "", tag, val, eval)
		}
		item.ColIdent = cid
		items = append(items, item)
	}
	a.query.OrderBy = new(OrderByDirective)
	a.query.OrderBy.Items = items
	a.info.FieldMap[a.query.OrderBy] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeOverrideDirective analyzes the given field as the gosql.Override
// directive; legal only for insert queries. The tag value ("system" or
// "user") selects the overriding kind.
func analyzeOverrideDirective(a *analysis, f *types.Var, tag string) (err error) {
	if a.query.Kind != QueryKindInsert {
		return a.error(errIllegalQueryField, f, "", tag, "", "")
	}
	val := tolower(tagutil.New(tag).First("sql"))
	var kind OverridingKind
	switch val {
	case "system":
		kind = OverridingSystem
	case "user":
		kind = OverridingUser
	default:
		return a.error(errBadOverrideTagValue, f, "", tag, "", val)
	}
	ov := new(OverrideDirective)
	ov.Kind = kind
	a.query.Override = ov
	a.info.FieldMap[ov] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeReturnDirective analyzes the given field as the gosql.Return
// directive. Legal only for insert, update, and delete queries, mutually
// exclusive with a result field and a rows-affected field; every listed
// column must match a field of the target relation.
func analyzeReturnDirective(a *analysis, f *types.Var, tag string) error {
	if len(a.query.Rel.Type.Fields) == 0 {
		return a.error(errMissingRelField, f, "", tag, "", "") // TODO test
	}
	switch a.query.Kind {
	case QueryKindInsert, QueryKindUpdate, QueryKindDelete:
		// ok
	default:
		return a.error(errIllegalQueryField, f, "", tag, "", "")
	}
	if a.query.Return != nil || a.query.Result != nil || a.query.RowsAffected != nil {
		return a.error(errConflictingResultTarget, f, "", tag, "", "")
	}
	t := tagutil.New(tag)
	list, ecode, eval := parseColIdentList(a, t["sql"])
	if ecode > 0 {
		return a.error(ecode, f, "", tag, t.Get("sql"), eval)
	}
	// Make sure that the column ids have a matching field.
	for _, id := range list.Items {
		if !a.query.Rel.Type.HasFieldWithColumn(id.Name) {
			return a.error(errColumnFieldUnknown, f, "", tag, t.Get("sql"), id.String())
		}
	}
	ret := new(ReturnDirective)
	ret.ColIdentList = list
	a.query.Return = ret
	a.info.FieldMap[ret] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeAllDirective analyzes the given field as the gosql.All directive.
// Legal only for update and delete queries, and mutually exclusive with a
// "where" struct and a filter field.
func analyzeAllDirective(a *analysis, f *types.Var, tag string) error {
	if k := a.query.Kind; k != QueryKindUpdate && k != QueryKindDelete {
		return a.error(errIllegalQueryField, f, "", tag, "", "")
	}
	if a.query.Kind == QueryKindUpdate && a.query.Rel.Type.IsSlice {
		// A slice update already targets specific records.
		return a.error(errIllegalSliceUpdateModifier, f, "", tag, "", "")
	}
	if a.query.All != nil || a.query.Where != nil || a.query.Filter != nil {
		return a.error(errConflictingWhere, f, "", tag, "", "")
	}
	all := new(AllDirective)
	a.query.All = all
	a.info.FieldMap[all] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeDefaultDirective analyzes the given field as the gosql.Default
// directive; legal only for insert and update queries. The tag value is
// parsed as a list of column identifiers (or "*").
func analyzeDefaultDirective(a *analysis, f *types.Var, tag string) error {
	switch a.query.Kind {
	case QueryKindInsert, QueryKindUpdate:
		// ok
	default:
		return a.error(errIllegalQueryField, f, "", tag, "", "")
	}
	t := tagutil.New(tag)
	list, ecode, eval := parseColIdentList(a, t["sql"])
	if ecode > 0 {
		return a.error(ecode, f, "", tag, t.Get("sql"), eval)
	}
	def := new(DefaultDirective)
	def.ColIdentList = list
	a.query.Default = def
	a.info.FieldMap[def] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeForceDirective analyzes the given field as the gosql.Force directive;
// legal only for insert and update queries. Each listed column must match a
// field of the target relation.
func analyzeForceDirective(a *analysis, f *types.Var, tag string) error {
	switch a.query.Kind {
	case QueryKindInsert, QueryKindUpdate:
		// ok
	default:
		// TODO test
		return a.error(errIllegalQueryField, f, "", tag, "", "")
	}
	t := tagutil.New(tag)
	list, ecode, eval := parseColIdentList(a, t["sql"])
	if ecode > 0 {
		// TODO test
		return a.error(ecode, f, "", tag, t.Get("sql"), eval)
	}
	// Make sure that the column ids have a matching field.
	for _, id := range list.Items {
		if !a.query.Rel.Type.HasFieldWithColumn(id.Name) {
			// TODO test
			return a.error(errColumnFieldUnknown, f, "", tag, t.Get("sql"), id.String())
		}
	}
	force := new(ForceDirective)
	force.ColIdentList = list
	a.query.Force = force
	a.info.FieldMap[force] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeOptionalDirective analyzes the given field as the gosql.Optional
// directive; legal only for select queries. Each listed column must match a
// field of the target relation.
func analyzeOptionalDirective(a *analysis, f *types.Var, tag string) error {
	if !a.query.Kind.isSelect() {
		return a.error(errIllegalQueryField, f, "", tag, "", "")
	}
	t := tagutil.New(tag)
	list, ecode, eval := parseColIdentList(a, t["sql"])
	if ecode > 0 {
		return a.error(ecode, f, "", tag, t.Get("sql"), eval)
	}
	// Make sure that the column ids have a matching field.
	for _, id := range list.Items {
		if !a.query.Rel.Type.HasFieldWithColumn(id.Name) {
			return a.error(errColumnFieldUnknown, f, "", tag, t.Get("sql"), id.String())
		}
	}
	opt := new(OptionalDirective)
	opt.ColIdentList = list
	a.query.Optional = opt
	a.info.FieldMap[opt] = FieldVar{Var: f, Tag: tag}
	return nil
}
// analyzeTextSearchDirective analyzes the given field and its tag as the
// gosql.TextSearch directive and sets the result on the analysis' filter.
func analyzeTextSearchDirective(a *analysis, f *types.Var, tag string) error {
	raw := tagutil.New(tag).First("sql")
	raw = strings.ToLower(strings.TrimSpace(raw))
	cid, ecode, eval := parseColIdent(a, raw)
	if ecode > 0 {
		return a.error(ecode, f, "", tag, raw, eval)
	}
	ts := new(TextSearchDirective)
	ts.ColIdent = cid
	a.filter.TextSearch = ts
	a.info.FieldMap[ts] = FieldVar{Var: f, Tag: tag}
	return nil
}
// addToRelSpace registers the given relation identifier in the analysis'
// relation namespace, keyed by its alias when one is present and by its name
// otherwise. Returns a conflict error code and the offending key when the
// key is already taken.
func addToRelSpace(a *analysis, id RelIdent) (ecode errorCode, errval string) {
	if a.info.RelSpace == nil {
		a.info.RelSpace = make(map[string]RelIdent)
	}
	key, conflict := id.Name, errConflictingRelName
	if len(id.Alias) > 0 {
		key, conflict = id.Alias, errConflictingRelAlias
	}
	if _, ok := a.info.RelSpace[key]; ok {
		return conflict, key
	}
	a.info.RelSpace[key] = id
	return 0, ""
}
////////////////////////////////////////////////////////////////////////////////
// Misc. Analysis
//
// analyzeTypeKind returns the TypeKind for the given types.Type. Named types
// are resolved to the kind of their underlying type; basic types are mapped
// through typesBasicKindToTypeKind. The zero TypeKind is returned for
// unsupported / unknown types (e.g. *types.Tuple).
func analyzeTypeKind(typ types.Type) TypeKind {
	switch x := typ.(type) {
	case *types.Basic:
		return typesBasicKindToTypeKind[x.Kind()]
	case *types.Array:
		return TypeKindArray
	case *types.Chan:
		return TypeKindChan
	case *types.Signature:
		return TypeKindFunc
	case *types.Interface:
		return TypeKindInterface
	case *types.Map:
		return TypeKindMap
	case *types.Pointer:
		return TypeKindPtr
	case *types.Slice:
		return TypeKindSlice
	case *types.Struct:
		return TypeKindStruct
	case *types.Named:
		// Recurse on the underlying type of the named type.
		return analyzeTypeKind(x.Underlying())
	}
	return 0 // unsupported / unknown
}
////////////////////////////////////////////////////////////////////////////////
// Parsers
//
// parsePredicateExpr parses the given string as a predicate expression and
// returns the individual elements of that expression. The expected format is:
// { column [ predicate-type [ quantifier ] { column | literal } ] }
//
// The scan walks the expression left to right looking for the first operator
// token. Each switch case either records lhs/cop/rhs and falls through to the
// final break (exiting the loop), or executes "continue" to keep scanning.
// If no operator token is found, the whole expression is returned as lhs.
func parsePredicateExpr(expr string) (lhs, cop, qua, rhs string) {
	expr = strings.TrimSpace(expr)
	for i := range expr {
		switch expr[i] {
		case '=': // =
			lhs, cop, rhs = expr[:i], expr[i:i+1], expr[i+1:]
		case '!': // !=, !~, !~*
			if len(expr[i:]) > 2 && (expr[i+1] == '~' && expr[i+2] == '*') {
				lhs, cop, rhs = expr[:i], expr[i:i+3], expr[i+3:]
			} else if len(expr[i:]) > 1 && (expr[i+1] == '=' || expr[i+1] == '~') {
				lhs, cop, rhs = expr[:i], expr[i:i+2], expr[i+2:]
			}
		case '<': // <, <=, <>
			if len(expr[i:]) > 1 && (expr[i+1] == '=' || expr[i+1] == '>') {
				lhs, cop, rhs = expr[:i], expr[i:i+2], expr[i+2:]
			} else {
				lhs, cop, rhs = expr[:i], expr[i:i+1], expr[i+1:]
			}
		case '>': // >, >=
			if len(expr[i:]) > 1 && expr[i+1] == '=' {
				lhs, cop, rhs = expr[:i], expr[i:i+2], expr[i+2:]
			} else {
				lhs, cop, rhs = expr[:i], expr[i:i+1], expr[i+1:]
			}
		case '~': // ~, ~*
			if len(expr[i:]) > 1 && expr[i+1] == '*' {
				lhs, cop, rhs = expr[:i], expr[i:i+2], expr[i+2:]
			} else {
				lhs, cop, rhs = expr[:i], expr[i:i+1], expr[i+1:]
			}
		case ' ':
			// A space may introduce a word-form predicate ("is ...",
			// "not ...") followed by one of the known adjectives.
			var (
				j     = i + 1
				x     = strings.ToLower(expr)
				pred1 string // 1st part of predicate (not | is)
				pred2 string // 2nd part of predicate (distinct | true | null | ...)
			)
			if n := len(x[j:]); n > 3 && x[j:j+3] == "not" {
				pred1, pred2 = x[j:j+3], x[j+3:]
			} else if n := len(x[j:]); n > 2 && x[j:j+2] == "is" {
				pred1, pred2 = x[j:j+2], x[j+2:]
			}
			if len(pred2) > 0 {
				// The adjective must match as a whole word: either the
				// remainder ends there or is followed by a space.
				for _, adj := range predicateAdjectives {
					if pred2[0] != adj[0] {
						continue
					}
					if n := len(adj); len(pred2) >= n && pred2[:n] == adj && (len(pred2) == n || pred2[n] == ' ') {
						lhs = expr[:i]
						cop = pred1 + pred2[:n]
						rhs = expr[j+len(cop):]
						break
					}
				}
			}
			// No word-form predicate at this space; keep scanning.
			if len(cop) == 0 {
				continue
			}
		default:
			continue
		}
		break // if "continue" wasn't executed, exit the loop
	}
	lhs = strings.TrimSpace(lhs)
	cop = strings.TrimSpace(cop)
	rhs = strings.TrimSpace(rhs)
	if len(rhs) > 0 {
		// Check the right-hand side for a leading quantifier keyword
		// (ANY | ALL | SOME) and, if present, split it off into qua.
		x := strings.ToLower(rhs)
		switch x[0] {
		case 'a': // ANY or ALL
			n := len("any") // any and all have the same length so we test against both at the same time
			if len(x) >= n && (x[:n] == "any" || x[:n] == "all") && (len(x) == n || x[n] == ' ') {
				qua, rhs = x[:n], rhs[n:]
			}
		case 's': // SOME
			n := len("some")
			if len(x) >= n && x[:n] == "some" && (len(x) == n || x[n] == ' ') {
				qua, rhs = x[:n], rhs[n:]
			}
		}
		qua = strings.TrimSpace(qua)
		rhs = strings.TrimSpace(rhs)
	}
	if len(lhs) == 0 {
		return expr, "", "", "" // default
	}
	return lhs, cop, qua, rhs
}
// parseRelIdent parses the given string as a relation identifier and returns
// the result.
//
// ✅ The string MUST be in the expected format, which is: "[qualifier.]name[:alias]".
func parseRelIdent(val string) (id RelIdent, ecode errorCode) {
	if !rxRelIdent.MatchString(val) {
		return id, errBadRelIdTagValue
	}
	rest := val
	// Split off the qualifier (everything up to the last '.').
	if i := strings.LastIndexByte(rest, '.'); i > -1 {
		id.Qualifier, rest = rest[:i], rest[i+1:]
	}
	// Split off the alias (everything after the last ':').
	if i := strings.LastIndexByte(rest, ':'); i > -1 {
		id.Alias, rest = rest[i+1:], rest[:i]
	}
	id.Name = rest
	return id, 0
}
// parseColIdent parses the given string as a column identifier and returns
// the result.
//
// ✅ The string MUST be in the expected format, which is: "[qualifier.]name".
func parseColIdent(a *analysis, val string) (id ColIdent, ecode errorCode, eval string) {
	if !isColIdent(val) {
		return id, errBadColIdTagValue, val
	}
	name := val
	if i := strings.LastIndexByte(name, '.'); i > -1 {
		id.Qualifier = name[:i]
		// A qualifier must reference a relation registered in the rel space.
		if _, ok := a.info.RelSpace[id.Qualifier]; !ok {
			return id, errUnknownColumnQualifier, id.Qualifier
		}
		name = name[i+1:]
	}
	id.Name = name
	return id, 0, ""
}
// parseColIdents parses the individual strings in the given slice as
// column identifiers and returns the result as []ColIdent.
//
// ✅ The individual strings MUST be in the expected format, which is: "[qualifier.]name".
func parseColIdents(a *analysis, tag []string) (ids []ColIdent, ecode errorCode, eval string) {
	if len(tag) == 0 {
		return nil, errMissingTagColumnList, ""
	}
	ids = make([]ColIdent, 0, len(tag))
	for _, val := range tag {
		id, ecode, eval := parseColIdent(a, val)
		if ecode > 0 {
			return nil, ecode, eval
		}
		ids = append(ids, id)
	}
	return ids, 0, ""
}
// parseColIdentList parses the individual strings in the given slice as
// column identifiers and returns the result as a ColIdentList.
//
// ✅ A slice of length=1 holding a "*" string value MAY be used instead of column ids.
// ✅ The individual strings MUST be in the expected format, which is: "[qualifier.]name".
func parseColIdentList(a *analysis, tag []string) (list ColIdentList, ecode errorCode, eval string) {
	// A lone "*" selects all columns; no individual ids to parse.
	if len(tag) == 1 && tag[0] == "*" {
		list.All = true
		return list, 0, ""
	}
	items, ecode, eval := parseColIdents(a, tag)
	if ecode > 0 {
		return list, ecode, eval
	}
	list.Items = items
	return list, 0, ""
}
// parseCoalesceInfo reports whether the given tag contains a "coalesce"
// option and, if so, also returns the optional coalesce value captured by
// rxCoalesce's first submatch.
func parseCoalesceInfo(tag tagutil.Tag) (use bool, val string) {
	sqltag := tag["sql"]
	if len(sqltag) == 0 {
		return false, ""
	}
	// Only the options after the first tag value are inspected.
	for _, opt := range sqltag[1:] {
		if !strings.HasPrefix(opt, "coalesce") {
			continue
		}
		use = true
		if match := rxCoalesce.FindStringSubmatch(opt); len(match) > 1 {
			val = match[1]
		}
		break
	}
	return use, val
}
// parseFuncName returns the first tag value that starts with '@' as a
// lower-cased FuncName (without the '@'), or "" when no such value exists.
func parseFuncName(tagvals []string) FuncName {
	for _, val := range tagvals {
		if strings.HasPrefix(val, "@") {
			return FuncName(strings.ToLower(val[1:]))
		}
	}
	return ""
}
// parseFilterColumnKey resolves and sets f.FilterColumnKey for the given
// field. The key is built either from a struct tag value or from the field
// name, depending on the effective configuration; the global configuration
// can be overridden per-field (or per-selector-node) by an "fck" tag.
func parseFilterColumnKey(a *analysis, f *FieldInfo) error {
	// start with the globally configured defaults
	tag := a.cfg.FilterColumnKeyTag.Value
	sep := a.cfg.FilterColumnKeySeparator.Value
	base := a.cfg.FilterColumnKeyBase.Value
	selector := f.Selector
	fcktag := f.Tag["fck"]
	// if the field itself carries no "fck" tag, look for the nearest
	// selector node that has one and truncate the selector chain there
	if len(fcktag) == 0 {
		for i, node := range selector {
			if fcktag = node.Tag["fck"]; len(fcktag) > 0 {
				selector = selector[i:]
				break
			}
		}
	}
	// if present, use the fck tag to override the global config
	if len(fcktag) > 0 {
		for _, opt := range fcktag {
			// TODO(mkopriva): add error reporting for invalid
			// "fck" option keys and/or values.
			var optKey, optVal string
			if i := strings.IndexByte(opt, ':'); i > -1 {
				optKey, optVal = opt[:i], opt[i+1:]
			}
			switch optKey {
			case "tag":
				tag = optVal
			case "sep":
				sep = optVal
			case "base":
				base, _ = strconv.ParseBool(optVal)
			}
		}
	}
	// use field tag
	if len(tag) > 0 {
		if !base {
			// join the tag values along the (possibly truncated) selector chain
			f.FilterColumnKey = joinFieldTag(f, selector, tag, sep)
		} else {
			// "base" mode: use only the field's own tag value, unless it is "-"
			if key := f.Tag.First(tag); key != "-" {
				f.FilterColumnKey = key
			}
		}
	} else {
		// use field name
		if !base {
			f.FilterColumnKey = joinFieldName(f, selector, sep)
		} else {
			f.FilterColumnKey = f.Name
		}
	}
	return nil
}
// joinFieldName builds the field's key by joining the names of the selector
// nodes and the field's own name with the given separator.
func joinFieldName(f *FieldInfo, sel []*FieldSelectorNode, sep string) (key string) {
	parts := make([]string, 0, len(sel)+1)
	for _, node := range sel {
		parts = append(parts, node.Name)
	}
	parts = append(parts, f.Name)
	return strings.Join(parts, sep)
}
// joinFieldTag builds the field's key by joining the values of the given tag
// on the selector nodes and on the field itself with the given separator.
// An explicit "-" tag value on any node, or on the field, produces an empty
// key; nodes with no tag value are skipped.
func joinFieldTag(f *FieldInfo, sel []*FieldSelectorNode, tag, sep string) (key string) {
	var parts []string
	for _, node := range sel {
		switch v := node.Tag.First(tag); v {
		case "-":
			return ""
		case "":
			// node has no value for this tag; skip it
		default:
			parts = append(parts, v)
		}
	}
	v := f.Tag.First(tag)
	if v == "-" {
		return ""
	}
	if v != "" {
		parts = append(parts, v)
	}
	return strings.Join(parts, sep)
}
////////////////////////////////////////////////////////////////////////////////
// Helper Methods & Functions
//

// Contains reports whether or not the list contains the given column identifier.
// A list marked with All matches every identifier.
func (cl *ColIdentList) Contains(cid ColIdent) bool {
	if cl.All {
		return true
	}
	for _, item := range cl.Items {
		// NOTE: only Name is compared; the Qualifier is not considered.
		if item.Name == cid.Name {
			return true
		}
	}
	return false
}
// isImportedType reports whether or not the given type is imported based
// on the package in which the target of the analysis is declared.
func isImportedType(a *analysis, named *types.Named) bool {
	return named != nil && named.Obj().Pkg().Path() != a.pkgPath
}
// isAccessible reports whether or not the given value is accessible from
// the package in which the target of the analysis is declared. A blank
// identifier is never considered accessible; otherwise the value must be
// exported, or declared in the analysis target's own package.
func isAccessible(a *analysis, x exportable, named *types.Named) bool {
	return x.Name() != "_" && (x.Exported() || !isImportedType(a, named))
}
// exportable is implemented by both types.Var and types.Func.
type exportable interface {
	Name() string
	Exported() bool
}
// isIntegerType reports whether or not the given type is one of the basic (un)signed integer types.
func isIntegerType(typ types.Type) bool {
basic, ok := typ.(*types.Basic)
if !ok {
return false
}
kind := basic.Kind()
return types.Int <= kind && kind <= types.Uint64
}
// isBoolType reports whether or not the given type is a boolean.
func isBoolType(typ types.Type) bool {
basic, ok := typ.(*types.Basic)
if !ok {
return false
}
return basic.Kind() == types.Bool
}
// isErrorHandler reports whether or not the given type implements the gosql.ErrorHandler interface.
// Only named types are considered; anything else returns false.
func isErrorHandler(typ types.Type) bool {
	named, ok := typ.(*types.Named)
	if !ok {
		return false
	}
	return typesutil.ImplementsErrorHandler(named)
}
// isErrorInfoHandler reports whether or not the given type implements the gosql.ErrorInfoHandler interface.
// Only named types are considered; anything else returns false.
func isErrorInfoHandler(typ types.Type) bool {
	named, ok := typ.(*types.Named)
	if !ok {
		return false
	}
	return typesutil.ImplementsErrorInfoHandler(named)
}
// isFilterType reports whether or not the given type is the gosql.Filter type.
func isFilterType(typ types.Type) bool {
named, ok := typ.(*types.Named)
if !ok {
return false
}
name := named.Obj().Name()
if name != "Filter" {
return false
}
path := named.Obj().Pkg().Path()
return strings.HasSuffix(path, "github.com/frk/gosql")
}
// isColIdent reports whether or not the given value is a valid column identifier:
// it must match the rxColIdent pattern and must not match the rxReserved pattern.
func isColIdent(val string) bool {
	return rxColIdent.MatchString(val) && !rxReserved.MatchString(val)
}
// tolower normalizes the given string by converting it to lower case and
// also trimming any extra white-space.
func tolower(s string) string {
	trimmed := strings.TrimSpace(s)
	return strings.ToLower(trimmed)
}
////////////////////////////////////////////////////////////////////////////////
// TypeInfo Helper Methods
//

// GenericLiteral returns the type's literal. For basic kinds the "byte" and
// "rune" aliases are preferred when flagged; otherwise the kind is mapped
// through the typeKinds table. Non-basic kinds fall through to literal.
func (t *TypeInfo) GenericLiteral() LiteralType {
	if !t.Kind.IsBasic() {
		return t.literal(false, true)
	}
	switch {
	case t.IsByte:
		return "byte"
	case t.IsRune:
		return "rune"
	default:
		return LiteralType(typeKinds[t.Kind])
	}
}
// Literal returns the literal representation of the type, eliding a
// top-level pointer indirection.
func (t *TypeInfo) Literal() LiteralType {
	return t.literal(false, true)
}

// literal builds the literal representation of the type. If pkgLocal is set
// (and a usable local package identifier exists) the type name is qualified
// with the local identifier, otherwise with the package name. If elidePtr is
// set, a top-level pointer is omitted from the result; pointers in element,
// key, and value positions are always rendered.
func (t *TypeInfo) literal(pkgLocal, elidePtr bool) LiteralType {
	// Named types are rendered as "[pkg.]Name".
	if len(t.Name) > 0 {
		if pkgLocal && len(t.PkgLocal) > 0 && t.PkgLocal != "." {
			return LiteralType(t.PkgLocal + "." + t.Name)
		} else if len(t.PkgName) > 0 {
			return LiteralType(t.PkgName + "." + t.Name)
		}
		return LiteralType(t.Name)
	}
	switch t.Kind {
	default: // assume builtin basic
		return LiteralType(typeKinds[t.Kind])
	case TypeKindArray:
		return LiteralType("["+strconv.FormatInt(t.ArrayLen, 10)+"]") + t.Elem.literal(pkgLocal, false)
	case TypeKindSlice:
		return "[]" + t.Elem.literal(pkgLocal, false)
	case TypeKindMap:
		return LiteralType("map["+t.Key.literal(pkgLocal, false)+"]") + t.Elem.literal(pkgLocal, false)
	case TypeKindPtr:
		if elidePtr {
			return t.Elem.literal(pkgLocal, false)
		}
		return "*" + t.Elem.literal(pkgLocal, false)
	case TypeKindUint8:
		if t.IsByte {
			return "byte"
		}
		return "uint8"
	case TypeKindInt32:
		if t.IsRune {
			return "rune"
		}
		return "int32"
	case TypeKindInterface:
		if t.IsEmptyInterface {
			return "interface{}"
		}
		return "<unsupported>"
	case TypeKindStruct, TypeKindChan, TypeKindFunc:
		return "<unsupported>"
	}
}
// Is reports whether or not t represents a type whose kind matches one of
// the provided TypeKinds or a pointer to one of the provided TypeKinds.
func (t *TypeInfo) Is(kk ...TypeKind) bool {
	for _, k := range kk {
		if t.Kind == k {
			return true
		}
		if t.Kind == TypeKindPtr && t.Elem.Kind == k {
			return true
		}
	}
	return false
}
// IsSliceKind reports whether or not t represents a slice type whose elem type
// is one of the provided TypeKinds.
func (t *TypeInfo) IsSliceKind(kk ...TypeKind) bool {
	if t.Kind != TypeKindSlice {
		return false
	}
	elem := t.Elem.Kind
	for _, k := range kk {
		if elem == k {
			return true
		}
	}
	return false
}
// IsArray helper reports whether or not the type is of the array kind.
func (t *TypeInfo) IsArray() bool {
	return t.Kind == TypeKindArray
}
// IsSlice helper reports whether or not the type is of the slice kind.
func (t *TypeInfo) IsSlice() bool {
	return t.Kind == TypeKindSlice
}
// IsSequence helper reports whether or not the type is of the slice or array kind.
func (t *TypeInfo) IsSequence() bool {
	return t.IsSlice() || t.IsArray()
}
// IsNilable reports whether or not t represents a type that can be nil:
// a pointer, slice, array, map, or interface kind (or a pointer to one).
func (t *TypeInfo) IsNilable() bool {
	return t.Is(TypeKindPtr, TypeKindSlice, TypeKindArray, TypeKindMap, TypeKindInterface)
}
// ImplementsJSONMarshaler indicates whether or not the MarshalJSON method can
// be called on the type (directly or through a pointer's element type).
func (t *TypeInfo) ImplementsJSONMarshaler() bool {
	return t.IsJSONMarshaler || (t.Kind == TypeKindPtr && t.Elem.IsJSONMarshaler)
}
// ImplementsJSONUnmarshaler indicates whether or not the UnmarshalJSON method
// can be called on the type (directly or through a pointer's element type).
func (t *TypeInfo) ImplementsJSONUnmarshaler() bool {
	return t.IsJSONUnmarshaler || (t.Kind == TypeKindPtr && t.Elem.IsJSONUnmarshaler)
}
// ImplementsXMLMarshaler indicates whether or not the MarshalXML method can
// be called on the type (directly or through a pointer's element type).
func (t *TypeInfo) ImplementsXMLMarshaler() bool {
	return t.IsXMLMarshaler || (t.Kind == TypeKindPtr && t.Elem.IsXMLMarshaler)
}
// ImplementsXMLUnmarshaler indicates whether or not the UnmarshalXML method
// can be called on the type (directly or through a pointer's element type).
func (t *TypeInfo) ImplementsXMLUnmarshaler() bool {
	return t.IsXMLUnmarshaler || (t.Kind == TypeKindPtr && t.Elem.IsXMLUnmarshaler)
}
// IsJSONIllegal indicates whether or not an instance of the type's Kind is
// illegal to be used with encoding/json (chan, func, complex64, complex128).
func (t *TypeInfo) IsJSONIllegal() bool {
	return t.Is(TypeKindChan, TypeKindFunc, TypeKindComplex64, TypeKindComplex128)
}
// IsXMLIllegal indicates whether or not an instance of the type's Kind is
// illegal to be used with encoding/xml (chan, func, map).
func (t *TypeInfo) IsXMLIllegal() bool {
	return t.Is(TypeKindChan, TypeKindFunc, TypeKindMap)
}
// //////////////////////////////////////////////////////////////////////////////
// Cache

// relTypeCache is a package-level cache of *RelType values keyed by string,
// guarded by the embedded RWMutex for concurrent access.
var relTypeCache = struct {
	sync.RWMutex
	m map[string]*RelType
}{m: make(map[string]*RelType)}
|
package repository
import (
"HumoAcademy/models"
"fmt"
"github.com/jmoiron/sqlx"
)
// MainPagePostgres provides main-page content queries backed by a *sqlx.DB handle.
type MainPagePostgres struct {
	db *sqlx.DB
}
// NewMainPagePostgres returns a MainPagePostgres using the given database handle.
func NewMainPagePostgres(db *sqlx.DB) *MainPagePostgres {
	return &MainPagePostgres{db: db}
}
// GetAll loads the active (status=true) news and course rows that make up
// the main page content. On any query error it returns an empty
// MainPageContent together with that error.
func (r *MainPagePostgres) GetAll() (models.MainPageContent, error) {
	var content models.MainPageContent

	// Plain constants: the queries contain no format verbs, so wrapping them
	// in fmt.Sprintf (as before) was a no-op flagged by go vet/staticcheck.
	const queryNews = "SELECT id, title, short_desc, img FROM news WHERE status=true"
	if err := r.db.Select(&content.News, queryNews); err != nil {
		return models.MainPageContent{}, err
	}

	const queryCourses = "SELECT id, title, course_durance, img FROM courses WHERE status=true"
	if err := r.db.Select(&content.Courses, queryCourses); err != nil {
		return models.MainPageContent{}, err
	}
	return content, nil
}
func (r *MainPagePostgres) AddUserForNews (user models.SubscribedUsers) error {
query := fmt.Sprintf("INSERT INTO subscribed_users (email) VALUES($1)")
_, err := r.db.Exec(query, user.Email)
if err != nil {
return err
}
return nil
} |
package main
import "fmt"
/*
+ a slice references an array, describing part of or the whole array
+ a slice has a dynamic size, so no size needs to be declared at creation
*/
func main() {
	// declare a slice
	var slice []int
	fmt.Println(slice)
	// declare and initialize
	var slice1 = []int{1, 2, 3, 4}
	fmt.Println(slice1)
	// reference an array
	var array = [4]int{1, 2, 3, 4}
	slice2 := array[1:3] // assign array[1] -> array[3-1] into slice2
	fmt.Println(slice2)
	/*
		slice2 := array[:] // assign the whole array into slice2
		slice2 := array[1:] // take from array[1] to the end
	*/
	// a slice references the array, so changes through the slice modify the referenced array
	/*
		var array = [4]int {1,2,3,4}
		slice2 := array[:]
		slice2[0] = 55
		fmt.Println(slice2) // [55, 2, 3, 4]
		fmt.Println(array) // [55, 2, 3, 4]
	*/
	/*
		distinguishing len and cap for a slice
		+ len is the number of elements in the slice
		fmt.Println(len(slice2)) // 4
		+ cap (capacity): the number of elements counted from the position where
		the array was assigned into the slice up to the end of the array
		if start = index of the first element assigned into the slice then
		cap(slice) = len(array) - start
	*/
	// some functions for working with slices
	// make: create a slice with the given len and cap
	//slice3 := make([]int, 2, 5)
	// len = 2, cap = 5
	//slice4 := make([]int, 2)
	// len = 2, cap = 2
	// append: add elements to a slice
	var slice5 []int
	slice5 = append(slice5, 11)
	// copy (returns the number of elements copied):
	src := []string{"A", "B", "C", "D"}
	dest := make([]string, 2)
	number := copy(dest, src)
	fmt.Println(number) // 2
	fmt.Println(dest)   // [A, B]
	// concatenating 2 slices
	//des = append(slice1[:], slice2[:])
}
|
package libp2pquic
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"sync"
ma "gx/ipfs/QmNTCey11oxhb1AxDnQBRHtdhap6Ctud872NjAYPYYXPuc/go-multiaddr"
ic "gx/ipfs/QmNiJiXwWE3kRhZrC5ej3kSjWHm337pYfhjLGSCDNKJP2s/go-libp2p-crypto"
peer "gx/ipfs/QmPJxxDsX2UbchSHobbYuvz7qnyJTFKvaKMzE2rZWJ4x5B/go-libp2p-peer"
quic "gx/ipfs/QmU44KWVkSHno7sNDTeUcL4FBgxgoidkFuTUyTXWJPXXFJ/quic-go"
tpt "gx/ipfs/QmUDtgnEr7FFrtK2LQM2dFzTNWghnrApBDcU3iHEJz8eQS/go-libp2p-transport"
"gx/ipfs/QmYW5GSNZboYmtWWhzYr4yS3WDAuettAMh3iDLn5nGvwtc/mafmt"
manet "gx/ipfs/QmZcLBXKaFe8ND5YHPkJRAwmhJGrVsi1JqDZNyJ4nRK5Mj/go-multiaddr-net"
)
// quicConfig is the shared quic-go configuration used by this transport.
var quicConfig = &quic.Config{
	Versions:                              []quic.VersionNumber{quic.VersionMilestone0_10_0},
	MaxIncomingStreams:                    1000,
	MaxIncomingUniStreams:                 -1,             // disable unidirectional streams
	MaxReceiveStreamFlowControlWindow:     3 * (1 << 20),  // 3 MB
	MaxReceiveConnectionFlowControlWindow: 4.5 * (1 << 20), // 4.5 MB
	AcceptCookie: func(clientAddr net.Addr, cookie *quic.Cookie) bool {
		// TODO(#6): require source address validation when under load
		return true
	},
	KeepAlive: true,
}
// connManager holds at most one UDP packet conn per address family
// (IPv4/IPv6); a sync.Once guards each conn's one-time initialization.
type connManager struct {
	connIPv4Once sync.Once
	connIPv4     net.PacketConn
	connIPv6Once sync.Once
	connIPv6     net.PacketConn
}
// GetConnForAddr returns the shared packet conn for the given network
// ("udp4" or "udp6"), creating it on first use.
//
// The sync.Once callback runs at most once: if that single creation attempt
// failed, err is only set for the caller that triggered the Do, and every
// later caller would otherwise receive (nil, nil). Guard against that by
// returning an explicit error whenever the cached conn is missing.
func (c *connManager) GetConnForAddr(network string) (net.PacketConn, error) {
	switch network {
	case "udp4":
		var err error
		c.connIPv4Once.Do(func() {
			c.connIPv4, err = c.createConn(network, "0.0.0.0:0")
		})
		if err == nil && c.connIPv4 == nil {
			err = errors.New("udp4 packet conn was not initialized")
		}
		return c.connIPv4, err
	case "udp6":
		var err error
		c.connIPv6Once.Do(func() {
			c.connIPv6, err = c.createConn(network, ":0")
		})
		if err == nil && c.connIPv6 == nil {
			err = errors.New("udp6 packet conn was not initialized")
		}
		return c.connIPv6, err
	default:
		return nil, fmt.Errorf("unsupported network: %s", network)
	}
}
// createConn resolves host on the given UDP network and opens a listening
// UDP packet conn bound to the resolved address.
func (c *connManager) createConn(network, host string) (net.PacketConn, error) {
	addr, err := net.ResolveUDPAddr(network, host)
	if err != nil {
		return nil, err
	}
	return net.ListenUDP(network, addr)
}
// The Transport implements the tpt.Transport interface for QUIC connections.
type transport struct {
	privKey     ic.PrivKey      // private key used to derive the local peer and TLS config
	localPeer   peer.ID         // peer ID derived from privKey
	tlsConf     *tls.Config     // shared TLS config; cloned per-dial in Dial
	connManager *connManager    // lazily-created UDP conns, one per address family
}
var _ tpt.Transport = &transport{}
// NewTransport creates a new QUIC transport. It derives the local peer ID
// and the TLS configuration from the given private key.
func NewTransport(key ic.PrivKey) (tpt.Transport, error) {
	localPeer, err := peer.IDFromPrivateKey(key)
	if err != nil {
		return nil, err
	}
	tlsConf, err := generateConfig(key)
	if err != nil {
		return nil, err
	}
	return &transport{
		privKey:     key,
		localPeer:   localPeer,
		tlsConf:     tlsConf,
		connManager: &connManager{},
	}, nil
}
// Dial dials a new QUIC connection to the peer p at the multiaddr raddr,
// verifying during the TLS handshake that the remote's certificate chain
// matches the expected peer ID.
func (t *transport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (tpt.Conn, error) {
	network, host, err := manet.DialArgs(raddr)
	if err != nil {
		return nil, err
	}
	// reuse the shared packet conn for this address family
	pconn, err := t.connManager.GetConnForAddr(network)
	if err != nil {
		return nil, err
	}
	addr, err := fromQuicMultiaddr(raddr)
	if err != nil {
		return nil, err
	}
	var remotePubKey ic.PubKey
	tlsConf := t.tlsConf.Clone()
	// We need to check the peer ID in the VerifyPeerCertificate callback.
	// The tls.Config it is also used for listening, and we might also have concurrent dials.
	// Clone it so we can check for the specific peer ID we're dialing here.
	tlsConf.VerifyPeerCertificate = func(rawCerts [][]byte, _ [][]*x509.Certificate) error {
		// parse the raw certificates into a chain
		chain := make([]*x509.Certificate, len(rawCerts))
		for i := 0; i < len(rawCerts); i++ {
			cert, err := x509.ParseCertificate(rawCerts[i])
			if err != nil {
				return err
			}
			chain[i] = cert
		}
		var err error
		// extract the remote public key and verify it matches the dialed peer
		remotePubKey, err = getRemotePubKey(chain)
		if err != nil {
			return err
		}
		if !p.MatchesPublicKey(remotePubKey) {
			return errors.New("peer IDs don't match")
		}
		return nil
	}
	sess, err := quic.DialContext(ctx, pconn, addr, host, tlsConf, quicConfig)
	if err != nil {
		return nil, err
	}
	localMultiaddr, err := toQuicMultiaddr(sess.LocalAddr())
	if err != nil {
		return nil, err
	}
	return &conn{
		sess:            sess,
		transport:       t,
		privKey:         t.privKey,
		localPeer:       t.localPeer,
		localMultiaddr:  localMultiaddr,
		remotePubKey:    remotePubKey,
		remotePeerID:    p,
		remoteMultiaddr: raddr,
	}, nil
}
// CanDial determines if we can dial to an address (i.e. it is a QUIC multiaddr).
func (t *transport) CanDial(addr ma.Multiaddr) bool {
	return mafmt.QUIC.Matches(addr)
}
// Listen listens for new QUIC connections on the passed multiaddr.
func (t *transport) Listen(addr ma.Multiaddr) (tpt.Listener, error) {
	return newListener(addr, t, t.localPeer, t.privKey, t.tlsConf)
}
// Proxy returns true if this transport proxies. QUIC does not proxy.
func (t *transport) Proxy() bool {
	return false
}
// Protocols returns the set of protocols handled by this transport.
func (t *transport) Protocols() []int {
	return []int{ma.P_QUIC}
}
// String returns the transport's name for debugging/logging.
func (t *transport) String() string {
	return "QUIC"
}
|
package business
import (
"gitlab.wallstcn.com/matrix/xgbkb/types"
)
// createProductStmt creates a Product node with the given properties and returns it.
var createProductStmt = "CREATE (p:Product {name: $name, imgActivated: $imgActivated, imgNormal: $imgNormal}) RETURN p"

// CreateProduct runs createProductStmt with the property values taken from productIn.
func CreateProduct(productIn *types.ProductIn) (interface{}, error) {
	params := map[string]interface{}{
		"name":         productIn.Name,
		"imgActivated": productIn.ImgActivated,
		"imgNormal":    productIn.ImgNormal,
	}
	return Neo4jSingleQuery(createProductStmt, params, false)
}
// updateProductStmt updates the name and image properties of the Product
// node matched by old name and internal id, returning the updated node.
var updateProductStmt = `
MATCH (p:Product {name: $oldName}) WHERE id(p) = $id
SET p.name = $newName, p.imgActivated = $imgActivated, p.imgNormal = $imgNormal
RETURN p
`

// UpdateProduct runs updateProductStmt for the node identified by id and
// oldName, applying the values from productIn.
func UpdateProduct(id int64, oldName string, productIn *types.ProductIn) (interface{}, error) {
	params := map[string]interface{}{
		"id":           id,
		"oldName":      oldName,
		"newName":      productIn.Name,
		"imgActivated": productIn.ImgActivated,
		"imgNormal":    productIn.ImgNormal,
	}
	return Neo4jSingleQuery(updateProductStmt, params, false)
}
// IsValidProductName reports whether the given product name is acceptable.
// NOTE(review): currently a stub that accepts every name — confirm whether
// real validation rules are intended here.
func IsValidProductName(name string) bool {
	return true
}
|
package main
import "fmt"
// speaker is an interface for anything that can make a sound.
type speaker interface {
	speak() // any value implementing the speak method is a speaker
}
type cat struct{}
type dog struct{}
type person struct{}
// speak makes the cat meow.
func (c cat) speak() {
	fmt.Printf("喵喵喵\n")
}
// speak makes the dog bark.
func (d dog) speak() {
	fmt.Printf("汪汪汪\n")
}
// speak makes the person shout.
func (p person) speak() {
	fmt.Printf("啊啊啊\n")
}
// da accepts any speaker and makes it speak.
func da(x speaker) {
	x.speak()
}
// main demonstrates interface polymorphism: each concrete type is passed to
// da through the speaker interface, and an interface variable is reassigned
// across different concrete types.
func main() {
	var (
		c1 cat
		d1 dog
		p1 person
	)
	da(c1)
	da(d1)
	da(p1)
	var ss speaker
	ss = c1
	ss = d1
	ss = p1 // ss ends up holding the person value
	fmt.Println(ss)
}
|
/*
Copyright © 2020 Denis Rendler <connect@rendler.me>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"github.com/spf13/cobra"
"github.com/koderhut/safenotes/internal/utilities/logs"
"github.com/koderhut/safenotes/webapp/contracts"
)
// statsCmd represents the stats command. It queries the running safenotes
// webservice's /stats endpoint using the configured host port and basic-auth
// credentials, and logs the decoded note counters.
var statsCmd = &cobra.Command{
	Use:   "stats",
	Short: "Retrieve the webservice stats",
	Long:  `Retrieve the stats available from the webservice`,
	Run: func(cmd *cobra.Command, args []string) {
		statsUrl := url.URL{
			Scheme: "http",
			Host:   fmt.Sprintf("%s:%s", "localhost", cfg.Server.Port),
			Path:   "/stats",
			User:   url.UserPassword(cfg.Server.Auth.User, cfg.Server.Auth.Pass),
		}

		req, err := http.NewRequest(http.MethodGet, statsUrl.String(), nil)
		if err != nil {
			logs.Writer.Critical(err.Error())
			return // the request is nil on error; do not proceed
		}

		client := http.Client{}
		resp, err := client.Do(req)
		if err != nil {
			logs.Writer.Critical(err.Error())
			return // resp is not usable on error
		}
		// release the body so the underlying connection can be reused
		defer resp.Body.Close()

		var result contracts.StatsMessage
		if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
			logs.Writer.Critical(err.Error())
			return
		}
		logs.Writer.Info(fmt.Sprintf("Stats for safenotes service:\n\nCurrent Number of stored notes: %d\nTotal number of stored notes: %d\n", result.StoredNotes, result.TotalNotes))
	},
}
// init registers the stats command with the root command.
func init() {
	rootCmd.AddCommand(statsCmd)
}
|
package main
import (
"encoding/json"
"fmt"
"os"
"database/sql"
_ "github.com/mattn/go-sqlite3"
"log"
"net/http"
"goji.io"
"goji.io/pat"
"golang.org/x/net/context"
"strings"
)
var db *sql.DB
// getSongInfoBySongName handles GET /bySong/:songName: it looks up songs by
// exact name and writes each matching row to the response as a JSON object.
// NOTE(review): log.Fatal terminates the whole server process on any error —
// consider responding with an HTTP error status instead.
func getSongInfoBySongName(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	//connecting to database
	// NOTE(review): this local db shadows the package-level db variable.
	db, err := sql.Open("sqlite3", "./jrdd.db")
	if err != nil {
		fmt.Println("error connecting database")
		log.Fatal(err)
	} else {
		fmt.Println("database connection established")
		defer db.Close()
	}
	songName := pat.Param(ctx, "songName")
	//get song by song
	rows, err := db.Query("select songs.song, songs.artist, genres.name, CAST(count(songs.length) as INTEGER) from songs join genres on songs.genre = genres.ID where songs.song = ?", songName)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var SongName_ string
		var SongArtist_ string
		var SongGenreName_ string
		var SongLength_ int
		if err := rows.Scan(&SongName_, &SongArtist_, &SongGenreName_, &SongLength_); err != nil {
			log.Fatal(err)
		}else{
			// marshal the row into JSON and write it to stdout and the response
			type SongInfo struct {
				Song string
				Artist string
				Genre string
				Length int
			}
			group := SongInfo{
				Song: SongName_,
				Artist: SongArtist_,
				Genre: SongGenreName_,
				Length: SongLength_,
			}
			b, err := json.Marshal(group)
			if err != nil {
				fmt.Println("error:", err)
			}
			os.Stdout.Write(b)
			fmt.Fprintf(w, "%s", b)
			//fmt.Printf("%s --> %s %s %s %s\n", songName, SongName_, SongArtist_, SongGenreName_, SongLength_)
		}
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
// getSongInfoBySongArtist handles GET /byArtist/:songArtist: it looks up
// songs by exact artist and writes each matching row as a JSON object.
// NOTE(review): log.Fatal terminates the whole server process on any error.
func getSongInfoBySongArtist(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	//connecting to database
	// NOTE(review): this local db shadows the package-level db variable.
	db, err := sql.Open("sqlite3", "./jrdd.db")
	if err != nil {
		fmt.Println("error connecting database")
		log.Fatal(err)
	} else {
		fmt.Println("database connection established")
		defer db.Close()
	}
	songArtist := pat.Param(ctx, "songArtist")
	//get song by song
	rows, err := db.Query("select songs.song, songs.artist, genres.name, CAST(count(songs.length) as INTEGER) from songs join genres on songs.genre = genres.ID where songs.artist = ?", songArtist)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var SongName_ string
		var SongArtist_ string
		var SongGenreName_ string
		var SongLength_ int
		if err := rows.Scan(&SongName_, &SongArtist_, &SongGenreName_, &SongLength_); err != nil {
			log.Fatal(err)
		}else{
			// marshal the row into JSON and write it to stdout and the response
			type SongInfo struct {
				Song string
				Artist string
				Genre string
				Length int
			}
			group := SongInfo{
				Song: SongName_,
				Artist: SongArtist_,
				Genre: SongGenreName_,
				Length: SongLength_,
			}
			b, err := json.Marshal(group)
			if err != nil {
				fmt.Println("error:", err)
			}
			os.Stdout.Write(b)
			fmt.Fprintf(w, "%s", b)
			//fmt.Printf("%s --> %s %s %s %s\n", songArtist, SongName_, SongArtist_, SongGenreName_, SongLength_)
		}
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
// getSongInfoBySongGenre handles GET /byGenre/:songGenreName: it looks up
// songs by genre name and writes each matching row as a JSON object.
// NOTE(review): log.Fatal terminates the whole server process on any error.
func getSongInfoBySongGenre(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	//connecting to database
	// NOTE(review): this local db shadows the package-level db variable.
	db, err := sql.Open("sqlite3", "./jrdd.db")
	if err != nil {
		fmt.Println("error connecting database")
		log.Fatal(err)
	} else {
		fmt.Println("database connection established")
		defer db.Close()
	}
	songGenreName := pat.Param(ctx, "songGenreName")
	//get song by song
	rows, err := db.Query("select songs.song, songs.artist, genres.name, CAST(count(songs.length) as INTEGER) from songs join genres on songs.genre = genres.ID where genres.name = ?", songGenreName)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var SongName_ string
		var SongArtist_ string
		var SongGenreName_ string
		var SongLength_ int
		if err := rows.Scan(&SongName_, &SongArtist_, &SongGenreName_, &SongLength_); err != nil {
			log.Fatal(err)
		}else{
			// marshal the row into JSON and write it to stdout and the response
			type SongInfo struct {
				Song string
				Artist string
				Genre string
				Length int
			}
			group := SongInfo{
				Song: SongName_,
				Artist: SongArtist_,
				Genre: SongGenreName_,
				Length: SongLength_,
			}
			b, err := json.Marshal(group)
			if err != nil {
				fmt.Println("error:", err)
			}
			os.Stdout.Write(b)
			fmt.Fprintf(w, "%s", b)
			//fmt.Printf("%s --> %s %s %s %s\n", songGenreName, SongName_, SongArtist_, SongGenreName_, SongLength_)
		}
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
// getGenresInfo handles GET /genreInfo/: for every genre it writes a JSON
// object with the genre name, the number of songs, and their total length.
// NOTE(review): log.Fatal terminates the whole server process on any error.
func getGenresInfo(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	//connecting to database
	// NOTE(review): this local db shadows the package-level db variable.
	db, err := sql.Open("sqlite3", "./jrdd.db")
	if err != nil {
		fmt.Println("error connecting database")
		log.Fatal(err)
	} else {
		fmt.Println("database connection established")
		defer db.Close()
	}
	rows, err := db.Query("select genres.name, CAST(count(songs.song) as INTEGER) , CAST(sum(songs.length) as INTEGER) from songs join genres on songs.genre = genres.ID GROUP BY genres.ID")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var GenreName_ string
		var TotalSongs_ int
		var TotalLength_ int
		if err := rows.Scan(&GenreName_, &TotalSongs_, &TotalLength_); err != nil {
			log.Fatal(err)
		}else{
			// marshal the row into JSON and write it to stdout and the response
			type SongInfo struct {
				Genre string
				TotalSongs int
				TotalLength int
			}
			group := SongInfo{
				Genre: GenreName_,
				TotalSongs: TotalSongs_,
				TotalLength: TotalLength_,
			}
			b, err := json.Marshal(group)
			if err != nil {
				fmt.Println("error:", err)
			}
			os.Stdout.Write(b)
			fmt.Fprintf(w, "%s", b)
			//fmt.Printf("%s --> %s %s %s %s\n", songGenreName, SongName_, SongArtist_, SongGenreName_, SongLength_)
		}
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
// getSongByLengthRange handles GET /songLengthRange/:songLengthRange: the
// parameter is split on a single space into [min, max] and songs whose length
// is between the two bounds are written as JSON objects, ordered by length.
// NOTE(review): if the parameter contains no space, lengthRange[1] panics
// with an index-out-of-range — confirm the expected parameter format.
// NOTE(review): log.Fatal terminates the whole server process on any error.
func getSongByLengthRange(ctx context.Context, w http.ResponseWriter, r *http.Request) {
	//connecting to database
	// NOTE(review): this local db shadows the package-level db variable.
	db, err := sql.Open("sqlite3", "./jrdd.db")
	if err != nil {
		fmt.Println("error connecting database")
		log.Fatal(err)
	} else {
		fmt.Println("database connection established")
		defer db.Close()
	}
	songLengthRange := pat.Param(ctx, "songLengthRange")
	var lengthRange []string = strings.Split(songLengthRange, " ")
	//get song by song
	rows, err := db.Query("select songs.song, CAST(songs.length as INTEGER) from songs join genres on songs.genre = genres.ID WHERE songs.length BETWEEN ? AND ? ORDER BY songs.length", lengthRange[0], lengthRange[1])
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var SongName_ string
		var SongLength_ int
		if err := rows.Scan(&SongName_, &SongLength_); err != nil {
			log.Fatal(err)
		}else{
			// marshal the row into JSON and write it to stdout and the response
			type SongInfo struct {
				Song string
				Length int
			}
			group := SongInfo{
				Song: SongName_,
				Length: SongLength_,
			}
			b, err := json.Marshal(group)
			if err != nil {
				fmt.Println("error:", err)
			}
			os.Stdout.Write(b)
			fmt.Fprintf(w, "%s", b)
		}
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
// main registers the song-lookup routes on a goji mux and serves them on
// localhost:8000.
func main() {
	mux := goji.NewMux()
	mux.HandleFuncC(pat.Get("/byArtist/:songArtist"), getSongInfoBySongArtist)
	mux.HandleFuncC(pat.Get("/bySong/:songName"), getSongInfoBySongName)
	mux.HandleFuncC(pat.Get("/byGenre/:songGenreName"), getSongInfoBySongGenre)
	mux.HandleFuncC(pat.Get("/genreInfo/"), getGenresInfo)
	mux.HandleFuncC(pat.Get("/songLengthRange/:songLengthRange"), getSongByLengthRange)
	// Surface the listen error (e.g. port already in use) instead of
	// silently discarding it as before.
	log.Fatal(http.ListenAndServe("localhost:8000", mux))
}
|
package main
import "fmt"
// Vertex is a simple 2D point used to demonstrate struct literals.
type Vertex struct {
	X int
	Y int
}
/*
Struct literals - all the different ways to construct structs
*/
var (
	v1 = Vertex{1, 2}  // has type Vertex
	v2 = Vertex{X: 30} // Y : 0 is implicit
	v3 = Vertex{}      // X : 0 and Y : 0 are implicit
	p  = &Vertex{4, 5} // has type *Vertex
)
// main prints each of the struct-literal forms declared above.
func main() {
	fmt.Println(v1, v2, v3, p)
}
package erratum
import "errors"
// Use opens a resource via ro — retrying for as long as the opener returns a
// TransientError — then Frobs the input and closes the resource.
//
// The deferred function handles a panic raised during Frob: if the recovered
// value is a FrobError the resource is first defrobbed with the error's tag;
// the resource is then closed and the named return e is set to a new "meh"
// error.
func Use(ro ResourceOpener, input string) (e error) {
	var resource Resource
	var err error
	defer func() {
		rec := recover()
		if rec != nil {
			if errorType, isFrobError := rec.(FrobError); isFrobError {
				resource.Defrob(errorType.defrobTag)
			}
			resource.Close()
			e = errors.New("meh")
		}
	}()
	// keep retrying the opener while it returns transient errors
	for {
		resource, err = ro()
		if err == nil {
			break
		}
		// any non-transient open error aborts immediately
		if _, isTransientError := err.(TransientError); !isTransientError {
			return errors.New("too awesome")
		}
	}
	resource.Frob(input)
	resource.Close()
	return nil
}
|
package main
import "fmt"
// main demonstrates type definitions and type aliases.
func main() {
	// `type` is for aliases type of data's
	// poc here, example below
	// type byte = uint8
	// type rune = int32
	// type uint = uint
	type cek bool // a new named type with bool as its underlying type
	var adalahBenar cek = true
	// type salah false
	if adalahBenar {
		fmt.Println("benar")
	}
	type married = bool // an alias: married and bool are the same type
	isMarried := true
	if isMarried {
		fmt.Println("sudah rabi")
	}
}
|
package main
import (
"fmt"
"reflect"
"strings"
)
// Foo is a sample struct used to demonstrate reflection over struct fields.
type Foo struct {
	A int
	B string
}
// main builds values of several kinds (slice, string, pointer, struct) and
// feeds their reflect.Type to examiner.
func main() {
	sl := []int{1, 2, 3}
	greeting := "hello"
	greetingPtr := &greeting
	f := Foo{A: 10, B: "Salutations"}
	fp := &f
	slType := reflect.TypeOf(sl)
	gType := reflect.TypeOf(greeting)
	grpType := reflect.TypeOf(greetingPtr)
	fType := reflect.TypeOf(f)
	fpType := reflect.TypeOf(fp)
	examiner(slType, 0)
	examiner(gType, 0)
	examiner(grpType, 0)
	examiner(fType, 0)
	examiner(fpType, 0)
}
// examiner prints a description of t — its name and kind — and recurses into
// contained types (elements of arrays/chans/maps/pointers/slices, and struct
// fields), indenting each level by one additional tab.
func examiner(t reflect.Type, depth int) {
	fmt.Println(strings.Repeat("\t", depth), "Type is", t.Name(), "and kind is", t.Kind())

	switch t.Kind() {
	case reflect.Array, reflect.Chan, reflect.Map, reflect.Ptr, reflect.Slice:
		fmt.Println(strings.Repeat("\t", depth+1), "Contained type:")
		examiner(t.Elem(), depth+1)
	case reflect.Struct:
		for i := 0; i < t.NumField(); i++ {
			field := t.Field(i)
			fmt.Println(strings.Repeat("\t", depth+1), "Field", i+1, "name is", field.Name, "type is", field.Type.Name(), "and kind is", field.Type.Kind())
			if field.Tag == "" {
				continue
			}
			// a non-empty tag: show the raw tag plus the tag1/tag2 keys
			fmt.Println(strings.Repeat("\t", depth+2), "Tag is", field.Tag)
			fmt.Println(strings.Repeat("\t", depth+2), "tag1 is", field.Tag.Get("tag1"), "tag2 is", field.Tag.Get("tag2"))
		}
	}
}
|
package main
import (
"github.com/cemalkilic/jsonServer/config"
"github.com/cemalkilic/jsonServer/controllers"
"github.com/cemalkilic/jsonServer/database"
"github.com/cemalkilic/jsonServer/middlewares"
"github.com/cemalkilic/jsonServer/service"
"github.com/cemalkilic/jsonServer/utils/validator"
"github.com/gin-contrib/cors"
"github.com/gin-gonic/gin"
)
// main wires up the jsonServer: static frontend files, auth (login/signup),
// the JWT-protected user and endpoint-management routes, and the catch-all
// handler that serves user-defined custom endpoints.
func main() {
	router := gin.Default()
	router.Use(cors.Default())
	// serve the pre-built frontend
	router.StaticFile("/", "./frontend/build/index.html")
	router.Static("/static", "./frontend/build/static")
	// NOTE(review): the config load error is ignored here — confirm that a
	// missing/broken config should not abort startup.
	cfg, _ := config.LoadConfig(".")
	mysqlHandler := database.NewMySQLDBHandler(cfg)
	dataStore := database.GetSQLDataStore(mysqlHandler)
	userStore := database.GetSQLUserStore(mysqlHandler)
	v := validator.NewValidator()
	customEndpointController := controllers.NewCustomEndpointController(dataStore, v)
	customEndpointController.SetDB(dataStore)
	loginService := service.DBLoginService(userStore, v)
	jwtService := service.JWTAuthService(cfg)
	loginController := controllers.NewLoginController(loginService, jwtService)
	router.POST("/login", loginController.Login)
	router.POST("/signup", loginController.Signup)
	// simple JWT check endpoint
	router.GET("/user/me", middlewares.AuthorizeJWT(jwtService), func(context *gin.Context) {
		context.JSON(200, gin.H{
			"success": true,
		})
	})
	// Default handler to handle user routes
	router.NoRoute(customEndpointController.GetCustomEndpoint)
	router.POST("/addEndpoint", middlewares.AuthorizeJWT(jwtService), customEndpointController.AddCustomEndpoint)
	router.Run(cfg.ServerAddress)
}
|
/*
* @lc app=leetcode.cn id=1387 lang=golang
*
* [1387] 将整数按权重排序
*/
package main
import (
"sort"
)
// @lc code=start
// Weight pairs an integer value with its Collatz step count.
type Weight struct {
	Val  int
	Step int
}

// stepMap memoizes computed step counts, pre-seeded with base cases.
var stepMap = map[int]int{
	0: 1,
	1: 0,
	2: 1,
}

// caculateStep returns the number of Collatz steps needed to reach 1 from
// val, caching every computed result in stepMap.
func caculateStep(val int) int {
	if step, ok := stepMap[val]; ok {
		return step
	}
	next := 3*val + 1
	if val%2 == 0 {
		next = val / 2
	}
	stepMap[val] = 1 + caculateStep(next)
	return stepMap[val]
}

// getKth orders the integers in [lo, hi] by (step count, value) ascending
// and returns the k-th (1-based) element of that ordering.
func getKth(lo int, hi int, k int) int {
	weights := make([]Weight, 0, hi-lo+1)
	for v := lo; v <= hi; v++ {
		weights = append(weights, Weight{Val: v, Step: caculateStep(v)})
	}
	sort.Slice(weights, func(i, j int) bool {
		a, b := weights[i], weights[j]
		if a.Step != b.Step {
			return a.Step < b.Step
		}
		return a.Val < b.Val
	})
	return weights[k-1].Val
}
// func main() {
// fmt.Println(getKth(12, 15, 2))
// fmt.Println(getKth(1, 1, 1))
// fmt.Println(getKth(7, 11, 4))
// fmt.Println(getKth(10, 20, 5))
// fmt.Println(getKth(1, 1000, 777))
// }
// @lc code=end
|
package main
import (
"errors"
"fmt"
"log"
"math"
)
// ErrNorgateMath - this is idiomatic to have error variables start with err.
// We are using Err because we want it to be accessible outside the package
var ErrNorgateMath = errors.New("norgate math: square root of negative number")

// main demonstrates the sentinel error: it prints the error's dynamic type,
// then exits via log.Fatalln because sqrt rejects the negative input.
func main() {
	fmt.Printf("%T\n", ErrNorgateMath)
	if _, err := sqrt(-10); err != nil {
		log.Fatalln(err)
	}
}

// sqrt returns the square root of f, or ErrNorgateMath when f is negative.
func sqrt(f float64) (float64, error) {
	if f < 0 {
		return 0, ErrNorgateMath
	}
	return math.Sqrt(f), nil
}
/*
see use of errors.New in standard library:
http://golang.org/src/pkg/bufio/bufio.go
http://golang.org/src/pkg/io/io.go
*/
// go run main.go
// *errors.errorString
// 2020/06/24 08:01:09 norgate math: square root of negative number
// exit status 1
|
package routers
import (
"beego-blog/controllers"
"beego-blog/controllers/admin"
"github.com/astaxie/beego"
)
// init registers the public root route and the /admin namespace
// (login, dashboard and user CRUD routes) with beego.
func init() {
	beego.Router("/", &controllers.MainController{})

	adminNS := beego.NewNamespace("/admin",
		beego.NSRouter("/", &admin.IndexController{}, "get:Index"),
		beego.NSRouter("/login", &admin.IndexController{}, "*:Login"),
		beego.NSRouter("/dashboard", &admin.DashboardController{}, "get:Dashboard"),
		beego.NSNamespace("/user",
			beego.NSRouter("/", &admin.UserController{}, "get:UserList"),
			beego.NSRouter("/add", &admin.UserController{}, "get:UserAdd"),
			beego.NSRouter("/add", &admin.UserController{}, "post:Add"),
			beego.NSRouter("/edit/:id", &admin.UserController{}, "get:EditUser"),
		),
	)
	beego.AddNamespace(adminNS)
}
|
package controllers
import "github.com/superbet-group/code-cadets-2021/homework_4/03_bet_acceptance_api/internal/api/controllers/models"
// BetValidator validates incoming bet DTOs before they are accepted.
type BetValidator interface {
	// BetIsValid reports whether betDto passes validation.
	BetIsValid(betDto models.BetDto) bool
}
|
package validations
import (
"testing"
"github.com/andrewesteves/taskee-api/entities"
)
// TestProjectFields verifies that a project carrying a description
// produces zero validation errors from ProjectStore.
func TestProjectFields(t *testing.T) {
	p := entities.Project{
		Description: "Awesome project",
	}
	got := len(ProjectStore(p))
	want := 0
	if got != want {
		t.Errorf("actual: %d, expected: %d", got, want)
	}
}
// TestProjectWithoutDescription verifies that a project missing its
// description yields exactly one validation error.
func TestProjectWithoutDescription(t *testing.T) {
	var p entities.Project
	got := len(ProjectStore(p))
	want := 1
	if got != want {
		t.Errorf("actual: %d, expected: %d", got, want)
	}
}
|
package main
/*
* @lc app=leetcode id=105 lang=golang
*
* [105] Construct Binary Tree from Preorder and Inorder Traversal
*/
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
// buildTree reconstructs a binary tree from its preorder and inorder
// traversals. The first preorder element is the root; its position in
// the inorder slice splits the remaining values into left and right
// subtrees. Assumes unique values and consistent traversals.
func buildTree(preorder []int, inorder []int) *TreeNode {
	if len(preorder) == 0 {
		return nil
	}
	rootVal := preorder[0]
	node := &TreeNode{rootVal, nil, nil}

	// Locate the root within the inorder sequence.
	split := 0
	for inorder[split] != rootVal {
		split++
	}

	node.Left = buildTree(preorder[1:split+1], inorder[:split])
	node.Right = buildTree(preorder[split+1:], inorder[split+1:])
	return node
}
|
package mcservice
import (
"log"
)
// liststreamkeyitems proxies a "liststreamkeyitems" JSON-RPC request to
// the platform API and decrypts the "data" field of every returned item
// in place.
//
// req.Params must hold at least two string parameters; malformed input
// yields errNumParameter / errParameter. An upstream response of an
// unexpected shape, or an item that cannot be decrypted, yields
// errInternal.
func (s *MCService) liststreamkeyitems(req *JSONRequest) (*JSONResponse, error) {
	if len(req.Params) < 2 {
		return nil, errNumParameter
	}
	if _, ok := req.Params[0].(string); !ok {
		return nil, errParameter
	}
	if _, ok := req.Params[1].(string); !ok {
		return nil, errParameter
	}
	rsp, err := s.platformAPI(req)
	// BUG FIX: the upstream error was previously ignored here; when the
	// call fails rsp may be nil and the rsp.Result access below panics.
	if err != nil {
		return nil, err
	}
	if rsp.Result != nil {
		items, ok := rsp.Result.([]interface{})
		if !ok {
			log.Printf("unexpected result: %+v", rsp.Result)
			return nil, errInternal
		}
		for i := range items {
			item, ok := items[i].(map[string]interface{})
			if !ok {
				log.Printf("unexpected result: %+v", items[i])
				return nil, errInternal
			}
			// BUG FIX: the original used a bare type assertion (panics on a
			// non-string) followed by a check of the stale platformAPI err.
			// Use a comma-ok assertion instead.
			data, ok := item["data"].(string)
			if !ok {
				log.Printf("unexpected result: %+v", item["data"])
				return nil, errInternal
			}
			plaintext, err := s.boxer.UnBox(data)
			if err != nil {
				log.Printf("could not decrypt: %s", err)
				return nil, errInternal
			}
			item["data"] = plaintext
		}
	}
	return rsp, nil
}
|
package main
import (
"log"
"strconv"
)
// SiteConfig holds blog-wide settings loaded by InitConfig from the
// WordPress wp_options table.
type SiteConfig struct {
	BlogName        string // wp_options key "blogname"
	BlogDescription string // wp_options key "blogdescription"
	URLFormat       string // permalink format; InitConfig hard-codes "YMD"
	PostxPage       int    // wp_options key "posts_per_page" (posts per page)
}
// InitConfig populates the package-level siteConfig from the
// wp_options table. Query errors are logged and otherwise ignored,
// leaving the affected fields at their zero values; URLFormat is
// always forced to "YMD".
func InitConfig() {
	type Option struct {
		OptionName  string
		OptionValue string
	}
	var opts []*Option

	sess := connection.NewSession(nil)
	if _, err := sess.Select("option_name, option_value").
		From("wp_options").
		LoadStructs(&opts); err != nil {
		log.Println(err.Error())
	}

	for _, opt := range opts {
		switch opt.OptionName {
		case "blogname":
			siteConfig.BlogName = opt.OptionValue
		case "blogdescription":
			siteConfig.BlogDescription = opt.OptionValue
		case "posts_per_page":
			siteConfig.PostxPage, _ = strconv.Atoi(opt.OptionValue)
		}
	}
	siteConfig.URLFormat = "YMD"
}
|
package pathfileops
import (
"errors"
"fmt"
"os"
"sort"
"strings"
)
// SortFileMgrByAbsPathCaseSensitive - Sorts an array of File Managers
// (FileMgr) by absolute path, filename and file extension. This sorting
// operation is performed as a 'Case Sensitive' sort meaning the upper
// and lower case characters are significant.
//
// This method is designed to be used with the 'Go' Sort package:
// https://golang.org/pkg/sort/
//
// Example Usage:
//
// sort.Sort(SortFileMgrByAbsPathCaseSensitive(FileMgrArray))
//
type SortFileMgrByAbsPathCaseSensitive []FileMgr

// Len - Required by the sort.Interface. Returns the number of
// File Managers being sorted.
func (sortAbsPathSens SortFileMgrByAbsPathCaseSensitive) Len() int {
	return len(sortAbsPathSens)
}

// Swap - Required by the sort.Interface. Exchanges the elements at
// indexes 'i' and 'j'.
func (sortAbsPathSens SortFileMgrByAbsPathCaseSensitive) Swap(i, j int) {
	sortAbsPathSens[i], sortAbsPathSens[j] = sortAbsPathSens[j], sortAbsPathSens[i]
}

// Less - Required by the sort.Interface. Reports whether element 'i'
// sorts before element 'j' using a byte-wise (case sensitive)
// comparison of the absolute path and file name.
func (sortAbsPathSens SortFileMgrByAbsPathCaseSensitive) Less(i, j int) bool {
	return sortAbsPathSens[i].absolutePathFileName < sortAbsPathSens[j].absolutePathFileName
}
// SortFileMgrByAbsPathCaseInSensitive - Sort by File Managers by
// absolute path, filename and file extension. This sorting operation
// is performed as a 'Case Insensitive' sort meaning the upper and
// lower case characters are not significant. All sort comparisons
// are therefore made by using lower case versions of the absolute
// path, filename and file extension.
//
// This method is designed to be used with the 'Go' Sort package:
// https://golang.org/pkg/sort/
//
// Example Usage:
//
// sort.Sort(SortFileMgrByAbsPathCaseInSensitive(FileMgrArray))
//
type SortFileMgrByAbsPathCaseInSensitive []FileMgr

// Len - Required by the sort.Interface. Returns the number of
// File Managers being sorted.
func (sortAbsPathInSens SortFileMgrByAbsPathCaseInSensitive) Len() int {
	return len(sortAbsPathInSens)
}

// Swap - Required by the sort.Interface. Exchanges the elements at
// indexes 'i' and 'j'.
func (sortAbsPathInSens SortFileMgrByAbsPathCaseInSensitive) Swap(i, j int) {
	sortAbsPathInSens[i], sortAbsPathInSens[j] = sortAbsPathInSens[j], sortAbsPathInSens[i]
}

// Less - Required by the sort.Interface. Compares the lower-cased
// absolute path and file names so that case is not significant.
func (sortAbsPathInSens SortFileMgrByAbsPathCaseInSensitive) Less(i, j int) bool {
	return strings.ToLower(sortAbsPathInSens[i].absolutePathFileName) <
		strings.ToLower(sortAbsPathInSens[j].absolutePathFileName)
}
// FileMgrCollection - Manages a collection of FileMgr
// instances.
//
// Dependencies:
// 'FileMgrCollection' depends on type, 'FileHelper'
// which is located in source code file 'filehelper.go'.
type FileMgrCollection struct {
	fileMgrs []FileMgr // backing array; lazily allocated by every method when nil
}
// AddFileMgr appends a deep copy of 'fMgr' to the collection, lazily
// allocating the backing slice on first use.
func (fMgrs *FileMgrCollection) AddFileMgr(fMgr FileMgr) {
	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}
	deepCopy := fMgr.CopyOut()
	fMgrs.fileMgrs = append(fMgrs.fileMgrs, deepCopy)
}
// AddFileMgrByDirFileNameExt builds a File Manager from a Directory
// Manager ('directory') plus a file name/extension string and appends
// it to the collection. Returns an error when the helper fails or when
// the resulting FileMgr is empty.
func (fMgrs *FileMgrCollection) AddFileMgrByDirFileNameExt(
	directory DirMgr,
	fileNameExt string) error {

	ePrefix := "FileMgrCollection.AddFileMgrByDirFileNameExt() "

	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}

	newFMgr := FileMgr{}
	helper := fileMgrHelper{}

	isEmpty, err := helper.setFileMgrDirMgrFileName(&newFMgr, directory, fileNameExt, ePrefix)
	if err != nil {
		return err
	}
	if isEmpty {
		return fmt.Errorf(ePrefix+
			"Error: The FileMgr instance generated by input parameters 'directory' and "+
			"'fileNameExt' is Empty!\n"+
			"directory='%v'\nfileNameExt='%v'\n",
			directory.absolutePath, fileNameExt)
	}

	fMgrs.fileMgrs = append(fMgrs.fileMgrs, newFMgr)
	return nil
}
// AddFileMgrByPathFileNameExt builds a File Manager from a combined
// path/file-name/extension string and appends it to the collection.
// Returns an error when the helper fails or produces an empty FileMgr.
func (fMgrs *FileMgrCollection) AddFileMgrByPathFileNameExt(
	pathFileNameExt string) error {

	ePrefix := "FileMgrCollection.AddFileMgrByPathFileNameExt() "

	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}

	newFMgr := FileMgr{}
	helper := fileMgrHelper{}

	isEmpty, err := helper.setFileMgrPathFileName(&newFMgr, pathFileNameExt, ePrefix)
	if err != nil {
		return fmt.Errorf(ePrefix+
			"Error returned from fMgrHlpr.setFileMgrPathFileName(pathFileNameExt).\n"+
			"pathFileNameExt='%v'\nError='%v'\n", pathFileNameExt, err.Error())
	}
	if isEmpty {
		return fmt.Errorf(ePrefix+
			"ERROR: The generated File Manager instance is EMPTY!\n"+
			"pathFileNameExt='%v'\n", pathFileNameExt)
	}

	fMgrs.fileMgrs = append(fMgrs.fileMgrs, newFMgr)
	return nil
}
// AddFileMgrByDirStrFileNameStr appends a FileMgr built from the two
// string parameters 'pathName' and 'fileNameExt'. The path is first
// converted to a DirMgr; either conversion failing or producing an
// empty result aborts with an error.
func (fMgrs *FileMgrCollection) AddFileMgrByDirStrFileNameStr(
	pathName string,
	fileNameExt string) error {

	ePrefix := "FileMgrCollection.AddFileMgrByDirStrFileNameStr() "

	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}

	dirHelper := dirMgrHelper{}
	newDMgr := DirMgr{}

	isEmpty, err := dirHelper.setDirMgr(&newDMgr, pathName, ePrefix, "dMgr", "pathName")
	if err != nil {
		return err
	}
	if isEmpty {
		return fmt.Errorf(ePrefix+"ERROR: Directory Manager created "+
			"from 'pathName' is EMPTY!\n"+
			"pathName='%v'", pathName)
	}

	fileHelper := fileMgrHelper{}
	newFMgr := FileMgr{}

	isEmpty, err = fileHelper.setFileMgrDirMgrFileName(&newFMgr, newDMgr, fileNameExt, ePrefix)
	if err != nil {
		return err
	}
	if isEmpty {
		return fmt.Errorf(ePrefix+"ERROR: File Manager created "+
			"from 'pathName' and 'fileNameExt' is EMPTY!\n"+
			"pathName='%v'\n"+
			"fileNameExt='%v'",
			pathName,
			fileNameExt)
	}

	fMgrs.fileMgrs = append(fMgrs.fileMgrs, newFMgr)
	return nil
}
// AddFileMgrByFileInfo - Adds a File Manager object to the collection based on input from
// a directory path string and an os.FileInfo object.
//
// Fixes:
//   - ePrefix contained a stray ')' ("FileMgrCollection) AddFileMgrByFileInfo() ")
//     instead of '.', unlike every sibling method.
//   - the final error message omitted ePrefix; added for consistency with
//     the other Add* methods.
func (fMgrs *FileMgrCollection) AddFileMgrByFileInfo(pathName string, info os.FileInfo) error {
	ePrefix := "FileMgrCollection.AddFileMgrByFileInfo() "
	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}
	dMgrHlpr := dirMgrHelper{}
	dMgr := DirMgr{}
	isEmpty, err := dMgrHlpr.setDirMgr(
		&dMgr,
		pathName,
		ePrefix,
		"dMgr",
		"pathName")
	if err != nil {
		return err
	}
	if isEmpty {
		return fmt.Errorf(ePrefix+"ERROR: Directory Manager created "+
			"from 'pathName' is EMPTY!\n"+
			"pathName='%v'", pathName)
	}
	fMgrHlpr := fileMgrHelper{}
	fMgr := FileMgr{}
	isEmpty,
		err = fMgrHlpr.setFileMgrDirMgrFileName(
		&fMgr,
		dMgr,
		info.Name(),
		ePrefix)
	if err != nil {
		return err
	}
	if isEmpty {
		return fmt.Errorf(ePrefix+"ERROR: File Manager created "+
			"from 'pathName' and 'info' is EMPTY!\n"+
			"pathName='%v'\n"+
			"info.Name()='%v'",
			pathName,
			info.Name())
	}
	fMgrs.fileMgrs = append(fMgrs.fileMgrs, fMgr)
	return nil
}
// AddFileMgrCollection appends deep copies of every File Manager in
// 'fMgrs2' to the current collection. Both collections' backing
// slices are lazily allocated when nil (note: this mutates fMgrs2's
// nil slice as the original did).
func (fMgrs *FileMgrCollection) AddFileMgrCollection(fMgrs2 *FileMgrCollection) {
	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}
	if fMgrs2.fileMgrs == nil {
		fMgrs2.fileMgrs = make([]FileMgr, 0, 50)
	}
	for idx := range fMgrs2.fileMgrs {
		fMgrs.AddFileMgr(fMgrs2.fileMgrs[idx].CopyOut())
	}
}
// CopyFilesToDir copies every file in the collection to
// 'targetDirectory', stopping at the first failure. An empty
// collection is an error.
func (fMgrs *FileMgrCollection) CopyFilesToDir(targetDirectory DirMgr) error {
	ePrefix := "FileMgrCollection.CopyFilesToDir() "

	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}
	if len(fMgrs.fileMgrs) == 0 {
		return errors.New(ePrefix + "ERROR - Collection contains ZERO File Managers!")
	}

	for idx := range fMgrs.fileMgrs {
		if err := fMgrs.fileMgrs[idx].CopyFileToDirByIoByLink(targetDirectory); err != nil {
			return fmt.Errorf(ePrefix+
				"Copy Failure on index='%v' file='%v'. Error='%v'",
				idx, fMgrs.fileMgrs[idx].absolutePathFileName, err.Error())
		}
	}
	return nil
}
// CopyOut returns a deep duplicate of the current collection.
// An empty collection yields an error and a zero-value collection.
func (fMgrs *FileMgrCollection) CopyOut() (FileMgrCollection, error) {
	ePrefix := "FileMgrCollection.CopyOut() "

	duplicate := FileMgrCollection{}
	duplicate.fileMgrs = make([]FileMgr, 0, 50)

	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}
	if len(fMgrs.fileMgrs) == 0 {
		return FileMgrCollection{},
			errors.New(ePrefix +
				"Error: This File Manager Collection ('FileMgrCollection') is EMPTY! ")
	}

	for idx := range fMgrs.fileMgrs {
		duplicate.AddFileMgr(fMgrs.fileMgrs[idx].CopyOut())
	}
	return duplicate, nil
}
// DeleteAtIndex removes the File Manager at array index 'idx',
// shrinking the collection by one element. Returns an error for a
// negative index, an empty collection, or an out-of-range index.
func (fMgrs *FileMgrCollection) DeleteAtIndex(idx int) error {
	ePrefix := "FileMgrCollection.DeleteAtIndex() "

	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}
	if idx < 0 {
		return fmt.Errorf(ePrefix+
			"Error: Input Parameter 'idx' is less than zero. "+
			"Index Out-Of-Range! idx='%v'", idx)
	}

	count := len(fMgrs.fileMgrs)
	if count == 0 {
		return errors.New(ePrefix +
			"Error: The File Manager Collection, 'FileMgrCollection', is EMPTY!")
	}
	if idx >= count {
		return fmt.Errorf(ePrefix+
			"Error: Input Parameter 'idx' is greater than the "+
			"length of the collection index. Index Out-Of-Range! "+
			"idx='%v' Array Length='%v' ", idx, count)
	}

	switch {
	case count == 1:
		// Deleting the only element leaves a fresh, empty backing array.
		fMgrs.fileMgrs = make([]FileMgr, 0, 100)
	case idx == 0:
		fMgrs.fileMgrs = fMgrs.fileMgrs[1:]
	case idx == count-1:
		fMgrs.fileMgrs = fMgrs.fileMgrs[0 : count-1]
	default:
		// Interior element: splice it out.
		fMgrs.fileMgrs = append(fMgrs.fileMgrs[0:idx], fMgrs.fileMgrs[idx+1:]...)
	}
	return nil
}
// FindFiles returns a new collection holding every FileMgr that
// matches 'fileSelectionCriteria'. Files with initialized os.FileInfo
// data are filtered on that data; otherwise a FileInfoPlus stand-in
// built from the file name is used.
func (fMgrs *FileMgrCollection) FindFiles(
	fileSelectionCriteria FileSelectionCriteria) (FileMgrCollection, error) {

	ePrefix := "FileMgrCollection.FindFiles() "

	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}
	if len(fMgrs.fileMgrs) == 0 {
		return FileMgrCollection{}.New(), nil
	}

	fh := FileHelper{}
	matches := FileMgrCollection{}.New()

	for idx := range fMgrs.fileMgrs {
		fMgr := fMgrs.fileMgrs[idx]
		var matched bool
		var err error

		if fMgr.actualFileInfo.isFInfoInitialized {
			matched, err = fh.FilterFileName(fMgr.actualFileInfo, fileSelectionCriteria)
			if err != nil {
				return FileMgrCollection{},
					fmt.Errorf(ePrefix+
						"Error returned by "+
						"fh.FilterFileName(fMgr.actualFileInfo, fileSelectionCriteria) "+
						"fMgr.actualFileInfo.Name()='%v' Error='%v'",
						fMgr.actualFileInfo.Name(), err.Error())
			}
		} else {
			fip := FileInfoPlus{}
			fip.SetName(fMgr.fileNameExt)
			matched, err = fh.FilterFileName(fip, fileSelectionCriteria)
			if err != nil {
				return FileMgrCollection{}, fmt.Errorf(ePrefix+
					"Error returned by fh.FilterFileName(fip, fileSelectionCriteria) "+
					"fip.Name()='%v' Error='%v'", fip.Name(), err.Error())
			}
		}

		if matched {
			matches.AddFileMgr(fMgr)
		}
	}
	return matches, nil
}
// GetFileMgrArray - Returns the entire File Manager array managed
// by this collection.
//
// Note: the internal slice itself is returned, not a copy; callers
// share the backing array with the collection.
//
// ------------------------------------------------------------------------
//
// Input Parameters:
//
// None
//
// ------------------------------------------------------------------------
//
// Return Values:
//
// []FileMgr - The array of FileMgr instances maintained by this
// collection.
//
// Fix: the lazy nil-allocation used capacity 10 while every other
// method of this type uses capacity 50; aligned for consistency.
func (fMgrs *FileMgrCollection) GetFileMgrArray() []FileMgr {
	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}
	return fMgrs.fileMgrs
}
// GetFileMgrAtIndex returns a pointer to the FileMgr stored at array
// index 'idx'. Unlike the 'Peek'/'Pop' methods, no deep copy is made,
// so the caller can mutate the collection's element directly.
func (fMgrs *FileMgrCollection) GetFileMgrAtIndex(idx int) (*FileMgr, error) {
	ePrefix := "FileMgrCollection.GetFileMgrAtIndex() "
	none := FileMgr{}

	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}

	count := len(fMgrs.fileMgrs)
	if count == 0 {
		return &none,
			fmt.Errorf(ePrefix +
				"Error: This File Manager Collection ('FileMgrCollection') is EMPTY!")
	}
	if idx < 0 || idx >= count {
		return &none,
			fmt.Errorf(ePrefix+
				"Error: The input parameter, 'idx', is OUT OF RANGE! idx='%v'. \n"+
				"The minimum index is '0'. "+
				"The maximum index is '%v'. ", idx, count-1)
	}
	return &fMgrs.fileMgrs[idx], nil
}
// GetNumOfFileMgrs reports how many File Managers (FileMgr) are
// currently stored in the collection.
func (fMgrs *FileMgrCollection) GetNumOfFileMgrs() int {
	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}
	count := len(fMgrs.fileMgrs)
	return count
}
// GetNumOfFiles reports the number of files (FileMgr entries) held by
// the collection; functionally identical to GetNumOfFileMgrs.
func (fMgrs *FileMgrCollection) GetNumOfFiles() int {
	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}
	count := len(fMgrs.fileMgrs)
	return count
}
// GetTotalFileBytes sums the sizes of all files in the collection
// whose os.FileInfo data has been initialized; uninitialized entries
// contribute zero.
func (fMgrs *FileMgrCollection) GetTotalFileBytes() uint64 {
	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
		return 0
	}
	var total uint64
	for idx := range fMgrs.fileMgrs {
		if fMgrs.fileMgrs[idx].actualFileInfo.isFInfoInitialized {
			total += uint64(fMgrs.fileMgrs[idx].actualFileInfo.Size())
		}
	}
	return total
}
// InsertFileMgrAtIndex - Inserts a new File Manager into the collection at
// array 'index'. The new File Manager is passed as input parameter 'fMgr'.
//
// If input parameter 'index' is less than zero, an error will be returned. If
// 'index' exceeds the value of the last index in the collection, 'fMgr' will be
// added to the end of the collection at the next legal index.
//
// Fix: the original truncation used a one-argument append
// (append(fMgrs.fileMgrs[:index])), which is a no-op wrapper around the
// slice expression; plain re-slicing states the intent directly.
func (fMgrs *FileMgrCollection) InsertFileMgrAtIndex(fMgr FileMgr, index int) error {
	ePrefix := "FileMgrCollection.InsertFileMgrAtIndex() "
	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}
	if index < 0 {
		return fmt.Errorf(ePrefix+
			"Error: Input parameter 'index' is LESS THAN ZERO! "+
			"index='%v' ", index)
	}
	if index >= len(fMgrs.fileMgrs) {
		fMgrs.fileMgrs = append(fMgrs.fileMgrs, fMgr.CopyOut())
		return nil
	}
	// Copy the tail into a fresh slice first so the insertion below cannot
	// clobber it through the shared backing array.
	newFileMgrs := make([]FileMgr, 0, 100)
	if index == 0 {
		newFileMgrs = append(newFileMgrs, fMgr.CopyOut())
		fMgrs.fileMgrs = append(newFileMgrs, fMgrs.fileMgrs...)
		return nil
	}
	newFileMgrs = append(newFileMgrs, fMgrs.fileMgrs[index:]...)
	fMgrs.fileMgrs = fMgrs.fileMgrs[:index]
	fMgrs.fileMgrs = append(fMgrs.fileMgrs, fMgr.CopyOut())
	fMgrs.fileMgrs = append(fMgrs.fileMgrs, newFileMgrs...)
	return nil
}
// New - Creates and returns a new, empty and properly initialized
// File Manager Collection ('FileMgrCollection').
//
// Fix: the method has a value receiver, so the original nil-check that
// assigned to fMgrs.fileMgrs only mutated a local copy — dead code with
// no effect on the caller. It has been removed.
func (fMgrs FileMgrCollection) New() FileMgrCollection {
	newFMgrCol := FileMgrCollection{}
	newFMgrCol.fileMgrs = make([]FileMgr, 0, 100)
	return newFMgrCol
}
// PopFileMgrAtIndex removes the File Manager at array index 'idx' and
// returns a deep copy of it. After a successful call the collection is
// one element shorter. Index 0 and the last index delegate to
// PopFirstFileMgr / PopLastFileMgr respectively.
func (fMgrs *FileMgrCollection) PopFileMgrAtIndex(idx int) (FileMgr, error) {
	ePrefix := "FileMgrCollection.PopFileMgrAtIndex() "

	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}
	if idx < 0 {
		return FileMgr{}, fmt.Errorf(ePrefix+
			"Error: Input Parameter is less than zero. Index Out-Of-Range! idx='%v'", idx)
	}

	count := len(fMgrs.fileMgrs)
	if count == 0 {
		return FileMgr{},
			errors.New(ePrefix +
				"Error: The File Manager Collection, 'FileMgrCollection', is EMPTY!")
	}
	if idx >= count {
		return FileMgr{}, fmt.Errorf(ePrefix+
			"Error: Input Parameter, 'idx' is greater than the length of the "+
			"collection index. Index Out-Of-Range! "+
			"idx='%v' Array Length='%v' ", idx, count)
	}

	switch idx {
	case 0:
		return fMgrs.PopFirstFileMgr()
	case count - 1:
		return fMgrs.PopLastFileMgr()
	}

	popped := fMgrs.fileMgrs[idx].CopyOut()
	fMgrs.fileMgrs = append(fMgrs.fileMgrs[0:idx], fMgrs.fileMgrs[idx+1:]...)
	return popped, nil
}
// PopFirstFileMgr removes the first File Manager from the collection
// and returns a deep copy of it; the collection shrinks by one.
func (fMgrs *FileMgrCollection) PopFirstFileMgr() (FileMgr, error) {
	ePrefix := "FileMgrCollection.PopFirstFileMgr() "

	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}
	if len(fMgrs.fileMgrs) == 0 {
		return FileMgr{},
			errors.New(ePrefix +
				"Error: The File Manager Collection, 'FileMgrCollection' is EMPTY!")
	}

	first := fMgrs.fileMgrs[0].CopyOut()
	fMgrs.fileMgrs = fMgrs.fileMgrs[1:]
	return first, nil
}
// PopLastFileMgr removes the final File Manager from the collection
// and returns a deep copy of it; the collection shrinks by one.
func (fMgrs *FileMgrCollection) PopLastFileMgr() (FileMgr, error) {
	ePrefix := "FileMgrCollection.PopLastFileMgr() "

	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}

	count := len(fMgrs.fileMgrs)
	if count == 0 {
		return FileMgr{}, errors.New(ePrefix +
			"Error: The File Manager Collection, 'FileMgrCollection', is EMPTY!")
	}

	last := fMgrs.fileMgrs[count-1].CopyOut()
	fMgrs.fileMgrs = fMgrs.fileMgrs[0 : count-1]
	return last, nil
}
// PeekFileMgrAtIndex returns a deep copy of the File Manager at array
// index 'idx' without removing it; the collection length is unchanged.
func (fMgrs *FileMgrCollection) PeekFileMgrAtIndex(idx int) (FileMgr, error) {
	ePrefix := "FileMgrCollection.PeekFileMgrAtIndex() "

	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}

	count := len(fMgrs.fileMgrs)
	if count == 0 {
		return FileMgr{},
			errors.New(ePrefix +
				"Error: The File Manager Collection, 'FileMgrCollection' is EMPTY!")
	}
	if idx < 0 {
		return FileMgr{},
			fmt.Errorf(ePrefix+
				"Error: Input Parameter 'idx' is less than zero. "+
				"Index Out-Of-Range! idx='%v'", idx)
	}
	if idx >= count {
		return FileMgr{},
			fmt.Errorf(ePrefix+
				"Error: Input Parameter 'idx' is greater than the length "+
				"of the collection array. "+
				"Index Out-Of-Range! idx='%v' Array Length='%v' ",
				idx, count)
	}

	return fMgrs.fileMgrs[idx].CopyOut(), nil
}
// PeekFirstFileMgr returns a deep copy of the first File Manager in
// the collection without removing it.
func (fMgrs *FileMgrCollection) PeekFirstFileMgr() (FileMgr, error) {
	ePrefix := "FileMgrCollection.PeekFirstFileMgr() "

	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}
	if len(fMgrs.fileMgrs) == 0 {
		return FileMgr{},
			errors.New(ePrefix +
				"Error: The File Manager Collection ('FileMgrCollection') is EMPTY!")
	}

	return fMgrs.fileMgrs[0].CopyOut(), nil
}
// PeekLastFileMgr returns a deep copy of the final File Manager in
// the collection without removing it.
func (fMgrs *FileMgrCollection) PeekLastFileMgr() (FileMgr, error) {
	ePrefix := "FileMgrCollection.PeekLastFileMgr()"

	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}

	count := len(fMgrs.fileMgrs)
	if count == 0 {
		return FileMgr{},
			errors.New(ePrefix +
				"Error: The File Manager Collection ('FileMgrCollection') is EMPTY!")
	}

	return fMgrs.fileMgrs[count-1].CopyOut(), nil
}
// SortByAbsPathFileName sorts the collection in place by absolute
// path, file name and extension. When 'caseInsensitiveSort' is true
// the comparison uses lower-cased strings; otherwise upper and lower
// case characters are significant.
func (fMgrs *FileMgrCollection) SortByAbsPathFileName(caseInsensitiveSort bool) {
	if fMgrs.fileMgrs == nil {
		fMgrs.fileMgrs = make([]FileMgr, 0, 50)
	}
	if len(fMgrs.fileMgrs) == 0 {
		return
	}

	var sorter sort.Interface
	if caseInsensitiveSort {
		sorter = SortFileMgrByAbsPathCaseInSensitive(fMgrs.fileMgrs)
	} else {
		sorter = SortFileMgrByAbsPathCaseSensitive(fMgrs.fileMgrs)
	}
	sort.Sort(sorter)
}
|
package main
import (
"fmt"
"github.com/codegangsta/cli"
)
// buildListCommand returns the "list" CLI command, which prints all
// basic stages followed by each compound stage and its sub-stages.
func buildListCommand(stageList *StageList) cli.Command {
	return cli.Command{
		Name:  "list",
		Usage: "List available stages",
		Action: func(c *cli.Context) {
			fmt.Printf("Basic stages:\n")
			for _, basic := range stageList.Stages {
				fmt.Printf("- %v\n", basic)
			}
			fmt.Printf("\nCompound stages:\n")
			for name, subStages := range stageList.CompoundStages {
				fmt.Printf("- %v\n", name)
				for _, sub := range subStages {
					fmt.Printf("\t %v\n", sub)
				}
				fmt.Printf("\n")
			}
		},
	}
}
// buildStageCommand returns a CLI command that runs the named build
// stage via runCommand.
func buildStageCommand(stageName string, config *BuildConfig) cli.Command {
	action := func(c *cli.Context) {
		runCommand(c.Command.Name, config)
	}
	return cli.Command{
		Name:   stageName,
		Usage:  stageName,
		Action: action,
	}
}
// createCliApp assembles the bldr CLI application: the "list" command
// plus one command per configured stage.
func createCliApp(stageList *StageList, config *BuildConfig) *cli.App {
	app := cli.NewApp()
	app.Name = "bldr"
	app.Usage = "Build stuff"

	commands := []cli.Command{buildListCommand(stageList)}
	for _, stageName := range stageList.Stages {
		commands = append(commands, buildStageCommand(stageName, config))
	}
	app.Commands = commands
	return app
}
|
package srv
import (
"context"
"log"
"net/http"
"github.com/bcspragu/Radiotation/db"
oidc "github.com/coreos/go-oidc"
)
// serveVerifyToken verifies a posted Google ID token, extracts the
// user's name from its claims, creates/stores the user in an encrypted
// cookie, and responds with "success".
//
// Fix: failures previously returned without writing anything, so the
// client received an empty 200 OK; error statuses are now reported.
func (s *Srv) serveVerifyToken(w http.ResponseWriter, r *http.Request) {
	token := r.PostFormValue("token")
	ti, err := s.verifyIdToken(token)
	if err != nil {
		log.Printf("verifyIdToken(%s): %v", token, err)
		http.Error(w, "invalid token", http.StatusUnauthorized)
		return
	}
	var name struct {
		First string `json:"given_name"`
		Last  string `json:"family_name"`
	}
	if err := ti.Claims(&name); err != nil {
		log.Printf("token.Claims: %v", err)
		http.Error(w, "invalid token claims", http.StatusBadRequest)
		return
	}
	// If the token is good, store the information in the user's encrypted cookie
	u := db.GoogleUser(ti.Subject, name.First, name.Last)
	s.createUser(w, u)
	w.Write([]byte("success"))
}
// verifyIdToken validates a raw Google OIDC ID token and returns the
// parsed token on success.
func (s *Srv) verifyIdToken(rawIDToken string) (*oidc.IDToken, error) {
	idToken, err := s.googleVerifier.Verify(context.Background(), rawIDToken)
	if err != nil {
		return nil, err
	}
	return idToken, nil
}
|
package main
import (
	"context"
	"errors"
	"log"
	"net"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/go-chi/chi"
	"github.com/lestrrat-go/server-starter/listener"
	"golang.org/x/sync/errgroup"
)
//go:generate wire ./...
// NewListener opens a plain TCP listener on localhost port 8080.
func NewListener() (net.Listener, error) {
	const addr = "127.0.0.1:8080"
	return net.Listen("tcp", addr)
}
// NewServerStarterListener returns the first listener inherited from
// server-starter, or an error when inheritance fails or no listener
// was passed down.
//
// Fix: when ListenAll succeeded but returned zero listeners, the
// original returned the nil err, producing a (nil, nil) result that
// callers would dereference. A real error is now returned.
// (Requires the "errors" import added to this file's import block.)
func NewServerStarterListener() (net.Listener, error) {
	listeners, err := listener.ListenAll()
	if err != nil {
		return nil, err
	}
	if len(listeners) == 0 {
		return nil, errors.New("server-starter provided no listeners")
	}
	return listeners[0], nil
}
// NewServer builds the HTTP server: static assets mounted at "/" and
// a small API under "/api".
func NewServer() (*http.Server, error) {
	router := chi.NewRouter()
	router.Mount("/", NewAssetsHandler())
	router.Route("/api", func(api chi.Router) {
		api.Get("/hello", func(w http.ResponseWriter, r *http.Request) {
			w.Write([]byte("welcome"))
		})
	})
	return &http.Server{Handler: router}, nil
}
// App bundles the HTTP server with the network listener it serves on.
type App struct {
	listener net.Listener // source of accepted connections
	server   *http.Server // server run by Start
}
// Start serves HTTP on the configured listener and shuts the server
// down gracefully (5-second deadline) when SIGTERM or an interrupt
// arrives. It blocks until both goroutines finish and returns the
// first non-nil error.
func (a *App) Start() error {
	var group errgroup.Group

	group.Go(func() error {
		err := a.server.Serve(a.listener)
		if err != nil && err != http.ErrServerClosed {
			return err
		}
		return nil
	})

	group.Go(func() error {
		sig := make(chan os.Signal, 1)
		signal.Notify(sig, syscall.SIGTERM, os.Interrupt)
		<-sig
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		return a.server.Shutdown(ctx)
	})

	return group.Wait()
}
// start wires the application together via the wire-generated
// InitializeApp, runs it until shutdown, and releases resources
// through the returned cleanup function.
func start() error {
	application, cleanup, err := InitializeApp()
	if err != nil {
		return err
	}
	defer cleanup()
	return application.Start()
}
// main runs the application and aborts the process on a fatal error.
func main() {
	err := start()
	if err != nil {
		log.Fatal(err)
	}
}
|
package leetcode
import "testing"
// TestToGoatLatin checks the sample sentence against its expected
// Goat-Latin translation.
func TestToGoatLatin(t *testing.T) {
	got := toGoatLatin("I speak Goat Latin")
	want := "Imaa peaksmaaa oatGmaaaa atinLmaaaaa"
	if got != want {
		t.Fatal()
	}
}
|
/*
* @lc app=leetcode.cn id=84 lang=golang
*
* [84] 柱状图中最大的矩形
*/
package main
import "fmt"
// @lc code=start
// max returns the larger of two ints.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
/*
单调栈
func largestRectangleArea(heights []int) int {
var cur,curHeight, left, right, curWidth, ans int
newHeights := make([]int, len(heights)+2)
stack := make([]int, 0)
for i := 1; i < len(heights)+1; i++ {
newHeights[i] = heights[i-1]
}
for i, v := range newHeights {
for len(stack) != 0 && v < newHeights[stack[len(stack)-1]] {
cur = stack[len(stack)-1]
stack = stack[:len(stack)-1]
curHeight = newHeights[cur]
left = stack[len(stack)-1]
right = i
curWidth = right - left - 1
ans = max(ans, curHeight*curWidth)
}
stack = append(stack, i)
}
return ans
} */
// largestRectangleArea computes the maximal rectangle area in a
// histogram using two monotonic-stack sweeps: for each bar it records
// the index of the nearest bar to the left and right that bounds the
// bar's rectangle, then maximizes width*height over all bars.
func largestRectangleArea(heights []int) int {
	n := len(heights)
	leftBound := make([]int, n)
	rightBound := make([]int, n)

	// Left-to-right sweep: nearest bounding index on the left (-1 if none).
	stack := make([]int, 0, n)
	for i := 0; i < n; i++ {
		for len(stack) > 0 && heights[stack[len(stack)-1]] >= heights[i] {
			stack = stack[:len(stack)-1]
		}
		if len(stack) == 0 {
			leftBound[i] = -1
		} else {
			leftBound[i] = stack[len(stack)-1]
		}
		stack = append(stack, i)
	}

	// Right-to-left sweep: nearest bounding index on the right (n if none).
	stack = stack[:0]
	for i := n - 1; i >= 0; i-- {
		for len(stack) > 0 && heights[stack[len(stack)-1]] >= heights[i] {
			stack = stack[:len(stack)-1]
		}
		if len(stack) == 0 {
			rightBound[i] = n
		} else {
			rightBound[i] = stack[len(stack)-1]
		}
		stack = append(stack, i)
	}

	best := 0
	for i := 0; i < n; i++ {
		best = max(best, (rightBound[i]-leftBound[i]-1)*heights[i])
	}
	return best
}
// @lc code=end
// main runs a quick sanity check of largestRectangleArea.
func main() {
	area := largestRectangleArea([]int{0, 9})
	fmt.Println(area)
}
|
package usecase
import (
"github.com/taniwhy/mochi-match-rest/domain/models"
"github.com/taniwhy/mochi-match-rest/domain/repository"
)
// RoomReservationUseCase defines the application-level operations
// available for room reservations.
type RoomReservationUseCase interface {
	// FindAllRoomReservation returns every stored reservation.
	FindAllRoomReservation() ([]*models.RoomReservation, error)
	// FindRoomReservationByID returns the reservation with the given id.
	FindRoomReservationByID(id int64) (*models.RoomReservation, error)
	// InsertRoomReservation persists a new reservation.
	InsertRoomReservation(roomReservation *models.RoomReservation) error
	// UpdateRoomReservation saves changes to an existing reservation.
	UpdateRoomReservation(roomReservation *models.RoomReservation) error
	// DeleteRoomReservation removes a reservation.
	DeleteRoomReservation(roomReservation *models.RoomReservation) error
}
// roomReservationUsecase is the default RoomReservationUseCase
// implementation; it delegates every call to the injected repository.
type roomReservationUsecase struct {
	roomReservationRepository repository.RoomReservationRepository
}
// NewRoomReservationUsecase builds a RoomReservationUseCase backed by rR.
func NewRoomReservationUsecase(rR repository.RoomReservationRepository) RoomReservationUseCase {
	u := &roomReservationUsecase{roomReservationRepository: rR}
	return u
}
// FindAllRoomReservation returns every stored room reservation.
func (rU roomReservationUsecase) FindAllRoomReservation() ([]*models.RoomReservation, error) {
	reservations, err := rU.roomReservationRepository.FindAllRoomReservation()
	if err != nil {
		return nil, err
	}
	return reservations, nil
}
// FindRoomReservationByID returns the reservation with the given ID.
func (rU roomReservationUsecase) FindRoomReservationByID(id int64) (*models.RoomReservation, error) {
	reservation, err := rU.roomReservationRepository.FindRoomReservationByID(id)
	if err != nil {
		return nil, err
	}
	return reservation, nil
}
// InsertRoomReservation persists a new reservation via the repository.
func (rU roomReservationUsecase) InsertRoomReservation(roomReservation *models.RoomReservation) error {
	return rU.roomReservationRepository.InsertRoomReservation(roomReservation)
}
// UpdateRoomReservation overwrites an existing reservation via the repository.
func (rU roomReservationUsecase) UpdateRoomReservation(room *models.RoomReservation) error {
	return rU.roomReservationRepository.UpdateRoomReservation(room)
}
// DeleteRoomReservation removes a reservation via the repository.
func (rU roomReservationUsecase) DeleteRoomReservation(room *models.RoomReservation) error {
	return rU.roomReservationRepository.DeleteRoomReservation(room)
}
|
package ksqlclient
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"ksql_operator/ksqlclient/swagger"
"net/http"
"net/url"
"path"
)
// CommandStatus represents the textual status of a ksql server command.
type CommandStatus string

const (
	// ContentType is the media type the ksql REST API expects.
	ContentType = "application/vnd.ksql.v1+json"
	// ErrUnexpected is the sentinel wrapped into errors for status codes
	// this client does not handle.
	ErrUnexpected = Error("Unexpected response")
	// ErrCodeNotFound is the ksql error code for a missing entity.
	ErrCodeNotFound = 40001
)

// Error is a string-based error type, usable as a constant sentinel.
type Error string

func (e Error) Error() string { return string(e) }

// client talks to a ksql server's REST API, optionally using HTTP basic
// authentication when userName is non-empty.
type client struct {
	baseURL *url.URL
	userName string
	password string
}
// New creates a ksql REST client for baseUrl. username and password, when
// non-empty, are sent as HTTP basic auth on every request.
//
// Fixes over the previous version: the local variable no longer shadows the
// imported url package, and on a parse failure nil is returned instead of a
// half-initialized client alongside the error.
func New(baseUrl string, username string, password string) (*client, error) {
	u, err := url.Parse(baseUrl)
	if err != nil {
		return nil, err
	}
	return &client{baseURL: u, userName: username, password: password}, nil
}
// Describe executes a ksql DESCRIBE statement for name.
// The result is either *swagger.ModelError or *[]swagger.DescribeResultItem.
func (c client) Describe(ctx context.Context, name string) (interface{}, error) {
	return c.Execute(ctx, fmt.Sprintf("DESCRIBE %s;", name), &[]swagger.DescribeResultItem{})
}
// Explain executes a ksql EXPLAIN statement for name.
// The result is either *swagger.ModelError or *[]swagger.DescribeResultItem.
//
// NOTE(review): this decodes into DescribeResultItem rather than an
// Explain-specific type (the original comment mentions ExplainResult) —
// confirm this is intentional.
func (c client) Explain(ctx context.Context, name string) (interface{}, error) {
	return c.Execute(ctx, fmt.Sprintf("EXPLAIN %s;", name), &[]swagger.DescribeResultItem{})
}
// CreateDropTerminate executes a ksql CREATE/DROP/TERMINATE statement.
// The result is either *swagger.ModelError or
// *[]swagger.CreateDropTerminateResponseItem.
func (c client) CreateDropTerminate(ctx context.Context, sql string) (interface{}, error) {
	return c.Execute(ctx, sql, &[]swagger.CreateDropTerminateResponseItem{})
}
// Execute runs a single ksql statement against the /ksql endpoint.
//
// On HTTP 200 the response body is unmarshalled into result and result is
// returned; on HTTP 400 a *swagger.ModelError is returned instead (with a
// nil error, since a 400 carries a well-formed ksql error payload). Any
// other status code, and any transport or decoding failure, yields a
// non-nil error.
func (c client) Execute(ctx context.Context, ksql string, result interface{}) (interface{}, error) {
	u, err := c.baseURL.Parse("ksql")
	if err != nil {
		return nil, err
	}
	requestBody := swagger.Statement{
		Ksql: ksql,
	}
	requestBodyJson, err := json.Marshal(requestBody)
	if err != nil {
		return nil, err
	}
	// Bind the request to ctx so cancellation and deadlines are honored
	// (the previous version accepted ctx but never used it).
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, u.String(), bytes.NewBuffer(requestBodyJson))
	if err != nil {
		return nil, err
	}
	if c.userName != "" {
		req.SetBasicAuth(c.userName, c.password)
	}
	req.Header.Set("Content-Type", ContentType)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	switch resp.StatusCode {
	case http.StatusOK:
		if err := json.Unmarshal(body, result); err != nil {
			return nil, fmt.Errorf("error unmarshalling response, err: %v, body: '%s'", err, string(body))
		}
		return result, nil
	case http.StatusBadRequest:
		r := swagger.ModelError{}
		if err := json.Unmarshal(body, &r); err != nil {
			return nil, fmt.Errorf("error unmarshalling response, err: %v, body: '%s'", err, string(body))
		}
		return &r, nil
	default:
		return nil, fmt.Errorf("unexpected response '%d' with body '%s'", resp.StatusCode, body)
	}
}
// Status fetches the status of a previously issued command from the
// /status/<commandID> endpoint.
//
// On HTTP 200 it returns *swagger.StatusResponse; on 404 it returns a
// *swagger.ModelError with ErrorCode 404 (and a nil error). Any other
// status code yields an error mentioning ErrUnexpected.
//
// NOTE(review): the tx context is accepted but never attached to the
// request (http.NewRequest instead of NewRequestWithContext) — confirm and
// fix so cancellation works.
func (c client) Status(tx context.Context, commandID string) (interface{}, error) {
	// NOTE(review): this local shadows the imported url package for the
	// rest of the function.
	url, err := c.baseURL.Parse(path.Join("status", commandID))
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest(http.MethodGet, url.String(), nil)
	if err != nil {
		return nil, err
	}
	if c.userName != "" {
		req.SetBasicAuth(c.userName, c.password)
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == http.StatusOK {
		result := &swagger.StatusResponse{}
		err := json.Unmarshal(body, result)
		if err != nil {
			return nil, errors.New(fmt.Sprintf("error unmarshalling response, err: %v, body: '%s'", err, string(body)))
		}
		return result, nil
	}
	if resp.StatusCode == http.StatusNotFound {
		// The 404 body is not decoded; only the code is surfaced.
		return &swagger.ModelError{
			ErrorCode: 404,
		}, nil
	}
	return nil, fmt.Errorf("%s code %d with body %s", ErrUnexpected, resp.StatusCode, string(body))
}
|
package page
// 4096 bits
// 24 bits meta-data header
// slots and tuples
import (
"errors"
"fmt"
//"github.com/chaitya62/noobdb/type"
)
// Page layout constants; all sizes and offsets are in bytes.
// NOTE(review): "LIMT" looks like a typo for "LIMIT"; renaming the exported
// constants could break callers, so it is only flagged here.
const TABLE_NAME_LIMT = 2048
const COLUMN_NAME_LIMT = 2048
// SLOT_OFFSET is where the slot array begins, right after the 24-byte
// meta-data header.
const SLOT_OFFSET = 24
const SLOT_ID_SIZE = 4
const TUPLE_LOCATION_SIZE = 2
// SLOT_SIZE is SLOT_ID_SIZE + TUPLE_LOCATION_SIZE.
const SLOT_SIZE = 6
// SchemaPage is a 4096-byte page storing schema tuples: a 24-byte header,
// a slot array growing upward from SLOT_OFFSET, and tuple data growing
// downward from the end of the page.
type SchemaPage struct {
	PageImpl
}
// Init resets the page to empty: with no tuples stored, the free-space
// pointer starts at the end of the 4096-byte page and tuples are written
// downward from there.
func (sp *SchemaPage) Init() {
	sp.SetFreeSpacePointer(4096)
}
// shallowCopy points this page at _page's backing buffer without copying;
// both pages then alias the same underlying data.
func (sp *SchemaPage) shallowCopy(_page Page) {
	sp._data = _page.GetData()
}
// GetHeader returns the page's 24-byte meta-data header.
// Returning a slice exposes the underlying buffer (no copy), so callers can
// mutate the header in place; re-think later if this causes any unexpected
// behaviour.
// NOTE(review): the literal 24 duplicates SLOT_OFFSET — consider using the
// constant.
func (sp *SchemaPage) GetHeader() []byte {
	// returning a slice returns the address space and not the copy
	// will have to re-think later if this is causes any unexpect behaviour
	return sp._data[:24]
}
// The free-space pointer is stored little-endian in header bytes 4-5.
// Two bytes caps the addressable page size at 2^16 - 1 = 65535, which is
// ample for a 4096-byte page.

// GetFreeSpacePointer returns the offset at which free space ends and tuple
// data begins; tuples are written downward from this offset.
func (sp *SchemaPage) GetFreeSpacePointer() uint16 {
	return (uint16(sp._data[4]) | uint16(sp._data[5])<<8)
}
// GetNumberOfTuples returns the tuple count stored little-endian in header
// bytes 6-7.
func (sp *SchemaPage) GetNumberOfTuples() uint16 {
	return (uint16(sp._data[6]) | uint16(sp._data[7])<<8)
}
// UpdateNumberOfTuples stores n as the page's tuple count, little-endian in
// header bytes 6-7. It always returns nil.
//
// The parameter was previously named fsp, which wrongly suggested the
// free-space pointer; it is the tuple count (see GetNumberOfTuples).
func (sp *SchemaPage) UpdateNumberOfTuples(n uint16) error {
	sp._data[6] = byte(n)
	sp._data[7] = byte(n >> 8)
	return nil
}
// SetFreeSpacePointer stores fsp little-endian in header bytes 4-5.
// It always returns nil.
// probably should be private
func (sp *SchemaPage) SetFreeSpacePointer(fsp uint16) error {
	sp._data[4] = byte(fsp)
	sp._data[5] = byte(fsp >> 8)
	return nil
}
// GetSlotStart returns the tuple start offset recorded in slot i.
// Each slot is SLOT_ID_SIZE + TUPLE_LOCATION_SIZE = 6 bytes: a 4-byte slot
// id followed by a 2-byte little-endian tuple offset, hence the reads at
// x+4 and x+5.
func (sp *SchemaPage) GetSlotStart(i uint16) uint16 {
	x := SLOT_OFFSET + (SLOT_ID_SIZE+TUPLE_LOCATION_SIZE)*i
	return (uint16(sp._data[x+4]) | uint16(sp._data[x+5])<<8)
}
// ReadTuple returns the raw bytes of tuple i. Tuples are packed from the end
// of the page downward, so tuple i spans from its own slot offset up to the
// start of the previously inserted tuple (or the page end, 4096, for tuple
// 0). The returned slice aliases the page buffer.
//
//TODO: error handling (an out-of-range i reads garbage or panics).
func (sp *SchemaPage) ReadTuple(i uint16) []byte {
	var end_at uint16
	end_at = 4096
	start_at := sp.GetSlotStart(i)
	if i > 0 {
		end_at = sp.GetSlotStart((i - 1))
	}
	return sp._data[start_at:end_at]
}
// InsertTuple appends tp to the page: the tuple bytes are written just below
// the free-space pointer, a 6-byte slot (4-byte id, 2-byte little-endian
// offset) is appended to the slot array, and the header's free-space pointer
// and tuple count are updated. Returns an error when the page is full.
//
// NOTE(review): space_left is computed with unsigned subtraction; if the
// slot array ever grew past the free-space pointer this would underflow to
// a huge value instead of signalling "full" — confirm it cannot happen.
func (sp *SchemaPage) InsertTuple(tp Tuple) error {
	tp_size := tp.GetSize()
	fp := sp.GetFreeSpacePointer()
	number_of_tps := sp.GetNumberOfTuples()
	// The slot array ends here; everything between slot_ends and fp is free.
	slot_ends := (SLOT_OFFSET + SLOT_SIZE*number_of_tps)
	space_left := fp - slot_ends
	if uint64(space_left) < tp_size {
		//TODO: ADD CUSTOM STANDARD ERROR TYPES TO DATABASE
		fmt.Println("PAGE is FULL")
		return errors.New("Page is full")
	}
	// assuming all tuples fit in one page
	//TODO: Implement handling for Tuple OVERFLOW
	start_at := fp - uint16(tp_size)
	//TODO: Error handling
	// insert the tuple bytes just below the free-space pointer
	copy(sp._data[start_at:fp], tp.GetData())
	sp._dirty = true
	// insert slot: 1-based slot id followed by the tuple's start offset
	//TODO: SLOT STRUCT ?
	var slot [(SLOT_ID_SIZE + TUPLE_LOCATION_SIZE)]byte
	slot_id := number_of_tps + 1
	slot[0] = byte(slot_id)
	slot[1] = byte(slot_id >> 8)
	slot[2] = byte(slot_id >> 16)
	slot[3] = byte(slot_id >> 24)
	slot[4] = byte(start_at)
	slot[5] = byte(start_at >> 8)
	// set slot
	copy(sp._data[slot_ends:slot_ends+(SLOT_SIZE)], slot[:])
	sp.SetFreeSpacePointer(start_at)
	sp.UpdateNumberOfTuples(number_of_tps + 1)
	return nil
}
|
package github
import (
"context"
"encoding/csv"
"fmt"
"os"
"sort"
"strconv"
"strings"
"time"
"github.com/google/go-github/v28/github"
"golang.org/x/oauth2"
)
// issueData summarizes one GitHub issue or pull request that the user was
// involved in, as accumulated by IssuesAndPRs.
type issueData struct {
	org, repo string // owning organization and repository
	number int // issue/PR number within the repository
	opened, closed bool // whether the user opened / closed it since the cutoff
	comments int // number of comments the user left since the cutoff
	isPR bool // pull request rather than issue
}
// IssuesAndPRs collects the GitHub issue and pull-request activity of
// username since the given time and returns three named CSV report writers:
// "github-issues", "github-prs-authored", and "github-prs-reviewed".
//
// Authentication requires the GITHUB_TOKEN environment variable. Issues in
// golang/go are excluded (they are tracked via the golang package).
//
// NOTE(review): ListIssueEvents and ListComments are called with nil
// options, so only the first page of events/comments per issue is counted —
// confirm whether pagination is needed there. The search loop starts at
// Page 0 while the GitHub API treats page 1 as the first page — confirm the
// first page is not fetched twice.
func IssuesAndPRs(ctx context.Context, username string, since time.Time) (map[string]func(*csv.Writer) error, error) {
	token := os.Getenv("GITHUB_TOKEN")
	if token == "" {
		return nil, fmt.Errorf("GITHUB_TOKEN environment variable is not configured")
	}
	ts := oauth2.StaticTokenSource(&oauth2.Token{
		AccessToken: token,
	})
	tc := oauth2.NewClient(ctx, ts)
	client := github.NewClient(tc)
	stats := make(map[string]*issueData)
	// Get all non-golang/go issues, page by page, keyed by HTML URL.
	var current, total int
	for i := 0; ; i++ {
		result, _, err := client.Search.Issues(ctx, fmt.Sprintf("involves:%v updated:>=%v", username, since.Format("2006-01-02")), &github.SearchOptions{
			ListOptions: github.ListOptions{
				Page: i,
				PerPage: 100,
			},
		})
		if err != nil {
			return nil, err
		}
		for _, issue := range result.Issues {
			// The repository URL looks like
			// https://api.github.com/repos/<org>/<repo>.
			trimmed := strings.TrimPrefix(issue.GetRepositoryURL(), "https://api.github.com/repos/")
			split := strings.SplitN(trimmed, "/", 2)
			org, repo := split[0], split[1]
			// golang/go issues are tracked via the golang package.
			if org == "golang" && repo == "go" {
				continue
			}
			stats[issue.GetHTMLURL()] = &issueData{
				org: org,
				repo: repo,
				number: issue.GetNumber(),
				// Only mark issues as opened if the user opened them since the specified date.
				opened: issue.GetUser().GetLogin() == username && issue.GetCreatedAt().After(since),
				isPR: issue.IsPullRequest(),
			}
		}
		total = result.GetTotal()
		current += len(result.Issues)
		if current >= total {
			break
		}
	}
	// For each collected issue, detect closes by the user and count the
	// user's comments since the cutoff date.
	for _, issue := range stats {
		events, _, err := client.Issues.ListIssueEvents(ctx, issue.org, issue.repo, issue.number, nil)
		if err != nil {
			return nil, err
		}
		for _, e := range events {
			if e.GetActor().GetLogin() != username {
				continue
			}
			if e.GetCreatedAt().Before(since) {
				continue
			}
			switch e.GetEvent() {
			case "closed":
				issue.closed = true
			}
		}
		comments, _, err := client.Issues.ListComments(ctx, issue.org, issue.repo, issue.number, nil)
		if err != nil {
			return nil, err
		}
		for _, c := range comments {
			if c.GetUser().GetLogin() != username {
				continue
			}
			if c.GetCreatedAt().Before(since) {
				continue
			}
			issue.comments++
		}
	}
	// URLs of all pull requests, shared by the two PR reports below.
	sortedPRs := make([]string, 0, len(stats))
	for url, data := range stats {
		if !data.isPR {
			continue
		}
		sortedPRs = append(sortedPRs, url)
	}
	// TODO(rstambler): Add per-repo totals.
	return map[string]func(*csv.Writer) error{
		// Issues report: one row per issue plus a totals row.
		"github-issues": func(writer *csv.Writer) error {
			sorted := make([]string, 0, len(stats))
			for url, data := range stats {
				if data.isPR {
					continue
				}
				sorted = append(sorted, url)
			}
			sort.Strings(sorted)
			if err := writer.Write([]string{"Issue", "Opened", "Closed", "Number of Comments"}); err != nil {
				return err
			}
			var opened, closed, comments int
			for _, url := range sorted {
				data := stats[url]
				if data.opened {
					opened++
				}
				if data.closed {
					closed++
				}
				comments += data.comments
				if err := writer.Write([]string{
					url,
					strconv.FormatBool(data.opened),
					strconv.FormatBool(data.closed),
					fmt.Sprintf("%v", data.comments),
				}); err != nil {
					return err
				}
			}
			return writer.Write([]string{
				fmt.Sprintf("%v", len(stats)),
				fmt.Sprintf("%v", opened),
				fmt.Sprintf("%v", closed),
				fmt.Sprintf("%v", comments),
			})
		},
		// PRs the user authored since the cutoff.
		"github-prs-authored": func(writer *csv.Writer) error {
			if err := writer.Write([]string{"Repo", "URL"}); err != nil {
				return err
			}
			var total int
			for _, url := range sortedPRs {
				data := stats[url]
				// Skip any CLs reviewed.
				if !data.opened {
					continue
				}
				total++
				if err := writer.Write([]string{
					fmt.Sprintf("%v/%v", data.org, data.repo),
					url,
				}); err != nil {
					return err
				}
			}
			return writer.Write([]string{
				"Total",
				fmt.Sprintf("%v", total),
			})
		},
		// PRs the user was involved in but did not author.
		"github-prs-reviewed": func(writer *csv.Writer) error {
			if err := writer.Write([]string{"Repo", "URL", "Closed", "Number of comments"}); err != nil {
				return err
			}
			var total, closed, comments int
			for _, url := range sortedPRs {
				data := stats[url]
				// Skip any CLs authored.
				if data.opened {
					continue
				}
				if data.closed {
					closed++
				}
				comments += data.comments
				total++
				if err := writer.Write([]string{
					fmt.Sprintf("%v/%v", data.org, data.repo),
					url,
					strconv.FormatBool(data.closed),
					fmt.Sprintf("%v", data.comments),
				}); err != nil {
					return err
				}
			}
			return writer.Write([]string{
				"Total",
				fmt.Sprintf("%v", total),
				fmt.Sprintf("%v", closed),
				fmt.Sprintf("%v", comments),
			})
		},
	}, nil
}
|
package nes
import (
"encoding/binary"
"errors"
"io"
"os"
)
// iNESFileMagic is the little-endian encoding of "NES\x1a", the signature
// that opens every iNES file.
const iNESFileMagic = 0x1a53454e

// iNESFileHeader is the fixed 16-byte header at the start of an iNES file,
// read directly via binary.Read.
type iNESFileHeader struct {
	Magic uint32 // iNES magic number
	NumPRG byte // number of PRG-ROM banks (16KB each)
	NumCHR byte // number of CHR-ROM banks (8KB each)
	Control1 byte // control bits
	Control2 byte // control bits
	NumRAM byte // PRG-RAM size (x 8KB)
	_ [7]byte // unused padding
}
// LoadNESFile reads an iNES file (.nes) and returns a Cartridge on success.
// http://wiki.nesdev.com/w/index.php/INES
// http://nesdev.com/NESDoc.pdf (page 28)
func LoadNESFile(path string) (*Cartridge, error) {
	// open file
	file, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	// read the fixed 16-byte header
	header := iNESFileHeader{}
	if err := binary.Read(file, binary.LittleEndian, &header); err != nil {
		return nil, err
	}
	// verify header magic number ("NES\x1a")
	if header.Magic != iNESFileMagic {
		return nil, errors.New("invalid .nes file")
	}
	// mapper type: low nibble in Control1 bits 4-7, high nibble in
	// Control2 bits 4-7
	mapper1 := header.Control1 >> 4
	mapper2 := header.Control2 >> 4
	mapper := mapper1 | mapper2<<4
	// mirroring type: bit 0 (vertical/horizontal) plus bit 3 (four-screen)
	mirror1 := header.Control1 & 1
	mirror2 := (header.Control1 >> 3) & 1
	mirror := mirror1 | mirror2<<1
	// battery-backed RAM flag (Control1 bit 1)
	battery := (header.Control1 >> 1) & 1
	// skip the 512-byte trainer if present (unused by this emulator)
	if header.Control1&4 == 4 {
		trainer := make([]byte, 512)
		if _, err := io.ReadFull(file, trainer); err != nil {
			return nil, err
		}
	}
	// read prg-rom bank(s), 16KB per bank
	prg := make([]byte, int(header.NumPRG)*16384)
	if _, err := io.ReadFull(file, prg); err != nil {
		return nil, err
	}
	// read chr-rom bank(s), 8KB per bank
	chr := make([]byte, int(header.NumCHR)*8192)
	if _, err := io.ReadFull(file, chr); err != nil {
		return nil, err
	}
	// provide one 8KB bank of chr-ram when the file ships none
	if header.NumCHR == 0 {
		chr = make([]byte, 8192)
	}
	// success
	return NewCartridge(prg, chr, mapper, mirror, battery), nil
}
|
package main
import (
	"database/sql"
	"fmt"
	"log"

	"librarymanager/users/common"
	"librarymanager/users/controllers"
	"librarymanager/users/domain"
	"librarymanager/users/services"

	"github.com/gin-contrib/cors"
	"github.com/gin-gonic/gin"
	_ "github.com/mattn/go-sqlite3"
)
// main wires up the users repository/service/controller stack and serves
// the HTTP API on :3000.
func main() {
	fmt.Println("Users process")

	broker := common.NewBroker()

	// Fail fast if the database cannot be opened; the error was previously
	// discarded with _, deferring the failure to the first query.
	database, err := sql.Open("sqlite3", "./data/tmp.db")
	if err != nil {
		log.Fatal(err)
	}

	repository := domain.NewUsersRepository(database)
	usersService := services.NewUsersService(repository, broker)

	repository.Initialize()
	usersService.Subscriptions()

	router := gin.Default()

	// Allow all origins and expose the Authorization header for the SPA.
	config := cors.DefaultConfig()
	config.AllowAllOrigins = true
	config.AddAllowHeaders("Authorization", "Access-Control-Allow-Headers")
	config.AddExposeHeaders("Authorization")
	router.Use(cors.New(config))

	// Liveness probe.
	router.GET("/api/usersping", func(c *gin.Context) {
		c.JSON(200, gin.H{
			"message": "pong",
		})
	})

	usersController := controllers.NewUsersController(usersService)
	apiRoutes := controllers.MapUrls(router, usersController)
	apiRoutes.Use(cors.New(config))

	// Run blocks; report the reason if the server ever stops.
	if err := router.Run(":3000"); err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
"flag"
"fmt"
"log"
"os"
"path/filepath"
"strings"
"task/defs"
"task/utils"
"time"
)
// Command-line configuration, registered as flags in init and parsed in main.
var (
	help bool // print usage and exit
	env string // pod environment name, e.g. "env6"
	podBaseDir string // pods' home directory, e.g. "/mnt/paas/kubernetes/kubelet/pods/"
	backupDestBaseDir string // backup destination root, e.g. "/tmp/pods/"
	restApiUrl string // REST endpoint serving pod metadata, e.g. "http://192.168.250.22:32598"
	expired int // age in days after which a log file is backed up
	logDestDir string // directory for this tool's own log files, e.g. "/tmp/backup-task-log"
)
// init registers the command-line flags and installs the custom usage text.
func init() {
	flag.BoolVar(&help, "help", false, "backup help usage")
	flag.StringVar(&env, "env", "env6", "pod's env")
	flag.StringVar(&podBaseDir, "src", "/mnt/paas/kubernetes/kubelet/pods/", "pod's home directory")
	flag.StringVar(&backupDestBaseDir, "dst", "/tmp/pods/", "backup destination directory")
	//flag.StringVar(&restApiUrl, "url", "http://192.32.14.181:30954/","url which query pod's metadata")
	flag.StringVar(&restApiUrl, "url", "http://192.168.250.22:32598/", "url which query pod's metadata")
	flag.IntVar(&expired, "expired", 7, "expired time, day")
	flag.StringVar(&logDestDir, "log", "/tmp/backup-task-log", "log file directory")
	flag.Usage = usage
}
// archiveLog holds one backup run's configuration (copied from the parsed
// command-line flags).
type archiveLog struct {
	env string // pod environment name
	podBaseDir string // directory walked for candidate log files
	backupDestDir string // root of the backup destination
	checkExpired int // age in days before a file qualifies for backup
	restApiUrl string // endpoint used to resolve pod metadata
	logDestDir string // directory for this tool's own log files
}
// backup walks the pod base directory, copies expired pod log files to the
// backup destination (laid out as /env/namespace/deploy/rs/pod/), deletes
// the local originals, and records the outcome in a daily log file under
// logDestDir.
func (a *archiveLog) backup() {
	// Ensure the log directory exists, creating it if necessary.
	logDirExists, err := utils.CheckExists(a.logDestDir)
	if err != nil {
		log.Fatal(err)
	}
	if !logDirExists {
		if err := os.MkdirAll(a.logDestDir, os.ModePerm); err != nil {
			log.Fatal(err)
		}
	}
	// One log file per day, e.g. backup-2006-01-02.log. logDestDir is
	// normalized to end with a separator before this method runs.
	dayFormat := time.Now().Format("2006-01-02")
	logFileName := a.logDestDir + "backup-" + dayFormat + ".log"
	// O_CREATE creates the file when missing, so the previous separate
	// exists-check/Create step was redundant and has been removed.
	f, err := os.OpenFile(logFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		// Previously this error was never checked, so a failed open crashed
		// on the deferred f.Close() with a nil pointer.
		log.Fatal(err)
	}
	defer f.Close()
	log.SetOutput(f)
	// Walk the pod base directory for files that qualify for backup:
	// regular files under a "kubernetes.io~empty-dir" volume whose name
	// contains a ".20xx" date fragment and which end with ".log".
	// For each expired file: resolve the pod ID from its path, ask the REST
	// API for namespace/deploy/rs/pod to build the destination directory,
	// copy the file there (NFS), then delete the local original.
	var backupResult []string
	_ = filepath.Walk(a.podBaseDir,
		func(path string, info os.FileInfo, err error) error {
			if err != nil {
				return err
			}
			fileName := info.Name()
			if !info.IsDir() &&
				strings.Contains(path, defs.EmptyDirName) &&
				strings.Contains(fileName, ".20") &&
				utils.CheckEndWithDotLog(path) {
				// Only files older than the configured expiry are backed up.
				if utils.IsNeedBackup(fileName, a.checkExpired) {
					podId := utils.FetchPodIdByPath(a.podBaseDir, path)
					destPath, err := utils.FetchDestPathByEnvAndPodId(a.env, podId, a.restApiUrl, a.backupDestDir)
					if err != nil {
						log.Printf("通过env: %v和pod_id: %v调用api: %v 获取信息失败:%v\n", a.env, podId, a.restApiUrl+defs.UrlSuffix, err)
						return err
					}
					// Create the destination directory if it does not exist.
					exists, err := utils.CheckExists(destPath)
					if err != nil {
						log.Printf("检查备份目标路径是否存在时,报错:%v\n", err)
						return err
					}
					if !exists {
						if err := os.MkdirAll(destPath, os.ModePerm); err != nil {
							log.Printf("创建备份文件夹: %v 失败:%v\n", destPath, err)
							return err
						}
					}
					// Copy to the backup destination, then remove the local
					// original only after the copy succeeded.
					if _, err := utils.CopyFile(path, utils.PathWrapper(destPath)+fileName); err != nil {
						log.Printf("执行备份报错: %v\n", err)
						return err
					}
					if err := os.Remove(path); err != nil {
						log.Printf("删除节点的过期文件报错: %v\n", err)
						return err
					}
					backupResult = append(backupResult, path)
				}
			}
			return nil
		})
	// Summarize the run in the daily log file.
	if len(backupResult) == 0 {
		log.Println("本次任务,没有找到符合备份条件的文件!")
	} else {
		log.Printf("备份了%v个符合条件的文件:\n", len(backupResult))
		for _, v := range backupResult {
			log.Println(v)
		}
	}
}
// newArchiveLog bundles one backup run's configuration into an archiveLog.
func newArchiveLog(env string, podDir string, backupDestDir string, expired int, restApiUrl string, logDir string) *archiveLog {
	a := archiveLog{
		env:           env,
		podBaseDir:    podDir,
		backupDestDir: backupDestDir,
		checkExpired:  expired,
		restApiUrl:    restApiUrl,
		logDestDir:    logDir,
	}
	return &a
}
// main parses the flags and either prints usage or runs one backup pass.
func main() {
	flag.Parse()
	if help {
		flag.Usage()
		return
	}
	// Normalize every directory/URL to end with a single separator.
	podBaseDir = utils.PathWrapper(podBaseDir)
	backupDestBaseDir = utils.PathWrapper(backupDestBaseDir)
	logDestDir = utils.PathWrapper(logDestDir)
	restApiUrl = utils.PathWrapper(restApiUrl)
	newArchiveLog(env, podBaseDir, backupDestBaseDir, expired, restApiUrl, logDestDir).backup()
}
// usage prints the version banner followed by the registered flag defaults.
func usage() {
	_, _ = fmt.Fprint(os.Stderr, "backup version: 1.0\nOptions:\n")
	flag.PrintDefaults()
}
|
// Copyright 2019 Radiation Detection and Imaging (RDI), LLC
// Use of this source code is governed by the BSD 3-clause
// license that can be found in the LICENSE file.
package data
import (
"math"
"github.com/rditech/rdi-live/model/rdi/currentmode"
"github.com/proio-org/go-proio"
)
// Pedestals maintains a per-axis, per-channel running pedestal (baseline)
// estimate and subtracts it from incoming frames.
type Pedestals struct {
	Alpha float64 // EWMA update rate for the pedestal estimate; 0 means the 0.0001 default
	CovFrac float64 // correlation fraction defining the update threshold; 0 means the 0.1 default
	values [][]float64 // running pedestal estimate indexed [axis][channel]
}
func (p *Pedestals) Subtract(input <-chan *proio.Event, output chan<- *proio.Event) {
if p.Alpha == 0 {
p.Alpha = 0.0001
}
inv_alpha := 1 - p.Alpha
if p.CovFrac == 0 {
p.CovFrac = 0.1
}
covFrac2 := p.CovFrac * p.CovFrac
for event := range input {
rawFrameIds := event.TaggedEntries("Frame")
mappedFrameIds := event.TaggedEntries("Mapped")
if len(mappedFrameIds) != len(rawFrameIds) {
continue
}
for i, entryId := range mappedFrameIds {
frame, ok := event.GetEntry(entryId).(*currentmode.Frame)
if !ok {
continue
}
rawFrame, ok := event.GetEntry(rawFrameIds[i]).(*currentmode.Frame)
// if the axis offsets already exist in the stream, assume that
// they were taken care of in the detector mapping, and do nothing
if rawFrame.AxisOffsets != nil {
continue
}
nSamples := len(frame.Sample)
if nSamples == 0 {
continue
}
nAxes := len(frame.Sample[0].Axis)
thres := float32(math.Pow(covFrac2, float64(nAxes*(nAxes-1)/2)))
for sampleNum, sample := range frame.Sample {
for i, axis := range sample.Axis {
axis.Sum = 0
if len(p.values) <= i {
p.values = append(p.values, make([]float64, 0))
}
for j, val := range axis.FloatChannel {
if len(p.values[i]) <= j {
p.values[i] = append(p.values[i], 0)
}
if frame.Correlation < thres {
p.values[i][j] *= inv_alpha
p.values[i][j] += p.Alpha * float64(val)
}
axis.FloatChannel[j] -= float32(p.values[i][j])
axis.Sum += axis.FloatChannel[j]
}
}
if sampleNum == 0 {
rawFrame.AxisOffsets = make([]*currentmode.AxisSample, len(sample.Axis))
for i := 0; i < len(rawFrame.AxisOffsets); i++ {
axis := ¤tmode.AxisSample{}
rawFrame.AxisOffsets[i] = axis
axis.FloatChannel = make([]float32, len(sample.Axis[i].FloatChannel))
for j := range axis.FloatChannel {
axis.FloatChannel[j] = float32(p.values[i][j])
}
}
}
}
}
output <- event
}
}
|
package fateRPGtest
import (
"testing"
"github.com/faterpg"
)
// TestNewPlayer verifies that faterpg.NewPlayer returns a non-nil player.
func TestNewPlayer(t *testing.T) {
	// The previous version constructed a GM via NewGM, so the function this
	// test is named after was never exercised; the message also had a typo.
	player := faterpg.NewPlayer()
	if player == nil {
		t.Error("NewPlayer return nil")
	}
}
// TestPlayerAttr verifies that a player's attributes can be set and read
// back.
func TestPlayerAttr(t *testing.T) {
	player := faterpg.NewPlayer()
	player.Name = "Test name"
	pc := faterpg.NewPC()
	player.PC = pc
	// The previous version asserted nothing, so it could never fail.
	if player.Name != "Test name" {
		t.Errorf("Player.Name not stored: got %q", player.Name)
	}
}
// TestNewNamedPlayer verifies that NewNamedPlayer stores the given name.
func TestNewNamedPlayer(t *testing.T) {
	const name = "Test name"
	player := faterpg.NewNamedPlayer(name)
	if player.Name != name {
		t.Errorf("NewNamedPlayer error Wrong name\n %s != %s\n", player.Name, name)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.