text stringlengths 11 4.05M |
|---|
package mychannel
import (
"fmt"
"testing"
"time"
)
// pass receives one value from right, increments it, and forwards the
// result to left. Both operations block until a peer is ready.
func pass(left, right chan int) {
	v := <-right
	left <- v + 1
}
// TestMyChannel builds a chain of n unbuffered channels linked by `pass`
// goroutines, injects 1 at the far end, and prints the value after each of
// the n hops has added 1 (so it prints n+1 = 51).
func TestMyChannel(t *testing.T) {
	const n = 50
	leftmost := make(chan int)
	right := leftmost
	left := leftmost
	for i := 0; i < n; i++ {
		right = make(chan int)
		// the chain is constructed from the end
		go pass(left, right) // the first goroutine holds (leftmost, new chan)
		left = right         // the second and following goroutines hold (last right chan, new chan)
	}
	// Inject the seed value into the rightmost end of the chain.
	go func(c chan int) { c <- 1 }(right)
	fmt.Println("sum:", <-leftmost)
}
// TestForRange demonstrates ranging over a channel: the producer sends
// 0..9 at 500ms intervals and then closes c, which terminates the range
// loop on the consumer side.
// NOTE(review): the first goroutine sleeps for an hour and is simply
// leaked; it only exits because the test binary terminates.
func TestForRange(t *testing.T) {
	go func() {
		time.Sleep(1 * time.Hour)
	}()
	c := make(chan int)
	go func() {
		for i := 0; i < 10; i = i + 1 {
			c <- i
			time.Sleep(time.Millisecond * 500)
		}
		// Closing c ends the range loop below.
		close(c)
	}()
	for i := range c {
		fmt.Println(i)
	}
	fmt.Println("Finished")
}
// sum totals the values of s and delivers the result on c.
func sum(s []int, c chan int) {
	total := 0
	for _, n := range s {
		total += n
	}
	c <- total // send sum to c
}
// TestSum splits a slice in two, sums each half in its own goroutine, and
// receives both partial sums from the shared channel (arrival order is not
// deterministic).
func TestSum(t *testing.T) {
	s := []int{7, 2, 8, -9, 4, 0}
	c := make(chan int)
	go sum(s[:len(s)/2], c)
	go sum(s[len(s)/2:], c)
	x, y := <-c, <-c // receive from c
	fmt.Println(x, y, x+y)
}
// fibonacci streams successive Fibonacci numbers on c until any value
// arrives on quit, then prints "quit" and returns.
func fibonacci(c, quit chan int) {
	a, b := 0, 1
	for {
		select {
		case c <- a:
			a, b = b, a+b
		case <-quit:
			fmt.Println("quit")
			return
		}
	}
}
// TestSelect consumes ten Fibonacci numbers in a goroutine, then signals
// quit so the fibonacci generator (running on the test goroutine) returns.
func TestSelect(t *testing.T) {
	c := make(chan int)
	quit := make(chan int)
	go func() {
		for i := 0; i < 10; i++ {
			fmt.Println(<-c)
		}
		quit <- 0
	}()
	fibonacci(c, quit)
}
// TestTimeout races a 2-second worker against a 1-second timeout and
// prints "timeout 1". Because c1 has a buffer of 1, the worker's late send
// still completes and the goroutine is not leaked.
func TestTimeout(t *testing.T) {
	c1 := make(chan string, 1)
	go func() {
		time.Sleep(time.Second * 2)
		c1 <- "result 1"
	}()
	select {
	case res := <-c1:
		fmt.Println(res)
	case <-time.After(time.Second * 1):
		fmt.Println("timeout 1")
	}
}
// TestTimer waits for a 2-second timer, then creates a 1-second timer and
// stops it before it fires (Stop returns true when it wins the race).
// NOTE(review): when Stop succeeds, the goroutine blocked on timer2.C never
// receives and is leaked until the test binary exits.
func TestTimer(t *testing.T) {
	timer1 := time.NewTimer(time.Second * 2)
	<-timer1.C
	fmt.Println("Timer 1 expired")
	timer2 := time.NewTimer(time.Second)
	go func() {
		<-timer2.C
		fmt.Println("Timer 2 expired")
	}()
	stop2 := timer2.Stop()
	if stop2 {
		fmt.Println("Timer 2 stopped")
	}
}
// TestTicker demonstrates time.Ticker by printing a few ticks.
//
// BUG FIX: the original ranged over ticker.C, which is never closed, so the
// range loop never terminated, `quit <- 0` was never reached, and `<-quit`
// deadlocked the test forever. We now receive a bounded number of ticks and
// stop the ticker to release its resources.
func TestTicker(t *testing.T) {
	ticker := time.NewTicker(time.Millisecond * 500)
	defer ticker.Stop() // Stop does not close ticker.C; it only stops ticks.
	quit := make(chan int)
	go func() {
		for i := 0; i < 3; i++ {
			tick := <-ticker.C
			fmt.Println("Tick at", tick)
		}
		quit <- 0
	}()
	<-quit
}
// TestClose shows that buffered sends before close are fine, while sending
// on a closed channel panics (see the commented line).
// NOTE(review): the hour-long sleeper goroutine is leaked intentionally.
func TestClose(t *testing.T) {
	go func() {
		time.Sleep(time.Hour)
	}()
	c := make(chan int, 10)
	c <- 1
	c <- 2
	close(c)
	//c <- 3 //panic send on closed channel:
}
// TestClose2 shows that receiving from a closed channel first drains the
// buffered values (1, 2) and then yields the zero value (0) forever.
func TestClose2(t *testing.T) {
	c := make(chan int, 10)
	c <- 1
	c <- 2
	close(c)
	fmt.Println(<-c) //1
	fmt.Println(<-c) //2
	fmt.Println(<-c) //0
	fmt.Println(<-c) //0
}
// TestClose3 shows that ranging over a closed buffered channel drains the
// buffered values and then terminates the loop.
func TestClose3(t *testing.T) {
	c := make(chan int, 10)
	c <- 1
	c <- 2
	close(c)
	for {
		v, ok := <-c
		if !ok {
			break
		}
		fmt.Println(v) //1,2
	}
}
// worker simulates one second of work, then signals completion on done.
func worker(done chan bool) {
	time.Sleep(time.Second)
	// notify that the task has finished
	done <- true
}
// TestSync synchronizes with a worker goroutine via a channel receive.
func TestSync(t *testing.T) {
	done := make(chan bool, 1)
	go worker(done)
	// wait for the task to finish
	<-done
}
// fn1 is a placeholder consumer of the error channel; it currently does nothing.
func fn1(errorC <-chan error) {
}
// fn2 simulates a second of work (ignoring the error channel) and then
// signals completion on done.
func fn2(errorC <-chan error, done chan bool) {
	time.Sleep(time.Second)
	done <- true
}
// TestError wires fn1 and fn2 to shared channels and waits for fn2's
// completion signal. Nothing is ever sent on errorC.
func TestError(t *testing.T) {
	errorC := make(chan error)
	done := make(chan bool)
	go fn1(errorC)
	go fn2(errorC, done)
	<-done
}
// TestMemLost demonstrates a goroutine leak: doWork(nil) ranges over a nil
// channel, which blocks forever, so none of the 1,000,000 goroutines (or
// their `completed` channels) are ever released.
func TestMemLost(t *testing.T) {
	doWork := func(strings <-chan string) <-chan interface{} {
		completed := make(chan interface{})
		go func() {
			defer fmt.Println("doWork exited.")
			defer close(completed)
			// Ranging over a nil channel blocks forever.
			for s := range strings {
				// do something interesting
				fmt.Println(s)
			}
			fmt.Println(11)
		}()
		return completed
	}
	for i := 0; i < 1000000; i++ {
		doWork(nil)
	}
	// perhaps other operations need to run here
}
package main
import (
"fmt"
. "leetcode"
)
// main runs deleteNode against a fresh 4 -> 5 -> 1 -> 9 list for three
// different target values and prints each resulting list.
func main() {
	fmt.Println(deleteNode(NewListNode(4, 5, 1, 9), 4))
	fmt.Println(deleteNode(NewListNode(4, 5, 1, 9), 5))
	fmt.Println(deleteNode(NewListNode(4, 5, 1, 9), 1))
	// 4 -> 5 -> 1 -> 9
}
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// deleteNode removes the first node whose Val equals val and returns the
// (possibly new) head of the list. The list is left unchanged when no node
// matches.
func deleteNode(head *ListNode, val int) *ListNode {
	if head == nil {
		return head
	}
	if head.Val == val {
		return head.Next
	}
	// Walk to the node just before the first match, if any.
	prev := head
	for prev.Next != nil && prev.Next.Val != val {
		prev = prev.Next
	}
	if prev.Next != nil {
		prev.Next = prev.Next.Next
	}
	return head
}
|
// Package gitlab - user
package gitlab
import (
"context"
"encoding/json"
"fmt"
"sync"
)
// User entity
// User entity as returned by the GitLab users API.
type User struct {
	ID          int    `json:"id"`
	Name        string `json:"name"`
	UserName    string `json:"username"`
	PublicEmail string `json:"public_email"`
}
// getUsersByIDs fetches the given users concurrently, at most c.concurrency
// requests in flight at once. On the first error it cancels the remaining
// requests, waits for all workers to finish, and returns the error.
//
// BUG FIXES vs. the previous version:
//   - `users` was appended to from multiple goroutines without a lock (data
//     race); it is now guarded by a mutex.
//   - errChan was buffered to c.concurrency but up to len(ids) workers can
//     fail after cancellation; blocked senders never reached wg.Done, so
//     <-wgChan could deadlock. The buffer now fits every worker.
//   - `defer close(errChan)`/`defer close(semaphore)` were removed: workers
//     may still send on errChan after the early error return, which would
//     panic on a closed channel. Neither channel needs closing.
func getUsersByIDs(parentCtx context.Context, c *client, ids []int) ([]User, error) {
	ctx, cancelFunc := context.WithCancel(parentCtx)
	defer cancelFunc()

	var mu sync.Mutex
	users := make([]User, 0, len(ids))

	var wg sync.WaitGroup
	wg.Add(len(ids))
	wgChan := make(chan struct{})
	go func() {
		wg.Wait()
		close(wgChan)
	}()

	semaphore := make(chan struct{}, c.concurrency)
	// Large enough that no worker can ever block on send.
	errChan := make(chan error, len(ids))
	for _, id := range ids {
		semaphore <- struct{}{}
		go func(id int) {
			defer func() {
				<-semaphore
				wg.Done()
			}()
			if user, err := c.GetUserByID(ctx, id); err != nil {
				errChan <- err
			} else {
				mu.Lock()
				users = append(users, user)
				mu.Unlock()
			}
		}(id)
	}

	select {
	case err := <-errChan:
		cancelFunc()
		<-wgChan // wait for every worker before returning
		return nil, fmt.Errorf("can't get users from gitlab: %w", err)
	case <-wgChan:
		return users, nil
	}
}
// getUserByID fetches a single user from the GitLab API and decodes the
// JSON response into a User.
func getUserByID(ctx context.Context, c *client, userID int) (User, error) {
	body, err := c.get(ctx, fmt.Sprintf("/users/%d", userID))
	if err != nil {
		return User{}, err
	}
	var u User
	if err := json.Unmarshal(body, &u); err != nil {
		return User{}, fmt.Errorf("can't unmarshal user data: %w", err)
	}
	return u, nil
}
|
package mosquito
import (
"net/http"
)
// Request wraps *http.Request with an extra Params map.
// NOTE(review): Params presumably holds named route parameters extracted by
// the router — confirm against the router code.
type Request struct {
	*http.Request
	Params map[string]string
}
|
package historian
import (
"errors"
"github.com/fuserobotics/historian/dbproto"
"github.com/fuserobotics/reporter/remote"
"github.com/fuserobotics/statestream"
r "gopkg.in/dancannon/gorethink.v2"
)
const streamTableName string = "streams"
// Wrapper for response from RethinkDB with stream change
type streamChange struct {
	NewValue *dbproto.Stream `gorethink:"new_val,omitempty"`
	OldValue *dbproto.Stream `gorethink:"old_val,omitempty"`
	State    string          `gorethink:"state,omitempty"`
}

// streamEntryChange is the changefeed wrapper for stream entries.
// NOTE(review): `stream` is presumably the package name of the
// statestream import — confirm.
type streamEntryChange struct {
	NewValue *stream.StreamEntry `gorethink:"new_val,omitempty"`
	OldValue *stream.StreamEntry `gorethink:"old_val,omitempty"`
	State    string              `gorethink:"state,omitempty"`
}
// Historian tracks streams stored in RethinkDB and caches loaded streams
// and remote stream configurations in memory.
type Historian struct {
	rctx    *r.Session // RethinkDB session
	dispose chan bool  // disposal signal channel (buffered, size 1)

	StreamsTable r.Term // handle to the "streams" table
	// Map of loaded streams
	Streams map[string]*Stream
	// Map of cached remote stream configs
	// Delete to invalidate one
	RemoteStreamConfigs map[string]*remote.RemoteStreamConfig
	// All known streams
	KnownStreams map[string]*dbproto.Stream
}
// NewHistorian constructs a Historian bound to the given RethinkDB session,
// with all caches initialized empty.
func NewHistorian(rctx *r.Session) *Historian {
	return &Historian{
		rctx:                rctx,
		dispose:             make(chan bool, 1),
		Streams:             make(map[string]*Stream),
		RemoteStreamConfigs: make(map[string]*remote.RemoteStreamConfig),
		KnownStreams:        make(map[string]*dbproto.Stream),
		StreamsTable:        r.Table(streamTableName),
	}
}
// GetStream returns a cached stream, or instantiates one from KnownStreams
// and caches it. An error is returned when the id is not known.
//
// FIX: error string follows Go convention (lowercase, no trailing period —
// staticcheck ST1005); unused named results dropped.
func (h *Historian) GetStream(id string) (*Stream, error) {
	if str, ok := h.Streams[id]; ok {
		return str, nil
	}
	data, ok := h.KnownStreams[id]
	if !ok {
		return nil, errors.New("stream not known")
	}
	str, err := h.NewStream(data)
	if err != nil {
		return nil, err
	}
	// Note: be sure to call Dispose() when deleting.
	h.Streams[id] = str
	return str, nil
}
// GetDeviceStreams lists every known stream whose DeviceHostname matches
// the given hostname.
func (h *Historian) GetDeviceStreams(hostname string) ([]*dbproto.Stream, error) {
	matched := []*dbproto.Stream{}
	for _, s := range h.KnownStreams {
		if s.DeviceHostname == hostname {
			matched = append(matched, s)
		}
	}
	return matched, nil
}
// BuildRemoteStreamConfig returns the cached remote stream config for
// hostname, building one from the device's known streams (and caching it)
// when absent.
func (h *Historian) BuildRemoteStreamConfig(hostname string) (*remote.RemoteStreamConfig, error) {
	if cached, ok := h.RemoteStreamConfigs[hostname]; ok {
		return cached, nil
	}
	streams, err := h.GetDeviceStreams(hostname)
	if err != nil {
		return nil, err
	}
	conf := &remote.RemoteStreamConfig{}
	for _, s := range streams {
		conf.Streams = append(conf.Streams, &remote.RemoteStreamConfig_Stream{
			ComponentId: s.ComponentName,
			StateId:     s.StateName,
		})
	}
	conf.FillCrc32()
	h.RemoteStreamConfigs[hostname] = conf
	return conf, nil
}
|
package memrepo
import (
"sort"
"github.com/scjalliance/drivestream"
"github.com/scjalliance/drivestream/resource"
)
// Compile-time check that Drives satisfies drivestream.DriveMap.
var _ drivestream.DriveMap = (*Drives)(nil)

// Drives accesses a map of drives in an in-memory repository.
type Drives struct {
	repo *Repository
}
// List returns the list of drives contained within the repository.
func (ref Drives) List() (ids []resource.ID, err error) {
for id := range ref.repo.drives {
ids = append(ids, id)
}
sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
return ids, nil
}
// Ref returns a drive reference.
func (ref Drives) Ref(driveID resource.ID) drivestream.DriveReference {
return Drive{
repo: ref.repo,
drive: driveID,
}
}
|
package pair
import (
"github.com/hnnyzyf/go-stl/container/value"
)
// Pair is a value.Value with an associated key; the concrete pair types in
// this package all compare by key.
type Pair interface {
	value.Value
	GetKey() interface{}
	GetValue() interface{}
}
// The Pair key could be a string, integer, float, rune and so on; the
// commonly used pair types are implemented below.

// StringPair is a Pair keyed by a string.
type StringPair struct {
	Key string
	Val interface{}
}

// String creates a StringPair from a key and a value.
func String(Key string, Val interface{}) *StringPair {
	return &StringPair{Key, Val}
}

// Less reports whether e's key sorts before Val's key.
// It panics when Val is not a *StringPair.
// (FIX: "shoud" typo in the panic message; else-after-panic removed.)
func (e *StringPair) Less(Val value.Value) bool {
	v, ok := Val.(*StringPair)
	if !ok {
		panic("StringPair should be compared with StringPair")
	}
	return e.Key < v.Key
}

// More reports whether e's key sorts after Val's key.
// It panics when Val is not a *StringPair.
func (e *StringPair) More(Val value.Value) bool {
	v, ok := Val.(*StringPair)
	if !ok {
		panic("StringPair should be compared with StringPair")
	}
	return e.Key > v.Key
}

// Equal reports whether the two pairs have equal keys.
// It panics when Val is not a *StringPair.
func (e *StringPair) Equal(Val value.Value) bool {
	v, ok := Val.(*StringPair)
	if !ok {
		panic("StringPair should be compared with StringPair")
	}
	return e.Key == v.Key
}

// GetKey returns the pair's key.
func (e *StringPair) GetKey() interface{} {
	return e.Key
}

// GetValue returns the pair's value.
func (e *StringPair) GetValue() interface{} {
	return e.Val
}
// Uint64Pair is a Pair keyed by a uint64.
type Uint64Pair struct {
	Key uint64
	Val interface{}
}

// Uint64 creates a Uint64Pair from a key and a value.
func Uint64(Key uint64, Val interface{}) *Uint64Pair {
	return &Uint64Pair{Key, Val}
}

// Less reports whether e's key is smaller than Val's key.
// It panics when Val is not a *Uint64Pair.
// (FIX: "shoud" typo in the panic message; else-after-panic removed.)
func (e *Uint64Pair) Less(Val value.Value) bool {
	v, ok := Val.(*Uint64Pair)
	if !ok {
		panic("Uint64Pair should be compared with Uint64Pair")
	}
	return e.Key < v.Key
}

// More reports whether e's key is larger than Val's key.
// It panics when Val is not a *Uint64Pair.
func (e *Uint64Pair) More(Val value.Value) bool {
	v, ok := Val.(*Uint64Pair)
	if !ok {
		panic("Uint64Pair should be compared with Uint64Pair")
	}
	return e.Key > v.Key
}

// Equal reports whether the two pairs have equal keys.
// It panics when Val is not a *Uint64Pair.
func (e *Uint64Pair) Equal(Val value.Value) bool {
	v, ok := Val.(*Uint64Pair)
	if !ok {
		panic("Uint64Pair should be compared with Uint64Pair")
	}
	return e.Key == v.Key
}

// GetKey returns the pair's key.
func (e *Uint64Pair) GetKey() interface{} {
	return e.Key
}

// GetValue returns the pair's value.
func (e *Uint64Pair) GetValue() interface{} {
	return e.Val
}
// IntPair is a Pair keyed by an int.
// (FIX: the previous header comment wrongly said "Uint64 Pair".)
type IntPair struct {
	Key int
	Val interface{}
}

// Int creates an IntPair from a key and a value.
func Int(Key int, Val interface{}) *IntPair {
	return &IntPair{Key, Val}
}

// Less reports whether e's key is smaller than Val's key.
// It panics when Val is not an *IntPair.
// (FIX: "shoud" typo in the panic message; else-after-panic removed.)
func (e *IntPair) Less(Val value.Value) bool {
	v, ok := Val.(*IntPair)
	if !ok {
		panic("IntPair should be compared with IntPair")
	}
	return e.Key < v.Key
}

// More reports whether e's key is larger than Val's key.
// It panics when Val is not an *IntPair.
func (e *IntPair) More(Val value.Value) bool {
	v, ok := Val.(*IntPair)
	if !ok {
		panic("IntPair should be compared with IntPair")
	}
	return e.Key > v.Key
}

// Equal reports whether the two pairs have equal keys.
// It panics when Val is not an *IntPair.
func (e *IntPair) Equal(Val value.Value) bool {
	v, ok := Val.(*IntPair)
	if !ok {
		panic("IntPair should be compared with IntPair")
	}
	return e.Key == v.Key
}

// GetKey returns the pair's key.
func (e *IntPair) GetKey() interface{} {
	return e.Key
}

// GetValue returns the pair's value.
func (e *IntPair) GetValue() interface{} {
	return e.Val
}
// Floate64Pair is a Pair keyed by a float64.
// NOTE(review): the type name is misspelled ("Floate64"), but it is exported
// and renaming would break callers, so it is kept.
type Floate64Pair struct {
	Key float64
	Val interface{}
}

// Float64 creates a Floate64Pair from a key and a value.
func Float64(Key float64, Val interface{}) *Floate64Pair {
	return &Floate64Pair{Key, Val}
}

// Less reports whether e's key is smaller than Val's key.
// It panics when Val is not a *Floate64Pair.
// (FIX: "shoud" typo in the panic message; else-after-panic removed.)
func (e *Floate64Pair) Less(Val value.Value) bool {
	v, ok := Val.(*Floate64Pair)
	if !ok {
		panic("Floate64Pair should be compared with Floate64Pair")
	}
	return e.Key < v.Key
}

// More reports whether e's key is larger than Val's key.
// It panics when Val is not a *Floate64Pair.
func (e *Floate64Pair) More(Val value.Value) bool {
	v, ok := Val.(*Floate64Pair)
	if !ok {
		panic("Floate64Pair should be compared with Floate64Pair")
	}
	return e.Key > v.Key
}

// Equal reports whether the two pairs have equal keys.
// It panics when Val is not a *Floate64Pair.
func (e *Floate64Pair) Equal(Val value.Value) bool {
	v, ok := Val.(*Floate64Pair)
	if !ok {
		panic("Floate64Pair should be compared with Floate64Pair")
	}
	return e.Key == v.Key
}

// GetKey returns the pair's key.
func (e *Floate64Pair) GetKey() interface{} {
	return e.Key
}

// GetValue returns the pair's value.
func (e *Floate64Pair) GetValue() interface{} {
	return e.Val
}
// RunePair is a Pair keyed by a rune.
type RunePair struct {
	Key rune
	Val interface{}
}

// Rune creates a RunePair from a key and a value.
func Rune(Key rune, Val interface{}) *RunePair {
	return &RunePair{Key, Val}
}

// Less reports whether e's key is smaller than Val's key.
// It panics when Val is not a *RunePair.
// (FIX: "shoud" typo in the panic message; else-after-panic removed.)
func (e *RunePair) Less(Val value.Value) bool {
	v, ok := Val.(*RunePair)
	if !ok {
		panic("RunePair should be compared with RunePair")
	}
	return e.Key < v.Key
}

// More reports whether e's key is larger than Val's key.
// It panics when Val is not a *RunePair.
func (e *RunePair) More(Val value.Value) bool {
	v, ok := Val.(*RunePair)
	if !ok {
		panic("RunePair should be compared with RunePair")
	}
	return e.Key > v.Key
}

// Equal reports whether the two pairs have equal keys.
// It panics when Val is not a *RunePair.
func (e *RunePair) Equal(Val value.Value) bool {
	v, ok := Val.(*RunePair)
	if !ok {
		panic("RunePair should be compared with RunePair")
	}
	return e.Key == v.Key
}

// GetKey returns the pair's key.
func (e *RunePair) GetKey() interface{} {
	return e.Key
}

// GetValue returns the pair's value.
func (e *RunePair) GetValue() interface{} {
	return e.Val
}
|
package main
import "fmt"
// idx_max bounds the fixed-size inventory arrays.
const idx_max = 500

// barang_mentah is presumably a raw material: name, weight, price, and the
// product it becomes.
type barang_mentah struct {
	nama         string
	berat        float64
	harga        int
	barang_hasil string
}

// barang_jadi is presumably a finished good: name, weight, price, and the
// source material.
type barang_jadi struct {
	nama        string
	berat       float64
	harga       int
	asal_barang string
}
// main seeds two inventory arrays and loops over menu() until the user
// enters 0.
// NOTE(review): input_user is read exactly once before the loop; it never
// changes inside the loop, so any non-zero input spins menu() forever.
// A fmt.Scan inside the loop is presumably missing.
func main() {
	var array_mentah [idx_max]barang_mentah
	var array_jadi [idx_max]barang_jadi
	var input_user int
	array_mentah[1].nama = "bawang"
	array_jadi[1].nama = "nasi goreng"
	fmt.Scan(&input_user)
	for input_user != 0000 {
		menu()
	}
}
// menu prints the menu options and dispatches on the user's choice.
// NOTE(review): input_user is never read from stdin here, so it is always 0
// and no branch is ever taken — a fmt.Scan(&input_user) looks missing.
// NOTE(review): `tampil_barang(&barang_jadi)` takes the address of a *type*,
// which does not compile; a variable (e.g. main's array_jadi) was
// presumably intended.
func menu() {
	var input_user int
	fmt.Println("1. Tampilkan Barang")
	fmt.Println("2. Cari Barang")
	fmt.Println("3. Edit Barang")
	fmt.Println("4. Hapus Barang")
	if input_user == 1 {
		tampil_barang(&barang_jadi)
	}
}
// tampil_barang is intended to display items and offer sorting.
// NOTE(review): several problems —
//   - i is never incremented, so the loop never terminates;
//   - tab is *barang_mentah (a single struct), so *tab[i] does not compile;
//     an array pointer and (*tab)[i] were presumably intended;
//   - input_user is never read, so neither sort branch can run;
//   - the sort calls pass addresses of *types*, which does not compile.
func tampil_barang(tab *barang_mentah) {
	var i int
	var input_user int
	for i <= idx_max {
		fmt.Println(i, ". ", *tab[i])
		if input_user == 1 {
			sorting_mentah(&barang_mentah)
		} else if input_user == 2 {
			sorting_jadi(&barang_jadi)
		}
	}
}
// sorting_mentah is intended to be a selection sort over raw materials.
// NOTE(review): this does not compile — tab is *barang_mentah (one struct),
// so len(*tab) and (*tab)[j] are invalid; *[idx_max]barang_mentah was
// presumably intended.
// NOTE(review): min_index is reset to 1 on every pass; selection sort
// requires min_index = i.
// NOTE(review): whole structs are compared with '>', which Go does not
// define; a field (e.g. harga) must be chosen.
func sorting_mentah(tab *barang_mentah) {
	for i := 0; i < len(*tab); i++ {
		min_index := 1
		for j := i + 1; j < len(*tab); j++ {
			if (*tab)[min_index] > (*tab)[j] {
				min_index = j
			}
		}
		tmp := (*tab)[i]
		(*tab)[i] = (*tab)[min_index]
		(*tab)[min_index] = tmp
	}
}
// sorting_jadi is intended to be a selection sort over finished goods.
// NOTE(review): this does not compile — `array_mentah` is a variable in
// main, not a type, so `tab *array_mentah` is invalid; the comparison also
// mixes (*tab)[min_index] with the unrelated array_mentah variable.
// NOTE(review): the swap uses the *outer* j (always its zero value, since
// the inner loop shadows it) and `*tab = *tab[min_index]` is not a valid
// element assignment — the i/min_index swap from sorting_mentah was
// presumably intended.
func sorting_jadi(tab *array_mentah) {
	var min_index int
	var j int
	for i := 0; i < len(*tab); i++ {
		min_index = 1
		for j := i + 1; j < len(*tab); j++ {
			if (*tab)[min_index] > array_mentah[j] {
				min_index = j
			}
		}
		tmp := (*tab)[j]
		*tab = *tab[min_index]
		(*tab)[min_index] = tmp
	}
}
|
// 24. Create the MT19937 stream cipher and break it
package main
import (
"bytes"
"crypto/cipher"
"errors"
"fmt"
"os"
"time"
)
// MT19937 (32-bit) parameters.
const (
	arraySize   = 624        // words in the state vector (n)
	offset      = 397        // middle-word offset (m)
	multiplier  = 1812433253 // seed-initialization multiplier
	upperMask   = 0x80000000 // most significant bit of a word
	lowerMask   = 0x7fffffff // lower 31 bits of a word
	coefficient = 0x9908b0df // twist matrix coefficient (a)
	temperMask1 = 0x9d2c5680 // tempering mask b
	temperMask2 = 0xefc60000 // tempering mask c
)
// main demonstrates breaking the MT19937 stream cipher: it encrypts a known
// plaintext under a 16-bit time-derived seed, brute-forces the seed back,
// and shows that a time-seeded password-reset token is detectable.
func main() {
	// Only the low 16 bits of the clock are kept, so the seed space is
	// small enough to brute-force.
	seed := uint16(time.Now().Unix() & 0xffff)
	stream := NewMTCipher(uint32(seed))
	plaintext := bytes.Repeat([]byte{'a'}, 14)
	ciphertext := encrypt(stream, plaintext)
	n, err := breakMTCipher(ciphertext, plaintext)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	if n == seed {
		fmt.Println("success: recovered 16-bit seed")
	}
	if isRecent(passwordToken()) {
		fmt.Println("token generated from recent timestamp")
	}
}
// encrypt returns an encrypted buffer prefixed with 5-10 random bytes.
func encrypt(stream cipher.Stream, buf []byte) []byte {
	prefix := MTBytes(int(MTInRange(5, 10)))
	out := append(prefix, buf...)
	stream.XORKeyStream(out, out)
	return out
}
// breakMTCipher brute-forces the 16-bit seed of an MT19937 stream cipher
// given a ciphertext with a random-length prefix and its known plaintext
// tail.
func breakMTCipher(ciphertext, plaintext []byte) (uint16, error) {
	if len(ciphertext) < len(plaintext) {
		return 0, errors.New("breakMTCipher: invalid ciphertext")
	}
	prefixLen := len(ciphertext) - len(plaintext)
	scratch := make([]byte, len(ciphertext))
	for seed := 0; seed < 65536; seed++ {
		stream := NewMTCipher(uint32(seed))
		// Burn keystream over the unknown prefix, then encrypt the
		// known plaintext with the remaining keystream.
		stream.XORKeyStream(scratch[:prefixLen], scratch[:prefixLen])
		stream.XORKeyStream(scratch[prefixLen:], plaintext)
		if bytes.Equal(scratch[prefixLen:], ciphertext[prefixLen:]) {
			return uint16(seed), nil
		}
	}
	return 0, errors.New("breakMTCipher: nothing found")
}
// isRecent reports whether buf was generated by seeding MT19937 with a
// Unix timestamp from the last 24 hours.
func isRecent(buf []byte) bool {
	now := uint32(time.Now().Unix())
	scratch := make([]byte, len(buf))
	// Check back at most 24 hours, one second at a time.
	for back := 0; back < 24*60*60; back++ {
		NewMTCipher(now - uint32(back)).XORKeyStream(scratch, scratch)
		if bytes.Equal(buf, scratch) {
			return true
		}
		clear(scratch) // reset before trying the next candidate seed
	}
	return false
}
// passwordToken returns a 128-bit password reset token using the current
// time as the seed (see MTBytes) — which is exactly what makes it
// recoverable by isRecent.
func passwordToken() []byte {
	return MTBytes(16)
}
// MT represents an MT19937 PRNG.
type MT struct {
	state [arraySize]uint32 // internal state vector
	pos   int               // index of the next untempered output word
}
// NewMT initializes and returns a new PRNG. The state is filled with the
// standard MT19937 seeding recurrence, then twisted once up front so that
// Uint32 can read tempered words starting at pos 0.
func NewMT(seed uint32) *MT {
	var mt MT
	mt.state[0] = seed
	for i := 1; i < len(mt.state); i++ {
		mt.state[i] = multiplier*
			(mt.state[i-1]^(mt.state[i-1]>>30)) +
			uint32(i)
	}
	mt.twist()
	return &mt
}
// Uint32 returns a pseudo-random unsigned 32-bit integer: the tempered word
// at the current position. When the state is exhausted it re-twists and
// starts over from position 0.
func (mt *MT) Uint32() uint32 {
	n := temper(mt.state[mt.pos])
	mt.pos++
	if mt.pos == len(mt.state) {
		mt.twist()
		mt.pos = 0
	}
	return n
}
// Uint32n returns a pseudo-random unsigned 32-bit integer in [0, n).
// NOTE(review): the float scaling maps the generator's range onto
// [0, n-1], but n-1 is produced only for the single maximal generator
// output, so the distribution is not exactly uniform — confirm whether
// that bias matters to callers.
func (mt *MT) Uint32n(n uint32) uint32 {
	if n == 0 {
		panic("Uint32n: invalid range")
	}
	return uint32(float64(mt.Uint32()) *
		float64(n-1) / float64(^uint32(0)))
}
// twist scrambles the whole state array in place using the MT19937
// recurrence: each word combines the top bit of state[i] with the low 31
// bits of its successor, shifted and conditionally XORed with the twist
// coefficient.
func (mt *MT) twist() {
	for i := range mt.state {
		n := (mt.state[i] & upperMask) | (mt.state[(i+1)%len(mt.state)] & lowerMask)
		mt.state[i] = mt.state[(i+offset)%len(mt.state)] ^ (n >> 1)
		// The coefficient is applied only when the combined word is odd.
		if n&1 == 1 {
			mt.state[i] ^= coefficient
		}
	}
}
// temper applies MT19937's output tempering transformation to n.
func temper(n uint32) uint32 {
	y := n ^ (n >> 11)
	y ^= (y << 7) & temperMask1
	y ^= (y << 15) & temperMask2
	return y ^ (y >> 18)
}
// mtCipher represents an MT19937 stream cipher; it derives its keystream
// from the embedded generator's outputs.
type mtCipher struct {
	*MT
}

// NewMTCipher creates a new MT19937 cipher seeded with seed.
func NewMTCipher(seed uint32) cipher.Stream {
	return mtCipher{NewMT(seed)}
}
// XORKeyStream encrypts src into dst by XORing each byte with the low byte
// of a fresh MT19937 output. Panics if dst is smaller than src.
func (x mtCipher) XORKeyStream(dst, src []byte) {
	for i, b := range src {
		dst[i] = b ^ byte(x.Uint32()&0xff)
	}
}
// MTBytes returns a pseudo-random buffer of the desired length, keyed by
// the current Unix time.
func MTBytes(n int) []byte {
	out := make([]byte, n)
	NewMTCipher(uint32(time.Now().Unix())).XORKeyStream(out, out)
	return out
}
// MTInRange returns a pseudo-random unsigned 32-bit integer in [lo, hi],
// using a time-seeded generator. Panics when lo > hi.
func MTInRange(lo, hi uint32) uint32 {
	if hi < lo {
		panic("MTInRange: invalid range")
	}
	span := hi - lo + 1
	return lo + NewMT(uint32(time.Now().Unix())).Uint32n(span)
}
// clear overwrites a buffer with zeroes.
func clear(buf []byte) {
	// The compiler should recognize this as a memclr.
	i := 0
	for i < len(buf) {
		buf[i] = 0
		i++
	}
}
|
package pojo
import (
pojo2 "tesou.io/platform/brush-parent/brush-api/module/analy/pojo"
)
/*
Publication record.
*/

// SuggStub embeds AnalyResult so xorm maps the embedded struct's columns
// onto this table (`xorm:"extends"`).
type SuggStub struct {
	pojo2.AnalyResult `xorm:"extends"`
}
|
package framework
import (
"time"
"github.com/gin-gonic/gin"
"github.com/appleboy/gin-jwt"
"strings"
userService "cms/service/user"
)
// getAuthMiddleware builds the gin-jwt middleware used to authenticate users.
//
// SECURITY FIX: the authenticator previously used
// `strings.Compare(password, user.Password) > -1`, which is true whenever
// the submitted password is lexicographically >= the stored one — i.e. it
// accepted wrong passwords. Authentication requires exact equality (== 0).
func getAuthMiddleware() *jwt.GinJWTMiddleware {
	authMiddleware := &jwt.GinJWTMiddleware{
		Realm: "UserRealm",
		// NOTE(review): the signing key should come from configuration or a
		// secret store rather than being hard-coded in source.
		Key:        []byte("hjTSdjskiTOWJWRsjfskfjlkowqj8j23LQj"),
		Timeout:    time.Hour,
		MaxRefresh: time.Hour,
		// Authenticator validates the submitted credentials.
		Authenticator: func(username string, password string, c *gin.Context) (string, bool) {
			user := userService.GetUserByName(username)
			// NOTE(review): passwords appear to be compared in plain text;
			// confirm whether the user service stores hashes (it should).
			if strings.Compare(password, user.Password) == 0 {
				return username, true
			}
			return username, false
		},
		// Authorizator currently allows every authenticated user.
		Authorizator: func(username string, c *gin.Context) bool {
			return true
		},
		Unauthorized: func(c *gin.Context, code int, message string) {
			c.JSON(code, message)
		},
		TokenLookup: "header:Authorization",
		// TokenHeadName is a string in the header. Default value is "Bearer"
		TokenHeadName: "Bearer",
		// TimeFunc provides the current time. You can override it to use another time value. This is useful for testing or if your server uses a different time zone than your tokens.
		TimeFunc: time.Now,
	}
	return authMiddleware
}
|
package libraries
import (
"bufio"
"encoding/json"
"log"
"net/http"
"os"
"strings"
)
/*
Send the Vault Token and retrieve the secret from the specified path
Data can be pulled from data.file
*/
/*
GetSecret sends the Vault token to the secret path configured in data.file
(base URL from the environment-specific config) and returns the decoded
JSON response. On a malformed response the error is logged and a nil map
is returned, matching the previous best-effort behavior.
*/
func GetSecret(x_vault_token string) map[string]interface{} {
	path := GetData("secretpath")
	env := GetConfig("environment")
	url := GetConfig("url_" + env)
	client := &http.Client{}
	// FIX: the request-construction error was silently discarded.
	req, err := http.NewRequest("GET", url+path, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Vault-Token", x_vault_token)
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	var res map[string]interface{}
	// FIX: the decode error was silently ignored; log it so a malformed
	// response is at least visible.
	if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
		log.Printf("failed decoding vault response: %s", err)
	}
	client.CloseIdleConnections()
	return res
}
// GetData returns the value of the first "key=value" line in ./data.file
// that contains key. It returns the empty string when the key is missing
// or the matching line has no '='.
//
// BUG FIX: the original `value = append(strings.Split(eachline, "="))` is
// not valid Go (append needs a slice argument) — Split already returns the
// slice we need. The file is now also closed via defer, and a missing key
// no longer causes an index-out-of-range panic.
func GetData(key string) string {
	pwd, _ := os.Getwd()
	file, err := os.Open(pwd + "/data.file")
	if err != nil {
		log.Fatalf("failed opening file: %s", err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.Contains(line, key) {
			parts := strings.Split(line, "=")
			if len(parts) > 1 {
				return parts[1]
			}
			return ""
		}
	}
	return ""
}
func GetHealth(x_vault_token string) int {
path := GetData("health_check_path")
env := GetConfig("environment")
url := GetConfig("url_"+env)
client := &http.Client{}
req, _ := http.NewRequest("GET", url+path, nil)
req.Header.Set("X-Vault-Token", x_vault_token)
resp, err := client.Do(req)
if err != nil {
panic(err)
}
defer resp.Body.Close()
client.CloseIdleConnections()
return resp.StatusCode
} |
/*
Copyright 2020 Humio https://humio.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"fmt"
"reflect"
"strconv"
"strings"
"github.com/humio/humio-operator/pkg/kubernetes"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/humio/humio-operator/pkg/helpers"
humiov1alpha1 "github.com/humio/humio-operator/api/v1alpha1"
corev1 "k8s.io/api/core/v1"
)
// Defaults and well-known names for Humio cluster resources. The unexported
// numeric defaults are used as fallbacks by the corresponding Get* methods
// on HumioNodePool when the CR leaves the field unset.
const (
	Image                        = "humio/humio-core:1.36.1"
	HelperImage                  = "humio/humio-operator-helper:0.5.0"
	targetReplicationFactor      = 2
	storagePartitionsCount       = 24
	digestPartitionsCount        = 24
	nodeCount                    = 3
	HumioPort                    = 8080
	elasticPort                  = 9200
	idpCertificateFilename       = "idp-certificate.pem"
	ExtraKafkaPropertiesFilename = "extra-kafka-properties.properties"
	ViewGroupPermissionsFilename = "view-group-permissions.json"
	nodeUUIDPrefix               = "humio_"
	HumioContainerName           = "humio"
	AuthContainerName            = "auth"
	InitContainerName            = "init"
	// cluster-wide resources:
	initClusterRoleSuffix        = "init"
	initClusterRoleBindingSuffix = "init"
	// namespaced resources:
	HumioServiceAccountNameSuffix           = "humio"
	initServiceAccountNameSuffix            = "init"
	initServiceAccountSecretNameIdentifier  = "init"
	authServiceAccountNameSuffix            = "auth"
	authServiceAccountSecretNameIdentifier  = "auth"
	authRoleSuffix                          = "auth"
	authRoleBindingSuffix                   = "auth"
	extraKafkaConfigsConfigMapNameSuffix    = "extra-kafka-configs"
	viewGroupPermissionsConfigMapNameSuffix = "view-group-permissions"
	idpCertificateSecretNameSuffix          = "idp-certificate"
)
// HumioNodePool is a denormalized view over one node pool of a HumioCluster
// resource: cluster-level settings plus the pool's node spec, flattened so
// the rest of the controller can ask one object for effective values.
type HumioNodePool struct {
	clusterName  string // name of the owning HumioCluster
	nodePoolName string // empty for the cluster's default pool
	namespace    string
	hostname     string
	esHostname   string
	hostnameSource   humiov1alpha1.HumioHostnameSource
	esHostnameSource humiov1alpha1.HumioESHostnameSource
	humioNodeSpec    humiov1alpha1.HumioNodeSpec
	tls              *humiov1alpha1.HumioClusterTLSSpec
	idpCertificateSecretName string
	viewGroupPermissions     string
	// Zero means "use the package default" (see the Get* accessors).
	targetReplicationFactor int
	storagePartitionsCount  int
	digestPartitionsCount   int
	path    string
	ingress humiov1alpha1.HumioClusterIngressSpec
	// Map of cluster annotations, used for pod revision bookkeeping
	clusterAnnotations map[string]string
}
// NewHumioNodeManagerFromHumioCluster builds a HumioNodePool view over the
// cluster-level (default) node spec of a HumioCluster. Every field is a
// verbatim copy from hc.Spec; nodePoolName is left empty, so
// GetNodePoolName falls back to the cluster name.
func NewHumioNodeManagerFromHumioCluster(hc *humiov1alpha1.HumioCluster) *HumioNodePool {
	return &HumioNodePool{
		namespace:        hc.Namespace,
		clusterName:      hc.Name,
		hostname:         hc.Spec.Hostname,
		esHostname:       hc.Spec.ESHostname,
		hostnameSource:   hc.Spec.HostnameSource,
		esHostnameSource: hc.Spec.ESHostnameSource,
		humioNodeSpec: humiov1alpha1.HumioNodeSpec{
			Image:     hc.Spec.Image,
			NodeCount: hc.Spec.NodeCount,
			DataVolumePersistentVolumeClaimSpecTemplate: hc.Spec.DataVolumePersistentVolumeClaimSpecTemplate,
			DataVolumeSource:               hc.Spec.DataVolumeSource,
			AuthServiceAccountName:         hc.Spec.AuthServiceAccountName,
			DisableInitContainer:           hc.Spec.DisableInitContainer,
			EnvironmentVariablesSource:     hc.Spec.EnvironmentVariablesSource,
			PodAnnotations:                 hc.Spec.PodAnnotations,
			ShareProcessNamespace:          hc.Spec.ShareProcessNamespace,
			HumioServiceAccountName:        hc.Spec.HumioServiceAccountName,
			ImagePullSecrets:               hc.Spec.ImagePullSecrets,
			HelperImage:                    hc.Spec.HelperImage,
			ImagePullPolicy:                hc.Spec.ImagePullPolicy,
			ContainerSecurityContext:       hc.Spec.ContainerSecurityContext,
			ContainerStartupProbe:          hc.Spec.ContainerStartupProbe,
			ContainerLivenessProbe:         hc.Spec.ContainerLivenessProbe,
			ContainerReadinessProbe:        hc.Spec.ContainerReadinessProbe,
			PodSecurityContext:             hc.Spec.PodSecurityContext,
			Resources:                      hc.Spec.Resources,
			Tolerations:                    hc.Spec.Tolerations,
			TerminationGracePeriodSeconds:  hc.Spec.TerminationGracePeriodSeconds,
			Affinity:                       hc.Spec.Affinity,
			SidecarContainers:              hc.Spec.SidecarContainers,
			ExtraKafkaConfigs:              hc.Spec.ExtraKafkaConfigs,
			NodeUUIDPrefix:                 hc.Spec.NodeUUIDPrefix,
			ExtraHumioVolumeMounts:         hc.Spec.ExtraHumioVolumeMounts,
			ExtraVolumes:                   hc.Spec.ExtraVolumes,
			HumioServiceAccountAnnotations: hc.Spec.HumioServiceAccountAnnotations,
			HumioServiceLabels:             hc.Spec.HumioServiceLabels,
			EnvironmentVariables:           hc.Spec.EnvironmentVariables,
			ImageSource:                    hc.Spec.ImageSource,
			HumioESServicePort:             hc.Spec.HumioESServicePort,
			HumioServicePort:               hc.Spec.HumioServicePort,
			HumioServiceType:               hc.Spec.HumioServiceType,
			HumioServiceAnnotations:        hc.Spec.HumioServiceAnnotations,
			InitServiceAccountName:         hc.Spec.InitServiceAccountName,
			PodLabels:                      hc.Spec.PodLabels,
			UpdateStrategy:                 hc.Spec.UpdateStrategy,
		},
		tls:                      hc.Spec.TLS,
		idpCertificateSecretName: hc.Spec.IdpCertificateSecretName,
		viewGroupPermissions:     hc.Spec.ViewGroupPermissions,
		targetReplicationFactor:  hc.Spec.TargetReplicationFactor,
		storagePartitionsCount:   hc.Spec.StoragePartitionsCount,
		digestPartitionsCount:    hc.Spec.DigestPartitionsCount,
		path:                     hc.Spec.Path,
		ingress:                  hc.Spec.Ingress,
		clusterAnnotations:       hc.Annotations,
	}
}
// NewHumioNodeManagerFromHumioNodePool builds a HumioNodePool view for a
// named node pool: node-level fields are copied verbatim from the pool
// spec (hnp), while cluster-scoped fields (hostnames, TLS, partitions,
// ingress, annotations) still come from the owning HumioCluster (hc).
func NewHumioNodeManagerFromHumioNodePool(hc *humiov1alpha1.HumioCluster, hnp *humiov1alpha1.HumioNodePoolSpec) *HumioNodePool {
	return &HumioNodePool{
		namespace:        hc.Namespace,
		clusterName:      hc.Name,
		nodePoolName:     hnp.Name,
		hostname:         hc.Spec.Hostname,
		esHostname:       hc.Spec.ESHostname,
		hostnameSource:   hc.Spec.HostnameSource,
		esHostnameSource: hc.Spec.ESHostnameSource,
		humioNodeSpec: humiov1alpha1.HumioNodeSpec{
			Image:     hnp.Image,
			NodeCount: hnp.NodeCount,
			DataVolumePersistentVolumeClaimSpecTemplate: hnp.DataVolumePersistentVolumeClaimSpecTemplate,
			DataVolumeSource:               hnp.DataVolumeSource,
			AuthServiceAccountName:         hnp.AuthServiceAccountName,
			DisableInitContainer:           hnp.DisableInitContainer,
			EnvironmentVariablesSource:     hnp.EnvironmentVariablesSource,
			PodAnnotations:                 hnp.PodAnnotations,
			ShareProcessNamespace:          hnp.ShareProcessNamespace,
			HumioServiceAccountName:        hnp.HumioServiceAccountName,
			ImagePullSecrets:               hnp.ImagePullSecrets,
			HelperImage:                    hnp.HelperImage,
			ImagePullPolicy:                hnp.ImagePullPolicy,
			ContainerSecurityContext:       hnp.ContainerSecurityContext,
			ContainerStartupProbe:          hnp.ContainerStartupProbe,
			ContainerLivenessProbe:         hnp.ContainerLivenessProbe,
			ContainerReadinessProbe:        hnp.ContainerReadinessProbe,
			PodSecurityContext:             hnp.PodSecurityContext,
			Resources:                      hnp.Resources,
			Tolerations:                    hnp.Tolerations,
			TerminationGracePeriodSeconds:  hnp.TerminationGracePeriodSeconds,
			Affinity:                       hnp.Affinity,
			SidecarContainers:              hnp.SidecarContainers,
			ExtraKafkaConfigs:              hnp.ExtraKafkaConfigs,
			NodeUUIDPrefix:                 hnp.NodeUUIDPrefix,
			ExtraHumioVolumeMounts:         hnp.ExtraHumioVolumeMounts,
			ExtraVolumes:                   hnp.ExtraVolumes,
			HumioServiceAccountAnnotations: hnp.HumioServiceAccountAnnotations,
			HumioServiceLabels:             hnp.HumioServiceLabels,
			EnvironmentVariables:           hnp.EnvironmentVariables,
			ImageSource:                    hnp.ImageSource,
			HumioESServicePort:             hnp.HumioESServicePort,
			HumioServicePort:               hnp.HumioServicePort,
			HumioServiceType:               hnp.HumioServiceType,
			HumioServiceAnnotations:        hnp.HumioServiceAnnotations,
			InitServiceAccountName:         hnp.InitServiceAccountName,
			PodLabels:                      hnp.PodLabels,
			UpdateStrategy:                 hnp.UpdateStrategy,
		},
		tls:                      hc.Spec.TLS,
		idpCertificateSecretName: hc.Spec.IdpCertificateSecretName,
		viewGroupPermissions:     hc.Spec.ViewGroupPermissions,
		targetReplicationFactor:  hc.Spec.TargetReplicationFactor,
		storagePartitionsCount:   hc.Spec.StoragePartitionsCount,
		digestPartitionsCount:    hc.Spec.DigestPartitionsCount,
		path:                     hc.Spec.Path,
		ingress:                  hc.Spec.Ingress,
		clusterAnnotations:       hc.Annotations,
	}
}
// GetClusterName returns the name of the owning HumioCluster resource.
func (hnp HumioNodePool) GetClusterName() string {
	return hnp.clusterName
}

// GetNodePoolName returns "<cluster>-<pool>", or just the cluster name for
// the default (unnamed) pool.
func (hnp HumioNodePool) GetNodePoolName() string {
	if hnp.nodePoolName == "" {
		return hnp.GetClusterName()
	}
	return strings.Join([]string{hnp.GetClusterName(), hnp.nodePoolName}, "-")
}

// GetNamespace returns the namespace of the owning HumioCluster.
func (hnp HumioNodePool) GetNamespace() string {
	return hnp.namespace
}

// GetHostname returns the configured external hostname (may be empty).
func (hnp HumioNodePool) GetHostname() string {
	return hnp.hostname
}

// SetImage overrides the Humio container image for this pool.
func (hnp *HumioNodePool) SetImage(image string) {
	hnp.humioNodeSpec.Image = image
}

// GetImage returns the configured Humio image, falling back to the
// package default Image when unset.
func (hnp *HumioNodePool) GetImage() string {
	if hnp.humioNodeSpec.Image != "" {
		return hnp.humioNodeSpec.Image
	}
	return Image
}

// GetImageSource returns the optional image source override.
func (hnp HumioNodePool) GetImageSource() *humiov1alpha1.HumioImageSource {
	return hnp.humioNodeSpec.ImageSource
}

// GetHelperImage returns the configured helper image, falling back to the
// package default HelperImage when unset.
func (hnp HumioNodePool) GetHelperImage() string {
	if hnp.humioNodeSpec.HelperImage != "" {
		return hnp.humioNodeSpec.HelperImage
	}
	return HelperImage
}

// GetImagePullSecrets returns the pod image pull secrets.
func (hnp HumioNodePool) GetImagePullSecrets() []corev1.LocalObjectReference {
	return hnp.humioNodeSpec.ImagePullSecrets
}

// GetImagePullPolicy returns the container image pull policy.
func (hnp HumioNodePool) GetImagePullPolicy() corev1.PullPolicy {
	return hnp.humioNodeSpec.ImagePullPolicy
}

// GetEnvironmentVariablesSource returns the envFrom sources for the Humio
// container.
func (hnp HumioNodePool) GetEnvironmentVariablesSource() []corev1.EnvFromSource {
	return hnp.humioNodeSpec.EnvironmentVariablesSource
}

// GetTargetReplicationFactor returns the configured replication factor,
// falling back to the package default when unset (0).
func (hnp HumioNodePool) GetTargetReplicationFactor() int {
	if hnp.targetReplicationFactor != 0 {
		return hnp.targetReplicationFactor
	}
	return targetReplicationFactor
}

// GetStoragePartitionsCount returns the configured storage partition
// count, falling back to the package default when unset (0).
func (hnp HumioNodePool) GetStoragePartitionsCount() int {
	if hnp.storagePartitionsCount != 0 {
		return hnp.storagePartitionsCount
	}
	return storagePartitionsCount
}

// GetDigestPartitionsCount returns the configured digest partition count,
// falling back to the package default when unset (0).
func (hnp HumioNodePool) GetDigestPartitionsCount() int {
	if hnp.digestPartitionsCount != 0 {
		return hnp.digestPartitionsCount
	}
	return digestPartitionsCount
}
// SetHumioClusterNodePoolRevisionAnnotation records newRevision under this
// pool's revision annotation key, creating the annotation map if needed.
func (hnp *HumioNodePool) SetHumioClusterNodePoolRevisionAnnotation(newRevision int) {
	if hnp.clusterAnnotations == nil {
		hnp.clusterAnnotations = map[string]string{}
	}
	revisionKey, _ := hnp.GetHumioClusterNodePoolRevisionAnnotation()
	hnp.clusterAnnotations[revisionKey] = strconv.Itoa(newRevision)
}
// GetHumioClusterNodePoolRevisionAnnotation returns the cluster-annotation
// key holding this pool's pod revision together with its current integer
// value. A missing annotation counts as revision 0; a malformed value
// yields ("", -1).
func (hnp HumioNodePool) GetHumioClusterNodePoolRevisionAnnotation() (string, int) {
	key := strings.Join([]string{PodRevisionAnnotation, hnp.GetNodePoolName()}, "-")
	value := "0"
	if len(hnp.clusterAnnotations) > 0 {
		if v, ok := hnp.clusterAnnotations[key]; ok {
			value = v
		}
	}
	rev, err := strconv.Atoi(value)
	if err != nil {
		return "", -1
	}
	return key, rev
}
// GetIngress returns the cluster-level ingress configuration shared by
// this pool.
func (hnp HumioNodePool) GetIngress() humiov1alpha1.HumioClusterIngressSpec {
	return hnp.ingress
}
// GetEnvironmentVariables returns the effective environment for the Humio
// container: user-supplied variables first (they win on any name clash),
// followed by operator defaults covering pod metadata, ports, replication
// and partition settings, JVM options (image-version dependent),
// EXTERNAL_URL, PUBLIC_URL and PROXY_PREFIX_URL.
func (hnp HumioNodePool) GetEnvironmentVariables() []corev1.EnvVar {
	var envVar []corev1.EnvVar
	// User-specified variables are appended first so that the defaults
	// added below can never override them.
	for _, env := range hnp.humioNodeSpec.EnvironmentVariables {
		envVar = AppendEnvVarToEnvVarsIfNotAlreadyPresent(envVar, env)
	}
	scheme := "https"
	if !hnp.TLSEnabled() {
		scheme = "http"
	}
	envDefaults := []corev1.EnvVar{
		{
			Name: "THIS_POD_IP",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{
					APIVersion: "v1",
					FieldPath:  "status.podIP",
				},
			},
		},
		{
			Name: "POD_NAME",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{
					APIVersion: "v1",
					FieldPath:  "metadata.name",
				},
			},
		},
		{
			Name: "POD_NAMESPACE",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{
					APIVersion: "v1",
					FieldPath:  "metadata.namespace",
				},
			},
		},
		{Name: "HUMIO_PORT", Value: strconv.Itoa(HumioPort)},
		{Name: "ELASTIC_PORT", Value: strconv.Itoa(elasticPort)},
		{Name: "DIGEST_REPLICATION_FACTOR", Value: strconv.Itoa(hnp.GetTargetReplicationFactor())},
		{Name: "STORAGE_REPLICATION_FACTOR", Value: strconv.Itoa(hnp.GetTargetReplicationFactor())},
		{Name: "DEFAULT_PARTITION_COUNT", Value: strconv.Itoa(hnp.GetStoragePartitionsCount())},
		{Name: "INGEST_QUEUE_INITIAL_PARTITIONS", Value: strconv.Itoa(hnp.GetDigestPartitionsCount())},
		{Name: "KAFKA_MANAGED_BY_HUMIO", Value: "true"},
		{Name: "AUTHENTICATION_METHOD", Value: "single-user"},
		{Name: "HUMIO_LOG4J_CONFIGURATION", Value: "log4j2-json-stdout.xml"},
		{
			Name:  "EXTERNAL_URL", // URL used by other Humio hosts.
			Value: fmt.Sprintf("%s://$(POD_NAME).%s.$(POD_NAMESPACE):$(HUMIO_PORT)", strings.ToLower(scheme), headlessServiceName(hnp.GetClusterName())),
		},
	}
	// JVM defaults depend on whether the image ships the launcher script.
	humioVersion, _ := HumioVersionFromString(hnp.GetImage())
	if ok, _ := humioVersion.AtLeast(HumioVersionWithLauncherScript); ok {
		envDefaults = append(envDefaults, corev1.EnvVar{
			Name:  "HUMIO_GC_OPTS",
			Value: "-XX:+UseParallelGC -XX:+ScavengeBeforeFullGC -XX:+DisableExplicitGC",
		})
		envDefaults = append(envDefaults, corev1.EnvVar{
			Name:  "HUMIO_JVM_LOG_OPTS",
			Value: "-Xlog:gc+jni=debug:stdout -Xlog:gc*:stdout:time,tags",
		})
		envDefaults = append(envDefaults, corev1.EnvVar{
			Name:  "HUMIO_OPTS",
			Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true",
		})
	} else {
		envDefaults = append(envDefaults, corev1.EnvVar{
			Name:  "HUMIO_JVM_ARGS",
			Value: "-Xss2m -Xms256m -Xmx1536m -server -XX:+UseParallelGC -XX:+ScavengeBeforeFullGC -XX:+DisableExplicitGC",
		})
	}
	// Ephemeral-disk setups keep their node UUID in ZooKeeper.
	if EnvVarHasValue(hnp.humioNodeSpec.EnvironmentVariables, "USING_EPHEMERAL_DISKS", "true") {
		envDefaults = append(envDefaults, corev1.EnvVar{
			Name:  "ZOOKEEPER_URL_FOR_NODE_UUID",
			Value: "$(ZOOKEEPER_URL)",
		})
	}
	for _, defaultEnvVar := range envDefaults {
		envVar = AppendEnvVarToEnvVarsIfNotAlreadyPresent(envVar, defaultEnvVar)
	}
	// Allow overriding PUBLIC_URL. This may be useful when other methods of exposing the cluster are used other than
	// ingress
	if !EnvVarHasKey(envDefaults, "PUBLIC_URL") {
		// Only include the path suffix if it's non-root. It likely wouldn't harm anything, but it's unnecessary
		pathSuffix := ""
		if hnp.GetPath() != "/" {
			pathSuffix = hnp.GetPath()
		}
		if hnp.GetIngress().Enabled {
			envVar = AppendEnvVarToEnvVarsIfNotAlreadyPresent(envVar, corev1.EnvVar{
				Name:  "PUBLIC_URL", // URL used by users/browsers.
				Value: fmt.Sprintf("https://%s%s", hnp.GetHostname(), pathSuffix),
			})
		} else {
			envVar = AppendEnvVarToEnvVarsIfNotAlreadyPresent(envVar, corev1.EnvVar{
				Name:  "PUBLIC_URL", // URL used by users/browsers.
				Value: fmt.Sprintf("%s://$(THIS_POD_IP):$(HUMIO_PORT)%s", scheme, pathSuffix),
			})
		}
	}
	// Humio must know when it is served under a non-root path prefix.
	if hnp.GetPath() != "/" {
		envVar = AppendEnvVarToEnvVarsIfNotAlreadyPresent(envVar, corev1.EnvVar{
			Name:  "PROXY_PREFIX_URL",
			Value: hnp.GetPath(),
		})
	}
	return envVar
}
// GetContainerSecurityContext returns the user-supplied container security
// context, or a locked-down default: non-root, read-only root filesystem,
// all capabilities dropped except NET_BIND_SERVICE and SYS_NICE.
func (hnp HumioNodePool) GetContainerSecurityContext() *corev1.SecurityContext {
	if hnp.humioNodeSpec.ContainerSecurityContext == nil {
		return &corev1.SecurityContext{
			AllowPrivilegeEscalation: helpers.BoolPtr(false),
			Privileged:               helpers.BoolPtr(false),
			ReadOnlyRootFilesystem:   helpers.BoolPtr(true),
			RunAsUser:                helpers.Int64Ptr(65534),
			RunAsNonRoot:             helpers.BoolPtr(true),
			Capabilities: &corev1.Capabilities{
				Add: []corev1.Capability{
					"NET_BIND_SERVICE",
					"SYS_NICE",
				},
				Drop: []corev1.Capability{
					"ALL",
				},
			},
		}
	}
	return hnp.humioNodeSpec.ContainerSecurityContext
}
// GetNodePoolLabels returns the common cluster labels plus this pool's
// node-pool-name label.
func (hnp HumioNodePool) GetNodePoolLabels() map[string]string {
	labels := hnp.GetCommonClusterLabels()
	labels[kubernetes.NodePoolLabelName] = hnp.GetNodePoolName()
	return labels
}
// GetPodLabels merges user-supplied pod labels into the node pool labels;
// node pool labels win on conflicts.
func (hnp HumioNodePool) GetPodLabels() map[string]string {
	labels := hnp.GetNodePoolLabels()
	for k, v := range hnp.humioNodeSpec.PodLabels {
		if _, ok := labels[k]; !ok {
			labels[k] = v
		}
	}
	return labels
}
// GetCommonClusterLabels returns the labels shared by all resources of this cluster.
func (hnp HumioNodePool) GetCommonClusterLabels() map[string]string {
	return kubernetes.LabelsForHumio(hnp.clusterName)
}
// GetCASecretName returns the user-provided CA secret name, or the default
// "<cluster>-ca-keypair".
func (hnp HumioNodePool) GetCASecretName() string {
	if hnp.tls != nil && hnp.tls.CASecretName != "" {
		return hnp.tls.CASecretName
	}
	return fmt.Sprintf("%s-ca-keypair", hnp.GetClusterName())
}
// UseExistingCA reports whether a user-provided CA secret is configured.
func (hnp HumioNodePool) UseExistingCA() bool {
	return hnp.tls != nil && hnp.tls.CASecretName != ""
}
// GetLabelsForSecret returns the common cluster labels plus a label naming
// the given secret.
func (hnp HumioNodePool) GetLabelsForSecret(secretName string) map[string]string {
	labels := hnp.GetCommonClusterLabels()
	labels[kubernetes.SecretNameLabelName] = secretName
	return labels
}
// GetNodeCount returns the configured node count, or the package default
// when unset.
func (hnp HumioNodePool) GetNodeCount() int {
	if hnp.humioNodeSpec.NodeCount == nil {
		return nodeCount
	}
	return *hnp.humioNodeSpec.NodeCount
}
// GetDataVolumePersistentVolumeClaimSpecTemplate returns a volume source
// referencing pvcName when PVCs are enabled, otherwise an empty source.
func (hnp HumioNodePool) GetDataVolumePersistentVolumeClaimSpecTemplate(pvcName string) corev1.VolumeSource {
	if hnp.PVCsEnabled() {
		return corev1.VolumeSource{
			PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
				ClaimName: pvcName,
			},
		}
	}
	return corev1.VolumeSource{}
}
// GetDataVolumePersistentVolumeClaimSpecTemplateRAW returns the raw PVC
// spec template from the node spec.
func (hnp HumioNodePool) GetDataVolumePersistentVolumeClaimSpecTemplateRAW() corev1.PersistentVolumeClaimSpec {
	return hnp.humioNodeSpec.DataVolumePersistentVolumeClaimSpecTemplate
}
// DataVolumePersistentVolumeClaimSpecTemplateIsSetByUser reports whether
// the PVC spec template differs from the zero value.
func (hnp HumioNodePool) DataVolumePersistentVolumeClaimSpecTemplateIsSetByUser() bool {
	return !reflect.DeepEqual(hnp.humioNodeSpec.DataVolumePersistentVolumeClaimSpecTemplate, corev1.PersistentVolumeClaimSpec{})
}
// GetDataVolumeSource returns the data volume source from the node spec.
func (hnp HumioNodePool) GetDataVolumeSource() corev1.VolumeSource {
	return hnp.humioNodeSpec.DataVolumeSource
}
// GetPodAnnotations returns the pod annotations from the node spec.
func (hnp HumioNodePool) GetPodAnnotations() map[string]string {
	return hnp.humioNodeSpec.PodAnnotations
}
// GetAuthServiceAccountSecretName returns "<pool>-<auth secret identifier>".
func (hnp HumioNodePool) GetAuthServiceAccountSecretName() string {
	return fmt.Sprintf("%s-%s", hnp.GetNodePoolName(), authServiceAccountSecretNameIdentifier)
}
// GetInitServiceAccountSecretName returns "<pool>-<init secret identifier>".
func (hnp HumioNodePool) GetInitServiceAccountSecretName() string {
	return fmt.Sprintf("%s-%s", hnp.GetNodePoolName(), initServiceAccountSecretNameIdentifier)
}
// GetInitServiceAccountName returns the user-provided init service account
// name, or the default "<pool>-<suffix>".
func (hnp HumioNodePool) GetInitServiceAccountName() string {
	if hnp.humioNodeSpec.InitServiceAccountName != "" {
		return hnp.humioNodeSpec.InitServiceAccountName
	}
	return fmt.Sprintf("%s-%s", hnp.GetNodePoolName(), initServiceAccountNameSuffix)
}
// InitServiceAccountIsSetByUser reports whether the user set an init
// service account name.
func (hnp HumioNodePool) InitServiceAccountIsSetByUser() bool {
	return hnp.humioNodeSpec.InitServiceAccountName != ""
}
// GetAuthServiceAccountName returns the user-provided auth service account
// name, or the default "<pool>-<suffix>".
func (hnp HumioNodePool) GetAuthServiceAccountName() string {
	if hnp.humioNodeSpec.AuthServiceAccountName != "" {
		return hnp.humioNodeSpec.AuthServiceAccountName
	}
	return fmt.Sprintf("%s-%s", hnp.GetNodePoolName(), authServiceAccountNameSuffix)
}
// AuthServiceAccountIsSetByUser reports whether the user set an auth
// service account name.
func (hnp HumioNodePool) AuthServiceAccountIsSetByUser() bool {
	return hnp.humioNodeSpec.AuthServiceAccountName != ""
}
// GetInitClusterRoleName returns "<namespace>-<pool>-<suffix>"; cluster
// roles are cluster-scoped, hence the namespace in the name.
func (hnp HumioNodePool) GetInitClusterRoleName() string {
	return fmt.Sprintf("%s-%s-%s", hnp.GetNamespace(), hnp.GetNodePoolName(), initClusterRoleSuffix)
}
// GetInitClusterRoleBindingName returns "<namespace>-<pool>-<suffix>".
func (hnp HumioNodePool) GetInitClusterRoleBindingName() string {
	return fmt.Sprintf("%s-%s-%s", hnp.GetNamespace(), hnp.GetNodePoolName(), initClusterRoleBindingSuffix)
}
// GetAuthRoleName returns "<pool>-<auth role suffix>".
func (hnp HumioNodePool) GetAuthRoleName() string {
	return fmt.Sprintf("%s-%s", hnp.GetNodePoolName(), authRoleSuffix)
}
// GetAuthRoleBindingName returns "<pool>-<auth role binding suffix>".
func (hnp HumioNodePool) GetAuthRoleBindingName() string {
	return fmt.Sprintf("%s-%s", hnp.GetNodePoolName(), authRoleBindingSuffix)
}
// GetShareProcessNamespace returns the configured setting, defaulting to false.
func (hnp HumioNodePool) GetShareProcessNamespace() *bool {
	if hnp.humioNodeSpec.ShareProcessNamespace == nil {
		return helpers.BoolPtr(false)
	}
	return hnp.humioNodeSpec.ShareProcessNamespace
}
// HumioServiceAccountIsSetByUser reports whether the user set a Humio
// service account name.
func (hnp HumioNodePool) HumioServiceAccountIsSetByUser() bool {
	return hnp.humioNodeSpec.HumioServiceAccountName != ""
}
// GetHumioServiceAccountName returns the user-provided Humio service
// account name, or the default "<pool>-<suffix>".
func (hnp HumioNodePool) GetHumioServiceAccountName() string {
	if hnp.humioNodeSpec.HumioServiceAccountName != "" {
		return hnp.humioNodeSpec.HumioServiceAccountName
	}
	return fmt.Sprintf("%s-%s", hnp.GetNodePoolName(), HumioServiceAccountNameSuffix)
}
// GetHumioServiceAccountAnnotations returns the service account annotations
// from the node spec.
func (hnp HumioNodePool) GetHumioServiceAccountAnnotations() map[string]string {
	return hnp.humioNodeSpec.HumioServiceAccountAnnotations
}
// GetContainerReadinessProbe returns the readiness probe for the Humio
// container. An explicitly empty probe in the spec disables the probe
// (returns nil); an unset probe falls back to the operator default.
func (hnp HumioNodePool) GetContainerReadinessProbe() *corev1.Probe {
	if hnp.humioNodeSpec.ContainerReadinessProbe != nil && (*hnp.humioNodeSpec.ContainerReadinessProbe == (corev1.Probe{})) {
		return nil
	}
	if hnp.humioNodeSpec.ContainerReadinessProbe == nil {
		return hnp.defaultContainerProbe(30, 5, 10)
	}
	return hnp.humioNodeSpec.ContainerReadinessProbe
}
// GetContainerLivenessProbe returns the liveness probe for the Humio
// container, with the same disable/default semantics as the readiness probe.
func (hnp HumioNodePool) GetContainerLivenessProbe() *corev1.Probe {
	if hnp.humioNodeSpec.ContainerLivenessProbe != nil && (*hnp.humioNodeSpec.ContainerLivenessProbe == (corev1.Probe{})) {
		return nil
	}
	if hnp.humioNodeSpec.ContainerLivenessProbe == nil {
		return hnp.defaultContainerProbe(30, 5, 10)
	}
	return hnp.humioNodeSpec.ContainerLivenessProbe
}
// GetContainerStartupProbe returns the startup probe for the Humio
// container, with the same disable/default semantics as the readiness
// probe. The default startup probe has no initial delay and a larger
// failure budget than the readiness/liveness defaults.
func (hnp HumioNodePool) GetContainerStartupProbe() *corev1.Probe {
	if hnp.humioNodeSpec.ContainerStartupProbe != nil && (*hnp.humioNodeSpec.ContainerStartupProbe == (corev1.Probe{})) {
		return nil
	}
	if hnp.humioNodeSpec.ContainerStartupProbe == nil {
		return hnp.defaultContainerProbe(0, 10, 30)
	}
	return hnp.humioNodeSpec.ContainerStartupProbe
}
// defaultContainerProbe builds the shared default probe: an HTTP GET on
// /api/v1/is-node-up against the Humio port, using HTTP or HTTPS depending
// on the pool's TLS configuration. The three probe getters previously
// duplicated this literal; only the timings differ between them.
func (hnp HumioNodePool) defaultContainerProbe(initialDelaySeconds, periodSeconds, failureThreshold int32) *corev1.Probe {
	return &corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Path:   "/api/v1/is-node-up",
				Port:   intstr.IntOrString{IntVal: HumioPort},
				Scheme: hnp.GetProbeScheme(),
			},
		},
		InitialDelaySeconds: initialDelaySeconds,
		PeriodSeconds:       periodSeconds,
		TimeoutSeconds:      5,
		SuccessThreshold:    1,
		FailureThreshold:    failureThreshold,
	}
}
// GetPodSecurityContext returns the user-supplied pod security context, or
// a default running as the non-root "nobody" UID with group/fsGroup 0.
func (hnp HumioNodePool) GetPodSecurityContext() *corev1.PodSecurityContext {
	if hnp.humioNodeSpec.PodSecurityContext == nil {
		return &corev1.PodSecurityContext{
			RunAsUser:    helpers.Int64Ptr(65534),
			RunAsNonRoot: helpers.BoolPtr(true),
			RunAsGroup:   helpers.Int64Ptr(0), // TODO: We probably want to move away from this.
			FSGroup:      helpers.Int64Ptr(0), // TODO: We probably want to move away from this.
		}
	}
	return hnp.humioNodeSpec.PodSecurityContext
}
// GetAffinity returns the user-supplied affinity rules, or a default node
// affinity requiring amd64/linux nodes when the spec's affinity is the
// zero value.
func (hnp HumioNodePool) GetAffinity() *corev1.Affinity {
	if hnp.humioNodeSpec.Affinity == (corev1.Affinity{}) {
		return &corev1.Affinity{
			NodeAffinity: &corev1.NodeAffinity{
				RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
					NodeSelectorTerms: []corev1.NodeSelectorTerm{
						{
							MatchExpressions: []corev1.NodeSelectorRequirement{
								{
									Key:      corev1.LabelArchStable,
									Operator: corev1.NodeSelectorOpIn,
									Values: []string{
										"amd64",
									},
								},
								{
									Key:      corev1.LabelOSStable,
									Operator: corev1.NodeSelectorOpIn,
									Values: []string{
										"linux",
									},
								},
							},
						},
					},
				},
			},
		}
	}
	return &hnp.humioNodeSpec.Affinity
}
// GetSidecarContainers returns the sidecar containers from the node spec.
func (hnp HumioNodePool) GetSidecarContainers() []corev1.Container {
	return hnp.humioNodeSpec.SidecarContainers
}
// GetTolerations returns the pod tolerations from the node spec.
func (hnp HumioNodePool) GetTolerations() []corev1.Toleration {
	return hnp.humioNodeSpec.Tolerations
}
// GetResources returns the container resource requirements from the node spec.
func (hnp HumioNodePool) GetResources() corev1.ResourceRequirements {
	return hnp.humioNodeSpec.Resources
}
// GetExtraKafkaConfigs returns the extra Kafka configuration string.
func (hnp HumioNodePool) GetExtraKafkaConfigs() string {
	return hnp.humioNodeSpec.ExtraKafkaConfigs
}
// GetExtraKafkaConfigsConfigMapName returns "<pool>-<suffix>".
func (hnp HumioNodePool) GetExtraKafkaConfigsConfigMapName() string {
	return fmt.Sprintf("%s-%s", hnp.GetNodePoolName(), extraKafkaConfigsConfigMapNameSuffix)
}
// GetViewGroupPermissions returns the view group permissions payload.
func (hnp HumioNodePool) GetViewGroupPermissions() string {
	return hnp.viewGroupPermissions
}
// GetViewGroupPermissionsConfigMapName returns "<cluster>-<suffix>"; note
// this is cluster-scoped, not pool-scoped.
func (hnp HumioNodePool) GetViewGroupPermissionsConfigMapName() string {
	return fmt.Sprintf("%s-%s", hnp.GetClusterName(), viewGroupPermissionsConfigMapNameSuffix)
}
// GetPath returns the configured URL path, normalized to start with "/";
// defaults to "/" when unset.
func (hnp HumioNodePool) GetPath() string {
	if hnp.path != "" {
		if strings.HasPrefix(hnp.path, "/") {
			return hnp.path
		} else {
			return fmt.Sprintf("/%s", hnp.path)
		}
	}
	return "/"
}
// GetNodeUUIDPrefix returns the configured node UUID prefix, or the
// package default when unset.
func (hnp HumioNodePool) GetNodeUUIDPrefix() string {
	if hnp.humioNodeSpec.NodeUUIDPrefix != "" {
		return hnp.humioNodeSpec.NodeUUIDPrefix
	}
	return nodeUUIDPrefix
}
// GetHumioServiceLabels returns the service labels from the node spec.
func (hnp HumioNodePool) GetHumioServiceLabels() map[string]string {
	return hnp.humioNodeSpec.HumioServiceLabels
}
// GetTerminationGracePeriodSeconds returns the configured grace period,
// defaulting to 300 seconds.
func (hnp HumioNodePool) GetTerminationGracePeriodSeconds() *int64 {
	if hnp.humioNodeSpec.TerminationGracePeriodSeconds == nil {
		return helpers.Int64Ptr(300)
	}
	return hnp.humioNodeSpec.TerminationGracePeriodSeconds
}
// GetIDPCertificateSecretName returns the user-provided IDP certificate
// secret name, or the default "<cluster>-<suffix>".
func (hnp HumioNodePool) GetIDPCertificateSecretName() string {
	if hnp.idpCertificateSecretName != "" {
		return hnp.idpCertificateSecretName
	}
	return fmt.Sprintf("%s-%s", hnp.GetClusterName(), idpCertificateSecretNameSuffix)
}
// GetExtraHumioVolumeMounts returns extra volume mounts from the node spec.
func (hnp HumioNodePool) GetExtraHumioVolumeMounts() []corev1.VolumeMount {
	return hnp.humioNodeSpec.ExtraHumioVolumeMounts
}
// GetExtraVolumes returns extra pod volumes from the node spec.
func (hnp HumioNodePool) GetExtraVolumes() []corev1.Volume {
	return hnp.humioNodeSpec.ExtraVolumes
}
// GetHumioServiceAnnotations returns the service annotations from the node spec.
func (hnp HumioNodePool) GetHumioServiceAnnotations() map[string]string {
	return hnp.humioNodeSpec.HumioServiceAnnotations
}
// GetHumioServicePort returns the configured service port, or HumioPort.
func (hnp HumioNodePool) GetHumioServicePort() int32 {
	if hnp.humioNodeSpec.HumioServicePort != 0 {
		return hnp.humioNodeSpec.HumioServicePort
	}
	return HumioPort
}
// GetHumioESServicePort returns the configured ES service port, or elasticPort.
func (hnp HumioNodePool) GetHumioESServicePort() int32 {
	if hnp.humioNodeSpec.HumioESServicePort != 0 {
		return hnp.humioNodeSpec.HumioESServicePort
	}
	return elasticPort
}
// GetServiceType returns the configured service type, defaulting to ClusterIP.
func (hnp HumioNodePool) GetServiceType() corev1.ServiceType {
	if hnp.humioNodeSpec.HumioServiceType != "" {
		return hnp.humioNodeSpec.HumioServiceType
	}
	return corev1.ServiceTypeClusterIP
}
// InitContainerDisabled reports whether the init container is disabled.
func (hnp HumioNodePool) InitContainerDisabled() bool {
	return hnp.humioNodeSpec.DisableInitContainer
}
// PVCsEnabled reports whether a PVC spec template has been provided.
func (hnp HumioNodePool) PVCsEnabled() bool {
	emptyPersistentVolumeClaimSpec := corev1.PersistentVolumeClaimSpec{}
	return !reflect.DeepEqual(hnp.humioNodeSpec.DataVolumePersistentVolumeClaimSpecTemplate, emptyPersistentVolumeClaimSpec)
}
// TLSEnabled reports whether TLS should be used: cert-manager must be
// available, and the TLS spec (when present) must not disable it.
func (hnp HumioNodePool) TLSEnabled() bool {
	if hnp.tls == nil {
		return helpers.UseCertManager()
	}
	if hnp.tls.Enabled == nil {
		return helpers.UseCertManager()
	}
	return helpers.UseCertManager() && *hnp.tls.Enabled
}
// GetProbeScheme returns the URI scheme probes should use, based on TLS.
func (hnp HumioNodePool) GetProbeScheme() corev1.URIScheme {
	if !hnp.TLSEnabled() {
		return corev1.URISchemeHTTP
	}
	return corev1.URISchemeHTTPS
}
// GetUpdateStrategy returns the configured update strategy, defaulting to
// replace-all-on-update with no minimum ready seconds.
func (hnp HumioNodePool) GetUpdateStrategy() *humiov1alpha1.HumioUpdateStrategy {
	if hnp.humioNodeSpec.UpdateStrategy != nil {
		return hnp.humioNodeSpec.UpdateStrategy
	}
	return &humiov1alpha1.HumioUpdateStrategy{
		Type:            humiov1alpha1.HumioClusterUpdateStrategyReplaceAllOnUpdate,
		MinReadySeconds: 0,
	}
}
// viewGroupPermissionsOrDefault returns the cluster's view group permissions.
func viewGroupPermissionsOrDefault(hc *humiov1alpha1.HumioCluster) string {
	return hc.Spec.ViewGroupPermissions
}
// ViewGroupPermissionsConfigMapName returns "<cluster>-<suffix>".
func ViewGroupPermissionsConfigMapName(hc *humiov1alpha1.HumioCluster) string {
	return fmt.Sprintf("%s-%s", hc.Name, viewGroupPermissionsConfigMapNameSuffix)
}
// AppendEnvVarToEnvVarsIfNotAlreadyPresent appends defaultEnvVar unless a
// variable with the same name already exists; the existing value wins.
func AppendEnvVarToEnvVarsIfNotAlreadyPresent(envVars []corev1.EnvVar, defaultEnvVar corev1.EnvVar) []corev1.EnvVar {
	for _, envVar := range envVars {
		if envVar.Name == defaultEnvVar.Name {
			return envVars
		}
	}
	return append(envVars, defaultEnvVar)
}
// certificateSecretNameOrDefault returns the ingress secret name, or
// "<cluster>-certificate".
func certificateSecretNameOrDefault(hc *humiov1alpha1.HumioCluster) string {
	if hc.Spec.Ingress.SecretName != "" {
		return hc.Spec.Ingress.SecretName
	}
	return fmt.Sprintf("%s-certificate", hc.Name)
}
// esCertificateSecretNameOrDefault returns the ES ingress secret name, or
// "<cluster>-es-certificate".
func esCertificateSecretNameOrDefault(hc *humiov1alpha1.HumioCluster) string {
	if hc.Spec.Ingress.ESSecretName != "" {
		return hc.Spec.Ingress.ESSecretName
	}
	return fmt.Sprintf("%s-es-certificate", hc.Name)
}
// ingressTLSOrDefault returns the ingress TLS setting, defaulting to true.
func ingressTLSOrDefault(hc *humiov1alpha1.HumioCluster) bool {
	if hc.Spec.Ingress.TLS == nil {
		return true
	}
	return *hc.Spec.Ingress.TLS
}
// humioHeadlessServiceAnnotationsOrDefault returns the headless service
// annotations from the cluster spec.
func humioHeadlessServiceAnnotationsOrDefault(hc *humiov1alpha1.HumioCluster) map[string]string {
	return hc.Spec.HumioHeadlessServiceAnnotations
}
// humioPathOrDefault returns the cluster's URL path normalized to start
// with "/", defaulting to "/".
func humioPathOrDefault(hc *humiov1alpha1.HumioCluster) string {
	if hc.Spec.Path != "" {
		if strings.HasPrefix(hc.Spec.Path, "/") {
			return hc.Spec.Path
		} else {
			return fmt.Sprintf("/%s", hc.Spec.Path)
		}
	}
	return "/"
}
// licenseSecretKeyRefOrDefault returns the license secret key reference.
func licenseSecretKeyRefOrDefault(hc *humiov1alpha1.HumioCluster) *corev1.SecretKeySelector {
	return hc.Spec.License.SecretKeyRef
}
|
package main
import "fmt"
// Student is a small demo type used to illustrate value vs. pointer
// method receivers.
type Student struct {
	name string
	age int
}
// changeName has a VALUE receiver: it mutates a copy of the Student, so
// the caller's struct is left unchanged. This is deliberate — paired with
// changeAge it demonstrates the difference between receiver kinds.
func (s Student) changeName(newName string) {
	s.name = newName
}
// changeAge has a POINTER receiver, so the update is visible to the caller.
func (s *Student) changeAge(newAge int) {
	s.age = newAge
}
// main demonstrates value vs. pointer receivers: changeName (value
// receiver) has no visible effect on std1, while changeAge (pointer
// receiver) updates it in place.
func main() {
	std1 := Student{"Bob", 21}
	fmt.Println("New Student:", std1)
	std1.changeName("Bobby")
	std1.changeAge(22)
	// Fixed user-facing typo: "Stduent" -> "Student".
	fmt.Println("Student after changing methods:", std1)
}
|
package handler
import (
"context"
api "github.com/aibotsoft/gen/pinapi"
"github.com/pkg/errors"
"time"
)
// CurrencyJobPeriod is the delay between successful currency collections.
const CurrencyJobPeriod = time.Hour
// CurrencyJob loops forever, collecting and storing currencies once per
// CurrencyJobPeriod. While the network status flag is down it backs off
// for one minute between checks instead of calling the API. Each round is
// bounded by a 10 second timeout.
func (h *Handler) CurrencyJob() {
	for {
		start := time.Now()
		if !h.NetStatus {
			h.log.Info("netStatus_not_ok")
			time.Sleep(time.Minute)
			continue
		}
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
		err := h.CurrencyRound(ctx)
		cancel()
		if err != nil {
			h.log.Error(err)
		} else {
			h.log.Debugw("CurrencyJob_done", "time", time.Since(start))
		}
		time.Sleep(CurrencyJobPeriod)
	}
}
// CurrencyRound performs one collection cycle: fetch the currency list
// and persist it to the store.
func (h *Handler) CurrencyRound(ctx context.Context) error {
	currencies, err := h.CollectCurrency(ctx)
	if err != nil {
		return err
	}
	return h.store.SaveCurrency(ctx, currencies)
}
// CollectCurrency fetches the currency list from the pin API, authenticating
// with the credentials of the "pin-service" account via basic auth carried
// in the context.
func (h *Handler) CollectCurrency(ctx context.Context) ([]api.Currency, error) {
	account, err := h.GetAccount(ctx, "pin-service")
	if err != nil {
		return nil, errors.Wrap(err, "GetAccount error")
	}
	auth := context.WithValue(ctx, api.ContextBasicAuth, api.BasicAuth{UserName: account.Username, Password: account.Password})
	resp, err := h.pinClient.GetCurrencies(auth)
	return resp, err
}
|
// Package numeral provides the ability to create custom positional numeral
// systems in an efficient and performant way. You can create numerals based
// on custom numeral systems and use them at will.
//
//
// Each digit is represented as a circular list that contains all the possible digit values.
//
// Each number is represented as a doubly linked list of circular lists.
//
// Example
//
// // create a slice of runes.
// digitValues := []rune{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'}
//
//  number, err := numeral.NewNumeral(digitValues, "128z")
//
// // will make the number 1290.
// number.Increment()
//
// // will make the number 128y.
// number.Decrement()
//
// //will give you the string representation of the number.
// strnumber:=number.String()
package numeral
import (
"bytes"
"container/list"
"container/ring"
"fmt"
"math"
"strings"
)
// Numeral represents a numeral that is consisted by its digits
// and digit values.
type Numeral struct {
digits *list.List
digitValues []rune
}
// NewNumeral builds a numeral whose digits may take the given values,
// initialized from the textual representation in initial. It returns an
// error when initial contains a character outside values.
func NewNumeral(values []rune, initial string) (*Numeral, error) {
	number := &Numeral{
		digits:      list.New(),
		digitValues: values,
	}
	// initial is scanned byte by byte (as in the original implementation),
	// so digit values are effectively expected to be single-byte runes.
	for _, b := range []byte(initial) {
		digit, err := newDigit(values, rune(b))
		if err != nil {
			return nil, err
		}
		number.digits.PushBack(digit)
	}
	return number, nil
}
// newDigit creates a ring holding every value of the numeral system and
// rotates it so it points at state. It returns an error when state is not
// one of values.
//
// Improvements over the previous version: the state is validated before
// the ring is allocated and filled, and the ring is positioned with a
// single Move(idx) instead of a linear scan.
func newDigit(values []rune, state rune) (*ring.Ring, error) {
	idx := indexOf(state, values)
	if idx == -1 {
		return nil, fmt.Errorf("invalid digit. value: %v does not exist in possible values: %v", state, values)
	}
	// initialize a new empty ring and fill it with the values.
	r := ring.New(len(values))
	for _, e := range values {
		r.Value = e
		r = r.Next()
	}
	// After the fill loop r is back at the element holding values[0], so
	// moving idx steps forward lands exactly on state.
	return r.Move(idx), nil
}
// Sum adds two numerals and expresses the result in the numeral system
// described by values. Each operand may come from a different system.
func Sum(values []rune, number Numeral, number2 Numeral) (*Numeral, error) {
	total := number.Decimal() + number2.Decimal()
	return NewFromDecimal(values, total)
}
// abs returns the absolute value of x.
func abs(x int64) int64 {
	if x >= 0 {
		return x
	}
	return -x
}
// Diff returns the absolute difference between two numerals, expressed in
// the numeral system described by values.
func Diff(values []rune, number Numeral, number2 Numeral) (*Numeral, error) {
	delta := number.Decimal() - number2.Decimal()
	return NewFromDecimal(values, int(abs(int64(delta))))
}
// Increment performs a +1 to the Numeral.
func (n *Numeral) Increment() error {
	// take the first digit from the right and keep going if there are any arithmetic holdings.
	for e := n.digits.Back(); e != nil; e = e.Prev() {
		// get current ring.
		r := e.Value.(*ring.Ring)
		// rotate and update.
		r = r.Next()
		e.Value = r
		// if the digit is not being reset (no arithmetic holdings) then there is no need to
		// proceed in adding on the others.
		// (a digit that lands on digitValues[0] has wrapped around, i.e. produced a carry)
		if r.Value != n.digitValues[0] {
			break
		}
		// If needed add an extra new digit on the left side.
		// The new digit starts at digitValues[0] and is bumped to
		// digitValues[1] by the next loop iteration, which absorbs the carry.
		if e.Prev() == nil {
			d, _ := newDigit(n.digitValues, n.digitValues[0])
			n.digits.PushFront(d)
		}
	}
	return nil
}
// Decrement performs a -1 to the Numeral.
// It returns an error (and leaves the number unchanged) when the numeral
// would underflow, i.e. when a borrow propagates past the leftmost digit.
func (n *Numeral) Decrement() error {
	// take the first digit from the right and keep going if there are any arithmetic holdings or if the number is 0.
	for d := n.digits.Back(); d != nil; d = d.Prev() {
		// get current ring.
		r := d.Value.(*ring.Ring)
		// rotate and update
		rNext := r.Prev()
		d.Value = rNext
		// if the digit has not returned to it's last state then
		// there is no need to continue.
		// (wrapping to the highest digit value means a borrow occurred)
		if rNext.Value != n.digitValues[len(n.digitValues)-1] {
			break
		}
		// A borrow past the leftmost digit means underflow: restore the
		// digit and report the failure.
		if d.Prev() == nil {
			d.Value = r
			return fmt.Errorf("numeral: can not Decrement")
		}
	}
	return nil
}
// Decimal converts a numeral to a decimal integer by summing
// index(digit) * base^position from the least significant digit up,
// where the base is the number of digit values.
func (n *Numeral) Decimal() int {
	dec := 0
	di := 0
	for d := n.digits.Back(); d != nil; d = d.Prev() {
		// get current ring.
		r := d.Value.(*ring.Ring)
		// get the index of the ring.
		i := indexOf(r.Value.(rune), n.digitValues)
		// Add digit's decimal counterpart to the decimal sum.
		dec = dec + i*powInt(len(n.digitValues), di)
		di++
	}
	return dec
}
// Add adds number to n in place. The result keeps n's digit values.
func (n *Numeral) Add(number Numeral) error {
	total := n.Decimal() + number.Decimal()
	replacement, err := NewFromDecimal(n.digitValues, total)
	if err != nil {
		return err
	}
	n.digits = replacement.digits
	return nil
}
// NewFromDecimal creates a numeral from a non-negative decimal integer,
// expressed in the numeral system described by values.
//
// The previous implementation shadowed its loop variable (`quotient :=`
// inside the loop), so the outer `quotient > 0` condition could never
// change and the loop only exited through the inner break; it also
// panicked on a negative slice index for negative input. This rewrite
// uses a single divide-and-collect loop and reports negative input as an
// explicit error.
func NewFromDecimal(values []rune, decimal int) (*Numeral, error) {
	if decimal < 0 {
		return nil, fmt.Errorf("numeral: cannot create numeral from negative decimal %d", decimal)
	}
	divisor := len(values)
	// Collect digits least-significant first.
	var digits []rune
	dividend := decimal
	for dividend >= divisor {
		digits = append(digits, values[dividend%divisor])
		dividend /= divisor
	}
	digits = append(digits, values[dividend])
	// Emit most-significant first.
	sNumeral := new(strings.Builder)
	for i := len(digits) - 1; i >= 0; i-- {
		sNumeral.WriteRune(digits[i])
	}
	return NewNumeral(values, sNumeral.String())
}
// powInt returns x raised to the power y using pure integer arithmetic.
// The previous implementation round-tripped through math.Pow, which loses
// precision once the result exceeds 2^53. Negative exponents return 0,
// matching the old int(math.Pow(...)) truncation for |x| > 1 (callers in
// this package only ever pass y >= 0).
func powInt(x, y int) int {
	if y < 0 {
		return 0
	}
	result := 1
	for i := 0; i < y; i++ {
		result *= x
	}
	return result
}
// indexOf returns the position of element within data, or -1 when the
// element is not present.
func indexOf(element rune, data []rune) int {
	for i := range data {
		if data[i] == element {
			return i
		}
	}
	return -1
}
// String returns the textual representation of the Numeral, most
// significant digit first.
func (n Numeral) String() string {
	var sb strings.Builder
	for e := n.digits.Front(); e != nil; e = e.Next() {
		digit := e.Value.(*ring.Ring)
		sb.WriteRune(digit.Value.(rune))
	}
	return sb.String()
}
|
package model_test
import (
"github.com/igogorek/http-rest-api-go/internal/app/model"
"github.com/stretchr/testify/assert"
"strings"
"testing"
)
// TestUser_Validate table-tests User.Validate across email and password
// boundary cases: empty/invalid email, password lengths 0/5/6/100/101,
// and a pre-encrypted password with no plaintext.
func TestUser_Validate(t *testing.T) {
	testCases := []struct {
		name        string
		prepareUser func() *model.User
		isValid     bool
	}{
		{
			name: "valid",
			prepareUser: func() *model.User {
				return model.TestUser()
			},
			isValid: true,
		},
		{
			name: "emptyEmail",
			prepareUser: func() *model.User {
				user := model.TestUser()
				user.Email = ""
				return user
			},
			isValid: false,
		},
		{
			name: "invalidEmail",
			prepareUser: func() *model.User {
				user := model.TestUser()
				user.Email = "invalid@email"
				return user
			},
			isValid: false,
		},
		{
			name: "emptyPassword",
			prepareUser: func() *model.User {
				user := model.TestUser()
				user.Password = ""
				return user
			},
			isValid: false,
		},
		{
			name: "tooShortPassword",
			prepareUser: func() *model.User {
				user := model.TestUser()
				user.Password = "12345"
				return user
			},
			isValid: false,
		},
		{
			name: "shortPassword",
			prepareUser: func() *model.User {
				user := model.TestUser()
				user.Password = "123456"
				return user
			},
			isValid: true,
		},
		{
			name: "tooLongPassword",
			prepareUser: func() *model.User {
				user := model.TestUser()
				// 101 characters: one past the maximum accepted length.
				// (strings.Repeat replaces the previous hand-rolled builder loop.)
				user.Password = strings.Repeat("0123456789", 10) + "1"
				return user
			},
			isValid: false,
		},
		{
			name: "longPassword",
			prepareUser: func() *model.User {
				user := model.TestUser()
				// Exactly 100 characters: the longest accepted password.
				user.Password = strings.Repeat("0123456789", 10)
				return user
			},
			isValid: true,
		},
		{
			name: "encryptedPassword",
			prepareUser: func() *model.User {
				user := model.TestUser()
				user.Password = ""
				user.EncryptedPassword = "asdfadfadsf"
				return user
			},
			isValid: true,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			if tc.isValid {
				assert.NoError(t, tc.prepareUser().Validate())
			} else {
				assert.Error(t, tc.prepareUser().Validate())
			}
		})
	}
}
// TestUser_BeforeCreate verifies that BeforeCreate succeeds for a valid
// user and populates EncryptedPassword.
func TestUser_BeforeCreate(t *testing.T) {
	u := model.TestUser()
	err := u.BeforeCreate()
	assert.NoError(t, err)
	assert.NotEmpty(t, u.EncryptedPassword)
}
|
/**
Remember to configure this module's parameters in the configuration JSON file, otherwise the module cannot be used.
*/
package login
import (
"fmt"
"github.com/dming/lodos/conf"
"github.com/dming/lodos/gate"
"github.com/dming/lodos/module"
"github.com/dming/lodos/module/base"
"github.com/dming/lodos/log"
"github.com/go-redis/redis"
"github.com/dming/lodos/utils/uuid"
)
// Module is the factory the framework calls to instantiate this module.
var Module = func() module.Module {
	m := new(Login)
	return m
}
// Login implements the login module. db holds an optional redis client
// that is set up via SetDBClient.
type Login struct {
	basemodule.BaseModule
	db *redis.Client
}
// GetType returns the module type name.
func (m *Login) GetType() string {
	// Important: this must match the Module entry in the configuration file.
	return "Login"
}
// Version returns the module's code version.
func (m *Login) Version() string {
	// Useful for identifying the deployed code version when monitoring.
	return "1.0.0"
}
// OnInit wires the module into the framework and registers its RPC handlers.
func (m *Login) OnInit(app module.AppInterface, settings *conf.ModuleSettings) {
	m.BaseModule.OnInit(app, m, settings)
	//m.SetDBClient("127.0.0.1:6379", "")
	//m.GetServer().RegisterGo("HD_Login", m.login) // by convention all client-facing request handlers start with Handler_
	m.GetServer().RegisterGo("getRand", m.getRand) // demonstrates RPC calls between backend modules
	m.GetServer().Register("HD_Robot", m.robot)
	m.GetServer().RegisterGo("HD_Robot_GO", m.robot) // by convention all client-facing request handlers start with Handler_
	m.GetServer().RegisterGo("HD_Test", m.Test)
}
// Run is the module's main loop; this module has no background work and
// returns immediately.
func (m *Login) Run(closeSig chan bool) {
}
// OnDestroy tears the module down.
func (m *Login) OnDestroy() {
	// Never forget to shut down the RPC server.
	m.GetServer().OnDestroy()
}
// robot echoes the userName and passWord fields from msg back to the
// caller; both must be present.
func (m *Login) robot(session gate.Session, msg map[string]interface{}) (result string, err error) {
	//time.Sleep(1)
	//log.Info("function on call robot: %s", string(r))
	if msg["userName"] == nil || msg["passWord"] == nil {
		err = fmt.Errorf("userName or passWord cannot be nil")
		return
	}
	return fmt.Sprintf("%s, %s", msg["userName"], msg["passWord"]), nil
}
// getRand demonstrates an RPC call between backend modules by echoing
// every argument back to the caller.
func (m *Login) getRand(session gate.Session, by []byte, mp map[string]interface{}, f float64, i int32, b bool) (result string, err error) {
	// The original format string had a single %s verb for five arguments,
	// so fmt appended "%!(EXTRA ...)" noise to the result; give each
	// argument its own verb.
	return fmt.Sprintf("My is Login Module %s, %v, %v, %v, %v", by, mp, f, i, b), nil
}
// Test logs the session details plus the optional "msg" argument together
// with a freshly generated UUID. Always returns nil.
func (m *Login) Test(session gate.Session, args map[string]interface{}) error {
	log.Info("IP is %s,\n serverID is %s,\n sessionID is %s,\n userID is %s, \n",
		session.GetIP(), session.GetServerid(), session.GetSessionid(), session.GetUserid())
	var msg string = ""
	if args["msg"] != nil {//&& reflect.TypeOf(args["msg"]) == reflect.TypeOf(reflect.String) {
		msg = args["msg"].(string)
	}
	uuid := uuid.Rand()
	log.Info("Get a test request from mqtt client,\n uuid: %s \n message is %s", uuid.Hex(), msg)
	return nil
}
// SetDBClient connects to redis at addr using the given password, verifies
// the connection with a Ping, and stores the client on the module.
// It returns the Ping error when the connection fails.
func (m *Login) SetDBClient(addr string, password string, args ...interface{}) (error) {
	client := redis.NewClient(&redis.Options{
		Addr: addr, //"127.0.0.1:6379",
		Password: password, // "guest",
		DB : 0,
	})
	pong, err := client.Ping().Result()
	if err != nil {
		log.Error("connect to redis %s fail, err is [%s]", addr, err)
		return err
	}
	m.db = client
	log.Info("connect to redis %s success, ping result [%s]", addr, pong)
	//
	return nil
}
package main
import (
"fmt"
)
// fib returns the n-th Fibonacci number (fib(0)=0, fib(1)=1).
//
// Rewritten iteratively: the previous double recursion ran in O(2^n) time
// and overflowed the stack for huge n, while this version runs in O(n)
// time and O(1) space with identical results (modulo uint wrap-around for
// very large n, which the recursive version shared).
func fib(n uint) uint {
	var a, b uint = 0, 1
	for ; n > 0; n-- {
		a, b = b, a+b
	}
	return a
}
// main prints the first ten Fibonacci numbers.
func main() {
	for n := uint(1); n <= 10; n++ {
		fmt.Printf("i=%d\t%d\n", n, fib(n))
	}
	// A huge argument would blow the stack with the recursive fib, e.g.:
	//fmt.Println(fib(10000000000000000))
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated from the elasticsearch-specification DO NOT EDIT.
// https://github.com/elastic/elasticsearch-specification/tree/33e8a1c9cad22a5946ac735c4fba31af2da2cec2
package types
import (
"bytes"
"encoding/json"
"errors"
"io"
"strconv"
)
// TrainedModelsRecord type.
//
// https://github.com/elastic/elasticsearch-specification/blob/33e8a1c9cad22a5946ac735c4fba31af2da2cec2/specification/cat/ml_trained_models/types.ts#L23-L115
type TrainedModelsRecord struct {
	// CreateTime The time the model was created.
	CreateTime DateTime `json:"create_time,omitempty"`
	// CreatedBy Information about the creator of the model.
	CreatedBy *string `json:"created_by,omitempty"`
	// DataFrameAnalysis The analysis used by the data frame to build the model.
	DataFrameAnalysis *string `json:"data_frame.analysis,omitempty"`
	// DataFrameCreateTime The time the data frame analytics job was created.
	DataFrameCreateTime *string `json:"data_frame.create_time,omitempty"`
	// DataFrameId The identifier for the data frame analytics job that created the model.
	// Only displayed if the job is still available.
	DataFrameId *string `json:"data_frame.id,omitempty"`
	// DataFrameSourceIndex The source index used to train in the data frame analysis.
	DataFrameSourceIndex *string `json:"data_frame.source_index,omitempty"`
	// Description A description of the model.
	Description *string `json:"description,omitempty"`
	// HeapSize The estimated heap size to keep the model in memory.
	HeapSize ByteSize `json:"heap_size,omitempty"`
	// Id The model identifier.
	Id *string `json:"id,omitempty"`
	// IngestCount The total number of documents that are processed by the model.
	IngestCount *string `json:"ingest.count,omitempty"`
	// IngestCurrent The total number of documents that are currently being handled by the model.
	IngestCurrent *string `json:"ingest.current,omitempty"`
	// IngestFailed The total number of failed ingest attempts with the model.
	IngestFailed *string `json:"ingest.failed,omitempty"`
	// IngestPipelines The number of pipelines that are referencing the model.
	IngestPipelines *string `json:"ingest.pipelines,omitempty"`
	// IngestTime The total time spent processing documents with the model.
	IngestTime *string `json:"ingest.time,omitempty"`
	// License The license level of the model.
	License *string `json:"license,omitempty"`
	// Operations The estimated number of operations to use the model.
	// This number helps to measure the computational complexity of the model.
	Operations *string `json:"operations,omitempty"`
	Type *string `json:"type,omitempty"`
	// Version The version of Elasticsearch when the model was created.
	Version *string `json:"version,omitempty"`
}
// UnmarshalJSON decodes a TrainedModelsRecord, accepting both the long and
// the short (cat-API alias) column names for every field.
//
// The original body repeated the same raw-decode/unquote stanza fifteen
// times; it is factored into a single local helper with identical behavior:
// the value is kept verbatim when it is not a quoted JSON string.
func (s *TrainedModelsRecord) UnmarshalJSON(data []byte) error {
	dec := json.NewDecoder(bytes.NewReader(data))
	// decodeString reads the next value as raw JSON and unquotes it when it
	// is a string literal; otherwise the raw text is returned unchanged.
	decodeString := func() (*string, error) {
		var tmp json.RawMessage
		if err := dec.Decode(&tmp); err != nil {
			return nil, err
		}
		o := string(tmp)
		if unquoted, err := strconv.Unquote(o); err == nil {
			o = unquoted
		}
		return &o, nil
	}
	for {
		t, err := dec.Token()
		if err != nil {
			if errors.Is(err, io.EOF) {
				break
			}
			return err
		}
		switch t {
		case "create_time", "ct":
			if err := dec.Decode(&s.CreateTime); err != nil {
				return err
			}
		case "created_by", "c", "createdBy":
			if s.CreatedBy, err = decodeString(); err != nil {
				return err
			}
		case "data_frame.analysis", "dfa", "dataFrameAnalyticsAnalysis":
			if s.DataFrameAnalysis, err = decodeString(); err != nil {
				return err
			}
		case "data_frame.create_time", "dft", "dataFrameAnalyticsTime":
			if s.DataFrameCreateTime, err = decodeString(); err != nil {
				return err
			}
		case "data_frame.id", "dfid", "dataFrameAnalytics":
			if s.DataFrameId, err = decodeString(); err != nil {
				return err
			}
		case "data_frame.source_index", "dfsi", "dataFrameAnalyticsSrcIndex":
			if s.DataFrameSourceIndex, err = decodeString(); err != nil {
				return err
			}
		case "description", "d":
			if s.Description, err = decodeString(); err != nil {
				return err
			}
		case "heap_size", "hs", "modelHeapSize":
			if err := dec.Decode(&s.HeapSize); err != nil {
				return err
			}
		case "id":
			if err := dec.Decode(&s.Id); err != nil {
				return err
			}
		case "ingest.count", "ic", "ingestCount":
			if s.IngestCount, err = decodeString(); err != nil {
				return err
			}
		case "ingest.current", "icurr", "ingestCurrent":
			if s.IngestCurrent, err = decodeString(); err != nil {
				return err
			}
		case "ingest.failed", "if", "ingestFailed":
			if s.IngestFailed, err = decodeString(); err != nil {
				return err
			}
		case "ingest.pipelines", "ip", "ingestPipelines":
			if s.IngestPipelines, err = decodeString(); err != nil {
				return err
			}
		case "ingest.time", "it", "ingestTime":
			if s.IngestTime, err = decodeString(); err != nil {
				return err
			}
		case "license", "l":
			if s.License, err = decodeString(); err != nil {
				return err
			}
		case "operations", "o", "modelOperations":
			if s.Operations, err = decodeString(); err != nil {
				return err
			}
		case "type":
			if s.Type, err = decodeString(); err != nil {
				return err
			}
		case "version", "v":
			if err := dec.Decode(&s.Version); err != nil {
				return err
			}
		}
	}
	return nil
}
// NewTrainedModelsRecord returns a TrainedModelsRecord.
func NewTrainedModelsRecord() *TrainedModelsRecord {
	return &TrainedModelsRecord{}
}
|
package main
import (
"fmt"
"os"
"testing"
"time"
)
// The benchmark function must run the target code b.N times.
// During benchmark execution, b.N is adjusted until the benchmark function lasts long enough to be timed reliably.
// allocs/op means how many distinct memory allocations occurred per op (single iteration).
// B/op is how many bytes were allocated per op.
// benchmarkFP runs the floating-point workload with parameter i once per
// benchmark iteration.
func benchmarkFP(i int, b *testing.B) {
	for iter := 0; iter < b.N; iter++ {
		RunFpBenchmark(i)
	}
}
// benchmarkSort runs the concurrent-sort workload once per benchmark
// iteration with the given thread count and slice size.
func benchmarkSort(amountOfThreads int, sizeOfSlice int, b *testing.B) {
	for iter := 0; iter < b.N; iter++ {
		RunSortBenchmark(amountOfThreads, sizeOfSlice)
	}
}
// benchmarkStringConcat runs the string-concatenation workload once per
// benchmark iteration with the given thread count and slice size.
func benchmarkStringConcat(amountOfThreads int, sizeOfSlice int, b *testing.B) {
	for iter := 0; iter < b.N; iter++ {
		RunStringConcat(amountOfThreads, sizeOfSlice)
	}
}
// Historical benchmark configurations, kept commented out for reference.
// NOTE(review): the first two commented lines both reuse the name
// BenchmarkFpBenchmark1 — deduplicate before re-enabling them.
// func BenchmarkFpBenchmark1(b *testing.B) { benchmarkFP(1, b) }
// func BenchmarkFpBenchmark1(b *testing.B) { benchmarkFP(10, b) }
// func BenchmarkFpBenchmark3(b *testing.B) { benchmarkFP(100, b) }
// func BenchmarkFpBenchmark2(b *testing.B) { benchmarkFP(1000, b) }
// func BenchmarkSort1(b *testing.B) { benchmarkSort(1, 100, b) }
// func BenchmarkSort2(b *testing.B) { benchmarkSort(1, 1000, b) }
// func BenchmarkSort3(b *testing.B) { benchmarkSort(1, 10000, b) }
// Active sort matrix: (threads, slice size).
func BenchmarkSort1(b *testing.B) { benchmarkSort(1000, 100000, b) }
func BenchmarkSort2(b *testing.B) { benchmarkSort(1000, 10000, b) }
func BenchmarkSort3(b *testing.B) { benchmarkSort(100, 100000, b) }
func BenchmarkSort4(b *testing.B) { benchmarkSort(100, 10000, b) }
// func BenchmarkStringConcat1(b *testing.B) { benchmarkStringConcat(1, 1000, b) }
// Active string-concat matrix: (threads, slice size).
func BenchmarkStringConcat1(b *testing.B) { benchmarkStringConcat(100, 100000, b) }
func BenchmarkStringConcat2(b *testing.B) { benchmarkStringConcat(100, 10000, b) }
func BenchmarkStringConcat3(b *testing.B) { benchmarkStringConcat(1, 100000, b) }
func BenchmarkStringConcat4(b *testing.B) { benchmarkStringConcat(1, 10000, b) }
// func BenchmarkStringConcat3(b *testing.B) { benchmarkStringConcat(1, 100000, b) }
// func BenchmarkStringConcat2(b *testing.B) { benchmarkStringConcat(100, 100000, b) }
// TestMain prepares the console, runs the suite, then reports memory usage
// and total wall-clock duration before exiting with the suite's status code.
func TestMain(m *testing.M) {
	ClearScreen()
	PrintSystemInfo()
	PrintMemUsage()
	began := time.Now()
	exitCode := m.Run()
	PrintMemUsage()
	fmt.Println("Benchmark done in: ", time.Since(began))
	os.Exit(exitCode)
}
|
// Copyright (C) 2019 Cisco Systems Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package vpplink
import (
"os"
"sync"
"github.com/sirupsen/logrus"
"github.com/projectcalico/vpp-dataplane/v3/vpplink/generated"
)
// VppLink wraps a generated VPP API session together with interface-event
// watcher bookkeeping.
type VppLink struct {
	*generated.Vpp
	// pid of this process (set from os.Getpid in NewVppLink).
	pid uint32
	// watcherLock presumably guards interfaceEventWatchers — confirm against
	// the watcher methods elsewhere in this package.
	watcherLock sync.Mutex
	interfaceEventWatchers []*interfaceEventWatcher
	// stopEvents tears down event streaming; NOTE(review): appears to stay
	// nil until events are started — verify before calling.
	stopEvents func() error
}
// NewVppLink connects to the VPP API socket and wraps the session together
// with this process's pid. The *VppLink is returned even when NewVpp failed,
// matching the original contract: callers must check err.
func NewVppLink(socket string, logger *logrus.Entry) (*VppLink, error) {
	vpp, err := generated.NewVpp(socket, logger)
	link := &VppLink{
		Vpp: vpp,
		pid: uint32(os.Getpid()),
	}
	return link, err
}
|
package main
import "fmt"
// half returns number divided by two (truncated toward zero) together with
// a flag reporting whether number was even.
func half(number int) (int, bool) {
	quotient := number / 2
	isEven := number%2 == 0
	return quotient, isEven
}
// main demonstrates half on a few sample inputs, printing both return values.
func main() {
	fmt.Print("half(1) returns ")
	fmt.Println(half(1))
	fmt.Print("half(2) returns ")
	fmt.Println(half(2))
	quotient, isEven := half(5)
	fmt.Println(quotient, isEven)
}
|
/*
Run code from given body information such as
body.stdIn, body.code, body.language, etc.
*/
package main
import (
"fmt"
"io"
"io/ioutil"
"os/exec"
)
// path to /tmp/ folder and its files' names
const (
	scripts = "scripts" // directory containing the run-<ext>.bash helper scripts
	codeFile = "tmp/code" // base path (extension appended per language) for submitted code
)
// Run writes body.code to tmp/code.<language extension>, executes the
// matching scripts/run-<ext>.bash helper, feeds body.stdIn to it, and
// returns the captured stdout and stderr.
func Run(body io.ReadCloser) (stdout, stderr []byte, err error) {
	// Decode request body.
	reqbody, err := ReadBody(body)
	if err != nil {
		return
	}
	// Lazily load the language table on first use.
	if modes == nil {
		modes, err = GetModes()
		if err != nil {
			return
		}
	}
	// File extension and target file name for the requested language.
	extension := modes[reqbody.Language][1]
	file := fmt.Sprintf("%s.%s", codeFile, extension)
	if err = ioutil.WriteFile(file, []byte(reqbody.Code), 0644); err != nil {
		return
	}
	// Script file to be run.
	script := fmt.Sprintf("%s/run-%s.bash", scripts, extension)
	cmd := exec.Command("bash", script)
	// Previously the pipe and Start errors were all discarded, which risked
	// a nil-pipe panic or silently empty output; report them instead.
	cmdIn, err := cmd.StdinPipe()
	if err != nil {
		return
	}
	cmdOut, err := cmd.StdoutPipe()
	if err != nil {
		return
	}
	cmdErr, err := cmd.StderrPipe()
	if err != nil {
		return
	}
	if err = cmd.Start(); err != nil {
		return
	}
	// Feed the submitted stdin, then signal EOF.
	if reqbody.StdIn != "" {
		cmdIn.Write([]byte(reqbody.StdIn))
	}
	cmdIn.Close()
	// Collect everything the script produced.
	stdout, _ = ioutil.ReadAll(cmdOut)
	stderr, _ = ioutil.ReadAll(cmdErr)
	// A non-zero exit status is expected when the submitted code fails; the
	// caller inspects stderr, so Wait's error is deliberately ignored.
	cmd.Wait()
	return
}
|
package websock
import(
"fmt"
"net/http"
"github.com/henrylee2cn/pholcus/common/websocket"
)
// SockServer pushes byte payloads to a single connected websocket client.
type SockServer struct{
	// client is the most recently connected websocket; NOTE(review): it is
	// overwritten by each new connection and read without synchronization in
	// send — confirm a single client is the intended model.
	client *websocket.Conn
	// data carries outbound payloads from SendData to the Run loop.
	data chan([]byte)
}
// Server is the package-wide singleton used by Start.
var (
	Server = NewServer()
)
// NewServer constructs a SockServer with an unbuffered outbound data channel.
func NewServer() *SockServer {
	s := &SockServer{}
	s.data = make(chan []byte)
	return s
}
// Start registers the websocket endpoint at /sock on the default mux.
// (Serving still requires an http.ListenAndServe call elsewhere.)
func Start() {
	http.Handle("/sock", websocket.Handler(Server.Run))
	// gofmt cleanliness: dropped the stray trailing semicolon.
	fmt.Println("/sock start for websocket")
}
// Run is the websocket handler: it registers the connection and then pumps
// payloads queued on m.data to the peer until the handler exits.
func (m *SockServer) Run(ws *websocket.Conn) {
	defer func() {
		if err := ws.Close(); err != nil {
			fmt.Println("Websocket could not be closed", err.Error())
		}
	}()
	// NOTE(review): each new connection clobbers m.client — confirm only one
	// concurrent client is expected.
	m.client = ws
	fmt.Println(ws.Request().RemoteAddr, " connected.")
	// The original wrapped a single-case select whose `break` only exited
	// the select (a no-op); a plain blocking receive is equivalent.
	for {
		m.send(<-m.data)
	}
}
// SendData queues cmd for delivery to the connected client. It blocks until
// the Run loop receives the payload.
func (m *SockServer) SendData(cmd []byte) {
	m.data <- cmd
}
// send would forward cmd to the connected client.
//
// NOTE(review): the actual transmission is commented out, so apart from the
// nil check this is currently a no-op — confirm whether the JSON send should
// be re-enabled.
func (m *SockServer) send(cmd []byte){
	if m.client != nil {
		//if err := websocket.JSON.Send(m.client, cmd); err != nil {
		//	// we could not send the message to a peer
		//	fmt.Println("Could not send message ", err.Error())
		//}
	}
}
|
package nkb
import (
"strconv"
"os/exec"
"fmt"
"strings"
)
// enable remaps the capslock scancode (3a) to KEY_0 so the daemon can use it
// as its mode trigger. The trailing "wait" keeps the shell alive until the
// backgrounded setkeycodes call completes.
func (a *app) enable() {
	a.start("" +
	"setkeycodes 3a " + strconv.Itoa(KEY_0) + " &" + // capslock
	"wait")
}
// disable restores the default capslock keycode (58).
func (a *app) disable() {
	a.start("" +
	"setkeycodes 3a 58 &" + // capslock
	"wait")
}
// capsModeOn enters caps mode: alt becomes KEY_1, backspace becomes delete,
// and a/w/d/s become the arrow keys.
func (a *app) capsModeOn() {
	a.state.capsMode = true
	a.state.capsMode2 = false
	a.start(strings.Join([]string{
		"setkeycodes 38 " + strconv.Itoa(KEY_1), // alt
		"setkeycodes 0e 111",                    // bksp -> del
		"setkeycodes 1e 105",                    // a -> left
		"setkeycodes 11 103",                    // w -> up
		"setkeycodes 20 106",                    // d -> right
		"setkeycodes 1f 108",                    // s -> down
		"wait",
	}, " &"))
}
// capsModeOff leaves both caps modes, restoring the default keycodes for
// alt, backspace, and a/w/d/s.
func (a *app) capsModeOff() {
	a.state.capsMode = false
	a.state.capsMode2 = false
	a.start(strings.Join([]string{
		"setkeycodes 38 56", // restore alt
		"setkeycodes 0e 14", // restore bksp
		"setkeycodes 1e 30", // restore a
		"setkeycodes 11 17", // restore w
		"setkeycodes 20 32", // restore d
		"setkeycodes 1f 31", // restore s
		"wait",
	}, " &"))
}
// capsMode2On enters the second caps mode: alt becomes KEY_1, backspace
// becomes delete, and a/w/d/s become home/pgup/end/pgdn.
func (a *app) capsMode2On() {
	a.state.capsMode = false
	a.state.capsMode2 = true
	a.start(strings.Join([]string{
		"setkeycodes 38 " + strconv.Itoa(KEY_1), // alt
		"setkeycodes 0e 111",                    // bksp -> del
		"setkeycodes 1e 102",                    // a -> home
		"setkeycodes 11 104",                    // w -> pgup
		"setkeycodes 20 107",                    // d -> end
		"setkeycodes 1f 109",                    // s -> pgdn
		"wait",
	}, " &"))
}
var user string // user who is logged in
// init resolves the user attached to display :0 (via who) so later xdotool
// commands can run in that user's session.
func init() {
	var err error
	if user, err = sh(`who | grep :0 | grep -oP '^.*?(?=\s)'`); err != nil {
		fmt.Printf("Error during get logged in user: %s\n", err.Error())
	} else {
		user = strings.Trim(user, "\n")
		fmt.Printf("Logged in user: %s\n", user)
	}
}
// send injects keystrokes into the logged-in user's X session via xdotool.
//
// NOTE(review): keys and user are interpolated directly into a shell command
// line — with attacker-influenced input this is shell injection; confirm all
// callers pass trusted, fixed key names only.
func (a *app) send(keys string) {
	a.start(`su ` + user + ` -c "export DISPLAY=':0.0'; xdotool key ` + keys + `"`)
}
// start executes cmd through sh, reporting (but not propagating) failures.
func (a *app) start(cmd string) {
	out, err := sh(cmd)
	if err != nil {
		fmt.Printf("Error during exec command: %s, out: %s\n", err.Error(), out)
	}
}
// sh runs cmd through /bin/sh -c and returns its standard output.
//
// Bug fix: the original returned ("", nil) on failure, discarding the error
// and leaving every caller's error check dead code; propagate it instead.
func sh(cmd string) (string, error) {
	out, err := exec.Command("/bin/sh", "-c", cmd).Output()
	if err != nil {
		return "", err
	}
	return string(out), nil
}
/*
So, I wrote myself a one-liner which printed out a snake on the console. It's a bit of fun, and I wondered how I might condense my code...
Here's a (short) example output:
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
Here's the specs:
In each line, a single non-whitespace character (whichever you like) is printed to the console, initially with 29 to 31 spaces padding to the left of it.
Each iteration, a random decision is made between these three actions
The amount of padding decreases by 1
The amount of padding remains the same
The amount of padding increases by 1
Do this 30 times, to print a 30-segment-long snake to the console.
The shortest answer in bytes wins.
*/
package main
import (
"fmt"
"math/rand"
"strings"
"time"
)
// main seeds the RNG and prints a 30-segment snake starting with a random
// padding of 29 to 31 spaces.
func main() {
	rand.Seed(time.Now().UnixNano())
	snake(29+rand.Intn(3), 30)
}
// snake prints an n-segment snake: one "+" per line, preceded by a padding
// that randomly drifts left, stays put, or drifts right each step.
//
// Robustness: s is clamped at zero so a long leftward drift (possible for
// small starting paddings) cannot make strings.Repeat panic on a negative
// count. For the 29..31/30 values used by main the behavior is unchanged.
func snake(s, n int) {
	for i := 0; i < n; i++ {
		if s < 0 {
			s = 0
		}
		fmt.Printf("%s+\n", strings.Repeat(" ", s))
		s += 1 - rand.Intn(3) // -1, 0, or +1
	}
}
|
package jsonapi
import (
"encoding/json"
)
// LinkDescriptor is a JSON API link: either a bare URL-template string or a
// {href, type} object (see UnmarshalJSON).
type LinkDescriptor struct {
	Href URLTemplate `json:"href"`
	Type string `json:"type"`
}
// UnmarshalJSON accepts either a full {href, type} link descriptor object or
// a bare string, which is treated as the URL template alone.
func (ld *LinkDescriptor) UnmarshalJSON(data []byte) error {
	var full struct {
		Href URLTemplate `json:"href"`
		Type string      `json:"type"`
	}
	if err := json.Unmarshal(data, &full); err == nil {
		ld.Href = full.Href
		ld.Type = full.Type
		return nil
	}
	// Not an object — hopefully a string for the URL template.
	return json.Unmarshal(data, &ld.Href)
}
type ResourceLink struct {
Id string
Href string
Type string
}
func (rl ResourceLink) MarshalJSON() ([]byte, error) {
if rl.Id == "" {
return json.Marshal(nil)
}
if rl.Href == "" && rl.Type == "" {
return json.Marshal(rl.Id)
}
src := struct {
Id string `json:"id"`
Href string `json:"href,omitempty"`
Type string `json:"type,omitempty"`
}{rl.Id, rl.Href, rl.Type}
return json.Marshal(src)
}
func (rl *ResourceLink) UnmarshalJSON(data []byte) error {
// assume they gave us a full ResourceLink object
var dst struct {
Id string `json:"id"`
Href string `json:"href,omitempty"`
Type string `json:"type,omitempty"`
}
if err := json.Unmarshal(data, &dst); err != nil {
// failed, hopefully they gave us a string id
if err = json.Unmarshal(data, &rl.Id); err != nil {
return err
}
} else {
rl.Id = dst.Id
rl.Href = dst.Href
rl.Type = dst.Href
}
return nil
}
type CollectionLink struct {
Ids []string
Href string
Type string
}
func (cl CollectionLink) MarshalJSON() ([]byte, error) {
if cl.Href == "" && cl.Type == "" {
return json.Marshal(cl.Ids)
}
src := struct {
Ids []string `json:"ids"`
Href string `json:"href"`
Type string `json:"type"`
}{cl.Ids, cl.Href, cl.Type}
return json.Marshal(src)
}
func (cl *CollectionLink) UnmarshalJSON(data []byte) error {
// assume they gave us a full CollectionLink object
var dst struct {
Ids []string `json:"ids"`
Href string `json:"href"`
Type string `json:"type"`
}
if err := json.Unmarshal(data, &dst); err != nil {
// failed, hopefully they gave us an array of string ids
if err = json.Unmarshal(data, &cl.Ids); err != nil {
return err
}
} else {
cl.Ids = dst.Ids
cl.Href = dst.Href
cl.Type = dst.Type
}
return nil
}
// Links stores a resource's relationships, keyed by relation name: ToOne for
// single-resource links and ToMany for collections.
type Links struct {
	ToOne map[string]ResourceLink
	ToMany map[string]CollectionLink
}
// NewLinks returns a Links value with both relation maps pre-allocated.
func NewLinks() *Links {
	return &Links{
		ToOne:  make(map[string]ResourceLink),
		ToMany: make(map[string]CollectionLink),
	}
}
// IsEmpty reports whether no relationships of either kind are present.
func (l *Links) IsEmpty() bool {
	return len(l.ToOne)+len(l.ToMany) == 0
}

// HasOne reports whether a to-one relationship exists for relation.
func (l *Links) HasOne(relation string) bool {
	_, ok := l.ToOne[relation]
	return ok
}

// HasMany reports whether a to-many relationship exists for relation.
func (l *Links) HasMany(relation string) bool {
	_, ok := l.ToMany[relation]
	return ok
}

// One returns the id of the to-one relationship ("" when absent).
func (l *Links) One(relation string) string {
	link := l.ToOne[relation]
	return link.Id
}

// Many returns the ids of the to-many relationship (nil when absent).
func (l *Links) Many(relation string) []string {
	link := l.ToMany[relation]
	return link.Ids
}

// GetOne decodes the to-one relationship's id into dst.
func (l *Links) GetOne(relation string, dst interface{}) error {
	return UnmarshalId(l.One(relation), dst)
}

// GetMany decodes the to-many relationship's ids into dst.
func (l *Links) GetMany(relation string, dst interface{}) error {
	return UnmarshalIds(l.Many(relation), dst)
}
// LinkOne records a to-one relationship, lazily allocating the map so the
// zero Links value is usable.
func (l *Links) LinkOne(relation string, id string) {
	if l.ToOne == nil {
		l.ToOne = map[string]ResourceLink{}
	}
	l.ToOne[relation] = ResourceLink{Id: id}
}

// LinkMany records a to-many relationship, lazily allocating the map so the
// zero Links value is usable.
func (l *Links) LinkMany(relation string, ids []string) {
	if l.ToMany == nil {
		l.ToMany = map[string]CollectionLink{}
	}
	l.ToMany[relation] = CollectionLink{Ids: ids}
}
// MarshalJSON flattens both relationship maps into a single JSON object
// keyed by relation name.
//
// NOTE(review): a relation present in both maps collides; the ToMany entry
// wins because it is written last.
func (l *Links) MarshalJSON() ([]byte, error) {
	merged := make(map[string]interface{}, len(l.ToOne)+len(l.ToMany))
	for relation, link := range l.ToOne {
		merged[relation] = link
	}
	for relation, link := range l.ToMany {
		merged[relation] = link
	}
	return json.Marshal(merged)
}
// UnmarshalJSON rebuilds both relation maps from a flat JSON object; each
// value is tried as a ResourceLink first and as a CollectionLink otherwise.
func (l *Links) UnmarshalJSON(data []byte) error {
	l.ToOne = make(map[string]ResourceLink)
	l.ToMany = make(map[string]CollectionLink)
	var raw map[string]json.RawMessage
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	for relation, payload := range raw {
		var one ResourceLink
		if err := json.Unmarshal(payload, &one); err == nil {
			l.ToOne[relation] = one
			continue
		}
		var many CollectionLink
		if err := json.Unmarshal(payload, &many); err != nil {
			return err
		}
		l.ToMany[relation] = many
	}
	return nil
}
|
package main
import (
"fmt"
"time"
)
// longRunning prints incoming messages and returns once a full minute passes
// without one.
//
// Perf fix (this was flagged by the original comment): time.After in a loop
// allocates a fresh timer on every iteration, and each abandoned timer
// lingers until it fires. A single reused Timer gives identical semantics —
// the one-minute window restarts after every message — without the garbage.
func longRunning(messages <-chan string) {
	timer := time.NewTimer(time.Minute)
	defer timer.Stop()
	for {
		select {
		case <-timer.C:
			return
		case msg := <-messages:
			fmt.Println(msg)
			// Stop and drain before Reset, per the time.Timer contract.
			if !timer.Stop() {
				select {
				case <-timer.C:
				default:
				}
			}
			timer.Reset(time.Minute)
		}
	}
}
// main sends one message and then waits for longRunning's one-minute idle
// timeout to elapse (so the program runs for about a minute).
func main() {
	var x = make(chan string, 1)
	x <- "hello, world, chan"
	longRunning(x)
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/codegangsta/negroni"
"html/template"
"io"
"net/http"
"net/url"
"nlc_dv/marc"
"nlc_dv/search"
"os"
"reflect"
"sort"
"strconv"
"flag"
)
var flagSkip int // -skip flag: bytes to skip after each parsed record
var flagUtf8 bool // -utf8 flag: whether the CNMARC input is UTF-8 encoded
var ds *DataStore // global store populated by readFile, served by the handlers
// Doc is one bibliographic record converted from a CNMARC entry (see
// convert for the field-to-tag mapping).
type Doc struct {
	Id int
	Year int `json:"year"`
	Name string `json:"name"`
	Terms []string `json:"terms"`
	Desc string `json:"desc"`
	Author []string `json:"author"`
	URL string `json:"url"`
	// keyword is the primary subject (606 $a); feeds the per-year stats.
	keyword string
}
// DataStore holds all parsed docs, the term lexicon, the search index, and
// the per-year statistics derived while loading.
type DataStore struct {
	// tn is the last assigned term id; dn the last assigned doc id.
	tn int
	dn int
	Lexicon map[string]int
	Docs map[int]*Doc
	searcher *search.Searcher
	// yearStatData is yearStatMap's values sorted by year (see initYearStat).
	yearStatData []*YearStat
	yearStatMap map[int]*YearStat
}
// YearStat aggregates document count and keyword frequencies for one year.
type YearStat struct {
	Year int `json:"year,string"`
	Quantity int `json:"quantity"`
	// Keywords is words flattened and sorted by count (see initKeywords).
	Keywords []*WordCount `json:"keywords"`
	words map[string]int
}
// ByYear sorts year stats in ascending year order (sort.Interface).
type ByYear []*YearStat
func (s ByYear) Len() int {
	return len(s)
}
func (s ByYear) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
func (s ByYear) Less(i, j int) bool {
	return s[i].Year < s[j].Year
}
// WordCount is one keyword and its occurrence count.
type WordCount struct {
	Value string `json:"value"`
	Count int `json:"count"`
}
// ByCount sorts word counts in descending count order (sort.Interface).
type ByCount []*WordCount
func (s ByCount) Len() int {
	return len(s)
}
func (s ByCount) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}
func (s ByCount) Less(i, j int) bool {
	return s[i].Count > s[j].Count
}
// AddWord increments the occurrence count for word in this year's tally.
func (y *YearStat) AddWord(word string) {
	// A missing key reads as zero, so a single increment replaces the
	// original comma-ok branch with identical behavior.
	y.words[word]++
}
// initKeywords flattens the word-count map into Keywords, sorted by
// descending count.
func (y *YearStat) initKeywords() {
	y.Keywords = make([]*WordCount, 0, len(y.words))
	for word, count := range y.words {
		y.Keywords = append(y.Keywords, &WordCount{word, count})
	}
	sort.Sort(ByCount(y.Keywords))
}
// Add assigns doc the next id, stores it, updates the per-year statistics,
// and indexes each of its terms into the searcher via the lexicon.
func (d *DataStore) Add(doc *Doc) {
	d.dn++
	d.Docs[d.dn] = doc
	doc.Id = d.dn
	// Create or update the year bucket for this doc.
	y, e := d.yearStatMap[doc.Year]
	if !e {
		y = &YearStat{Year: doc.Year, Quantity: 1, words: map[string]int{}}
		d.yearStatMap[doc.Year] = y
	} else {
		y.Quantity++
	}
	// Intern each term in the lexicon and post it to the search index.
	for _, v := range doc.Terms {
		id, exists := d.Lexicon[v]
		if !exists {
			d.tn++
			id = d.tn
			d.Lexicon[v] = id
		}
		d.searcher.Put(id, d.dn)
		//y.AddWord(v)
	}
	// Only the primary keyword feeds the per-year word stats; counting every
	// term is intentionally disabled (commented call above).
	y.AddWord(doc.keyword)
}
// initYearStat finalizes per-year keyword lists and produces yearStatData,
// the year-sorted slice view of yearStatMap.
func (d *DataStore) initYearStat() {
	d.yearStatData = make([]*YearStat, 0, len(d.yearStatMap))
	for _, stat := range d.yearStatMap {
		stat.initKeywords()
		d.yearStatData = append(d.yearStatData, stat)
	}
	sort.Sort(ByYear(d.yearStatData))
}
// searchToDoc maps search hits back to stored *Doc records via their "id"
// field, returning the docs and the total hit count.
func (d *DataStore) searchToDoc(sr *search.SearchResult) ([]*Doc, int) {
	if sr == nil || sr.Docs == nil {
		return nil, 0
	}
	res := []*Doc{}
	for _, v := range sr.Docs {
		for _, f := range v.Fields {
			if f.GetName() == "id" {
				// NOTE(review): assumes the "id" field value is always an
				// int (as written by docForSearch); the assertion panics
				// otherwise — confirm no other writers exist.
				res = append(res, d.Docs[f.GetValue().(int)])
				break
			}
		}
	}
	return res, sr.Total
}
// Find runs a paged search by term and/or year. With both empty it returns
// nothing; with one filter it issues a term query, and with both a MUST
// boolean query.
func (d *DataStore) Find(term string, year string, start int, limit int) ([]*Doc, int) {
	var q search.Query
	if term == "" && year == "" {
		return nil, 0
	}
	if term == "" && year != "" {
		q = &search.TermPageQuery{search.TermQuery{&search.Term{"year", year}}, start, limit}
	} else if term != "" && year == "" {
		q = &search.TermPageQuery{search.TermQuery{&search.Term{"term", term}}, start, limit}
	} else {
		q = &search.BooleanQuery{
		&search.TermQuery{&search.Term{"term", term}},
		&search.TermQuery{&search.Term{"year", year}},
		search.MUST,
		start,
		limit}
	}
	// Debug trace of the chosen query type (this is the file's only use of
	// the reflect import).
	fmt.Println(reflect.TypeOf(q))
	return d.searchToDoc(d.searcher.Find(q))
}
func check(e error) {
if e != nil {
panic(e)
}
}
// parseYear extracts the publication year from a field-100 value: subfield
// 'a' is a fixed-layout string whose runes 9..12 hold the four-digit year.
//
// NOTE(review): r[9:13] panics when the subfield is shorter than 13 runes —
// confirm upstream data always satisfies this.
func parseYear(f string) (int, error) {
	sf := marc.ParseSubfield(f, 'a')
	r := []rune(sf)
	ys := string(r[9:13])
	return strconv.Atoi(ys)
}
// convert builds a Doc from a CNMARC record. The bitmask i tracks which
// fields were found (1=year, 2=name, 4=terms, 8=desc, 16=author, 32=url);
// a record missing any of year/name/terms (bits 0-2) is rejected with nil.
func convert(r *marc.Record) (doc *Doc) {
	doc = &Doc{}
	i := 0
	for _, v := range r.Field {
		switch v.Header {
		case 100:
			// 100: coded data including the publication year.
			y, err := parseYear(v.Value)
			if err != nil {
				//if err != nil || y < 1949 {
				return nil
			}
			//fmt.Println(y)
			doc.Year = y
			i = i | 1
		case 200:
			// 200 $a: title.
			doc.Name = marc.ParseSubfield(v.Value, 'a')
			//fmt.Println(doc.Name)
			i = i | 2
		case 606:
			// 606: subject terms; $a alone is kept as the primary keyword.
			doc.Terms = marc.ParseAllSubfield(v.Value)
			doc.keyword = marc.ParseSubfield(v.Value, 'a')
			//fmt.Println(v.Value, doc.Terms)
			if len(doc.Terms) > 0 {
				i = i | 4
			}
		case 330:
			// 330 $a: abstract/description.
			doc.Desc = marc.ParseSubfield(v.Value, 'a')
			i = i | 8
		case 701:
			// 701 $a: author; may repeat, empty values are skipped.
			if doc.Author == nil {
				doc.Author = []string{}
			}
			au := marc.ParseSubfield(v.Value, 'a')
			if au != "" {
				doc.Author = append(doc.Author, au)
				if i&16 == 0 {
					i = i | 16
				}
			}
		case 856:
			// 856 $u: electronic location URL.
			doc.URL = marc.ParseSubfield(v.Value, 'u')
			i = i | 32
		}
	}
	// Reject records lacking any of the three mandatory fields.
	if (i & 7) < 7 {
		fmt.Printf("%d %s %s\r\n", doc.Year, doc.Name, doc.Desc)
		//fmt.Println(doc.Terms)
		return nil
	}
	return doc
}
// docForSearch builds the indexable search.Document for doc, exposing its
// id, year, and terms as stored fields.
func docForSearch(doc *Doc) *search.Document {
	fid := &search.IntField{search.BaseField{true, "id"}, doc.Id}
	fyear := &search.IntField{search.BaseField{true, "year"}, doc.Year}
	fterms := &search.StrSliceField{search.BaseField{true, "term"}, doc.Terms}
	fields := []search.Field{fid, fyear, fterms}
	return &search.Document{fields}
}
// readFile parses every CNMARC record in fp into a fully indexed DataStore.
// skip is the per-record byte skip and chinese selects the non-UTF-8 path
// of the marc reader. Any I/O or parse error aborts via check/panic.
func readFile(fp string, skip int, chinese bool) *DataStore {
	searcher := search.NewSearcher()
	ds := &DataStore{
		searcher: searcher,
		Lexicon: map[string]int{},
		Docs: map[int]*Doc{},
		yearStatMap: map[int]*YearStat{},
	}
	f, err := os.Open(fp)
	check(err)
	r := marc.NewReader(f, skip, chinese)
	for {
		rc, err := r.Read()
		if err == io.EOF {
			break
		}
		check(err)
		// Records failing convert's mandatory-field check are dropped.
		doc := convert(rc)
		if doc != nil {
			ds.Add(doc)
			searcher.Add(docForSearch(doc))
		}
	}
	ds.initYearStat()
	return ds
}
func home(w http.ResponseWriter, r *http.Request) {
t, _ := template.ParseFiles("views/index.html")
t.Execute(w, nil)
}
func writeJson(w http.ResponseWriter, d interface{}) {
b, err := json.Marshal(d)
if err != nil {
fmt.Println("json err: ", err)
}
w.Header().Set("Content-Type", "application/json")
w.Write(b)
}
// limitStatData returns a slice where each year's Keywords list is capped at
// limit entries; years already within the limit are shared, not copied.
func limitStatData(data []*YearStat, limit int) []*YearStat {
	res := make([]*YearStat, len(data))
	for i, item := range data {
		if len(item.Keywords) <= limit {
			res[i] = item
			continue
		}
		res[i] = &YearStat{item.Year, item.Quantity, item.Keywords[:limit], item.words}
	}
	return res
}
// yearJson serves the per-year statistics with each year's keyword list
// capped at 100 entries.
func yearJson(w http.ResponseWriter, r *http.Request) {
	fmt.Println(len(ds.yearStatData)) // debug: number of years served
	writeJson(w, limitStatData(ds.yearStatData, 100))
}
// findDoc serves paged search results: query params word/year filter, and
// start/limit (defaults 0/50) page the results.
func findDoc(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query()
	fmt.Println(q) // debug: incoming query
	data := map[string]interface{}{}
	start := getIntParam(q, "start", 0)
	limit := getIntParam(q, "limit", 50)
	docs, total := ds.Find(q.Get("word"), q.Get("year"), start, limit)
	data["docs"] = docs
	data["total"] = total
	writeJson(w, data)
}
func getIntParam(q url.Values, key string, def int) int {
str := q.Get(key)
res := def
if str != "" {
res, _ = strconv.Atoi(str)
if res < 0 {
res = 0
}
}
return res
}
// initFlag registers the command-line flags: -skip (bytes to skip after each
// parsed record) and -utf8 (whether the CNMARC file is UTF-8 encoded).
func initFlag(){
	flag.IntVar(&flagSkip, "skip", 0, "每条记录解析后需跳过的字节数")
	flag.BoolVar(&flagUtf8, "utf8", true, "CNMARC文件是否是utf8编码")
}
// main parses flags, loads the CNMARC file given as the first positional
// argument into the global store, and serves the UI and JSON endpoints on
// :3000 behind negroni's classic middleware stack plus a static file server.
func main() {
	initFlag()
	flag.Parse()
	if flag.NArg() < 1{
		panic("请输入文件路径")
	}
	file := flag.Arg(0)
	// The marc reader's "chinese" flag is the inverse of -utf8.
	ds = readFile(file, flagSkip, !flagUtf8)
	mux := http.NewServeMux()
	mux.HandleFunc("/data.json", yearJson)
	mux.HandleFunc("/search.json", findDoc)
	mux.HandleFunc("/", home)
	n := negroni.Classic()
	s := negroni.NewStatic(http.Dir("static"))
	n.Use(s)
	n.UseHandler(mux)
	n.Run(":3000")
}
|
package main
import (
"fmt"
"net/http"
)
func Handle_NAME(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "anu!!!!!!!!!")
}
// main serves Handle_NAME at / on port 80.
//
// Bug fix: ListenAndServe's error (e.g. port already bound or permission
// denied on :80) was silently discarded; report it before exiting.
func main() {
	http.HandleFunc("/", Handle_NAME)
	if err := http.ListenAndServe(":80", nil); err != nil {
		fmt.Println("server error:", err)
	}
}
|
package persist
// IQuery is a placeholder marker interface for persistence queries.
// NOTE(review): it has no methods yet, so every type satisfies it — confirm
// whether methods are planned or the abstraction can be dropped.
type IQuery interface {
}
|
/*
* Tencent is pleased to support the open source community by making 蓝鲸 available.
* Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*/
package sys
import "bscp.io/pkg/iam/client"
const (
	// SystemIDBSCP is bscp in iam's system id.
	SystemIDBSCP = "bk-bscp"
	// SystemNameBSCPEn is bscp in iam's system english name.
	SystemNameBSCPEn = "bscp"
	// SystemNameBSCP is bscp in iam's system name.
	SystemNameBSCP = "服务配置平台"
	// SystemIDCMDB is cmdb in iam's system id.
	SystemIDCMDB = "bk_cmdb"
	// SystemNameCMDB is cmdb system name in iam.
	SystemNameCMDB = "配置平台"
)
// SystemIDNameMap is system id to name map.
var SystemIDNameMap = map[string]string{
	SystemIDBSCP: SystemNameBSCP,
	SystemIDCMDB: SystemNameCMDB,
}
// TypeID resource type to register iam.
const (
	// Business is the biz-level resource type.
	Business client.TypeID = "biz"
	// Application is the app-level resource type.
	Application client.TypeID = "app"
)
// ActionID action id to register iam.
const (
	// BusinessViewResource business view.
	BusinessViewResource client.ActionID = "find_business_resource"
	// AppCreate app create.
	AppCreate client.ActionID = "app_create"
	// AppView app view.
	AppView client.ActionID = "app_view"
	// AppEdit app edit.
	AppEdit client.ActionID = "app_edit"
	// AppDelete app delete.
	AppDelete client.ActionID = "app_delete"
	// GenerateRelease generate release.
	GenerateRelease client.ActionID = "generate_release"
	// ReleasePublish release publish.
	ReleasePublish client.ActionID = "release_publish"
	// ConfigItemFinishPublish config item finish publish.
	ConfigItemFinishPublish client.ActionID = "config_item_finish_publish"
	// StrategySetCreate strategy set create.
	StrategySetCreate client.ActionID = "strategy_set_create"
	// StrategySetEdit strategy set edit.
	StrategySetEdit client.ActionID = "strategy_set_edit"
	// StrategySetDelete strategy set delete.
	StrategySetDelete client.ActionID = "strategy_set_delete"
	// StrategyCreate strategy create.
	StrategyCreate client.ActionID = "strategy_create"
	// StrategyEdit strategy edit.
	StrategyEdit client.ActionID = "strategy_edit"
	// StrategyDelete strategy delete.
	StrategyDelete client.ActionID = "strategy_delete"
	// TaskHistoryView task history view.
	TaskHistoryView client.ActionID = "history_view"
	// GroupCreate group create.
	GroupCreate client.ActionID = "group_create"
	// GroupDelete group delete.
	GroupDelete client.ActionID = "group_delete"
	// GroupEdit group edit.
	GroupEdit client.ActionID = "group_edit"
	// Unsupported is an action that can not be recognized.
	Unsupported client.ActionID = "unsupported"
	// Skip is an action that no need to auth.
	Skip client.ActionID = "skip"
	// CredentialCreate credential create.
	CredentialCreate client.ActionID = "credential_create"
	// CredentialView credential view.
	CredentialView client.ActionID = "credential_view"
	// CredentialEdit credential edit.
	CredentialEdit client.ActionID = "credential_edit"
	// CredentialDelete credential delete.
	CredentialDelete client.ActionID = "credential_delete"
)
// ActionIDNameMap maps each registrable action id to its display name.
// NOTE(review): Unsupported and Skip are intentionally absent — presumably
// because they are never registered with iam; confirm.
var ActionIDNameMap = map[client.ActionID]string{
	BusinessViewResource: "业务访问",
	AppCreate: "服务创建",
	AppView: "服务查看",
	AppEdit: "服务编辑",
	AppDelete: "服务删除",
	GenerateRelease: "生成版本",
	ReleasePublish: "上线版本",
	ConfigItemFinishPublish: "配置项结束发布",
	StrategySetCreate: "策略集创建",
	StrategySetEdit: "策略集编辑",
	StrategySetDelete: "策略集删除",
	StrategyCreate: "策略创建",
	StrategyEdit: "策略编辑",
	StrategyDelete: "策略删除",
	GroupCreate: "分组创建",
	GroupEdit: "分组编辑",
	GroupDelete: "分组删除",
	TaskHistoryView: "任务历史",
	CredentialCreate: "服务密钥创建",
	CredentialView: "服务密钥查看",
	CredentialEdit: "服务密钥编辑",
	CredentialDelete: "服务密钥删除",
}
// InstanceSelectionID selection id to register iam.
const (
	// BusinessSelection selects business instances.
	BusinessSelection client.InstanceSelectionID = "business"
	// ApplicationSelection selects application instances.
	ApplicationSelection client.InstanceSelectionID = "application"
)
// ActionType action type to register iam.
const (
	Create client.ActionType = "create"
	Delete client.ActionType = "delete"
	View client.ActionType = "view"
	Edit client.ActionType = "edit"
	List client.ActionType = "list"
)
// ActionTypeIDNameMap maps each action type to its display name.
var ActionTypeIDNameMap = map[client.ActionType]string{
	Create: "新建",
	Edit: "编辑",
	Delete: "删除",
	View: "查询",
	List: "列表查询",
}
|
package schema
import (
"time"
"github.com/graphql-go/graphql"
"github.com/mszsgo/hjson"
)
// EditMutation is the GraphQL result payload of the edit mutation: the
// config name that was updated and when.
type EditMutation struct {
	Name string `description:"配置名"`
	UpdatedAt time.Time `description:"更新时间"`
}
// Description returns the mutation's GraphQL description.
func (*EditMutation) Description() string {
	return "编辑"
}
// EditMutationArgs declares the mutation's arguments; the graphql:"!" tag
// marks Name and Value as required, Remark is optional.
type EditMutationArgs struct {
	Name string `graphql:"!" description:"配置名"`
	Value string `graphql:"!" description:"配置值"`
	Remark string `graphql:"" description:"备注信息"`
}
// Args returns the zero-valued argument template used for schema generation.
func (*EditMutation) Args() *EditMutationArgs {
	return &EditMutationArgs{}
}
// Resolve returns the resolver that applies the edit: it decodes the GraphQL
// arguments, updates the named config entry, and echoes the name with the
// current timestamp.
func (*EditMutation) Resolve() graphql.FieldResolveFn {
	return func(p graphql.ResolveParams) (i interface{}, err error) {
		var args *EditMutationArgs
		// NOTE(review): MapToStruct's result is not checked; if decoding
		// fails, args stays nil and the dereferences below panic — confirm.
		hjson.MapToStruct(p.Args, &args)
		NewConfig().Update(args.Name, args.Value, args.Remark)
		// err is always nil here; the update outcome is not propagated.
		return &EditMutation{
			Name: args.Name,
			UpdatedAt: time.Now(),
		}, err
	}
}
|
package main
import (
"encoding/json"
"fmt"
"my9awsgo/my9sfn"
"time"
)
// GENERIC_STEP_PATH is the config-path segment under which generic step
// configuration files live.
const GENERIC_STEP_PATH = "generic"
// RunStateMachine launches the compute instance backing this run and then
// starts the configured Step Functions state machine with the assembled
// input (project/env/region/config location plus the instance id).
func (swlRun *SwlRun) RunStateMachine() (err error) {
	var smInput StateMachineInput
	smInput.Project = swlRun.SwlConf.Project
	smInput.Env = swlRun.SwlConf.Env
	smInput.Region = swlRun.SwlConf.Region
	smInput.ConfigBucket = swlRun.ConfigBucket
	smInput.ConfigBucketKey = swlRun.SwlConf.ConfigPrefix
	instance_id, err := swlRun.LaunchCompute()
	if err != nil {
		// Bug fix: this error was previously discarded (overwritten by the
		// next assignment); fail fast rather than starting the state machine
		// without its compute.
		fmt.Printf("RunStateMachine: Error in LaunchCompute :: Error=%s\n", err)
		return err
	}
	smInput.Result = instance_id
	var runSmIn my9sfn.RunSmIn
	runSmIn.Name = swlRun.SwlConf.Project + "_" + getUniqueExecutionName()
	smInput.ExecutionArn = "arn:aws:states:" + swlRun.SwlConf.Region + ":" + swlRun.SwlConf.AwsAccountId + ":execution:" + swlRun.SwlConf.StateMachineName + ":" + runSmIn.Name
	smInput.Mode = "launch_stepworker"
	runSmIn.StateMachineArn = swlRun.SwlConf.StateMachineArn
	inputData, err := json.Marshal(smInput)
	if err != nil {
		// Bug fix: fmt.Println printed the %s verb literally; Printf formats
		// the error as intended.
		fmt.Printf("RunStateMachine: Error in reading JSON marshalling State Machine Input :: Error=%s\n", err)
		panic(err)
	}
	runSmIn.Input = string(inputData)
	runSmOut, err := swlRun.SFNSession.SfnRunStateMachine(runSmIn)
	if err != nil {
		fmt.Printf("RunStateMachine: Error in SfnRunStateMachine :: Error=%s\n", err)
		panic(err)
	}
	fmt.Println("RunStateMachine: running StateMachine ", runSmOut)
	return err
}
// LaunchCompute creates the ECS container instance backing this state machine
// run and waits a fixed 4 minutes for the compute to spin up.
// On a creation error it returns immediately without waiting.
func (swlRun *SwlRun) LaunchCompute() (instance_id string, err error) {
	swlRun.Ec2InstanceConfigFilePath = swlRun.SwlConf.ConfigPrefix + "/" + swlRun.SwlConf.Project + "/" + swlRun.SwlConf.Env + "/" + GENERIC_STEP_PATH + "/" + swlRun.SwlConf.Ec2InstConfig
	fmt.Println("Launch compute for this State Machine run ... ")
	instance_id, err = swlRun.CreateEcsContainerInstance(false)
	if err != nil {
		// Fix: don't sleep 4 minutes waiting for an instance that was never created.
		return instance_id, err
	}
	fmt.Println("Wait for compute spin up ...")
	time.Sleep(4 * time.Minute) // was 240*1000*time.Millisecond; same duration, clearer
	return instance_id, err
}
|
package reliable
import (
"errors"
"log"
"net"
"sync"
"time"
"github.com/seanpfeifer/hostrelay/scan"
)
// ErrServerClosed is returned by Serve after Close has shut the server down.
var ErrServerClosed = errors.New("tcp: Server closed")
// ListenAndServeTCP opens a listener on the given network and address and
// runs a relay Server on it until the server is closed or accepting fails.
func ListenAndServeTCP(network, address string) error {
	listener, err := net.Listen(network, address)
	if err != nil {
		return err
	}
	addr := listener.Addr()
	log.Printf(`Listening for %s on "%s"`, addr.Network(), addr.String())
	srv := &Server{
		doneChan:   make(chan struct{}),
		activeConn: make(map[*playerConn]struct{}),
	}
	return srv.Serve(listener)
}
// Server relays length-prefixed messages between connected players over TCP.
type Server struct {
	doneChan   chan struct{}            // closed by Close to signal shutdown
	activeConn map[*playerConn]struct{} // set of live connections; guarded by connMutex
	connMutex  sync.Mutex               // protects activeConn
}
// trackConn registers (add=true) or unregisters (add=false) a connection in
// the server's active-connection set, under connMutex.
func (srv *Server) trackConn(c *playerConn, add bool) {
	srv.connMutex.Lock()
	defer srv.connMutex.Unlock()
	if !add {
		delete(srv.activeConn, c)
		return
	}
	srv.activeConn[c] = struct{}{}
}
// Close stops accepting new connections and closes every active connection.
//
// Fix: the original iterated and mutated srv.activeConn WITHOUT holding
// connMutex, racing with trackConn. We now snapshot the set under the lock
// and close the connections after releasing it — closing inside the lock
// would self-deadlock, because playerConn.Close -> trackConn re-acquires
// this non-reentrant mutex.
func (srv *Server) Close() {
	// Close the server so we don't accept new connections
	select {
	case <-srv.doneChan:
		// Already closed. Don't close again
	default:
		close(srv.doneChan)
	}
	// Snapshot the active connections under the lock.
	srv.connMutex.Lock()
	conns := make([]*playerConn, 0, len(srv.activeConn))
	for c := range srv.activeConn {
		conns = append(conns, c)
	}
	srv.connMutex.Unlock()
	// Each Close triggers trackConn(c, false), which removes the connection
	// from activeConn under the mutex — no manual delete needed here.
	for _, c := range conns {
		c.Close()
	}
}
// Serve accepts connections on l until Close is called or a permanent accept
// error occurs, spawning one handlePlayer goroutine per connection.
// Temporary accept failures are retried with exponential backoff (5ms..1s).
func (srv *Server) Serve(l net.Listener) error {
	defer l.Close()
	var tempDelay time.Duration // how long to sleep on accept failure
	// Most of this is the same as http.Server.Serve(), with the delay being copied with modifications,
	// and the rest of the Server patterns being similar.
	for {
		conn, err := l.Accept()
		// Handle any errors we get, delaying if there's a temporary error
		if err != nil {
			// Prefer reporting shutdown over the raw accept error.
			select {
			case <-srv.doneChan:
				return ErrServerClosed
			default:
			}
			// NOTE(review): net.Error.Temporary is deprecated since Go 1.18;
			// kept to mirror net/http's historical retry behavior.
			if ne, ok := err.(net.Error); ok && ne.Temporary() {
				if tempDelay == 0 {
					tempDelay = 5 * time.Millisecond
				} else {
					tempDelay *= 2
				}
				if max := 1 * time.Second; tempDelay > max {
					tempDelay = max
				}
				// Retry after the delay
				time.Sleep(tempDelay)
				continue
			}
			return err
		}
		// No errors, let's reset our delay then spawn a goroutine to handle the connection
		tempDelay = 0
		player := srv.newConn(conn)
		go srv.handlePlayer(player)
	}
}
// newConn wraps an accepted net.Conn in a playerConn and registers it with
// the server's connection tracker.
func (srv *Server) newConn(conn net.Conn) *playerConn {
	player := &playerConn{
		doneChan: make(chan struct{}),
		outgoing: make(chan []byte),
		conn:     conn,
		srv:      srv,
	}
	srv.trackConn(player, true)
	return player
}
// broadcast sends msg to every active connection except the originating player.
//
// Fix: the recipients are snapshotted under connMutex, but the (potentially
// blocking, unbuffered) channel sends happen AFTER the lock is released.
// Previously the sends ran while holding connMutex, so a single client whose
// handlePlayer goroutine had already exited would block send() forever and
// wedge every other broadcast — and trackConn — behind the held lock.
func (srv *Server) broadcast(player *playerConn, msg []byte) {
	// We need to lock our mutex, or we can't cycle through all connections
	srv.connMutex.Lock()
	recipients := make([]*playerConn, 0, len(srv.activeConn))
	for conn := range srv.activeConn {
		if conn != player {
			// Only send to other players
			recipients = append(recipients, conn)
		}
	}
	srv.connMutex.Unlock()
	for _, conn := range recipients {
		conn.send(msg)
	}
}
// handlePlayer pumps outbound messages to the player's socket until a write
// fails or the listen goroutine signals completion via doneChan, then closes
// and unregisters the connection.
func (srv *Server) handlePlayer(player *playerConn) {
	defer player.Close()
	// Listen on another goroutine.
	// We can't just do a blocking read in our select, even in "default", as it will hold all broadcasts up for quiet clients.
	go player.listen()
	for {
		select {
		case out := <-player.outgoing:
			_, err := player.conn.Write(out)
			if err != nil {
				log.Println(err)
				return
			}
		case <-player.doneChan:
			return
		}
	}
}
// playerConn wraps one player's TCP connection plus the channels that
// coordinate its writer (handlePlayer) and reader (listen) goroutines.
type playerConn struct {
	doneChan chan struct{} // listen() signals here when the reader finishes
	outgoing chan []byte   // messages queued for this player's socket (unbuffered)
	conn     net.Conn
	srv      *Server
}
// Read implements io.Reader by delegating to the underlying connection, so
// scan.ListenAndDispatch (see listen) can consume the socket directly.
func (c *playerConn) Read(p []byte) (int, error) {
	return c.conn.Read(p)
}
// Close unregisters the connection from the server and closes the socket.
func (c *playerConn) Close() {
	c.srv.trackConn(c, false)
	c.conn.Close()
}
// send queues msg for the writer goroutine. The channel is unbuffered, so
// this blocks until handlePlayer receives the message.
func (c *playerConn) send(msg []byte) {
	c.outgoing <- msg
}
// listen blocks reading prefixed messages from the socket, dispatching each
// to onMessageReceived; when the scanner stops it signals doneChan so
// handlePlayer can exit.
func (c *playerConn) listen() {
	scan.ListenAndDispatch(c, c.onMessageReceived)
	// If the scanner's done, we're done listening to this connection
	c.doneChan <- struct{}{}
}
// onMessageReceived relays one inbound message to every other player by
// reassembling prefix+payload into a single buffer and broadcasting it.
func (c *playerConn) onMessageReceived(prefix [scan.PrefixLengthBytes]byte, data []byte) {
	// TODO: Profile and optimize this slice allocation by pre-allocating per connection if necessary.
	msg := make([]byte, 0, scan.PrefixLengthBytes+len(data))
	msg = append(msg, prefix[:]...)
	msg = append(msg, data...)
	c.srv.broadcast(c, msg)
}
|
package repository
import (
"github.com/shharn/blog/db"
)
// dgraphRepositoryContext bundles a dgraph client with the error state that
// decides whether Dispose commits or rolls back the transaction.
type dgraphRepositoryContext struct {
	Client *db.Client
	Err    error // set by the caller when an operation failed
}
// Commit commits the underlying client transaction.
func (c *dgraphRepositoryContext) Commit() {
	c.Client.Commit()
}
// Rollback discards the underlying client transaction.
func (c *dgraphRepositoryContext) Rollback() {
	c.Client.Rollback()
}
func (c *dgraphRepositoryContext) Dispose() {
defer c.Client.Dispose()
if c.Err == nil {
c.Commit()
} else {
c.Rollback()
}
} |
package main
import (
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/zserge/lorca"
"gopkg.in/yaml.v2"
)
// cfg is the application configuration loaded from config.yaml.
type cfg struct {
	Debug  bool
	Title  string // window title used in the lorca page
	Width  int    // browser window width passed to lorca.New
	Height int    // browser window height passed to lorca.New
	// NOTE(review): port is unexported, so yaml.Unmarshal cannot populate it
	// and it stays 0 (net.Listen then picks a random free port) — confirm
	// whether this is intentional or the field should be exported.
	port int
	CDN  string // base URL used to redirect missing /img and /fonts assets
}
// main serves the bundled web UI on a loopback HTTP listener and opens it in
// a Chrome window via lorca when available, otherwise hands the URL to the
// fallback launcher.
func main() {
	// All assets (config, yaml fixtures, public/) live next to the executable.
	dir, err := filepath.Abs(filepath.Dir(os.Args[0]))
	if err != nil {
		log.Fatal(err)
	}
	// Load config.yaml; a missing or malformed config is fatal.
	t := func() cfg {
		data, err := ioutil.ReadFile(filepath.Join(dir, "config.yaml"))
		if err != nil {
			panic(err)
		}
		t := cfg{}
		err = yaml.Unmarshal(data, &t)
		if err != nil {
			panic(err)
		}
		return t
	}()
	ln, err := net.Listen("tcp", "127.0.0.1:"+strconv.Itoa(t.port))
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close()
	go http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "POST" {
			// The two POST APIs serve static yaml fixtures from disk.
			switch r.URL.Path {
			case "/api/search":
				b, err := ioutil.ReadFile(filepath.Join(dir, "search.yaml"))
				if err != nil {
					// Fix: the read error was previously discarded and a nil body written.
					http.Error(w, err.Error(), http.StatusInternalServerError)
					return
				}
				w.Write(b)
			case "/api/image":
				b, err := ioutil.ReadFile(filepath.Join(dir, "image.yaml"))
				if err != nil {
					// Fix: same silent-nil-write issue as above.
					http.Error(w, err.Error(), http.StatusInternalServerError)
					return
				}
				w.Write(b)
			}
			return
		}
		// Static assets: serve from disk when present, otherwise redirect to the CDN.
		if strings.HasPrefix(r.URL.Path, "/img") || strings.HasPrefix(r.URL.Path, "/fonts") {
			p := filepath.Join(dir, strings.ReplaceAll(r.URL.Path[1:], "/", string(os.PathSeparator)))
			_, err := os.Stat(p)
			if err == nil {
				http.ServeFile(w, r, p)
				return
			}
			// Strip a trailing slash from the CDN base before appending the request path.
			redirectURL := t.CDN
			length := len(redirectURL)
			if string(redirectURL[length-1]) == "/" {
				redirectURL = redirectURL[:length-1]
			}
			redirectURL = redirectURL + r.URL.Path
			http.Redirect(w, r, redirectURL, http.StatusTemporaryRedirect)
			return
		}
		// Everything else is served from the public/ directory.
		http.FileServer(http.Dir(filepath.Join(dir, "public"))).ServeHTTP(w, r)
	}))
	fmt.Printf("Listening on %s\n", ln.Addr())
	if lorca.LocateChrome() != "" {
		ui, err := lorca.New("data:text/html,"+url.PathEscape(fmt.Sprintf(`
	<html>
	<head><title>%s</title></head>
	</html>
	`, t.Title)), "", t.Width, t.Height)
		if err != nil {
			// Fix: the error was previously ignored, and a failed launch made
			// the deferred ui.Close() dereference a nil UI.
			log.Fatal(err)
		}
		defer ui.Close()
		ui.Load(fmt.Sprintf("http://%s", ln.Addr()))
		<-ui.Done()
		return
	}
	fallback(t, fmt.Sprintf("http://%s", ln.Addr()))
}
|
/*
* Copyright © 2018-2022 Software AG, Darmstadt, Germany and/or its licensors
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package adatypes
import (
"bytes"
"encoding/json"
"math"
"reflect"
"strconv"
"strings"
)
// structureElement holds the child values of one occurrence of a structure,
// both as an ordered list and as a by-name lookup map.
type structureElement struct {
	Values   []IAdaValue
	valueMap map[string]IAdaValue
}
// newStructureElement creates an empty occurrence with an initialized
// name-to-value lookup map.
func newStructureElement() *structureElement {
	element := structureElement{valueMap: map[string]IAdaValue{}}
	return &element
}
// StructureValueTraverser structure value traverser
type StructureValueTraverser interface {
	// Traverse walks the value tree, invoking the given callbacks per node.
	Traverse(t TraverserValuesMethods, x interface{}) (ret TraverseResult, err error)
}
// StructureValue structure value struct
type StructureValue struct {
	adaValue
	Elements   []*structureElement          // one entry per occurrence
	elementMap map[uint32]*structureElement // occurrence lookup by 1-based index
}
// newStructure creates a structure value for the given type definition.
// Sub-values are NOT initialized here; see initSubValues and its callers.
func newStructure(initType IAdaType) *StructureValue {
	Central.Log.Debugf("Create new structure value %s", initType.Name())
	value := StructureValue{adaValue: adaValue{adatype: initType}}
	value.elementMap = make(map[uint32]*structureElement)
	// No-op placeholder: group sub-value initialization is deliberately disabled.
	switch initType.Type() {
	case FieldTypeGroup:
		// value.initSubValues(0, 0, false)
	default:
	}
	return &value
}
/*
 * Init sub structures with empty value fields
 */
// initSubValues delegates to initMultipleSubValues with a multiple index of 0.
func (value *StructureValue) initSubValues(index uint32, peIndex uint32, initMuFields bool) {
	value.initMultipleSubValues(index, peIndex, 0, initMuFields)
}
/*
 * Init sub structures with empty value fields
 */
// initMultipleSubValues creates one empty child value per sub-type of this
// structure for the given period/multiple indexes, registers each via
// addValue, and recurses into structure children. MU ghost fields are only
// created when initMuFields is set. Errors are logged and abort the
// initialization without propagating.
func (value *StructureValue) initMultipleSubValues(index uint32, peIndex uint32, muIndex uint32, initMuFields bool) {
	subType := value.adatype.(*StructureType)
	Central.Log.Debugf("Init sub values for %s[%d,%d] -> |%d,%d| - %d init MU fields=%v", value.adatype.Name(), value.PeriodIndex(),
		value.MultipleIndex(), peIndex, muIndex, index, initMuFields)
	if value.Type().Type() != FieldTypeMultiplefield || initMuFields {
		for _, st := range subType.SubTypes {
			// Ghost MU entries are skipped unless explicitly requested.
			if st.HasFlagSet(FlagOptionMUGhost) && !initMuFields {
				continue
			}
			Central.Log.Debugf("Init sub structure %s(%s) for structure %s period index=%d multiple index=%d",
				st.Name(), st.Type().name(), value.Type().Name(), peIndex, muIndex)
			stv, err := st.Value()
			if err != nil {
				Central.Log.Debugf("Error %v", err)
				return
			}
			stv.setPeriodIndex(peIndex)
			/*if st.Type() == FieldTypeMultiplefield {
				stv.setMultipleIndex(muIndex)
			}*/
			Central.Log.Debugf("Add to %s[%d,%d] element %s[%d,%d] --> PEindex=%d MUindex=%d index=%d", value.Type().Name(), value.PeriodIndex(),
				value.MultipleIndex(), stv.Type().Name(),
				stv.PeriodIndex(), stv.MultipleIndex(), peIndex, muIndex, index)
			err = value.addValue(stv, peIndex, muIndex)
			if err != nil {
				Central.Log.Debugf("Error (addValue) %v", err)
				return
			}
			// Recurse so the whole subtree is prepared.
			if stv.Type().IsStructure() {
				stv.(*StructureValue).initMultipleSubValues(index, peIndex, muIndex, initMuFields)
			}
		}
		Central.Log.Debugf("Finished Init sub values for %s len=%d", value.Type().Name(), len(value.Elements))
	} else {
		Central.Log.Debugf("Skip Init sub values for %s", value.Type().Name())
		// debug.PrintStack()
	}
}
// String returns the empty string; structure values have no scalar text form.
func (value *StructureValue) String() string {
	return ""
}
// PeriodIndex returns the period index of the structured value
func (value *StructureValue) PeriodIndex() uint32 {
	return value.peIndex
}
// evaluateFieldNameStructure collects the distinct field names found while
// traversing a period group and records whether a second parse call is needed.
type evaluateFieldNameStructure struct {
	names      []string        // distinct field names, in traversal order
	namesMap   map[string]bool // dedupe set for names
	second     uint32          // second-call counter of the current parse (0 = first call)
	needSecond SecondCall      // set to ReadSecond when MU-in-PE fields were skipped
}
// evaluateFieldNames is a traversal callback that records each distinct field
// name into the evaluateFieldNameStructure passed as x. MU-in-PE multiple
// fields without a single index are skipped on the first call and flag a
// second call; MU ghost fields are never collected.
func evaluateFieldNames(adaValue IAdaValue, x interface{}) (TraverseResult, error) {
	efns := x.(*evaluateFieldNameStructure)
	fieldType := adaValue.Type()
	name := fieldType.Name()
	Central.Log.Debugf("Evaluate field %s", name)
	if fieldType.IsStructure() {
		if fieldType.Type() == FieldTypeMultiplefield {
			if efns.second == 0 && fieldType.HasFlagSet(FlagOptionPE) && !fieldType.PeriodicRange().IsSingleIndex() {
				Central.Log.Debugf("Skip PE/multiple field %s in first call (%s)", name, fieldType.PeriodicRange().FormatBuffer())
				efns.needSecond = ReadSecond
				return SkipTree, nil
			}
			if _, seen := efns.namesMap[name]; !seen {
				Central.Log.Debugf("Add multiple field %s", name)
				efns.names = append(efns.names, name)
				efns.namesMap[name] = true
			}
		}
	} else if !fieldType.HasFlagSet(FlagOptionMUGhost) {
		if _, seen := efns.namesMap[name]; !seen {
			Central.Log.Debugf("Add field %s", name)
			efns.names = append(efns.names, name)
			efns.namesMap[name] = true
		}
	}
	Central.Log.Debugf("EFNS need second call %d", efns.needSecond)
	return Continue, nil
}
// countPEsize is a type-traversal callback that accumulates the lengths of
// all plain fields (no multiple fields, no MU ghosts) into the uint32 pointed
// to by x — used to size the empty part of a period group buffer.
func countPEsize(adaType IAdaType, parentType IAdaType, level int, x interface{}) error {
	total := x.(*uint32)
	if adaType.Type() == FieldTypeMultiplefield || adaType.HasFlagSet(FlagOptionMUGhost) {
		return nil
	}
	*total += adaType.Length()
	Central.Log.Debugf("Add to PE size: %s -> %d", adaType.Name(), *total)
	return nil
}
/*
Parse buffer if a period group contains multiple fields. In that case the buffer parser need to parse
field by field and not the group alltogether
*/
// parseBufferWithMUPE handles period groups (and MU-in-PE multiple fields):
// it determines the occurrence count, initializes the per-occurrence sub
// values, and delegates to parsePeriodGroup / parsePeriodMultiple. For zero
// occurrences on mainframe it skips the reserved empty buffer part.
func (value *StructureValue) parseBufferWithMUPE(helper *BufferHelper, option *BufferOption) (res TraverseResult, err error) {
	Central.Log.Debugf("Parse Buffer structure with (MUPE) name=%s offset=%d remaining=%d length=%d value length=%d", value.Type().Name(),
		helper.offset, helper.Remaining(), len(helper.buffer), len(value.Elements))
	adaType := value.Type().(*StructureType)
	// Only period groups and MU-in-PE multiple fields are handled here.
	if value.Type().Type() != FieldTypePeriodGroup &&
		!(value.Type().HasFlagSet(FlagOptionPE) && value.Type().Type() == FieldTypeMultiplefield) {
		Central.Log.Debugf("Skip not group -> %s", value.Type().Name())
		return
	}
	Central.Log.Debugf("%s/%s parse buffer for MU in PE/first call", value.Type().Name(), value.Type().ShortName())
	var occNumber int
	Central.Log.Debugf("Check descriptor read %v", option.DescriptorRead)
	if option.DescriptorRead {
		// Descriptor reads deliver exactly one occurrence.
		occNumber = 1
	} else {
		// In the second call the occurrence is available
		if option.SecondCall > 0 && value.Type().Type() == FieldTypePeriodGroup {
			occNumber = value.NrElements()
			Central.Log.Debugf("Second call use available occurrence %d Type %s", occNumber, value.Type().Type().name())
		} else {
			occNumber, err = value.evaluateOccurrence(helper)
			if err != nil {
				return
			}
			Central.Log.Debugf("Call got occurrence %d available Type %s", occNumber, value.Type().Type().name())
		}
	}
	Central.Log.Debugf("PE occurrence %s has %d entries pos=%d", value.Type().Name(), occNumber, helper.offset)
	if occNumber > 0 {
		lastNumber := uint32(occNumber)
		// A restricted range limits how many occurrences are actually read.
		if adaType.peRange.multiplier() != allEntries {
			occNumber = adaType.peRange.multiplier()
		}
		Central.Log.Debugf("%s read %d entries", value.Type().Name(), occNumber)
		// Sanity limit against corrupt buffers.
		if occNumber > 10000 {
			Central.Log.Debugf("Too many occurrences")
			return SkipTree, NewGenericError(181)
		}
		if len(value.Elements) != occNumber {
			// Create the per-occurrence sub values before parsing them.
			peIndex := value.peIndex
			muIndex := uint32(0)
			for i := uint32(0); i < uint32(occNumber); i++ {
				if value.Type().Type() == FieldTypePeriodGroup {
					peIndex = adaType.peRange.index(i+1, lastNumber)
				} else {
					muIndex = i + 1
				}
				Central.Log.Debugf("Work on %s PE=%d MU=%d last=%d PEv=%d PErange=%d MUrange=%d",
					adaType.Name(), peIndex, muIndex, lastNumber, value.peIndex,
					adaType.PeriodicRange().from, adaType.MultipleRange().from)
				value.initMultipleSubValues(i+1, peIndex, muIndex, true)
			}
			if option.SecondCall > 0 &&
				(value.Type().HasFlagSet(FlagOptionPE) && value.Type().Type() == FieldTypeMultiplefield) {
				return value.parsePeriodMultiple(helper, option)
			}
			return value.parsePeriodGroup(helper, option, occNumber)
		}
		// Elements already exist (matching count): parse them in place.
		// NOTE(review): the parseBuffer result/error is discarded here.
		for _, e := range value.Elements {
			for _, v := range e.Values {
				v.parseBuffer(helper, option)
			}
		}
	}
	Central.Log.Debugf("No occurrence, check shift of PE empty part, sn=%s mainframe=%v need second=%v pos=%d", value.Type().Name(), option.Mainframe,
		option.NeedSecondCall, helper.offset)
	if option.Mainframe {
		Central.Log.Debugf("Are on mainframe, shift PE empty part pos=%d/%X", helper.offset, helper.offset)
		err = value.shiftEmptyMfBuffer(helper)
		if err != nil {
			return EndTraverser, err
		}
		Central.Log.Debugf("After shift PE empty part pos=%d/%X", helper.offset, helper.offset)
	}
	res = SkipTree
	return
}
// parsePeriodGroup parses a period group field-by-field: it first collects
// the distinct field names in the group, then for each name parses one value
// per occurrence. MU-in-PE fields without a single index cannot be parsed in
// this pass and schedule a second call instead.
func (value *StructureValue) parsePeriodGroup(helper *BufferHelper, option *BufferOption, occNumber int) (res TraverseResult, err error) {
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Parse period group/structure %s offset=%d/%X need second=%v", value.Type().Name(),
			helper.offset, helper.offset, option.NeedSecondCall)
	}
	/* Evaluate the fields which need to be parsed in the period group */
	tm := TraverserValuesMethods{EnterFunction: evaluateFieldNames}
	efns := &evaluateFieldNameStructure{namesMap: make(map[string]bool), second: option.SecondCall}
	res, err = value.Traverse(tm, efns)
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Got %d names got need second=%v was need second=%v", len(efns.names), efns.needSecond, option.NeedSecondCall)
	}
	if option.NeedSecondCall == NoneSecond {
		option.NeedSecondCall = efns.needSecond
	}
	// Parse occurrence-by-occurrence for each collected field name.
	for _, n := range efns.names {
		if Central.IsDebugLevel() {
			Central.Log.Debugf("Parse start of name : %s offset=%d/%X need second=%v", n, helper.offset,
				helper.offset, option.NeedSecondCall)
		}
		for i := 0; i < occNumber; i++ {
			if Central.IsDebugLevel() {
				Central.Log.Debugf("Get occurrence : %d -> %d", (i + 1), value.NrElements())
			}
			v := value.Get(n, i+1)
			if v == nil {
				return EndTraverser, NewGenericError(171)
			}
			//v.setPeriodIndex(uint32(i + 1))
			if v.Type().IsStructure() {
				st := v.Type().(*StructureType)
				if st.Type() == FieldTypeMultiplefield && st.HasFlagSet(FlagOptionPE) && !st.PeriodicRange().IsSingleIndex() {
					// MU in PE without a single index: defer to a second call.
					if Central.IsDebugLevel() {
						Central.Log.Debugf("Skip %s PE=%d", st.Name(), v.PeriodIndex())
					}
					// Assign-then-test: second call is a store call when storing.
					if option.NeedSecondCall = ReadSecond; option.StoreCall {
						option.NeedSecondCall = StoreSecond
					}
					if Central.IsDebugLevel() {
						Central.Log.Debugf("Parse PG: need second call %d", option.NeedSecondCall)
					}
				} else {
					// A non-single-index MU field carries its element count in the buffer.
					nrMu := uint32(1)
					if !st.MultipleRange().IsSingleIndex() {
						nrMu, err = helper.ReceiveUInt32()
						if err != nil {
							return
						}
					}
					if Central.IsDebugLevel() {
						Central.Log.Debugf("Got Nr of Multiple Fields = %d creating them ... for %d (%s/%s)",
							nrMu, v.PeriodIndex(), st.PeriodicRange().FormatBuffer(), st.MultipleRange().FormatBuffer())
					}
					/* Initialize MU elements dependent on the counter result */
					for muIndex := uint32(0); muIndex < nrMu; muIndex++ {
						muStructureType := v.Type().(*StructureType)
						Central.Log.Debugf("Create index MU %d", (muIndex + 1))
						sv, typeErr := muStructureType.SubTypes[0].Value()
						if typeErr != nil {
							err = typeErr
							return
						}
						muStructure := v.(*StructureValue)
						sv.Type().AddFlag(FlagOptionSecondCall)
						sv.setMultipleIndex(muIndex + 1)
						//sv.setPeriodIndex(uint32(i + 1))
						sv.setPeriodIndex(v.PeriodIndex())
						muStructure.addValue(sv, v.PeriodIndex(), muIndex)
						if st.PeriodicRange().IsSingleIndex() {
							_, err = sv.parseBuffer(helper, option)
							if err != nil {
								return
							}
						} else {
							if Central.IsDebugLevel() {
								Central.Log.Debugf("MU index %d,%d -> %d", sv.PeriodIndex(), sv.MultipleIndex(), i)
								Central.Log.Debugf("Due to Period and MU field, need second call call (PE/MU) for %s", value.Type().Name())
							}
							if option.NeedSecondCall = ReadSecond; option.StoreCall {
								option.NeedSecondCall = StoreSecond
							}
							if Central.IsDebugLevel() {
								Central.Log.Debugf("Parse PG2: need second call %d", option.NeedSecondCall)
							}
						}
					}
				}
			} else {
				/* Parse field value for each non-structure field */
				res, err = v.parseBuffer(helper, option)
				if err != nil {
					return
				}
				Central.Log.Debugf("Parsed to %s[%d,%d] index is %d", v.Type().Name(), v.PeriodIndex(), v.MultipleIndex(), i+1)
				// if value.Type().Type() == FieldTypeMultiplefield {
				// 	v.setMultipleIndex(uint32(i + 1))
				// 	Central.Log.Debugf("MU index %d,%d -> %d", v.PeriodIndex(), v.MultipleIndex(), i)
				// }
			}
		}
		Central.Log.Debugf("Parse end of name : %s offset=%d/%X", n, helper.offset, helper.offset)
	}
	res = SkipStructure
	return
}
// parsePeriodMultiple parses, during the second call, the MU-in-PE values
// that were created beforehand; every value is flagged as second-call before
// parsing. Returns SkipTree so the traverser does not descend again.
func (value *StructureValue) parsePeriodMultiple(helper *BufferHelper, option *BufferOption) (res TraverseResult, err error) {
	Central.Log.Debugf("Parse MU in PE added nodes")
	for _, e := range value.Elements {
		for _, v := range e.Values {
			v.Type().AddFlag(FlagOptionSecondCall)
			res, err = v.parseBuffer(helper, option)
			if err != nil {
				return
			}
			// Fix: len() yields an int, so the verb must be %d (was %s, a
			// go vet printf error producing "%!s(int=...)" output).
			Central.Log.Debugf("Parsed Value %s -> len=%d type=%T", v.Type().Name(), len(v.Bytes()), v)
		}
	}
	Central.Log.Debugf("End parsing MU in PE")
	res = SkipTree
	return
}
// Parse the structure
//
// parseBuffer dispatches structure parsing: second calls only continue for
// MU-in-PE multiple fields; PE structures containing MU fields go through
// parseBufferWithMUPE, everything else through parseBufferWithoutMUPE.
func (value *StructureValue) parseBuffer(helper *BufferHelper, option *BufferOption) (res TraverseResult, err error) {
	if option.SecondCall > 0 {
		// Only MU-in-PE multiple fields need re-parsing on the second call.
		if !(value.Type().HasFlagSet(FlagOptionPE) && value.Type().Type() == FieldTypeMultiplefield) {
			Central.Log.Debugf("Skip parsing structure value %s offset=%X", value.Type().Name(), helper.offset)
			return
		}
	}
	Central.Log.Debugf("Parse structure buffer %s/%s secondCall=%v offset=%d/%X pe=%v mu=%v", value.Type().Name(), value.Type().ShortName(),
		option.SecondCall, helper.offset, helper.offset, value.adatype.HasFlagSet(FlagOptionPE), value.adatype.HasFlagSet(FlagOptionAtomicFB))
	if value.adatype.HasFlagSet(FlagOptionPE) && value.adatype.HasFlagSet(FlagOptionAtomicFB) {
		return value.parseBufferWithMUPE(helper, option)
	}
	return value.parseBufferWithoutMUPE(helper, option)
}
// Evaluate the occurrence of the structure
//
// evaluateOccurrence determines how many occurrences follow in the buffer:
// single-index PE/MU ranges always yield 1; a fixed type occurrence is used
// directly; otherwise an occurrence counter (uint32 or uint8, per occ mode)
// is read from the buffer.
func (value *StructureValue) evaluateOccurrence(helper *BufferHelper) (occNumber int, err error) {
	subStructureType := value.adatype.(*StructureType)
	switch {
	case subStructureType.Type() == FieldTypePeriodGroup && subStructureType.peRange.IsSingleIndex():
		Central.Log.Debugf("Single PE index occurence only 1")
		return 1, nil
	case subStructureType.Type() == FieldTypeMultiplefield && subStructureType.muRange.IsSingleIndex():
		Central.Log.Debugf("Single MU index occurence only 1")
		return 1, nil
	case subStructureType.Type() == FieldTypePeriodGroup && subStructureType.HasFlagSet(FlagOptionSingleIndex):
		// Existing elements determine the count; otherwise force one occurrence.
		if len(value.Elements) > 0 {
			return len(value.Elements), nil
		}
		subStructureType.occ = 1
	default:
		if Central.IsDebugLevel() {
			Central.Log.Debugf("Single index flag: %v (%s)", subStructureType.HasFlagSet(FlagOptionSingleIndex), subStructureType.Type().name())
			Central.Log.Debugf("PE range: %s", subStructureType.peRange.FormatBuffer())
			Central.Log.Debugf("MU range: %s", subStructureType.muRange.FormatBuffer())
		}
	}
	// if subStructureType.HasFlagSet(FlagOptionSingleIndex) {
	// 	Central.Log.Debugf("Single index occurence only 1")
	// 	return 1, nil
	// }
	occNumber = math.MaxInt32
	Central.Log.Debugf("Current structure occurrence %d", subStructureType.occ)
	if subStructureType.occ > 0 {
		// Positive occ is a fixed occurrence count from the type definition.
		occNumber = subStructureType.occ
	} else {
		// Negative/zero occ values encode how the count is transmitted.
		switch subStructureType.occ {
		case OccCapacity:
			res, subErr := helper.ReceiveUInt32()
			if subErr != nil {
				err = subErr
				return
			}
			occNumber = int(res)
		case OccSingle:
			occNumber = 1
		case OccByte:
			res, subErr := helper.ReceiveUInt8()
			if subErr != nil {
				err = subErr
				return
			}
			occNumber = int(res)
		case OccNone:
		}
	}
	Central.Log.Debugf("Evaluate occurrence for %s of type %d to %d offset after=%d", value.Type().Name(),
		subStructureType.occ, occNumber, helper.offset)
	return
}
// Parse the buffer containing no PE and MU fields
//
// parseBufferWithoutMUPE parses structures that do not combine PE and MU:
// period groups and multiple fields only get their sub values initialized
// (the traverser parses them afterwards), while plain structures are parsed
// occurrence-by-occurrence via parseBufferTypes.
func (value *StructureValue) parseBufferWithoutMUPE(helper *BufferHelper, option *BufferOption) (res TraverseResult, err error) {
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Parse Buffer structure without MUPE name=%s offset=%d remaining=%d length=%d value length=%d type=%d", value.Type().Name(),
			helper.offset, helper.Remaining(), len(helper.buffer), len(value.Elements), value.Type().Type())
	}
	var occNumber int
	Central.Log.Debugf("Check descriptor read %v", option.DescriptorRead)
	if option.DescriptorRead {
		occNumber = 1
	} else {
		if option.SecondCall > 0 /*&& value.Type().Type() == FieldTypePeriodGroup */ {
			occNumber = value.NrElements()
			Central.Log.Debugf("Second call use available occurrence %d", occNumber)
		} else {
			occNumber, err = value.evaluateOccurrence(helper)
			if err != nil {
				return
			}
		}
		// TODO Remove because it it only a limit and assert statement
		if occNumber > 4000 && !strings.HasPrefix(value.Type().Name(), "fdt") {
			return SkipTree, NewGenericError(182, value.Type().Name(), occNumber)
		}
	}
	Central.Log.Debugf("Occurrence %d period parent index=%d", occNumber, value.peIndex)
	switch value.Type().Type() {
	case FieldTypePeriodGroup:
		Central.Log.Debugf("Init period group values occurrence=%d mainframe=%v", occNumber, option.Mainframe)
		if occNumber == 0 {
			// Empty PE: on mainframe the reserved buffer space must be skipped.
			if option.Mainframe {
				err = value.shiftEmptyMfBuffer(helper)
				if err != nil {
					return EndTraverser, err
				}
			}
			Central.Log.Debugf("Skip PE shifted to offset=%d/%X", helper.offset, helper.offset)
			return
		}
		for i := uint32(0); i < uint32(occNumber); i++ {
			value.initSubValues(i+1, i+1, true)
		}
		Central.Log.Debugf("Init period group sub values finished, elements=%d ", value.NrElements())
		return
	case FieldTypeMultiplefield:
		if occNumber == 0 {
			// Empty MU on mainframe: skip one sub-field length.
			// NOTE(review): the ReceiveBytes result/error is discarded here.
			if option.Mainframe {
				adaType := value.Type().(*StructureType)
				helper.ReceiveBytes(adaType.SubTypes[0].Length())
			}
			Central.Log.Debugf("Skip MU shifted to offset=%d/%X", helper.offset, helper.offset)
			return
		}
		Central.Log.Debugf("Init multiple field sub values")
		lastNumber := uint32(occNumber)
		adaType := value.Type().(*StructureType)
		// A restricted range limits how many entries are actually created.
		if adaType.muRange.multiplier() != allEntries {
			occNumber = adaType.muRange.multiplier()
		}
		Central.Log.Debugf("Defined range for values: %s", adaType.muRange.FormatBuffer())
		for i := uint32(0); i < uint32(occNumber); i++ {
			muIndex := adaType.muRange.index(i+1, lastNumber)
			Central.Log.Debugf("%d. Work on MU index = %d/%d", i, muIndex, lastNumber)
			value.initMultipleSubValues(i, value.peIndex, muIndex, true)
		}
		Central.Log.Debugf("Init multiple fields sub values finished")
		return
	case FieldTypeStructure:
	default:
		Central.Log.Debugf("Unused type=%d", value.Type().Type())
		return
	}
	Central.Log.Debugf("Start going through elements=%d", value.NrElements())
	// Go through all occurrences and check remaining buffer size
	index := 0
	for index < occNumber && helper.Remaining() > 0 {
		if Central.IsDebugLevel() {
			Central.Log.Debugf("index=%d remaining Buffer structure remaining=%d pos=%d",
				index, helper.Remaining(), helper.offset)
		}
		values, pErr := parseBufferTypes(helper, option, value, uint32(index))
		if pErr != nil {
			res = EndTraverser
			err = pErr
			Central.Log.Debugf("Parse buffer error in structure %s:%v", value.adatype.Name(), err)
			return
		}
		// Grow the element list lazily as occurrences are parsed.
		if len(value.Elements) <= index {
			element := newStructureElement()
			value.Elements = append(value.Elements, element)
			value.elementMap[uint32(index+1)] = element
		}
		if values != nil && value.adatype.Type() != FieldTypeGroup {
			value.Elements[index].Values = values
		}
		index++
		if Central.IsDebugLevel() {
			Central.Log.Debugf("------------------ Ending Parse index of structure index=%d len elements=%d",
				index, len(value.Elements))
		}
	}
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Sructure parse ready for %s index=%d occ=%d value length=%d pos=%d",
			value.Type().Name(), index, occNumber, len(value.Elements), helper.offset)
	}
	return
}
// shiftEmptyMfBuffer skips the mainframe buffer space reserved for an empty
// occurrence: one sub-field length for a multiple field, otherwise the summed
// lengths of all plain fields in the period group (computed via countPEsize).
func (value *StructureValue) shiftEmptyMfBuffer(helper *BufferHelper) (err error) {
	if value.Type().Type() == FieldTypeMultiplefield {
		st := value.Type().(*StructureType)
		subType := st.SubTypes[0]
		_, err = helper.ReceiveBytes(subType.Length())
		return
	}
	size := uint32(0)
	t := TraverserMethods{EnterFunction: countPEsize}
	adaType := value.Type().(*StructureType)
	// NOTE(review): the Traverse error (if any) is ignored here.
	adaType.Traverse(t, 1, &size)
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Skip parsing %s/%s type=%s, shift PE empty part %d bytes remaining=%d",
			value.Type().Name(), value.Type().ShortName(), value.Type().Type().name(), size, helper.Remaining())
	}
	_, err = helper.ReceiveBytes(size)
	return
}
// Search for structure field entries by name
//
// search returns the first value whose type name matches fieldName, looking
// through every occurrence and recursing into sub-structures; nil when the
// name is not found anywhere in the subtree.
func (value *StructureValue) search(fieldName string) IAdaValue {
	Central.Log.Debugf("Search field %s elements=%d", fieldName, len(value.Elements))
	for _, element := range value.Elements {
		for _, candidate := range element.Values {
			Central.Log.Debugf("Searched in value %s", candidate.Type().Name())
			if candidate.Type().Name() == fieldName {
				return candidate
			}
			if !candidate.Type().IsStructure() {
				Central.Log.Debugf("No structure search")
				continue
			}
			Central.Log.Debugf("Structure search")
			if hit := candidate.(*StructureValue).search(fieldName); hit != nil {
				return hit
			}
		}
	}
	Central.Log.Debugf("Searched field %s not found", fieldName)
	return nil
}
// Traverse Traverse through the definition tree calling a callback method for each node
//
// For each occurrence the optional ElementFunction is called first, then per
// value the EnterFunction, a recursive descent for structures (unless the
// enter callback returned SkipTree), and finally the LeaveFunction.
// SkipStructure aborts the whole structure and reports Continue upward;
// EndTraverser or an error stops immediately.
func (value *StructureValue) Traverse(t TraverserValuesMethods, x interface{}) (ret TraverseResult, err error) {
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Traverse level %d structure: %s", value.Type().Level(), value.Type().Name())
		Central.Log.Debugf("Nr sub elements=%d", value.NrElements())
	}
	if value.Elements != nil { // && len(value.Elements[0].Values) > 0 {
		nr := len(value.Elements)
		for e, val := range value.Elements {
			if Central.IsDebugLevel() {
				Central.Log.Debugf("%d: Nr sub values=%d", e, len(val.Values))
			}
			if t.ElementFunction != nil {
				ret, err = t.ElementFunction(value, e, nr, x)
				if err != nil || ret == EndTraverser {
					return
				}
			}
			for i, v := range val.Values {
				if Central.IsDebugLevel() {
					Central.Log.Debugf("Traverse node %d.element and %d.value at %s[%d,%d] (%s) for %s[%d,%d] (%s)", e, i, v.Type().Name(),
						v.PeriodIndex(), v.MultipleIndex(), v.Type().Type().name(), value.Type().Name(), value.PeriodIndex(),
						value.MultipleIndex(), value.Type().Type().name())
					// Debug-only consistency check of parent/child period indexes.
					if value.PeriodIndex() != v.PeriodIndex() {
						if value.Type().Type() != FieldTypePeriodGroup {
							Central.Log.Debugf("!!!!----> Error index parent not correct for %s of %s", v.Type().Name(), value.Type().Name())
						}
					}
				}
				if t.EnterFunction != nil {
					ret, err = t.EnterFunction(v, x)
					if err != nil || ret == EndTraverser {
						return
					}
				}
				if Central.IsDebugLevel() {
					Central.Log.Debugf("%s-%s: Got structure return directive : %d", value.Type().Name(), v.Type().Name(),
						ret)
					LogMultiLineString(true, FormatByteBuffer("DATA: ", v.Bytes()))
				}
				if ret == SkipStructure {
					if Central.IsDebugLevel() {
						Central.Log.Debugf("Skip structure tree ... ")
					}
					return Continue, nil
				}
				// Descend into structure children unless the enter callback skipped the tree.
				if v.Type().IsStructure() && ret != SkipTree {
					if Central.IsDebugLevel() {
						Central.Log.Debugf("Traverse tree %s", v.Type().Name())
					}
					ret, err = v.(*StructureValue).Traverse(t, x)
					if err != nil || ret == EndTraverser {
						return
					}
				}
				if t.LeaveFunction != nil {
					ret, err = t.LeaveFunction(v, x)
					if err != nil || ret == EndTraverser {
						return
					}
				}
				if Central.IsDebugLevel() {
					Central.Log.Debugf("Traverse index=%d/%d pfield=%s-field=%s", i, nr, value.Type().Name(), v.Type().Name())
				}
			}
		}
	}
	return Continue, nil
}
// Get get the value of an named tree node with an specific index
//
// The index is 1-based. The occurrence's valueMap is tried first, then the
// value list (recursing into sub-structures). Returns nil when the occurrence
// or the field does not exist.
// NOTE(review): an index < 1 would panic on the slice access below — callers
// are expected to pass 1..NrElements().
func (value *StructureValue) Get(fieldName string, index int) IAdaValue {
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Get field %s index %d -> %d", fieldName, index, len(value.Elements))
	}
	if len(value.Elements) < index {
		Central.Log.Debugf("Not got index")
		return nil
	}
	structElement := value.Elements[index-1]
	if vr, ok := structElement.valueMap[fieldName]; ok {
		Central.Log.Debugf("Got value map entry %#v", structElement.valueMap)
		return vr
	}
	Central.Log.Debugf("Nr values %d", len(structElement.Values))
	for _, vr := range structElement.Values {
		Central.Log.Debugf("Check %s -> %s", vr.Type().Name(), fieldName)
		if vr.Type().Name() == fieldName {
			Central.Log.Debugf("Found index %d to %s[%d,%d]", index, vr.Type().Name(), vr.PeriodIndex(), vr.MultipleIndex())
			return vr
		}
		if vr.Type().IsStructure() {
			svr := vr.(*StructureValue).Get(fieldName, index)
			if svr != nil {
				return svr
			}
		}
	}
	Central.Log.Debugf("No %s entry found with index=%d", fieldName, index)
	return nil
}
// NrElements number of structure values
//
// NrElements reports how many occurrences this structure currently holds.
func (value *StructureValue) NrElements() int {
	// len of a nil slice is 0, so no explicit nil check is needed.
	return len(value.Elements)
}
// NrValues number of structure values
//
// NrValues returns the number of values in the occurrence at the 1-based
// index, or -1 when no occurrences exist at all.
// NOTE(review): an out-of-range index panics on the slice access below.
func (value *StructureValue) NrValues(index uint32) int {
	if value.Elements == nil {
		return -1
	}
	return len(value.Elements[index-1].Values)
}
// Value return the values of an structure value
//
// The returned interface{} holds the []*structureElement occurrence list.
func (value *StructureValue) Value() interface{} {
	return value.Elements
}
// Bytes byte array representation of the value
//
// Structures have no flat byte representation, so this is always nil.
func (value *StructureValue) Bytes() []byte {
	return nil
}
// SetStringValue set the string value of the value
//
// Not supported for structure values; logs a fatal error.
func (value *StructureValue) SetStringValue(stValue string) {
	Central.Log.Fatal("Structure set string, not implement yet")
}
// SetValue sets the value of a structure (multiple field or period
// group) from a native Go value. A slice maps onto an MU field (one sub
// value per slice element) or onto a PE group (one struct per
// occurrence, matched by Go field name or by adabas tag short name).
// Other kinds are currently not supported and are ignored.
func (value *StructureValue) SetValue(v interface{}) error {
	switch reflect.TypeOf(v).Kind() {
	case reflect.Slice:
		switch value.Type().Type() {
		case FieldTypeMultiplefield:
			vi := reflect.ValueOf(v)
			for i := 0; i < vi.Len(); i++ {
				muStructureType := value.Type().(*StructureType)
				sv, typeErr := muStructureType.SubTypes[0].Value()
				if typeErr != nil {
					return typeErr
				}
				sv.setMultipleIndex(uint32(i + 1))
				sv.setPeriodIndex(value.PeriodIndex())
				// Propagate conversion errors instead of silently
				// dropping them (the error was previously ignored).
				if err := sv.SetValue(vi.Index(i).Interface()); err != nil {
					return err
				}
				value.addValue(sv, value.PeriodIndex(), uint32(i+1))
			}
		case FieldTypePeriodGroup:
			if Central.IsDebugLevel() {
				Central.Log.Debugf("Check period group slice possible")
			}
			vi := reflect.ValueOf(v)
			ti := reflect.TypeOf(v)
			if ti.Kind() == reflect.Ptr {
				ti = ti.Elem()
			}
			jsonV, _ := json.Marshal(v)
			if Central.IsDebugLevel() {
				Central.Log.Debugf("Work on group entry %s -> %s", ti.Name(), string(jsonV))
			}
			for i := 0; i < vi.Len(); i++ {
				// Create the i-th occurrence and its sub values.
				value.initMultipleSubValues(uint32(i+1), uint32(i+1), 0, false)
				if Central.IsDebugLevel() {
					Central.Log.Debugf("%d. Element len is %d", i, len(value.Elements))
				}
				iv := vi.Index(i)
				if iv.Kind() == reflect.Ptr {
					iv = iv.Elem()
				}
				ti = reflect.TypeOf(iv.Interface())
				for j, x := range value.Elements[i].Values {
					if Central.IsDebugLevel() {
						Central.Log.Debugf("Try setting element %d/%d -> %s", i, j, x.Type().Name())
					}
					// First try a Go struct field with the Adabas name.
					s := iv.FieldByName(x.Type().Name())
					if s.IsValid() {
						err := x.SetValue(s.Interface())
						if err != nil {
							Central.Log.Debugf("Error setting value for %s", x.Type().Name())
							return err
						}
					} else {
						// Fall back to the adabas struct tag mapping.
						if Central.IsDebugLevel() {
							Central.Log.Debugf("Try search tag of number of fields %d", ti.NumField())
						}
						sn := extractAdabasTagShortName(ti, x.Type().Name())
						s := iv.FieldByName(sn)
						if s.IsValid() {
							err := x.SetValue(s.Interface())
							if err != nil {
								Central.Log.Debugf("Error setting value for %s", x.Type().Name())
								return err
							}
						} else {
							if Central.IsDebugLevel() {
								Central.Log.Errorf("Invalid or missing field for %s", x.Type().Name())
							}
						}
					}
				}
			}
			if Central.IsDebugLevel() {
				Central.Log.Debugf("PE entries %d", value.NrElements())
			}
		default:
		}
	case reflect.Ptr, reflect.Struct:
		if value.Type().Type() != FieldTypeMultiplefield && value.Type().Type() != FieldTypePeriodGroup {
			Central.Log.Debugf("Check struct possible")
		}
	default:
		if Central.IsDebugLevel() {
			Central.Log.Debugf("Structure set interface, not implement yet %s -> %v", value.Type().Name(), v)
		}
	}
	return nil
}
// extractAdabasTagShortName scans the struct type's fields for an
// "adabas" tag whose third colon-separated component equals searchName
// and returns the matching Go field name, or "" when none matches.
func extractAdabasTagShortName(ti reflect.Type, searchName string) string {
	for i := 0; i < ti.NumField(); i++ {
		field := ti.Field(i)
		if Central.IsDebugLevel() {
			Central.Log.Debugf("%d Tag = %s -> %s", i, field.Tag, field.Name)
		}
		tag, ok := field.Tag.Lookup("adabas")
		if !ok {
			continue
		}
		if Central.IsDebugLevel() {
			Central.Log.Debugf("Adabas tag: %s", tag)
		}
		parts := strings.Split(tag, ":")
		if len(parts) > 2 && parts[2] == searchName {
			return field.Name
		}
	}
	return ""
}
// formatBufferSecondCall emits the format-buffer fragment for a second
// call: only MU fields inside a PE group contribute here, producing
// "<sn><pe>C,4,B,<sn><pe>(<range>),<len>". Returns the record-buffer
// size consumed (4 bytes counter plus the sub-field length) or 0 when
// the field does not participate.
func (value *StructureValue) formatBufferSecondCall(buffer *bytes.Buffer, option *BufferOption) uint32 {
	structureType := value.Type().(*StructureType)
	if structureType.Type() != FieldTypeMultiplefield || !structureType.HasFlagSet(FlagOptionPE) {
		if Central.IsDebugLevel() {
			Central.Log.Debugf("Skip because second call")
		}
		return 0
	}
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Generate FB for second call [%d,%d]", value.peIndex, value.muIndex)
	}
	if buffer.Len() > 0 {
		buffer.WriteString(",")
	}
	sn := value.Type().ShortName()
	idx := strconv.FormatInt(int64(value.peIndex), 10)
	r := structureType.muRange.FormatBuffer()
	length := structureType.SubTypes[0].Length()
	buffer.WriteString(sn + idx + "C,4,B," + sn + idx + "(" + r + ")," +
		strconv.FormatInt(int64(length), 10))
	if Central.IsDebugLevel() {
		Central.Log.Debugf("FB of second call %s", buffer.String())
	}
	return 4 + length
}
// FormatBuffer provides the format buffer of this structure. It appends
// the format-buffer fragment for the field to buffer and returns the
// number of record-buffer bytes the fragment will occupy. Second calls
// are delegated to formatBufferSecondCall; MU fields with a single
// index and leaf-less structures contribute nothing.
func (value *StructureValue) FormatBuffer(buffer *bytes.Buffer, option *BufferOption) uint32 {
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Write FormatBuffer for structure of %s store=%v", value.Type().Name(), option.StoreCall)
	}
	if option.SecondCall > 0 {
		return value.formatBufferSecondCall(buffer, option)
	}
	if value.Type().Type() == FieldTypeMultiplefield && value.Type().HasFlagSet(FlagOptionSingleIndex) {
		Central.Log.Debugf("Single index FB?")
		return 0
	}
	structureType := value.Type().(*StructureType)
	recordBufferLength := uint32(0)
	if structureType.NrFields() > 0 {
		if Central.IsDebugLevel() {
			Central.Log.Debugf("Structure FormatBuffer %s type=%d nrFields=%d", value.Type().Name(), value.Type().Type(), structureType.NrFields())
		}
		switch value.Type().Type() {
		case FieldTypeMultiplefield:
			// if structureType.HasFlagSet(FlagOptionSingleIndex) {
			// 	fmt.Println("FB:", structureType.peRange.FormatBuffer())
			// }
			// Read calls request the occurrence counter plus all
			// occurrences ("1-N"); store calls emit nothing here.
			if !option.StoreCall {
				if buffer.Len() > 0 {
					buffer.WriteString(",")
				}
				p := "1-N"
				//r := structureType.Range.FormatBuffer()
				if value.Type().HasFlagSet(FlagOptionPE) {
					// MU inside PE: only the per-occurrence counter.
					buffer.WriteString(value.Type().ShortName() + p + "(C),4")
				} else {
					// Plain MU: counter plus all occurrences with the
					// sub type's length and format character.
					buffer.WriteString(value.Type().ShortName() + "C,4,B,")
					muType := structureType.SubTypes[0]
					buffer.WriteString(value.Type().ShortName())
					buffer.WriteString(p + ",")
					buffer.WriteString(strconv.FormatInt(int64(muType.Length()), 10))
					buffer.WriteString("," + muType.Type().FormatCharacter())
					// buffer.WriteString(fmt.Sprintf("%s%s,%d,%s",
					// 	value.Type().ShortName(), p, muType.Length(), muType.Type().FormatCharacter()))
				}
				if Central.IsDebugLevel() {
					Central.Log.Debugf("Current MU field %s, search in %d nodes", value.Type().Name(), len(value.Elements))
				}
				recordBufferLength += option.multipleSize
			}
		case FieldTypePeriodGroup:
			if option.StoreCall {
			} else {
				if buffer.Len() > 0 {
					buffer.WriteString(",")
				}
				// Occurrence counter unless a single index is selected.
				if !value.Type().HasFlagSet(FlagOptionSingleIndex) {
					buffer.WriteString(value.Type().ShortName() + "C,4,B")
				}
				if Central.IsDebugLevel() {
					Central.Log.Debugf("%s Flag option %d %v %d", structureType.Name(), structureType.flags, structureType.HasFlagSet(FlagOptionPart), FlagOptionPart)
				}
				// Generic whole-group request only when neither an
				// atomic FB nor a partial read is in effect.
				if !value.Type().HasFlagSet(FlagOptionAtomicFB) && !value.Type().HasFlagSet(FlagOptionPart) {
					r := structureType.peRange.FormatBuffer()
					if Central.IsDebugLevel() {
						Central.Log.Debugf("Add generic format buffer field with range %s", r)
					}
					buffer.WriteString("," + value.Type().ShortName() + r)
				}
				recordBufferLength += option.multipleSize
			}
		default:
		}
	}
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Final structure RB FormatBuffer for %s: %s", value.Type().Name(), buffer.String())
	}
	return recordBufferLength
}
// StoreBuffer generates the store buffer. Structure nodes themselves
// contribute no record-buffer bytes (their child values are stored
// individually), so this is a no-op that only logs.
func (value *StructureValue) StoreBuffer(helper *BufferHelper, option *BufferOption) error {
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Skip store structured record buffer for %s at %d", value.Type().Name(), len(helper.buffer))
	}
	return nil
}
// addValue adds a sub value at the given period/multiple index. The
// element is looked up (or created) in elementMap keyed by the PE index
// — or the MU index when this structure is itself an MU field — and the
// sub value is registered both in the element's value list and in its
// name/index keyed valueMap. MU sub values without an index are skipped.
func (value *StructureValue) addValue(subValue IAdaValue, peindex uint32, muindex uint32) error {
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Add value index PE=%d MU=%d to list for %s[%d,%d], appending %s[%d,%d] %p", peindex, muindex, value.Type().Name(), value.PeriodIndex(), value.MultipleIndex(),
			subValue.Type().Name(), subValue.PeriodIndex(), subValue.MultipleIndex(), value)
	}
	// An MU sub value must carry a multiple index; index 0 is invalid.
	if value.Type().Type() == FieldTypeMultiplefield && muindex == 0 {
		Central.Log.Debugf("Skip MU index")
		// debug.PrintStack()
		return nil
	}
	//Central.Log.Debugf("Stack trace:\n%s", string(debug.Stack()))
	subValue.SetParent(value)
	var element *structureElement
	var ok bool
	lenElements := 0
	if value.Elements != nil {
		if Central.IsDebugLevel() {
			Central.Log.Debugf("Before Elements in list %d", len(value.Elements))
		}
		lenElements = len(value.Elements)
	}
	// MU structures are keyed by the multiple index, everything else by
	// the period index.
	curIndex := peindex
	if value.Type().Type() == FieldTypeMultiplefield {
		curIndex = muindex
	}
	if Central.IsDebugLevel() {
		Central.Log.Debugf("curIndex=%d PE index=%d MU index=%d ghost=%v", curIndex, peindex, muindex, subValue.Type().HasFlagSet(FlagOptionMUGhost))
		Central.Log.Debugf("Current add check current index = %d lenElements=%d", curIndex, lenElements)
	}
	// Find or create the element holding this occurrence.
	if element, ok = value.elementMap[curIndex]; !ok {
		element = newStructureElement()
		value.Elements = append(value.Elements, element)
		value.elementMap[curIndex] = element
		if Central.IsDebugLevel() {
			Central.Log.Debugf("Create new Elements on index %d", curIndex)
		}
	} else {
		if Central.IsDebugLevel() {
			Central.Log.Debugf("Elements already part of map %d", curIndex)
		}
	}
	// Back-fill missing indexes on the sub value.
	if subValue.PeriodIndex() == 0 {
		subValue.setPeriodIndex(peindex)
	}
	if value.Type().Type() == FieldTypeMultiplefield && subValue.MultipleIndex() == 0 {
		subValue.setMultipleIndex(muindex)
	}
	Central.Log.Debugf("Current period index for %s[%d:%d]", subValue.Type().Name(), subValue.PeriodIndex(), subValue.MultipleIndex())
	s := convertMapIndex(subValue)
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Search for %s", s)
	}
	var v IAdaValue
	if v, ok = element.valueMap[s]; ok {
		// Already registered under this key — nothing to append.
		if Central.IsDebugLevel() {
			Central.Log.Debugf("Found sub value found %s[%d:%d] %T",
				v.Type().Name(), v.PeriodIndex(), v.MultipleIndex(), v)
		}
	} else {
		// Check elements list already available
		if value.Elements == nil {
			if Central.IsDebugLevel() {
				Central.Log.Debugf("Create new list for %s and append", value.Type().Name())
			}
			// If MU field and index not already initialized, define index
			if value.Type().Type() == FieldTypeMultiplefield {
				/*if subValue.MultipleIndex() == 0 {
					subValue.setMultipleIndex(1)
				} else {*/
				if value.MultipleIndex() != 0 {
					subValue.setMultipleIndex(value.MultipleIndex())
				}
				//}
			}
			var values []IAdaValue
			values = append(values, subValue)
			element.Values = values
		} else {
			if Central.IsDebugLevel() {
				Central.Log.Debugf("Append list to %s len=%d", value.Type().Name(), len(element.Values))
			}
			// If MU field and index not already initialized, define index
			if value.Type().Type() == FieldTypeMultiplefield && subValue.MultipleIndex() == 0 {
				subValue.setMultipleIndex(uint32(lenElements + 1))
				// subValue.setMultipleIndex(uint32(len(element.Values) + 1))
			}
			element.Values = append(element.Values, subValue)
		}
		if Central.IsDebugLevel() {
			Central.Log.Debugf("Add sub value new %s[%d:%d] %T previous %s mapIndex %s",
				subValue.Type().Name(), subValue.PeriodIndex(), subValue.MultipleIndex(), subValue, s, convertMapIndex(subValue))
		}
		// The key is recomputed because the indexes may have been
		// back-filled since `s` was built.
		element.valueMap[convertMapIndex(subValue)] = subValue
		//element.valueMap[fmt.Sprintf("%s-%d-%d", subValue.Type().Name(), subValue.PeriodIndex(), subValue.MultipleIndex())] = subValue
	}
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Final list for %s[%d,%d] = %d elements for %s[%d,%d]", value.Type().Name(), value.PeriodIndex(),
			value.MultipleIndex(), len(value.Elements), subValue.Type().Name(), subValue.PeriodIndex(), subValue.MultipleIndex())
	}
	return nil
}
// convertMapIndex builds the value-map key "<name>-<peIndex>-<muIndex>"
// for a sub value.
func convertMapIndex(subValue IAdaValue) string {
	return subValue.Type().Name() + "-" +
		strconv.FormatUint(uint64(subValue.PeriodIndex()), 10) + "-" +
		strconv.FormatUint(uint64(subValue.MultipleIndex()), 10)
}
// Int8 is not supported for structure values; it always returns a
// conversion error.
func (value *StructureValue) Int8() (int8, error) {
	return 0, NewGenericError(105, value.Type().Name(), "signed 8-bit integer")
}

// UInt8 is not supported for structure values; it always returns a
// conversion error.
func (value *StructureValue) UInt8() (uint8, error) {
	return 0, NewGenericError(105, value.Type().Name(), "unsigned 8-bit integer")
}

// Int16 is not supported for structure values; it always returns a
// conversion error.
func (value *StructureValue) Int16() (int16, error) {
	return 0, NewGenericError(105, value.Type().Name(), "signed 16-bit integer")
}

// UInt16 is not supported for structure values; it always returns a
// conversion error.
func (value *StructureValue) UInt16() (uint16, error) {
	return 0, NewGenericError(105, value.Type().Name(), "unsigned 16-bit integer")
}

// Int32 is not supported for structure values; it always returns a
// conversion error.
func (value *StructureValue) Int32() (int32, error) {
	return 0, NewGenericError(105, value.Type().Name(), "signed 32-bit integer")
}

// UInt32 is not supported for structure values; it always returns a
// conversion error.
func (value *StructureValue) UInt32() (uint32, error) {
	return 0, NewGenericError(105, value.Type().Name(), "unsigned 32-bit integer")
}

// Int64 is not supported for structure values; it always returns a
// conversion error.
func (value *StructureValue) Int64() (int64, error) {
	return 0, NewGenericError(105, value.Type().Name(), "signed 64-bit integer")
}

// UInt64 is not supported for structure values; it always returns a
// conversion error.
func (value *StructureValue) UInt64() (uint64, error) {
	return 0, NewGenericError(105, value.Type().Name(), "unsigned 64-bit integer")
}

// Float is not supported for structure values; it always returns a
// conversion error.
func (value *StructureValue) Float() (float64, error) {
	return 0, NewGenericError(105, value.Type().Name(), "64-bit float")
}
// setPeriodIndex sets the period index on this structure and touches
// the period index of all child values.
func (value *StructureValue) setPeriodIndex(index uint32) {
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Set %s structure period index = %d -> %d", value.Type().Name(), value.PeriodIndex(), index)
	}
	value.peIndex = index
	for _, val := range value.Elements {
		for _, v := range val.Values {
			if Central.IsDebugLevel() {
				Central.Log.Debugf("Set %s period index in structure %d -> %d", v.Type().Name(), v.PeriodIndex(), index)
			}
			// NOTE(review): children are set to the constant 1 while the
			// debug message above reports `index`; confirm whether
			// v.setPeriodIndex(index) was intended here.
			v.setPeriodIndex(1)
		}
	}
}
|
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package manager
import (
"fmt"
"sync"
"time"
"k8s.io/api/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
)
// listObjectFunc lists objects in the given namespace with the given options.
type listObjectFunc func(string, metav1.ListOptions) (runtime.Object, error)

// watchObjectFunc opens a watch in the given namespace with the given options.
type watchObjectFunc func(string, metav1.ListOptions) (watch.Interface, error)

// newObjectFunc returns a new, empty instance of the cached object type.
type newObjectFunc func() runtime.Object

// isImmutableFunc reports whether the given object is marked as immutable.
type isImmutableFunc func(runtime.Object) bool
// objectCacheItem is a single item stored in objectCache.
type objectCacheItem struct {
	// refCount counts the registered pods referencing this object.
	refCount int
	// store holds the object as propagated by the reflector.
	store cache.Store
	// hasSynced reports whether the reflector completed its initial list.
	hasSynced func() (bool, error)
	// lock is protecting from closing stopCh multiple times.
	lock sync.Mutex
	// stopCh stops the reflector goroutine when closed.
	stopCh chan struct{}
}
// stop terminates the underlying reflector by closing stopCh. It is
// idempotent: the first call closes the channel and returns true, any
// subsequent call returns false.
func (i *objectCacheItem) stop() bool {
	i.lock.Lock()
	defer i.lock.Unlock()
	select {
	case <-i.stopCh:
		// Channel already closed by an earlier call.
		return false
	default:
	}
	close(i.stopCh)
	return true
}
// objectCache is a local cache of objects propagated via
// individual watches.
type objectCache struct {
	// Functions injected at construction time.
	listObject    listObjectFunc
	watchObject   watchObjectFunc
	newObject     newObjectFunc
	isImmutable   isImmutableFunc
	groupResource schema.GroupResource

	// lock guards items.
	lock  sync.RWMutex
	items map[objectKey]*objectCacheItem
}
// NewObjectCache returns a new watch-based instance of Store interface.
func NewObjectCache(
	listObject listObjectFunc,
	watchObject watchObjectFunc,
	newObject newObjectFunc,
	isImmutable isImmutableFunc,
	groupResource schema.GroupResource) Store {
	c := &objectCache{
		listObject:    listObject,
		watchObject:   watchObject,
		newObject:     newObject,
		isImmutable:   isImmutable,
		groupResource: groupResource,
		items:         map[objectKey]*objectCacheItem{},
	}
	return c
}
// newStore creates the per-object store used by a reflector.
func (c *objectCache) newStore() cache.Store {
	// TODO: We may consider created a dedicated store keeping just a single
	// item, instead of using a generic store implementation for this purpose.
	// However, simple benchmarks show that memory overhead in that case is
	// decrease from ~600B to ~300B per object. So we are not optimizing it
	// until we will see a good reason for that.
	return cache.NewStore(cache.MetaNamespaceKeyFunc)
}
// newReflector starts a reflector watching the single object with the
// given namespace/name and returns the cache item tracking it.
func (c *objectCache) newReflector(namespace, name string) *objectCacheItem {
	// Restrict list/watch to exactly one object by name.
	selector := fields.Set{"metadata.name": name}.AsSelector().String()
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = selector
			return c.listObject(namespace, options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = selector
			return c.watchObject(namespace, options)
		},
	}
	store := c.newStore()
	reflector := cache.NewNamedReflector(
		fmt.Sprintf("object-%q/%q", namespace, name),
		lw,
		c.newObject(),
		store,
		0,
	)
	item := &objectCacheItem{
		refCount:  0,
		store:     store,
		hasSynced: func() (bool, error) { return reflector.LastSyncResourceVersion() != "", nil },
		stopCh:    make(chan struct{}),
	}
	go reflector.Run(item.stopCh)
	return item
}
// AddReference registers one more pod referencing the object.
//
// AddReference is called from RegisterPod thus it needs to be efficient:
// it only bumps the refCount, starting a reflector on first registration
// of a given object. It is the responsibility of the first Get operation
// to wait until the reflector has propagated the store.
func (c *objectCache) AddReference(namespace, name string) {
	key := objectKey{namespace: namespace, name: name}
	c.lock.Lock()
	defer c.lock.Unlock()
	if item, ok := c.items[key]; ok {
		item.refCount++
		return
	}
	item := c.newReflector(namespace, name)
	item.refCount++
	c.items[key] = item
}
// DeleteReference drops one pod reference; when the last reference is
// gone the reflector is stopped and the item removed from the cache.
func (c *objectCache) DeleteReference(namespace, name string) {
	key := objectKey{namespace: namespace, name: name}
	c.lock.Lock()
	defer c.lock.Unlock()
	item, ok := c.items[key]
	if !ok {
		return
	}
	item.refCount--
	if item.refCount == 0 {
		// Stop the underlying reflector.
		item.stop()
		delete(c.items, key)
	}
}
// key returns key of an object with a given name and namespace.
// This has to be in-sync with cache.MetaNamespaceKeyFunc.
func (c *objectCache) key(namespace, name string) string {
	if namespace == "" {
		return name
	}
	return namespace + "/" + name
}
// Get returns the locally cached object for namespace/name. The object
// must have been registered via AddReference first; Get waits (up to one
// second) for the reflector to perform its initial sync before reading
// the store. If the object is immutable, the reflector is stopped since
// no further changes can occur.
func (c *objectCache) Get(namespace, name string) (runtime.Object, error) {
	key := objectKey{namespace: namespace, name: name}

	c.lock.RLock()
	item, exists := c.items[key]
	c.lock.RUnlock()

	if !exists {
		return nil, fmt.Errorf("object %q/%q not registered", namespace, name)
	}
	// Wait for the reflector's initial list to complete so the store is
	// populated before we read from it.
	if err := wait.PollImmediate(10*time.Millisecond, time.Second, item.hasSynced); err != nil {
		return nil, fmt.Errorf("failed to sync %s cache: %v", c.groupResource.String(), err)
	}
	obj, exists, err := item.store.GetByKey(c.key(namespace, name))
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, apierrors.NewNotFound(c.groupResource, name)
	}
	if object, ok := obj.(runtime.Object); ok {
		// If the returned object is immutable, stop the reflector.
		//
		// NOTE: we may potentially not even start the reflector if the object is
		// already immutable. However, given that:
		// - we want to also handle the case when object is marked as immutable later
		// - Secrets and ConfigMaps are periodically fetched by volumemanager anyway
		// - doing that wouldn't provide visible scalability/performance gain - we
		//   already have it from here
		// - doing that would require significant refactoring to reflector
		// we limit ourselves to just quickly stop the reflector here.
		if utilfeature.DefaultFeatureGate.Enabled(features.ImmutableEphemeralVolumes) && c.isImmutable(object) {
			if item.stop() {
				klog.V(4).Infof("Stopped watching for changes of %q/%q - object is immutable", namespace, name)
			}
		}
		return object, nil
	}
	return nil, fmt.Errorf("unexpected object type: %v", obj)
}
// NewWatchBasedManager creates a manager that keeps a cache of all objects
// necessary for registered pods.
// It implements the following logic:
// - whenever a pod is created or updated, we start individual watches for all
//   referenced objects that aren't referenced from other registered pods
// - every GetObject() returns a value from local cache propagated via watches
func NewWatchBasedManager(
	listObject listObjectFunc,
	watchObject watchObjectFunc,
	newObject newObjectFunc,
	isImmutable isImmutableFunc,
	groupResource schema.GroupResource,
	getReferencedObjects func(*v1.Pod) sets.String) Manager {
	return NewCacheBasedManager(
		NewObjectCache(listObject, watchObject, newObject, isImmutable, groupResource),
		getReferencedObjects,
	)
}
|
package problems
// validMountainArray reports whether A strictly increases to a single
// peak and then strictly decreases. Arrays shorter than 3, plateaus,
// arrays that start descending, and arrays that never descend are all
// rejected.
func validMountainArray(A []int) bool {
	if len(A) < 3 {
		return false
	}
	descending := false
	for i := 1; i < len(A); i++ {
		switch {
		case A[i] > A[i-1]:
			// Climbing again after the descent started is invalid.
			if descending {
				return false
			}
		case A[i] < A[i-1]:
			// A descent on the very first step means there was no ascent.
			if i == 1 {
				return false
			}
			descending = true
		default:
			// Equal neighbors are never allowed.
			return false
		}
	}
	// A mountain must actually come down after the peak.
	return descending
}
// validMountainArray1 is a two-walk variant: climb from the left while
// strictly increasing, descend from the right while strictly decreasing,
// and require both walks to be non-empty and to meet at the same peak.
func validMountainArray1(A []int) bool {
	n := len(A)
	up := 0
	for up+1 < n && A[up] < A[up+1] {
		up++
	}
	down := n - 1
	for down > 0 && A[down-1] > A[down] {
		down--
	}
	// up > 0: there was an ascent; down < n-1: there was a descent;
	// up == down: they meet at a single peak with nothing left over.
	return up > 0 && down < n-1 && up == down
}
|
package main
import (
"C"
"fmt"
"unsafe"
"github.com/axgle/mahonia"
)
// MakeJWT is exported to C (see the //export directive below). It
// decodes the GBK-encoded C string in buffer, prints the decoded text,
// and returns a GBK-encoded C string.
//
// NOTE(review): the returned C.CString is heap-allocated; the C caller
// is responsible for freeing it to avoid a memory leak.
//export MakeJWT
func MakeJWT(buffer *C.char)(*C.char) {
	dec := mahonia.NewDecoder("gbk")
	fmt.Println( dec.ConvertString( C.GoString(buffer) ) )
	enc := mahonia.NewEncoder("gbk")
	return C.CString( enc.ConvertString("MakeJWT 返回 ..."))
}
// main is required for a cgo c-shared/c-archive build; the entry points
// of interest are the exported functions above.
func main() {
}
|
package metadata
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/root-gg/plik/server/common"
)
// TestCreateSetting verifies that a setting can be created once and that
// creating the same key a second time fails.
func TestCreateSetting(t *testing.T) {
	b := newTestMetadataBackend()
	defer shutdownTestMetadataBackend(b)

	err := b.CreateSetting(&common.Setting{Key: "foo", Value: "bar"})
	require.NoError(t, err, "create setting error")

	// Duplicate key must be rejected.
	err = b.CreateSetting(&common.Setting{Key: "foo", Value: "bar"})
	require.Error(t, err, "create setting error expected")
}
// TestGetSetting verifies that fetching a missing key yields nil without
// error, and that a created setting is returned with its key and value.
func TestGetSetting(t *testing.T) {
	b := newTestMetadataBackend()
	defer shutdownTestMetadataBackend(b)

	// Missing key: no error, nil result.
	setting, err := b.GetSetting("foo")
	require.NoError(t, err, "get setting error")
	require.Nil(t, setting, "non nil setting")

	err = b.CreateSetting(&common.Setting{Key: "foo", Value: "bar"})
	require.NoError(t, err, "create setting error")

	setting, err = b.GetSetting("foo")
	require.NoError(t, err, "get setting error")
	require.NotNil(t, setting, "nil setting")
	require.Equal(t, "foo", setting.Key, "invalid setting key")
	require.Equal(t, "bar", setting.Value, "invalid setting value")
}
// TestUpdateSetting verifies compare-and-swap semantics of
// UpdateSetting(key, oldValue, newValue): updating a missing key fails,
// updating with the matching old value succeeds, and repeating the same
// update (old value no longer matches) fails again.
func TestUpdateSetting(t *testing.T) {
	b := newTestMetadataBackend()
	defer shutdownTestMetadataBackend(b)

	// Key does not exist yet.
	err := b.UpdateSetting("foo", "bar", "baz")
	require.Error(t, err, "update setting error expected")

	err = b.CreateSetting(&common.Setting{Key: "foo", Value: "bar"})
	require.NoError(t, err, "create setting error")

	// Old value matches -> update succeeds.
	err = b.UpdateSetting("foo", "bar", "baz")
	require.NoError(t, err, "update setting error")

	// Old value is now "baz", so "bar" no longer matches.
	err = b.UpdateSetting("foo", "bar", "baz")
	require.Error(t, err, "update setting error expected")

	setting, err := b.GetSetting("foo")
	require.NoError(t, err, "get setting error")
	require.NotNil(t, setting, "nil setting")
	require.Equal(t, "foo", setting.Key, "invalid setting key")
	require.Equal(t, "baz", setting.Value, "invalid setting value")
}
// TestDeleteSetting verifies that a created setting disappears after
// DeleteSetting (subsequent Get returns nil without error).
func TestDeleteSetting(t *testing.T) {
	b := newTestMetadataBackend()
	defer shutdownTestMetadataBackend(b)

	err := b.CreateSetting(&common.Setting{Key: "foo", Value: "bar"})
	require.NoError(t, err, "create setting error")

	setting, err := b.GetSetting("foo")
	require.NoError(t, err, "get setting error : %s", err)
	require.NotNil(t, setting, "nil setting")

	err = b.DeleteSetting("foo")
	require.NoError(t, err, "delete setting error : %s", err)

	setting, err = b.GetSetting("foo")
	require.NoError(t, err, "get setting error : %s", err)
	require.Nil(t, setting, "non nil setting")
}
// TestBackend_ForEachSetting verifies that ForEachSetting visits every
// stored setting exactly once and that an error returned by the callback
// is propagated to the caller.
func TestBackend_ForEachSetting(t *testing.T) {
	b := newTestMetadataBackend()
	defer shutdownTestMetadataBackend(b)

	err := b.CreateSetting(&common.Setting{Key: "foo", Value: "bar"})
	require.NoError(t, err, "create setting error")

	// Callback counts visits and checks the single stored setting.
	count := 0
	f := func(setting *common.Setting) error {
		count++
		require.Equal(t, "foo", setting.Key, "invalid setting key")
		require.Equal(t, "bar", setting.Value, "invalid setting value")
		return nil
	}
	err = b.ForEachSetting(f)
	require.NoError(t, err, "for each setting error : %s", err)
	require.Equal(t, 1, count, "invalid setting count")

	// Callback errors must surface through ForEachSetting.
	f = func(setting *common.Setting) error {
		return fmt.Errorf("expected")
	}
	err = b.ForEachSetting(f)
	require.Errorf(t, err, "expected")
}
|
package gcp
import (
"fmt"
"sort"
"strconv"
"strings"
machineapi "github.com/openshift/api/machine/v1beta1"
"github.com/openshift/installer/pkg/quota"
"github.com/openshift/installer/pkg/types"
)
// Constraints returns a list of quota constraints based on the InstallConfig.
// These constraints can be used to check if there is enough quota for creating a cluster
// for the install config.
func Constraints(client *Client, config *types.InstallConfig, controlPlanes []machineapi.Machine, computes []machineapi.MachineSet) []quota.Constraint {
	ctrplConfigs := make([]*machineapi.GCPMachineProviderSpec, len(controlPlanes))
	for i, m := range controlPlanes {
		ctrplConfigs[i] = m.Spec.ProviderSpec.Value.Object.(*machineapi.GCPMachineProviderSpec)
	}
	computeReplicas := make([]int64, len(computes))
	computeConfigs := make([]*machineapi.GCPMachineProviderSpec, len(computes))
	for i, w := range computes {
		// Replicas is a pointer and may legitimately be unset; treat a
		// nil value as zero replicas instead of dereferencing it.
		if w.Spec.Replicas != nil {
			computeReplicas[i] = int64(*w.Spec.Replicas)
		}
		computeConfigs[i] = w.Spec.Template.Spec.ProviderSpec.Value.Object.(*machineapi.GCPMachineProviderSpec)
	}

	// Collect the constraints from every generator, then merge duplicates.
	var ret []quota.Constraint
	for _, gen := range []constraintGenerator{
		network(config),
		apiExternal(config),
		apiInternal(config),
		controlPlane(client, config, ctrplConfigs),
		compute(client, config, computeReplicas, computeConfigs),
		others,
	} {
		ret = append(ret, gen()...)
	}

	return aggregate(ret)
}
// aggregate merges adjacent constraints that share the same name and
// region, summing their counts. Input is stably sorted by name first so
// equal names are adjacent. The merge is done in place.
func aggregate(quotas []quota.Constraint) []quota.Constraint {
	// Guard the empty input: quotas[:i+1] below would panic on an
	// empty slice.
	if len(quotas) == 0 {
		return quotas
	}
	sort.SliceStable(quotas, func(i, j int) bool {
		return quotas[i].Name < quotas[j].Name
	})

	// i points at the last merged constraint; j scans forward.
	i := 0
	for j := 1; j < len(quotas); j++ {
		if quotas[i].Name == quotas[j].Name && quotas[i].Region == quotas[j].Region {
			quotas[i].Count += quotas[j].Count
		} else {
			i++
			if i != j {
				quotas[i] = quotas[j]
			}
		}
	}
	return quotas[:i+1]
}
// constraintGenerator generates the list of quota constraints for one
// aspect of the cluster (network, API, machines, ...).
type constraintGenerator func() []quota.Constraint
// network returns a generator for the networking-related quota
// constraints. When installing into a pre-existing network only the
// firewall rules are created; otherwise network, subnetworks and router
// are created as well.
func network(config *types.InstallConfig) func() []quota.Constraint {
	return func() []quota.Constraint {
		firewalls := []quota.Constraint{{
			Name:   "compute.googleapis.com/firewalls",
			Region: "global",
			Count:  6,
		}}

		// Pre-existing network: only firewall rules are needed.
		if len(config.Platform.GCP.Network) > 0 {
			return firewalls
		}

		net := []quota.Constraint{{
			Name:   "compute.googleapis.com/networks",
			Region: "global",
			Count:  1,
		}, {
			Name:   "compute.googleapis.com/subnetworks",
			Region: "global",
			Count:  2,
		}, {
			Name:   "compute.googleapis.com/routers",
			Region: "global",
			Count:  1,
		}}
		return append(net, firewalls...)
	}
}
// apiExternal returns a generator for the external API load-balancer
// constraints; internal-only clusters create none of these resources.
func apiExternal(config *types.InstallConfig) func() []quota.Constraint {
	return func() []quota.Constraint {
		if config.Publish == types.InternalPublishingStrategy {
			return nil
		}
		global := func(name string) quota.Constraint {
			return quota.Constraint{Name: name, Region: "global", Count: 1}
		}
		return []quota.Constraint{
			global("compute.googleapis.com/health_checks"),
			global("compute.googleapis.com/forwarding_rules"),
			global("compute.googleapis.com/target_pools"),
			{
				Name:   "compute.googleapis.com/regional_static_addresses",
				Region: config.Platform.GCP.Region,
				Count:  1,
			},
		}
	}
}
// apiInternal returns a generator for the internal API load-balancer
// constraints, which are always created.
func apiInternal(config *types.InstallConfig) func() []quota.Constraint {
	return func() []quota.Constraint {
		global := func(name string) quota.Constraint {
			return quota.Constraint{Name: name, Region: "global", Count: 1}
		}
		return []quota.Constraint{
			global("compute.googleapis.com/health_checks"),
			global("compute.googleapis.com/forwarding_rules"),
			global("compute.googleapis.com/backend_services"),
			{
				Name:   "compute.googleapis.com/regional_static_addresses",
				Region: config.Platform.GCP.Region,
				Count:  1,
			},
		}
	}
}
// controlPlane returns a generator for the control-plane machine
// constraints: one CPU constraint per machine plus one service account.
func controlPlane(client MachineTypeGetter, config *types.InstallConfig, machines []*machineapi.GCPMachineProviderSpec) func() []quota.Constraint {
	return func() []quota.Constraint {
		ret := make([]quota.Constraint, 0, len(machines)+1)
		for _, m := range machines {
			constraint := machineTypeToQuota(client, m.Zone, m.MachineType)
			constraint.Region = config.Platform.GCP.Region
			ret = append(ret, constraint)
		}
		return append(ret, quota.Constraint{
			Name:   "iam.googleapis.com/quota/service-account-count",
			Region: "global",
			Count:  1,
		})
	}
}
// compute returns a generator for the compute machine-set constraints:
// per machine set the per-machine CPU quota scaled by its replica count,
// plus one service account.
func compute(client MachineTypeGetter, config *types.InstallConfig, replicas []int64, machines []*machineapi.GCPMachineProviderSpec) func() []quota.Constraint {
	return func() []quota.Constraint {
		ret := make([]quota.Constraint, 0, len(machines)+1)
		for idx, m := range machines {
			constraint := machineTypeToQuota(client, m.Zone, m.MachineType)
			constraint.Count *= replicas[idx]
			constraint.Region = config.Platform.GCP.Region
			ret = append(ret, constraint)
		}
		return append(ret, quota.Constraint{
			Name:   "iam.googleapis.com/quota/service-account-count",
			Region: "global",
			Count:  1,
		})
	}
}
// others returns the fixed constraints that do not depend on the
// install config: one image and three additional service accounts.
func others() []quota.Constraint {
	return []quota.Constraint{
		{
			Name:   "compute.googleapis.com/images",
			Region: "global",
			Count:  1,
		},
		{
			Name:   "iam.googleapis.com/quota/service-account-count",
			Region: "global",
			Count:  3,
		},
	}
}
// machineTypeToQuota maps a machine type to its CPU quota constraint.
// Machine classes with a dedicated quota metric (c2, m1, m2, n2, n2d)
// use "<class>_cpus"; everything else counts against the generic "cpus"
// metric. The CPU count comes from the API when available, otherwise it
// is guessed from the type name.
func machineTypeToQuota(client MachineTypeGetter, zone string, machineType string) quota.Constraint {
	class := strings.SplitN(machineType, "-", 2)[0]
	name := "compute.googleapis.com/cpus"
	switch class {
	case "c2", "m1", "m2", "n2", "n2d":
		name = fmt.Sprintf("compute.googleapis.com/%s_cpus", class)
	}
	info, err := client.GetMachineType(zone, machineType)
	if err != nil {
		// API lookup failed; fall back to a name-based guess.
		return quota.Constraint{Name: name, Count: guessMachineCPUCount(machineType)}
	}
	return quota.Constraint{Name: name, Count: info.GuestCpus}
}
// the guess is based on https://cloud.google.com/compute/docs/machine-types
//
// guessMachineCPUCount derives a CPU count from the machine-type name
// alone: custom 4-part names and known 3-part classes carry the count in
// the third component; shared-core 2-part types have fixed counts.
// Unknown shapes yield 0.
func guessMachineCPUCount(machineType string) int64 {
	parts := strings.Split(machineType, "-")
	switch len(parts) {
	case 4:
		if n, err := strconv.ParseInt(parts[2], 10, 0); err == nil {
			return n
		}
	case 3:
		switch parts[0] {
		case "c2", "m1", "m2", "n1", "n2", "n2d", "e2":
			if n, err := strconv.ParseInt(parts[2], 10, 0); err == nil {
				return n
			}
		}
	case 2:
		if parts[0] == "e2" {
			return 2
		}
		if parts[0] == "f1" || parts[0] == "g1" {
			return 1
		}
	}
	return 0
}
|
package mysqldb
import (
"context"
"time"
)
// PushNotification is a push-notification record.
//
// NOTE(review): gorm column tags are conventionally written as
// `gorm:"column:pn_id"`; confirm that the bare `gorm:"pn_id"` form maps
// the columns as intended for the gorm version in use.
type PushNotification struct {
	PnID int32 `gorm:"pn_id"`
	PnDisplayTime string `gorm:"pn_display_time"`
	PnTitle string `gorm:"pn_title"`
	PnImageURL string `gorm:"pn_image_url"`
	PnContentURL string `gorm:"pn_content_url"`
	PnType int32 `gorm:"pn_type"`
	CreatedAt time.Time // creation time
	UpdatedAt time.Time // last update time
	DeletedAt *time.Time // soft-delete time
}
// TableName returns the database table backing PushNotification.
// (The previous comment referred to "QRCode", which was a copy-paste
// leftover.)
func (pn PushNotification) TableName() string {
	return "push_notification"
}
// GetPnsByUserID returns the push notifications the given user has not
// yet read (no matching pn_record row), ordered by creation time
// descending.
//
// NOTE(review): the `size` parameter is currently unused — the query has
// no LIMIT clause; confirm whether result limiting was intended.
func (db *DbClient) GetPnsByUserID(ctx context.Context, UserID int32, size int32) ([]PushNotification, error) {
	var pns []PushNotification
	err := db.GetDB(ctx).Raw(`Select
	PN.pn_id,
	PN.pn_title,
	PN.pn_display_time,
	PN.pn_image_url,
	PN.pn_content_url
	from push_notification as PN where PN.pn_id not in
	(select PR.pn_id from pn_record AS PR where PR.user_id = ? ) order by PN.created_at desc`, UserID).Scan(&pns).Error
	if err != nil {
		return nil, err
	}
	return pns, nil
}
|
package auth
import (
"errors"
"fmt"
"github.com/dgrijalva/jwt-go"
)
// JwtVerifier validates JWT signatures using an RSA public key obtained
// from the configured PublicKeyProvider.
type JwtVerifier struct {
	provider PublicKeyProvider
}
// NewJwtVerifier builds a JwtVerifier that fetches its verification key
// from the given provider.
func NewJwtVerifier(provider PublicKeyProvider) (*JwtVerifier, error) {
	return &JwtVerifier{provider: provider}, nil
}
// Validate parses tokenString, verifies its signature against the RSA
// public key supplied by the provider, and returns the token's claims.
// Only RSA signing methods are accepted; any other algorithm (including
// the "alg: none" attack) is rejected.
//
// The parse/verification error, if any, is returned alongside the
// claims so callers can still inspect claims of e.g. expired tokens;
// callers must treat a non-nil error as a failed validation.
func (v JwtVerifier) Validate(tokenString string) (jwt.MapClaims, error) {
	publicKey, err := v.provider.Get()
	if err != nil {
		return nil, err
	}

	verifyKey, err := jwt.ParseRSAPublicKeyFromPEM(publicKey)
	if err != nil {
		return nil, err
	}

	token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) {
		if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
		}
		return verifyKey, nil
	})
	if token == nil {
		return nil, errors.New("token must not be nil")
	}

	// Guard the type assertion: claims other than MapClaims would
	// previously panic here on a malformed token.
	claims, ok := token.Claims.(jwt.MapClaims)
	if !ok {
		return nil, fmt.Errorf("unexpected claims type %T", token.Claims)
	}
	return claims, err
}
|
package config
import (
"fmt"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/instructure-bridge/muss/testutil"
)
// NOTE: For YAML:
// - don't let any tabs get in
// - file paths are relative to this test file's parent dir
// parseAndCompose parses a YAML project config, builds a ProjectConfig from
// it, and returns the composed docker-compose map alongside the config.
func parseAndCompose(yaml string) (map[string]interface{}, *ProjectConfig, error) {
	raw, err := parseYaml([]byte(yaml))
	if err != nil {
		return nil, nil, err
	}
	projectCfg, err := NewConfigFromMap(raw)
	if err != nil {
		return nil, nil, err
	}
	composed, err := projectCfg.ComposeConfig()
	if err != nil {
		return nil, nil, err
	}
	// Round-trip for coverage: ensure anything we put in can come out.
	if _, err := projectCfg.ToMap(); err != nil {
		return nil, nil, err
	}
	return composed, projectCfg, nil
}
// assertConfigError parses and composes the given config and fails the test
// unless composing fails with an error whose text contains expErr.
func assertConfigError(t *testing.T, config, expErr string, msgAndArgs ...interface{}) {
	t.Helper()
	_, _, err := parseAndCompose(config)
	if err == nil {
		t.Fatal("expected error, found nil")
	}
	assert.Contains(t, err.Error(), expErr, msgAndArgs...)
}
// assertComposed parses and composes config, asserts that the composed
// docker-compose map equals the YAML given in exp, and returns the resulting
// ProjectConfig for further inspection by the caller.
func assertComposed(t *testing.T, config, exp string, msgAndArgs ...interface{}) *ProjectConfig {
	t.Helper()
	parsedExp, err := parseYaml([]byte(exp))
	if err != nil {
		t.Fatalf("Error parsing exp yaml: %s", err)
	}
	actual, projectConfig, err := parseAndCompose(config)
	if err != nil {
		t.Fatalf("Error parsing config: %s", err)
	}
	assert.Equal(t,
		parsedExp,
		actual,
		msgAndArgs...)
	return projectConfig
}
// TestDockerComposeConfig exercises module-order resolution (default vs.
// user vs. env var), user overrides, module disabling, secrets, and the
// 'include' mechanism of module configs, comparing composed output against
// expectation files under ../testdata/expectations.
func TestDockerComposeConfig(t *testing.T) {
	// Shared YAML fragments concatenated into per-subtest configs.
	moduleFiles := `
module_files:
- ../testdata/app.yml
- ../testdata/microservice.yml
- ../testdata/store.yml
`
	preferRepo := `
default_module_order: [repo, registry]
`
	preferRegistry := `
default_module_order: [registry, repo]
`
	userRepo := `
user: {module_order: [repo, registry]}
`
	userRegistry := `
user: {module_order: [registry, repo]}
`
	secretConfig := `
secret_passphrase: $MUSS_TEST_PASSPHRASE
secret_commands:
  print:
    exec: [echo]
  show:
    exec: [echo]
`
	// Expected composed output for each preference combination.
	expRepo := testutil.ReadFile(t, "../testdata/expectations/repo.yml")
	expRegistry := testutil.ReadFile(t, "../testdata/expectations/registry.yml")
	expRepoMsRemote := testutil.ReadFile(t, "../testdata/expectations/user-repo-ms-remote.yml")
	expRegistryMsRepo := testutil.ReadFile(t, "../testdata/expectations/user-registry-ms-repo.yml")
	t.Run("repo preference", func(t *testing.T) {
		config := preferRepo + moduleFiles
		assertComposed(t, config, expRepo, "Config with repo preference")
		assertComposed(t, (config + userRegistry), expRegistry, "user preference overrides")
	})
	t.Run("registry preference", func(t *testing.T) {
		config := preferRegistry + moduleFiles
		assertComposed(t, config, expRegistry, "Config with registry preference")
		assertComposed(t, (config + userRepo), expRepo, "user preference overrides")
	})
	t.Run("user but no user module_order", func(t *testing.T) {
		config := preferRegistry + moduleFiles + `
user: {}
`
		assertComposed(t, config, expRegistry, "user without module_order same as default")
	})
	t.Run("user custom module config", func(t *testing.T) {
		config := preferRepo + moduleFiles + `
user_file: ../testdata/user-registry-ms-repo.yml
`
		assertComposed(t, config, expRegistryMsRepo, "user preference overrides orders")
	})
	t.Run("env var custom module config", func(t *testing.T) {
		config := preferRegistry + moduleFiles + secretConfig + `
user:
  module_order: [registry]
`
		defer os.Unsetenv("MUSS_MODULE_ORDER")
		os.Setenv("MUSS_MODULE_ORDER", "bar,remote,repo,foo")
		assertComposed(t, config, expRepoMsRemote, "MUSS_MODULE_ORDER env var overrides orders")
	})
	t.Run("deprecated env var works and warns", func(t *testing.T) {
		config := preferRegistry + moduleFiles + secretConfig + `
user:
  module_order: [repo]
`
		defer os.Unsetenv("MUSS_SERVICE_PREFERENCE")
		os.Setenv("MUSS_SERVICE_PREFERENCE", "remote")
		cfg := assertComposed(t, config, expRepoMsRemote, "MUSS_MODULE_ORDER env var overrides orders")
		assert.Equal(t, []string{"MUSS_SERVICE_PREFERENCE is deprecated in favor of MUSS_MODULE_ORDER."}, cfg.Warnings, "cfg warnings")
	})
	t.Run("user override", func(t *testing.T) {
		config := preferRegistry + moduleFiles + `
user:
  override:
    version: '3.5'
    volumes: {overdeps: {}}
    services:
      ms:
        environment:
          OVERRIDE: oh, the power!
      work:
        volumes: [overdeps:/var/deps]
`
		exp := testutil.ReadFile(t, "../testdata/expectations/registry-user-override.yml")
		assertComposed(t, config, exp, "user preference overrides orders")
	})
	t.Run("user can disable modules", func(t *testing.T) {
		config := preferRegistry + moduleFiles + `
user:
  modules:
    app:
      disabled: true
`
		exp := testutil.ReadFile(t, "../testdata/expectations/user-registry-app-disabled.yml")
		assertComposed(t, config, exp, "user disabled modules")
	})
	t.Run("config errors", func(t *testing.T) {
		assertComposed(t,
			(preferRegistry + moduleFiles + `
user:
  modules:
    microservice: {}
`),
			expRegistry,
			"No error on empty module config")
		assertConfigError(t,
			`
module_files:
- ../testdata/microservice.yml
user:
  modules:
    microservice:
      config: not-found
`,
			"Config 'not-found' for module 'microservice' does not exist",
			"Errors for not-found user module config choice")
	})
	t.Run("secrets", func(t *testing.T) {
		// We don't actually create this we just want a string.
		setCacheRoot("/tmp/.muss-test-cache")
		os.Setenv("MUSS_TEST_PASSPHRASE", "decomposing")
		config := preferRegistry + moduleFiles + secretConfig + `
user:
  module_order: [repo]
  modules:
    microservice:
      config: remote
`
		projectConfig := assertComposed(t, config, expRepoMsRemote, "module defs with secrets")
		if len(projectConfig.Secrets) != 3 {
			t.Fatalf("expected 3 secrets, found %d", len(projectConfig.Secrets))
		}
		assert.Equal(t, "MSKEY", projectConfig.Secrets[0].VarName())
		assert.Equal(t, "OTHER_SECRET_TEST", projectConfig.Secrets[1].VarName())
		// Secrets may be declared as a list (varname key) or as a map.
		projectConfig = assertComposed(t,
			`
secret_passphrase: $MUSS_TEST_PASSPHRASE
module_definitions:
- name: one
  configs:
    sole:
      secrets:
      - varname: FOO_SECRET
        exec: [echo, foo]
- name: two
  configs:
    sole:
      secrets:
        BAR_SHH:
          exec: [echo, bar]
        SECOND_BAR:
          exec: [echo, two]
`,
			`{version: '3.7'}`,
			"secrets as map or list",
		)
		actualVarNames := make([]string, len(projectConfig.Secrets))
		for i := range projectConfig.Secrets {
			actualVarNames[i] = projectConfig.Secrets[i].VarName()
		}
		assert.ElementsMatch(t,
			[]string{"FOO_SECRET", "BAR_SHH", "SECOND_BAR"},
			actualVarNames)
	})
	t.Run("include errors", func(t *testing.T) {
		assertConfigError(t, `
module_definitions:
- name: one
  configs:
    _base:
      version: '2.1'
    sole:
      include:
      - _no
`,
			"invalid 'include'; config '_no' not found",
			"bad include string")
		assertConfigError(t, `
module_definitions:
- name: one
  configs:
    _base:
      version: '2.1'
    sole:
      include:
      - bad: map
`,
			"invalid 'include' map; valid keys: 'file'",
			"bad include map")
		assertConfigError(t, `
module_definitions:
- name: one
  configs:
    _base:
      version: '2.1'
    sole:
      include:
      - [no, good]
`,
			"invalid 'include' value; must be a string or a map",
			"bad include type")
		assertConfigError(t, `
module_definitions:
- name: one
  configs:
    _base:
      version: '2.1'
    sole:
      include:
      - file: no-file.txt
`,
			"failed to read 'no-file.txt': open no-file.txt: no such file",
			"bad include type")
	})
	t.Run("include", func(t *testing.T) {
		assertComposed(t, `
module_definitions:
- name: one
  configs:
    _base:
      version: '2.1'
    sole:
      include:
      - _base
`,
			"{version: '2.1'}",
			"include string")
		assertComposed(t, `
module_definitions:
- name: one
  configs:
    _base:
      services:
        app:
          image: alpine
          init: true
    _edge:
      services:
        app:
          image: alpine:edge
          tty: true
    sole:
      include:
      - _base
      - _edge
`,
			"{version: '3.7', services: {app: {image: alpine:edge, init: true, tty: true}}}",
			"multiple include strings merge")
		testutil.WithTempDir(t, func(tmpdir string) {
			testutil.WriteFile(t, filepath.Join("files", "between.yml"), `
version: '2.3'
services:
  app:
    image: alpine:latest
    stdin_open: true
`)
			assertComposed(t, `
module_definitions:
- name: one
  file: `+filepath.Join("files", "sd.yml")+`
  configs:
    sole:
      include:
      - file: between.yml
`,
				"{version: '2.3', services: {app: {image: alpine:latest, stdin_open: true}}}",
				"include file")
			assertComposed(t, `
module_definitions:
- name: one
  file: `+filepath.Join("files", "sd.yml")+`
  configs:
    _base:
      services:
        app:
          image: alpine
          init: true
    _edge:
      services:
        app:
          image: alpine:edge
          tty: true
    sole:
      include:
      - _base
      - file: between.yml
      - _edge
`,
				"{version: '2.3', services: {app: {image: alpine:edge, init: true, tty: true, stdin_open: true}}}",
				"include strings and file mixed")
		})
	})
}
// TestPrepareVolumes checks that prepareVolumes maps each volume spec
// (string form or map form) to the expected file/dir generator function.
func TestPrepareVolumes(t *testing.T) {
	os.Unsetenv("MUSS_TEST_VAR")
	t.Run("using ./", func(t *testing.T) {
		assertPreparedVolumes(
			t,
			map[string]interface{}{
				"volumes": []interface{}{
					"named_vol:/some/vol",
					"named_child:/usr/src/app/named_mount",
					"/root/dir:/some/root",
					"/root/sub:/usr/src/app/sub/root",
					"${MUSS_TEST_VAR:-.}:/usr/src/app", // keep this in the middle (not first)
					map[string]interface{}{
						"type":   "volume",
						"source": "named_map",
						"target": "/named/map",
					},
					map[string]interface{}{
						"type":   "bind",
						"source": "/file",
						"target": "/anywhere",
						"file":   true,
					},
				},
			},
			FileGenMap{
				"named_mount": attemptEnsureMountPointExists,
				"/root/dir":   attemptEnsureMountPointExists,
				"/root/sub":   attemptEnsureMountPointExists,
				"sub/root":    attemptEnsureMountPointExists,
				"/file":       ensureFile,
			},
		)
	})
	t.Run("using children of ./", func(t *testing.T) {
		assertPreparedVolumes(
			t,
			map[string]interface{}{
				"volumes": []interface{}{
					"${MUSS_TEST_VAR:-./foo}:/somewhere/foo",
					"./bar/:/usr/src/app/bar",
					"named_foo:/somewhere/foo/baz",
					"./t/qux:/usr/src/app/bar/quxt",
				},
			},
			FileGenMap{
				"foo":      attemptEnsureMountPointExists,
				"bar":      attemptEnsureMountPointExists,
				"foo/baz":  attemptEnsureMountPointExists,
				"t/qux":    attemptEnsureMountPointExists,
				"bar/quxt": attemptEnsureMountPointExists,
			},
		)
	})
}
// assertPreparedVolumes runs prepareVolumes over the service definition and
// asserts the resulting generator map matches exp.
func assertPreparedVolumes(t *testing.T, service map[string]interface{}, exp FileGenMap) {
	t.Helper()
	actual, err := prepareVolumes(service)
	if err != nil {
		t.Fatal(err)
	}
	// Func values are not comparable with ==, so compare the entry count
	// plus the printed identity of each generator instead.
	assert.Equal(t, len(exp), len(actual))
	for path := range exp {
		assert.NotNilf(t, actual[path], "actual %s not nil", path)
		assert.Equalf(t, describeFunc(exp[path]), describeFunc(actual[path]), "funcs for %s", path)
	}
	// On failure a raw diff of the two maps is still useful for debugging.
	if t.Failed() {
		assert.Equal(t, exp, actual)
	}
}
// describeFunc renders a value in Go syntax; for a func this includes its
// address, e.g. "(func(string) error)(0x12de840)", which lets tests compare
// function identities as strings.
func describeFunc(v interface{}) string {
	repr := fmt.Sprintf("%#v", v)
	return repr
}
|
package tmpl1
import (
"strconv"
"testing"
"github.com/sko00o/leetcode-adventure/queue-stack/queue/bfs"
"github.com/sko00o/leetcode-adventure/queue-stack/queue/bfs/tmpl1"
"github.com/sko00o/leetcode-adventure/queue-stack/queue/bfs/tmpl2"
"github.com/stretchr/testify/require"
)
// TestBFS builds a small directed graph and checks that both BFS template
// implementations return the shortest step count from root to target
// (-1 when the target is unreachable).
//
// Graph edges: A->{B,C,D}, B->E, C->{E,F}, D->G, F->G.
func TestBFS(t *testing.T) {
	E := &bfs.Node{}
	B := &bfs.Node{
		Neighbors: []*bfs.Node{E},
	}
	G := &bfs.Node{}
	F := &bfs.Node{
		Neighbors: []*bfs.Node{G},
	}
	D := &bfs.Node{
		Neighbors: []*bfs.Node{G},
	}
	C := &bfs.Node{
		Neighbors: []*bfs.Node{E, F},
	}
	A := &bfs.Node{
		Neighbors: []*bfs.Node{B, C, D},
	}
	tests := []struct {
		root, target *bfs.Node
		step         int
	}{
		{A, B, 1},
		{A, C, 1},
		{A, D, 1},
		{A, E, 2},
		{A, F, 2},
		{A, G, 2},
		{C, G, 2},
		{B, G, -1}, // edges are directed, so G is unreachable from B
		{D, E, -1},
	}
	// Run the same table against both template implementations.
	for idx, BFS := range []func(root, target *bfs.Node) int{
		tmpl1.BFS,
		tmpl2.BFS,
	} {
		t.Run(strconv.Itoa(idx), func(t *testing.T) {
			for idx, tst := range tests {
				t.Run(strconv.Itoa(idx), func(t *testing.T) {
					assert := require.New(t)
					assert.Equal(tst.step, BFS(tst.root, tst.target))
				})
			}
		})
	}
}
|
package handlers
import (
"fmt"
"github.com/david-sorm/montesquieu/article"
"github.com/david-sorm/montesquieu/globals"
templates "github.com/david-sorm/montesquieu/template"
"net/http"
"strconv"
"strings"
)
// IndexView is the template model for the paginated article index page.
type IndexView struct {
	BlogName string
	// a list of articles which should be displayed on the page
	Articles []article.Article
	// the last page, if there's one, used for the last button
	// essentially Page - 1
	LastPage uint64
	// the current page (zero-indexed)
	Page uint64
	// the next page, if there's one, used for the next button
	// essentially Page + 1
	NextPage uint64
	// the biggest page
	MaxPage uint64
}
// countMaxPage returns the zero-based index of the last page needed to show
// NumOfArticles articles at ArticlesPerPage articles per page.
//
// BUG FIX: the original detected "leftover" articles by comparing a float64
// division against integer division, which relies on exact float equality
// (inexact for large values) and on uint64 underflow when
// NumOfArticles < ArticlesPerPage. The integer ceiling form below is exact
// for all inputs and produces identical results in the normal range.
//
// ArticlesPerPage must be non-zero (division by zero panics, as before).
func countMaxPage(NumOfArticles uint64, ArticlesPerPage uint64) uint64 {
	if NumOfArticles == 0 {
		return 0
	}
	// ceil(N/APP) - 1 == (N-1)/APP for N >= 1, since pages are zero-indexed.
	return (NumOfArticles - 1) / ArticlesPerPage
}
// HandleIndex renders the paginated article index for "/" and "/<page>".
// Non-numeric paths get a 404; "/0" redirects to "/".
func HandleIndex(rw http.ResponseWriter, req *http.Request) {
	uri := req.URL.RequestURI()
	indexView := IndexView{
		BlogName: globals.Cfg.BlogName,
		// first page if we don't specify below
		Page: 0,
		// -1 since pages are zero-indexed
		MaxPage: countMaxPage(globals.Cfg.Store.GetArticleNumber(), globals.Cfg.ArticlesPerPage),
	}
	// get rid of the '/' at the beginning
	uri = strings.TrimPrefix(uri, "/")
	// if there's something more than just '', try to figure out whether we've got this page or not
	if len(uri) > 0 {
		uriNum, err := strconv.ParseUint(uri, 10, 64)
		if err != nil {
			// if this is BS, send a 404
			Handle404(rw, req)
			return
		}
		// redirect page /0 to /, since it looks ugly
		if uriNum == 0 {
			http.Redirect(rw, req, "/", http.StatusMovedPermanently)
			// BUG FIX: the original fell through after the redirect and kept
			// rendering the template into the redirect response.
			return
		}
		// Clamp to known pages. (uriNum is unsigned, so the original
		// "uriNum >= 0" check was always true and has been dropped.)
		if uriNum <= indexView.MaxPage {
			indexView.Page = uriNum
		}
	}
	// for the buttons
	// NOTE(review): when Page == 0, LastPage underflows to MaxUint64; the
	// template presumably hides the "previous" button on page 0 — confirm.
	indexView.LastPage = indexView.Page - 1
	indexView.NextPage = indexView.Page + 1
	// calculate the articles: starting from...
	starti := globals.Cfg.ArticlesPerPage * indexView.Page
	// ...and ending with these
	endi := starti + globals.Cfg.ArticlesPerPage
	articleNum := globals.Cfg.Store.GetArticleNumber()
	if endi > articleNum {
		endi = articleNum
	}
	// insert the actual articles into page
	indexView.Articles = globals.Cfg.Store.LoadArticlesSortedByLatest(starti, endi)
	// execute template
	if err := templates.Store.Lookup("index.gohtml").Execute(rw, indexView); err != nil {
		fmt.Println("Error while parsing template:", err.Error())
	}
}
|
package main
const (
	// xxqrTemplate renders a centered QR-code image from a base64 PNG.
	xxqrTemplate = customHead + `
<body>
<img style="display: block;margin-left: auto;margin-right: auto;" src="data:image/png;base64,{{.QrBase}}" alt="QRCode" title="scan this picture to visit"/>
</body>`
	// `
	//<body>
	//<div>
	// <p>{{.QrBase}} was/were uploaded to</p>
	//<img src="data:image/png;base64,{{.QrBase}}" alt="QRCode" title="scan this picture to visit"/>
	//</div>
	//</body>
	//`
	// uploadTemplate renders the multi-file upload form.
	// NOTE(review): `</br>` is not valid HTML (should be `<br/>`); browsers
	// tolerate it, so it is left untouched here to preserve output bytes.
	uploadTemplate = customHead + `
<body>
<form action='/upload' method='post' enctype="multipart/form-data">
<input id='uploadInput1' class='uniform-file' name='uploadFile' type='file' multiple/>
</br>
</br>
</br>
</br>
<input type="submit" value="upload file[s]" />
</form>
</body>`
	// upResultTemplate reports which files uploaded successfully and which failed.
	upResultTemplate = customHead + `
<body>
<p>{{.OkFiles}} was/were uploaded to {{.FilePath}}</p>
{{ if (ne .FailedFiles "") }}
<br>
<br>
<p>{{.FailedFiles}} was/were failed to upload</p>
{{ end }}
</body>`
	// customHead is the shared navigation bar plus <head> styling prepended
	// to every page template.
	// NOTE(review): the <!DOCTYPE html> declaration appears after the nav
	// markup rather than first in the document — likely unintentional, but
	// changing it would change rendered output, so it is only flagged here.
	customHead = `
<div class="nav">
<li><a href="{{ .GetFiles }}" class="child">Get Files</a></li>
<li><a href="{{ .ToQrcode }}" class="child">QR Code</a></li>
<li><a href="{{ .UploadFiles }}" class="child">Upload</a></li>
<li><a href="../" class="child">../</a></li>
</div>
<!DOCTYPE html>
<head>
<title>{{ .Title }}</title>
<style>
pre {
text-align: left;
font-size: {{ .FontSize }}%;
margin: auto;
}
label {
font-size: {{ .FontSize }}%;
}
.nav {
list-style-type: none;
margin: 0;
padding: 0;
display: flex;
background-color: silver;
}
.nav a {
text-decoration: none;
display: block;
padding: 16px;
color: white;
text-align:center;
border:1px solid #DADADA;
border-radius:5px;
cursor:pointer;
background: linear-gradient(to bottom,#F8F8F8,#27558e);
}
.nav a:hover {
background-color: lightskyblue;
}
@media (min-width:800px) {
.nav {
justify-content: flex-start;
}
li {
border-left: 1px solid silver;
}
}
@media (min-width:600px) and (max-width:800px) {
.nav li {
flex: 1;
}
li+li {
border-left: 1px solid silver;
}
}
@media (max-width: 600px) {
.nav {
flex-flow: column wrap;
}
li+li {
border-top: 1px solid silver;
}
}
.child {
float: left;
font-size: {{ .FontSize }}%;
}
form {
text-align: center;
}
input {
font-size: {{ .FontSize }}%;
}
</style>
</head>
`
)
|
package catrouter
import (
"github.com/julienschmidt/httprouter"
)
// Params wraps httprouter.Params so that consumers of this package do not
// need to import httprouter directly.
type Params struct {
	params httprouter.Params
}
// ByName returns the value of the route parameter named name, delegating
// directly to the wrapped httprouter.Params.
func (ps Params) ByName(name string) string {
	value := ps.params.ByName(name)
	return value
}
|
package controllers
import (
"github.com/astaxie/beego"
)
// IndexController serves the landing page.
type IndexController struct {
	beego.Controller
}

// AboutController serves the about page.
type AboutController struct {
	beego.Controller
}

// CaseController serves the case-studies page.
type CaseController struct {
	beego.Controller
}

// NewsController serves the news listing page.
type NewsController struct {
	beego.Controller
}

// NewsDeyailController serves the news-detail page.
// NOTE(review): "Deyail" is a typo for "Detail"; the name is exported and
// may be referenced by routing code, so it is flagged rather than renamed.
type NewsDeyailController struct {
	beego.Controller
}

// ProductController serves the product page.
type ProductController struct {
	beego.Controller
}
// Get renders the index template.
func (c *IndexController) Get() {
	c.TplName = "index.html"
}

// Get renders the about template.
func (c *AboutController) Get() {
	c.TplName = "about.html"
}

// Get renders the case template.
func (c *CaseController) Get() {
	c.TplName = "case.html"
}

// Get renders the news template.
func (c *NewsController) Get() {
	c.TplName = "news.html"
}

// Get renders the news-detail template.
func (c *NewsDeyailController) Get() {
	c.TplName = "newsDetail.html"
}

// Get renders the product template.
func (c *ProductController) Get() {
	c.TplName = "product.html"
}
|
/*
* OpenAPI Petstore
*
* This is a sample server Petstore server. For this sample, you can use the api key `special-key` to test the authorization filters.
*
* API version: 1.0.0
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package petstoreserver
import (
"net/http"
"github.com/gin-gonic/gin"
)
// The handlers below are generated API stubs: each responds 200 OK with an
// empty JSON object until a real implementation is filled in.

// AddPet - Add a new pet to the store
func AddPet(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{})
}

// DeletePet - Deletes a pet
func DeletePet(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{})
}

// FindPetsByStatus - Finds Pets by status
func FindPetsByStatus(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{})
}

// FindPetsByTags - Finds Pets by tags
func FindPetsByTags(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{})
}

// GetPetById - Find pet by ID
func GetPetById(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{})
}

// UpdatePet - Update an existing pet
func UpdatePet(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{})
}

// UpdatePetWithForm - Updates a pet in the store with form data
func UpdatePetWithForm(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{})
}

// UploadFile - uploads an image
func UploadFile(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{})
}
|
package client
import (
"bytes"
"encoding/json"
"io/ioutil"
"log"
"net/http"
"time"
"github.com/magiconair/properties"
)
// BillingDetails is the customer address portion of a purchase request.
type BillingDetails struct {
	Zip string `json:"zip"`
}

// CardExpiry is a card expiry date (month and year as strings, matching the
// remote API's JSON shape).
type CardExpiry struct {
	Month string `json:"month"`
	Year  string `json:"year"`
}

// Card holds the card number and its expiry date.
type Card struct {
	CardNum    string     `json:"cardNum"`
	CardExpiry CardExpiry `json:"cardExpiry"`
}

// CardRequest is the full purchase request sent to the payment API.
type CardRequest struct {
	MerchantRefNum string         `json:"merchantRefNum"`
	Amount         int            `json:"amount"`
	SettleWithAuth bool           `json:"settleWithAuth"`
	Card           Card           `json:"card"`
	BillingDetails BillingDetails `json:"billingDetails"`
}
// SendPurchase calls the remote REST API to perform a purchase request and
// returns the raw response body as a string.
//
// Configuration comes from p: "url" is the endpoint and "apikey" the Basic
// auth credential. Any failure terminates the process via log.Fatalln,
// preserving the original CLI-style error handling.
func SendPurchase(p *properties.Properties, cardRequest CardRequest) string {
	body, err := json.Marshal(cardRequest)
	if err != nil {
		log.Fatalln(err)
	}
	client := http.Client{
		Timeout: 30 * time.Second,
	}
	req, err := http.NewRequest("POST", p.MustGetString("url"), bytes.NewBuffer(body))
	// BUG FIX: check the error before touching req — on failure req is nil,
	// and the original code called req.Header.Add first, which would have
	// dereferenced a nil pointer.
	if err != nil {
		log.Fatalln(err)
	}
	req.Header.Add("Content-Type", "application/json")
	req.Header.Add("Authorization", "Basic "+p.MustGetString("apikey"))
	resp, err := client.Do(req)
	if err != nil {
		log.Fatalln(err)
	}
	defer resp.Body.Close()
	respBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatalln(err)
	}
	return string(respBytes)
}
|
package queries
// SQLResponse is a JSON response for an SQL query.
type SQLResponse struct {
	AffectedRows int64  `json:"affectedRows"` // rows affected by the statement
	Data         string `json:"data"`         // serialized result data
}
|
package main
import "sort"
// father is the parent table of the disjoint-set (union-find) forest; an
// element whose parent is itself is a set root.
var father []int

// initUFS resets the union-find structure so each of the size elements
// starts in its own singleton set.
func initUFS(size int) {
	father = make([]int, size)
	for i := range father {
		father[i] = i
	}
}
// mergeUFS unions the sets containing a and b by pointing a's root at b's
// root. Both a and b must be less than len(father).
func mergeUFS(a, b int) {
	rootA, rootB := findFather(a), findFather(b)
	father[rootA] = rootB
	/*
		Also valid: point both roots at the smaller of the two, which keeps
		larger indices pointing at smaller ones:
		father[rootA] = min(rootA, rootB)
		father[rootB] = min(rootA, rootB)
	*/
}
// findFather returns the root of i's set, compressing the path so that every
// node visited on the way ends up pointing directly at the root (the same
// end state as the original recursive version).
func findFather(i int) int {
	root := i
	for father[root] != root {
		root = father[root]
	}
	// Second pass: re-point each node on the walk straight at the root.
	for i != root {
		next := father[i]
		father[i] = root
		i = next
	}
	return root
}
// smallestStringWithSwaps returns the lexicographically smallest string
// obtainable from s by repeatedly swapping characters at any index pair in
// pairs. Since swaps chain, all indices connected through pairs are freely
// permutable, so each connected component's characters can simply be sorted.
func smallestStringWithSwaps(s string, pairs [][]int) string {
	belongList := make(map[int][]uint8)
	initUFS(len(s)) // initialize the union-find structure
	// union every swappable index pair into one set
	for i := 0; i < len(pairs); i++ {
		mergeUFS(pairs[i][0], pairs[i][1])
	}
	// put s[i] into the slice keyed by its set leader (the root, fatherI)
	for i := 0; i < len(s); i++ {
		fatherI := findFather(i)
		belongList[fatherI] = append(belongList[fatherI], s[i])
	}
	// sort each set's characters ascending
	for _, list := range belongList {
		sort.Slice(list, func(i, j int) bool {
			return list[i] < list[j]
		})
	}
	// write the smallest available character of each set back position by
	// position (strings are immutable, hence the []byte copy)
	bytes := []byte(s)
	for i := 0; i < len(s); i++ {
		fatherI := findFather(i)
		bytes[i] = belongList[fatherI][0]             // take the queue head (slice used as a queue)
		belongList[fatherI] = belongList[fatherI][1:] // pop the head
	}
	return string(bytes)
}
// min returns the smaller of a and b.
//
// BUG FIX: the original returned b from both branches, so min(a, b) with
// a < b incorrectly yielded b.
func min(a, b int) int {
	if a > b {
		return b
	}
	return a
}
/*
*/
/*
总结
1. 这题的总体思路是:
(1) 让可以相互交换字符的索引组成一个集合,这样就形成了多个集合。 (构建集合通过并查集实现)
(2) 将每个集合中索引对应的字符加入到对应的切片中,对切片进行排序。 (可以让每个集合有一个leader,然后将该集合的所有字符放入leader所对应的切片)
(上面代码的leader就是集合的根节点)
(3) 遍历索引[0,len(s)-1],记为i,将该索引i对应的字符写回s[i]中。 (由于go的string不能修改,所以我采用了[]byte实现)
*/ |
package contract
import "github.com/bqxtt/book_online/api/model/entity"
// ListBooksRequest asks for one page of the book catalogue.
type ListBooksRequest struct {
	Page     int32 `form:"page" json:"page" binding:"required"`
	PageSize int32 `form:"page_size" json:"page_size" binding:"required"`
}

// ListBooksResponse carries one page of books plus paging metadata.
type ListBooksResponse struct {
	BaseResponse *BaseResponse    `json:"base_response"`
	Books        []*entity.Book   `json:"books"`
	PageInfo     *entity.PageInfo `json:"page_info"`
}

// CreateBookRequest carries the book to create.
type CreateBookRequest struct {
	Book *entity.Book `form:"book" json:"book"`
}

// CreateBookResponse carries only the shared response envelope.
type CreateBookResponse struct {
	BaseResponse *BaseResponse `json:"base_response"`
}

// UpdateBookRequest carries the book to update.
type UpdateBookRequest struct {
	Book *entity.Book `form:"book" json:"book"`
}

// UpdateBookResponse carries only the shared response envelope.
type UpdateBookResponse struct {
	BaseResponse *BaseResponse `json:"base_response"`
}

// DeleteBookRequest is empty; the target book is presumably identified by a
// URL/path parameter rather than the body — confirm against the route.
type DeleteBookRequest struct {
}

// DeleteBookResponse carries only the shared response envelope.
type DeleteBookResponse struct {
	BaseResponse *BaseResponse `json:"base_response"`
}
|
package commands
import (
"context"
"log"
"os"
"strings"
"github.com/argoproj/pkg/errors"
argoJson "github.com/argoproj/pkg/json"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo/cmd/argo/commands/client"
workflowpkg "github.com/argoproj/argo/pkg/apiclient/workflow"
wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo/workflow/common"
"github.com/argoproj/argo/workflow/util"
)
// cliSubmitOpts holds submission options specific to CLI submission (e.g. controlling output)
type cliSubmitOpts struct {
	output   string // --output
	wait     bool   // --wait
	watch    bool   // --watch
	log      bool   // --log
	strict   bool   // --strict
	priority *int32 // --priority; nil unless the flag was explicitly set
	getArgs  getFlags
}
// NewSubmitCommand builds the `argo submit` cobra command, wiring both the
// generic workflow submit options and the CLI-only options (output format,
// wait/watch/log behavior, priority).
func NewSubmitCommand() *cobra.Command {
	var (
		submitOpts    wfv1.SubmitOpts
		cliSubmitOpts cliSubmitOpts
		priority      int32
		from          string
	)
	var command = &cobra.Command{
		Use:   "submit [FILE... | --from `kind/name]",
		Short: "submit a workflow",
		Example: `# Submit multiple workflows from files:
argo submit my-wf.yaml
# Submit and wait for completion:
argo submit --wait my-wf.yaml
# Submit and watch until completion:
argo submit --watch my-wf.yaml
# Submit and tail logs until completion:
argo submit --log my-wf.yaml
# Submit a single workflow from an existing resource
argo submit --from cronwf/my-cron-wf
`,
		Run: func(cmd *cobra.Command, args []string) {
			// Only propagate priority when the flag was explicitly set, so a
			// nil pointer means "no priority requested".
			if cmd.Flag("priority").Changed {
				cliSubmitOpts.priority = &priority
			}
			if !cliSubmitOpts.watch && len(cliSubmitOpts.getArgs.status) > 0 {
				logrus.Warn("--status should only be used with --watch")
			}
			ctx, apiClient := client.NewAPIClient()
			serviceClient := apiClient.NewWorkflowServiceClient()
			namespace := client.Namespace()
			// --from submits from an existing resource and takes no file
			// arguments; files and --from are mutually exclusive.
			if from != "" {
				if len(args) != 0 {
					cmd.HelpFunc()(cmd, args)
					os.Exit(1)
				}
				submitWorkflowFromResource(ctx, serviceClient, namespace, from, &submitOpts, &cliSubmitOpts)
			} else {
				submitWorkflowsFromFile(ctx, serviceClient, namespace, args, &submitOpts, &cliSubmitOpts)
			}
		},
	}
	util.PopulateSubmitOpts(command, &submitOpts, true)
	command.Flags().StringVarP(&cliSubmitOpts.output, "output", "o", "", "Output format. One of: name|json|yaml|wide")
	command.Flags().BoolVarP(&cliSubmitOpts.wait, "wait", "w", false, "wait for the workflow to complete")
	command.Flags().BoolVar(&cliSubmitOpts.watch, "watch", false, "watch the workflow until it completes")
	command.Flags().BoolVar(&cliSubmitOpts.log, "log", false, "log the workflow until it completes")
	command.Flags().BoolVar(&cliSubmitOpts.strict, "strict", true, "perform strict workflow validation")
	command.Flags().Int32Var(&priority, "priority", 0, "workflow priority")
	command.Flags().StringVar(&from, "from", "", "Submit from an existing `kind/name` E.g., --from=cronwf/hello-world-cwf")
	command.Flags().StringVar(&cliSubmitOpts.getArgs.status, "status", "", "Filter by status (Pending, Running, Succeeded, Skipped, Failed, Error). Should only be used with --watch.")
	command.Flags().StringVar(&cliSubmitOpts.getArgs.nodeFieldSelectorString, "node-field-selector", "", "selector of node to display, eg: --node-field-selector phase=abc")
	// Only complete files with appropriate extension.
	err := command.Flags().SetAnnotation("parameter-file", cobra.BashCompFilenameExt, []string{"json", "yaml", "yml"})
	if err != nil {
		log.Fatal(err)
	}
	return command
}
// submitWorkflowsFromFile reads the given manifest files, unmarshals every
// workflow they contain, and submits them all.
func submitWorkflowsFromFile(ctx context.Context, serviceClient workflowpkg.WorkflowServiceClient, namespace string, filePaths []string, submitOpts *wfv1.SubmitOpts, cliOpts *cliSubmitOpts) {
	manifests, err := util.ReadManifest(filePaths...)
	errors.CheckError(err)
	var workflows []wfv1.Workflow
	for _, manifest := range manifests {
		workflows = append(workflows, unmarshalWorkflows(manifest, cliOpts.strict)...)
	}
	submitWorkflows(ctx, serviceClient, namespace, workflows, submitOpts, cliOpts)
}
// validateOptions rejects (via log.Fatalf, which exits) incompatible
// combinations of CLI and submit options: --watch is single-workflow only
// and excludes --wait and dry runs; dry runs require an --output format and
// are mutually exclusive with each other.
func validateOptions(workflows []wfv1.Workflow, submitOpts *wfv1.SubmitOpts, cliOpts *cliSubmitOpts) {
	if cliOpts.watch {
		if len(workflows) > 1 {
			log.Fatalf("Cannot watch more than one workflow")
		}
		if cliOpts.wait {
			log.Fatalf("--wait cannot be combined with --watch")
		}
		if submitOpts.DryRun {
			log.Fatalf("--watch cannot be combined with --dry-run")
		}
		if submitOpts.ServerDryRun {
			log.Fatalf("--watch cannot be combined with --server-dry-run")
		}
	}
	if cliOpts.wait {
		if submitOpts.DryRun {
			log.Fatalf("--wait cannot be combined with --dry-run")
		}
		if submitOpts.ServerDryRun {
			log.Fatalf("--wait cannot be combined with --server-dry-run")
		}
	}
	if submitOpts.DryRun {
		if cliOpts.output == "" {
			log.Fatalf("--dry-run should have an output option")
		}
		if submitOpts.ServerDryRun {
			log.Fatalf("--dry-run cannot be combined with --server-dry-run")
		}
	}
	if submitOpts.ServerDryRun {
		if cliOpts.output == "" {
			log.Fatalf("--server-dry-run should have an output option")
		}
	}
}
// submitWorkflowFromResource submits a single workflow derived from an
// existing resource identified as `kind/name` (e.g. cronwf/hello-world-cwf),
// then prints it and applies any wait/watch/log follow-up.
func submitWorkflowFromResource(ctx context.Context, serviceClient workflowpkg.WorkflowServiceClient, namespace string, resourceIdentifier string, submitOpts *wfv1.SubmitOpts, cliOpts *cliSubmitOpts) {
	parts := strings.SplitN(resourceIdentifier, "/", 2)
	if len(parts) != 2 {
		log.Fatalf("resource identifier '%s' is malformed. Should be `kind/name`, e.g. cronwf/hello-world-cwf", resourceIdentifier)
	}
	resourceKind, resourceName := parts[0], parts[1]
	// Option validation only needs a workflow count, so a single empty
	// workflow stands in for the one being submitted.
	validateOptions([]wfv1.Workflow{{}}, submitOpts, cliOpts)
	created, err := serviceClient.SubmitWorkflow(ctx, &workflowpkg.WorkflowSubmitRequest{
		Namespace:     namespace,
		ResourceKind:  resourceKind,
		ResourceName:  resourceName,
		SubmitOptions: submitOpts,
	})
	if err != nil {
		log.Fatalf("Failed to submit workflow: %v", err)
	}
	printWorkflow(created, getFlags{output: cliOpts.output})
	waitWatchOrLog(ctx, serviceClient, namespace, []string{created.Name}, *cliOpts)
}
// submitWorkflows validates the combined options, creates each workflow via
// the service client (honoring dry-run modes), prints each created workflow,
// and finally applies any wait/watch/log follow-up across all of them.
// Any failure exits the process via log.Fatalf.
func submitWorkflows(ctx context.Context, serviceClient workflowpkg.WorkflowServiceClient, namespace string, workflows []wfv1.Workflow, submitOpts *wfv1.SubmitOpts, cliOpts *cliSubmitOpts) {
	validateOptions(workflows, submitOpts, cliOpts)
	if len(workflows) == 0 {
		log.Println("No Workflow found in given files")
		os.Exit(1)
	}
	var workflowNames []string
	for _, wf := range workflows {
		if wf.Namespace == "" {
			// This is here to avoid passing an empty namespace when using --server-dry-run
			wf.Namespace = namespace
		}
		err := util.ApplySubmitOpts(&wf, submitOpts)
		errors.CheckError(err)
		wf.Spec.Priority = cliOpts.priority
		options := &metav1.CreateOptions{}
		if submitOpts.DryRun {
			// Client-side dry run maps to the API server's "All" dry-run mode.
			options.DryRun = []string{"All"}
		}
		created, err := serviceClient.CreateWorkflow(ctx, &workflowpkg.WorkflowCreateRequest{
			Namespace:     wf.Namespace,
			Workflow:      &wf,
			ServerDryRun:  submitOpts.ServerDryRun,
			CreateOptions: options,
		})
		if err != nil {
			log.Fatalf("Failed to submit workflow: %v", err)
		}
		printWorkflow(created, getFlags{output: cliOpts.output, status: cliOpts.getArgs.status})
		workflowNames = append(workflowNames, created.Name)
	}
	waitWatchOrLog(ctx, serviceClient, namespace, workflowNames, *cliOpts)
}
// unmarshalWorkflows decodes manifest bytes as a single JSON workflow or,
// failing that, as a (possibly multi-document) YAML workflow file. Parsing
// failure of both forms terminates the process via log.Fatalf.
func unmarshalWorkflows(wfBytes []byte, strict bool) []wfv1.Workflow {
	var jsonOpts []argoJson.JSONOpt
	if strict {
		jsonOpts = append(jsonOpts, argoJson.DisallowUnknownFields)
	}
	var wf wfv1.Workflow
	if err := argoJson.Unmarshal(wfBytes, &wf, jsonOpts...); err == nil {
		return []wfv1.Workflow{wf}
	}
	// Not JSON — fall back to YAML, which may contain multiple documents.
	yamlWfs, err := common.SplitWorkflowYAMLFile(wfBytes, strict)
	if err != nil {
		log.Fatalf("Failed to parse workflow: %v", err)
	}
	return yamlWfs
}
// waitWatchOrLog applies the requested follow-up to freshly submitted
// workflows: tail logs if --log, then either wait for completion (--wait)
// or watch progress (--watch). Logging is independent of wait/watch; wait
// takes precedence over watch (the two are validated as exclusive upstream).
func waitWatchOrLog(ctx context.Context, serviceClient workflowpkg.WorkflowServiceClient, namespace string, workflowNames []string, cliSubmitOpts cliSubmitOpts) {
	if cliSubmitOpts.log {
		for _, name := range workflowNames {
			logWorkflow(ctx, serviceClient, namespace, name, "", &corev1.PodLogOptions{
				Container: "main",
				Follow:    true,
				Previous:  false,
			})
		}
	}
	switch {
	case cliSubmitOpts.wait:
		// Suppress the progress display for machine-readable output formats.
		quiet := !(cliSubmitOpts.output == "" || cliSubmitOpts.output == "wide")
		waitWorkflows(ctx, serviceClient, namespace, workflowNames, false, quiet)
	case cliSubmitOpts.watch:
		for _, name := range workflowNames {
			watchWorkflow(ctx, serviceClient, namespace, name, cliSubmitOpts.getArgs)
		}
	}
}
|
package erratum
// Use an input with a resource from the opener
// Use opens a resource via o (retrying as long as the failure is a
// TransientError), frobs it with input, and guarantees the resource is
// closed. If Frob panics with a FrobError the resource is defrobbed with
// the error's tag before the panic is converted into the returned error.
//
// BUG FIX: the recover handler used an unchecked assertion `r.(error)`,
// which itself panicked (with a confusing interface-conversion message)
// whenever the recovered value was not an error. Non-error panic values are
// now re-raised unchanged instead.
func Use(o ResourceOpener, input string) (result error) {
	resource, err := o()
	for err != nil {
		if _, ok := err.(TransientError); !ok {
			return err
		}
		resource, err = o()
	}
	defer resource.Close()
	defer func() {
		if r := recover(); r != nil {
			if frob, ok := r.(FrobError); ok {
				resource.Defrob(frob.defrobTag)
			}
			if err, ok := r.(error); ok {
				result = err
				return
			}
			panic(r) // not an error value: propagate the original panic
		}
	}()
	resource.Frob(input)
	return nil
}
|
package excel
// ExcelTest is an empty placeholder type for the excel package.
// NOTE(review): it currently carries no fields or methods.
type ExcelTest struct {
}
|
package main
import (
"fmt"
"math"
"sort"
)
// https://leetcode-cn.com/problems/contains-duplicate-iii/
// 220. 存在重复元素 | Contains Duplicate III
// 其他:
// * 217: https://leetcode-cn.com/problems/contains-duplicate/
// * 219: https://leetcode-cn.com/problems/contains-duplicate-ii/
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// containsNearbyAlmostDuplicate is the LeetCode 220 entry point; it
// delegates to the bucket-based solution 2 below.
func containsNearbyAlmostDuplicate(nums []int, k int, t int) bool {
	return containsNearbyAlmostDuplicate2(nums, k, t)
}
//------------------------------------------------------------------------------
// Solution 2
//
// 桶排序 + 滑动窗口
//
// 将数组元素放入到桶中, 各个桶中元素的范围依次为: [0,t],[t+1,2t+1],[2t+2,3t+2],...
// 那么该桶有如下性质:
// 1. 同一个桶中的各个元素的差值的绝对值一定小于等于 t.
// 2. 任意桶 i,j 中的元素 a, b, 若 |b-a|<=t 成立, 那么 |j-i\<=1, 即桶必须相邻.
// 所以, 遍历数组 A, 对于元素 A[i]:
// *. 入窗口前, 计算 A[i] 落入的桶 i, 若桶 i 中已有元素, 则返回 true.
// *. 否则, 取左右相邻桶(i-1/i+1)中的元素, 计算差绝对值, 小于等于 t 则返回 true.
// *. 否则, 将 A[i] 加入桶中, 并将 A[i-k] 元素从桶中删除.
// 每个桶中只需存储一个元素, 因为如若桶中不止1个元素, 根据性质1, 可直接返回true, 不用进入下一轮.
// 因此桶可以用 map[int]int 表示, key 为桶号, value 为数组元素值.
// 另外, 由于元素值范围: [-2^31,2^31-1], 可以映射到:[0,2^32-1], 保证计算的桶号为非负数.
//
// 复杂度分析:
// * 时间: O(N)
// * 空间: O(k)
// containsNearbyAlmostDuplicate2 reports whether nums contains indices
// i, j with |i-j| <= k and |nums[i]-nums[j]| <= t.
//
// It buckets values by width t+1 inside a sliding window of size k+1: two
// values in one bucket differ by at most t, and a qualifying pair can
// otherwise only span adjacent buckets, so each new value is checked
// against at most three buckets. Each bucket holds a single value — a
// second arrival in an occupied bucket would already have returned true.
// Values are offset by math.MaxUint32 before dividing so bucket ids of
// negative inputs are computed with non-negative division.
//
// Time O(N), space O(k).
func containsNearbyAlmostDuplicate2(nums []int, k int, t int) bool {
	if len(nums) < 2 {
		return false
	}
	width := int64(t) + 1
	bucketOf := func(v int) int64 {
		// the offset keeps the dividend non-negative for any int32 input
		return (int64(v) + math.MaxUint32) / width
	}
	buckets := make(map[int64]int)
	for i, v := range nums {
		b := bucketOf(v)
		for _, nb := range [3]int64{b - 1, b, b + 1} {
			w, ok := buckets[nb]
			if !ok {
				continue
			}
			diff := w - v
			if diff < 0 {
				diff = -diff
			}
			if diff <= t {
				return true
			}
		}
		buckets[b] = v
		if i >= k {
			// slide the window: evict the element that just fell out
			delete(buckets, bucketOf(nums[i-k]))
		}
	}
	return false
}
//------------------------------------------------------------------------------
// Solution 1
//
// 类似 Solution 0 的优化思路, 利用滑动窗口和二叉搜索树. 用大小为 k+1 的窗口在数组 A 滑动, 并
// 将窗口内的元素加入到二叉搜索树中, 在新元素 A[i] 入窗口前将 A[i-k-1] 从树中删除, 再查找
// 大于或等于 A[i]-t 的最小元素 x, 若 x <= A[i]+t, 则返回true, 否则将 A[i] 加入到搜索树中.
//
// 复杂度:
// * 时间: O(N*lgk)
// * 空间: O(k)
// 二叉搜索树不好写 -_-
// containsNearbyAlmostDuplicate1 was intended to be the balanced-BST
// sliding-window solution (see the comment above); it currently just
// delegates to the BIT-based solution 0.
func containsNearbyAlmostDuplicate1(nums []int, k int, t int) bool {
	return containsNearbyAlmostDuplicate0(nums, k, t)
}
//------------------------------------------------------------------------------
// Solution 0
//
// 离散化 + 树状数组(Binary Indexed Trees) + 滑动窗口
//
// 数据范围:
// * 长度: N ∊ [0,2*10^4]
// * 元素值: A[i] ∊ [-2^31, 2^31-1]
// * k: [0, 10^4]
// * t: [0, 2^31-1]
//
// 暴力解法: 对于每一个新入窗口的元素, 计算其与窗口内其他各个元素的差的绝对值.
// 时间: O(N*k), 根据给的数据范围, 最坏是 O(10^8), 超时了.
//
// 优化思路: 用大小为 k+1 的窗口在数组 A 上滑动, 统计窗口内元素在 [A[i]-t,A[i]+t] 的个数;
// 如果大于1, 则返回true. 利用树状数组进行计数, 复杂度 O(lg(MAX(A[0],A[1],...,A[N-1]))).
// 由于元素值范围过大, 直接利用 BIT 申请空间太大. 但是数组长度在 10^4 内, 可以先离散化, 缩小数据
// 范围, 最终计数的时间复杂度: O(lgN).
//
// 步骤:
// 1. 去重并离散化, 得到去重并排序后的数组, 以及离散后的哈希表:
// * ordered: 去重并升序.
// * hash: ordered[i] -> i, hash[ordered[i]] = i+1. 加1是为了保证 BIT 可用.
// 2. 构建树状数组(BIT), 用于统计和查询区间内的元素个数:
// * 范围: [1, len(ordered)]
// * 大小: n = len(ordered)+1
// * 操作 add(i,v): 更新区间 [i, n), 增加 v. v 只能是1或-1, 表示入窗口和出窗口.
// * 操作 sum(i): 查询区间 [1, i] 的元素个数.
// 3. 对于每个新入窗口的元素 A[i]:
// * A[i] 入窗口: 更新BIT, add(hash[A[i]], 1).
// * A[i-k-1] 出窗口: 当 i>=k+1 时, add(hash[A[i-k-1]], -1).
// * 确定查询区间: [A[i]-t, A[i]+t], 映射到离散后的 ordered 下标范围中, 二分查找.
// * 离散区间(l, r]: 因为 sum(r)-sum(l) 会去掉 l 处的计数.
// * l: ordered 中小于 A[i]-t 的最大元素的下标 + 1.
// * r: ordered 中小于或等于 A[i]+t 的最小元素的下标 + 1.
// * 统计区间内元素个数: sum(r)-sum(l), 表示区间 (l,r], 大于1返回 true.
//
// 复杂度分析:
// * 时间: O(N*lgN). 去重并离散 O(N*lgN), 窗口滑动 O(N), 计数 O(lgN).
// * 空间: O(N)
// containsNearbyAlmostDuplicate0 solves the problem with discretization
// plus a Binary Indexed Tree over a sliding window of size k+1: for each
// element v it counts window values whose rank falls in [v-t, v+t]; a
// count above one (v itself) means a qualifying pair exists.
// Time O(N*lgN), space O(N); see the long comment above for the details.
func containsNearbyAlmostDuplicate0(nums []int, k int, t int) bool {
	N := len(nums)
	if N == 0 {
		return false
	}
	// discretize and delete duplicated values
	ordered, hash, n := discretize(nums)
	// Binary Indexed Trees: [1,n]
	NN := n + 1
	tr := make([]int, NN)
	// add applies delta v (+1 on window entry, -1 on exit) at rank i
	add := func(i, v int) {
		for ; i < NN; i += i & -i {
			tr[i] += v
		}
	}
	// sum returns how many window elements have rank in [1, i]
	sum := func(i int) (sum int) {
		for ; i > 0; i -= i & -i {
			sum += tr[i]
		}
		return sum
	}
	for i, v := range nums {
		add(hash[v], 1)
		if i >= k+1 {
			// element nums[i-k-1] leaves the window of size k+1
			add(hash[nums[i-k-1]], -1)
		}
		// map [v-t, v+t] onto ranks as a half-open interval (l, r] so
		// that sum(r)-sum(l) counts exactly the in-range values
		l := sort.SearchInts(ordered, v-t)
		r := sort.SearchInts(ordered, v+t)
		if r < n && ordered[r] == v+t {
			r++
		}
		if sum(r)-sum(l) > 1 {
			return true
		}
	}
	return false
}
// discretize maps the distinct values of nums onto the compact rank range
// [1, n]. It returns the deduplicated values in ascending order, a map
// from value to its 1-based rank (1-based so ranks can index a Binary
// Indexed Tree directly), and the number of distinct values.
func discretize(nums []int) ([]int, map[int]int, int) {
	rank := make(map[int]int, len(nums))
	for _, v := range nums {
		rank[v] = 0 // record the value; the rank is assigned after sorting
	}
	ordered := make([]int, 0, len(rank))
	for v := range rank {
		ordered = append(ordered, v)
	}
	sort.Ints(ordered)
	for i, v := range ordered {
		rank[v] = i + 1
	}
	return ordered, rank, len(ordered)
}
//------------------------------------------------------------------------------
// main
// main runs the two working solutions against a small fixture set and
// prints their answers so they can be compared side by side.
func main() {
	type testCase struct {
		nums []int
		k, t int
	}
	testCases := []testCase{
		// expected: true
		{nums: []int{1, 2, 3, 1}, k: 3, t: 0},
		{nums: []int{1, 0, 1, 1}, k: 1, t: 2},
		{nums: []int{1, 2, 1, 1}, k: 1, t: 0},
		// expected: false
		{nums: nil, k: 0, t: 0},
		{nums: []int{1, 5, 9, 1, 5, 9}, k: 2, t: 3},
	}
	for i, tc := range testCases {
		fmt.Println("## case", i)
		// solve with both implementations
		fmt.Println(containsNearbyAlmostDuplicate2(tc.nums, tc.k, tc.t))
		fmt.Println(containsNearbyAlmostDuplicate0(tc.nums, tc.k, tc.t))
	}
}
|
package informartionAllSum
import (
"fmt"
"time"
)
const (
//TimeFormart = "2006-01-02 15:04:05"
TimeFormart = "20060102"
)
func GetYesterDay() (yd string) {
t1 := time.Now()
diff, err := time.ParseDuration("-24h")
if err != nil {
fmt.Println(err)
}
yd = t1.Add(diff).Format(TimeFormart)
return yd
}
|
package main
import (
"os"
)
// main dispatches to the requested day's puzzle based on the first
// command-line argument ("1", "2", or "3"). Unrecognized day values are
// silently ignored, matching the original behavior.
func main() {
	// previously os.Args[1] was indexed unconditionally, which panicked
	// when the program was run with no arguments
	if len(os.Args) < 2 {
		os.Stderr.WriteString("usage: program <day>\n")
		os.Exit(1)
	}
	day := os.Args[1]
	switch day {
	case "1":
		data := loadFromTextFile("day1.txt")
		dayOne(data)
	case "2":
		data := loadFromTextFile("day2.txt")
		dayTwo(data)
	case "3":
		data := loadFromTextFile("day3.txt")
		dayThree(data)
	}
}
|
package sudoku
import (
"fmt"
"github.com/kr/pretty"
"log"
"reflect"
"testing"
)
// TestTechniquesSorted verifies that AllTechniques is ordered by
// non-decreasing human likelihood, which other code relies on.
func TestTechniquesSorted(t *testing.T) {
	lastLikelihood := 0.0
	for i, technique := range AllTechniques {
		// hoisted: humanLikelihood(nil) was previously evaluated up to
		// three times per iteration (condition, message, assignment)
		likelihood := technique.humanLikelihood(nil)
		if likelihood < lastLikelihood {
			t.Fatal("Technique named", technique.Name(), "with index", i, "has a likelihood lower than one of the earlier ones: ", likelihood, lastLikelihood)
		}
		lastLikelihood = likelihood
	}
}
// TestAllVariantNames pins the exact contents and ordering of
// AllTechniqueVariants; any change to technique registration or naming
// must be reflected in this golden list.
func TestAllVariantNames(t *testing.T) {
	expected := []string{
		"Obvious In Block",
		"Obvious In Row",
		"Obvious In Col",
		"Necessary In Block",
		"Necessary In Row",
		"Necessary In Col",
		"Only Legal Number",
		"Naked Pair Block",
		"Naked Pair Row",
		"Naked Pair Col",
		"Naked Triple Block",
		"Naked Triple Row",
		"Naked Triple Col",
		"Naked Quad Block",
		"Naked Quad Row",
		"Naked Quad Col",
		"Pointing Pair Row",
		"XWing Row",
		"Pointing Pair Col",
		"XWing Col",
		"Block Block Interactions",
		"XYWing",
		"XYWing (Same Block)",
		"Hidden Pair Block",
		"Hidden Pair Row",
		"Hidden Pair Col",
		"Swordfish Row",
		"Swordfish Col",
		"Hidden Triple Block",
		"Hidden Triple Row",
		"Hidden Triple Col",
		"Forcing Chain (1 steps)",
		"Forcing Chain (2 steps)",
		"Forcing Chain (3 steps)",
		"Forcing Chain (4 steps)",
		"Forcing Chain (5 steps)",
		"Forcing Chain (6 steps)",
		"Hidden Quad Block",
		"Hidden Quad Row",
		"Hidden Quad Col",
		"Guess",
	}
	if !reflect.DeepEqual(expected, AllTechniqueVariants) {
		t.Error("Got wrong technique variants. Expected", expected, "got", AllTechniqueVariants, "\nDifferences:\n", pretty.Diff(expected, AllTechniqueVariants))
	}
}
func TestSubsetIndexes(t *testing.T) {
result := subsetIndexes(3, 1)
expectedResult := [][]int{{0}, {1}, {2}}
subsetIndexHelper(t, result, expectedResult)
result = subsetIndexes(3, 2)
expectedResult = [][]int{{0, 1}, {0, 2}, {1, 2}}
subsetIndexHelper(t, result, expectedResult)
result = subsetIndexes(5, 3)
expectedResult = [][]int{{0, 1, 2}, {0, 1, 3}, {0, 1, 4}, {0, 2, 3}, {0, 2, 4}, {0, 3, 4}, {1, 2, 3}, {1, 2, 4}, {1, 3, 4}, {2, 3, 4}}
subsetIndexHelper(t, result, expectedResult)
if subsetIndexes(1, 2) != nil {
t.Log("Subset indexes returned a subset where the length is greater than the len")
t.Fail()
}
}
// subsetIndexHelper asserts that result and expectedResult contain the
// same subsets in the same order, failing the test with a diagnostic
// message otherwise. Length mismatches abort immediately (FailNow);
// element mismatches are reported but the comparison continues.
func subsetIndexHelper(t *testing.T, result [][]int, expectedResult [][]int) {
	if len(result) != len(expectedResult) {
		t.Log("subset indexes returned wrong number of results for: ", result, " :", expectedResult)
		t.FailNow()
	}
	for i, item := range result {
		// compare against the matching expected row (was expectedResult[0],
		// which only worked because all rows share one length); also fixed
		// the "numbrer" typo in the message below
		if len(item) != len(expectedResult[i]) {
			t.Log("subset indexes returned a result with wrong number of items ", i, " : ", result, " : ", expectedResult)
			t.FailNow()
		}
		for j, value := range item {
			if value != expectedResult[i][j] {
				t.Log("Subset indexes had wrong number at ", i, ",", j, " : ", result, " : ", expectedResult)
				t.Fail()
			}
		}
	}
}
// multipleValidStepLoopOptions describes one expected solve step for use
// when a technique yields several valid steps for the same puzzle; see
// multipleValidStepsTestHelper.
type multipleValidStepLoopOptions struct {
	targetCells CellRefSlice // cells the step is expected to modify
	targetNums IntSlice // numbers the step is expected to set/remove
	pointerCells CellRefSlice // cells that justify the step
	pointerNums IntSlice // numbers that justify the step
	description string // exact expected human-readable description
	extra interface{} // expected technique-specific payload
	variantName string // expected variant; "" means the technique name
}
//TODO: rename this to fit in with the other test helpers
// multipleValidStepsTestHelper generates every step the named technique
// finds for puzzleName, then checks that each entry in tests matches one
// of those steps, and finally that there is exactly one test per step.
func multipleValidStepsTestHelper(t *testing.T, puzzleName string, techniqueName string, tests []multipleValidStepLoopOptions) {
	options := solveTechniqueTestHelperOptions{
		checkAllSteps: true,
	}
	grid, solver, steps := humanSolveTechniqueTestHelperStepGenerator(t,
		puzzleName, techniqueName, options)
	// reuse the generated steps for every per-test helper call below so
	// the (expensive) step generation only happens once
	options.stepsToCheck.grid = grid
	options.stepsToCheck.solver = solver
	options.stepsToCheck.steps = steps
	//OK, now we'll walk through all of the options in a loop and make sure they all show
	//up in the solve steps.
	for _, test := range tests {
		options.targetCells = test.targetCells
		options.targetNums = test.targetNums
		options.pointerCells = test.pointerCells
		options.pointerNums = test.pointerNums
		options.description = test.description
		options.extra = test.extra
		options.variantName = test.variantName
		humanSolveTechniqueTestHelper(t, puzzleName, techniqueName, options)
	}
	if len(tests) != len(steps) {
		t.Error("We didn't have enough tests for all of the steps that ", techniqueName, " returned. Got", len(tests), "expected", len(steps))
	}
}
// techniqueVariantsTestHelper asserts that the technique registered under
// techniqueName reports exactly the given variant names, in order. When no
// variantNames are passed, the technique's own name is expected to be the
// sole variant.
func techniqueVariantsTestHelper(t *testing.T, techniqueName string, variantNames ...string) {
	technique, ok := techniquesByName[techniqueName]
	if !ok {
		// fixed typo in the message: was "technqiue"
		t.Fatal("Couldn't find technique named", techniqueName)
	}
	if len(variantNames) == 0 {
		variantNames = []string{technique.Name()}
	}
	names := technique.Variants()
	if len(names) != len(variantNames) {
		t.Fatal("Didn't receive the right number of variants for", technique.Name(), "Got", len(names), "expected", len(variantNames))
	}
	for i, name := range names {
		goldenName := variantNames[i]
		if name != goldenName {
			t.Error(i, "th variant name for", technique.Name(), "wrong. Got", name, "expected", goldenName)
		}
	}
}
// loopTest wraps a *testing.T and makes it possible to run loops where at
// least one run through the loop must not Error for the whole test to
// pass. Call Reset() at the top of each iteration, and at any time call
// Passed() to see whether Error() has been called since the last reset.
// If looping is false, Error is just a passthrough to t.Error.
// (The original doc comment called this type "multiTestWrapper", which
// never existed — fixed to match the actual type name.)
type loopTest struct {
	t           *testing.T
	looping     bool
	lastMessage string // last captured Error message; "" means passing
}

// Reset clears the captured message, starting a fresh loop iteration.
func (l *loopTest) Reset() {
	l.lastMessage = ""
}

// Passed reports whether Error has not been called since the last Reset.
func (l *loopTest) Passed() bool {
	return l.lastMessage == ""
}

// Error records args as a failure: a direct passthrough to t.Error when
// not looping, otherwise captured for later inspection via Passed.
func (l *loopTest) Error(args ...interface{}) {
	if !l.looping { // idiomatic form of the original `l.looping == false`
		l.t.Error(args...)
	} else {
		l.lastMessage = fmt.Sprint(args...)
	}
}
// solveTechniqueMatchMode selects how cell/number expectations are
// compared in humanSolveTechniqueTestHelper.
type solveTechniqueMatchMode int

const (
	// solveTechniqueMatchModeAll requires every expectation to match exactly.
	solveTechniqueMatchModeAll solveTechniqueMatchMode = iota
	// solveTechniqueMatchModeAny requires at least one overlapping value.
	solveTechniqueMatchModeAny
)
// solveTechniqueTestHelperOptions configures humanSolveTechniqueTestHelper
// and the step generator: which puzzle transformation to apply, which
// cells/numbers a step is expected to touch, and how strictly to compare.
type solveTechniqueTestHelperOptions struct {
	transpose bool // transpose the grid before running the technique
	//Whether the descriptions of cells are a list of legal possible individual values, or must all match.
	matchMode solveTechniqueMatchMode
	targetCells CellRefSlice
	pointerCells CellRefSlice
	targetNums IntSlice
	pointerNums IntSlice
	targetSame cellGroupType // group kind the target cells must share
	targetGroup int // index of that shared group
	variantName string // expected variant; "" means the technique name
	extra interface{} // expected technique-specific payload
	//If true, will loop over all steps from the technique and see if ANY of them match.
	checkAllSteps bool
	//A way to skip the step generator by providing your own list of steps.
	//Useful if you're going to be doing repeated calls to the test helper
	//with the same list of steps.
	stepsToCheck struct {
		grid MutableGrid
		solver SolveTechnique
		steps []*SolveStep
	}
	//If description provided, the description MUST match.
	description string
	//If descriptions provided, ONE of the descriptions must match.
	//generally used in conjunction with solveTechniqueMatchModeAny.
	descriptions []string
	debugPrint bool // log each step as it is checked
}
//TODO: 97473c18633203a6eaa075d968ba77d85ba28390 introduced an error here where we don't return all techniques,
//at least for forcing chains technique.
// getStepsForTechnique asks the technique for candidate steps on grid.
// When fetchAll is true every candidate is requested (a maxResults of 0
// means unlimited); otherwise only the first candidate is fetched.
func getStepsForTechnique(technique SolveTechnique, grid Grid, fetchAll bool) []*SolveStep {
	limit := 1
	if fetchAll {
		limit = 0
	}
	return technique.Candidates(grid, limit)
}
// humanSolveTechniqueTestHelperStepGenerator loads (or reuses) the named
// puzzle grid, optionally transposes it, looks up the named technique, and
// returns the grid, the technique, and the candidate steps it produces.
// It fatals the test if the puzzle or the technique cannot be found.
func humanSolveTechniqueTestHelperStepGenerator(t *testing.T, puzzleName string, techniqueName string, options solveTechniqueTestHelperOptions) (MutableGrid, SolveTechnique, []*SolveStep) {
	var grid MutableGrid
	if options.stepsToCheck.grid != nil {
		// the caller supplied a pre-loaded grid; reuse it
		grid = options.stepsToCheck.grid
	} else {
		tempGrid, err := MutableLoadSDKFromFile(puzzlePath(puzzleName))
		if err != nil {
			t.Fatal("Couldn't load puzzle ", puzzleName)
		}
		grid = tempGrid
	}
	if options.transpose {
		newGrid := grid.(*mutableGridImpl).transpose()
		grid = newGrid
	}
	solver := techniquesByName[techniqueName]
	if solver == nil {
		t.Fatal("Couldn't find technique object: ", techniqueName)
	}
	// checkAllSteps doubles as "fetch all candidates" here
	steps := getStepsForTechnique(solver, grid, options.checkAllSteps)
	return grid, solver, steps
}
// humanSolveTechniqueTestHelper checks the steps produced by the named
// technique against the expectations in options. In checkAllSteps mode the
// helper passes if ANY generated step satisfies the options (per-step
// errors are captured in a loopTest and inspected after each step);
// otherwise each failed check fails the test directly.
func humanSolveTechniqueTestHelper(t *testing.T, puzzleName string, techniqueName string, options solveTechniqueTestHelperOptions) {
	//TODO: it's weird that you have to pass in puzzleName a second time if you're also passing in options.
	//TODO: test for col and block as well
	var grid Grid
	var solver SolveTechnique
	var steps []*SolveStep
	if options.stepsToCheck.grid != nil {
		grid = options.stepsToCheck.grid
		solver = options.stepsToCheck.solver
		steps = options.stepsToCheck.steps
	} else {
		grid, solver, steps = humanSolveTechniqueTestHelperStepGenerator(t, puzzleName, techniqueName, options)
	}
	//This is so weird... if I don't have this no-op here the compiler warns
	//that grid is declared and not used... despite the fact that it OBVIOUSLY
	//is.
	grid.Cell(0, 0)
	//Check if solveStep is nil here
	if len(steps) == 0 {
		t.Fatal(techniqueName, " didn't find a cell it should have.")
	}
	//Instead of calling error on t, we'll call it on l. If we're not in checkAllSteps mode,
	//l.Error() will be pass through; otherwise we can interrogate it at any point in the loop.
	l := &loopTest{t: t, looping: options.checkAllSteps}
	for _, step := range steps {
		l.Reset()
		if options.debugPrint {
			log.Println(step)
		}
		// an empty variantName means the step's variant must equal the technique name
		variantName := options.variantName
		if options.variantName == "" {
			variantName = techniqueName
		}
		if step.TechniqueVariant() != variantName {
			l.Error("TechniqueVariant name was not what was expected. Got", step.TechniqueVariant(), "expected", variantName)
			continue
		}
		foundVariantNameMatch := false
		for _, variant := range AllTechniqueVariants {
			if variant == step.TechniqueVariant() {
				foundVariantNameMatch = true
				break
			}
		}
		if !foundVariantNameMatch {
			//This is a t.error, because every step should be valid in this way.
			t.Error("Found a variant name that's not in the set: ", step.TechniqueVariant())
		}
		if !reflect.DeepEqual(step.extra, options.extra) {
			l.Error("Extra did not match. Got", step.extra, "expected", options.extra)
			continue
		}
		if options.matchMode == solveTechniqueMatchModeAll {
			//All must match
			if options.targetCells != nil {
				if !step.TargetCells.sameAs(options.targetCells) {
					l.Error(techniqueName, " had the wrong target cells: ", step.TargetCells)
					continue
				}
			}
			if options.pointerCells != nil {
				if !step.PointerCells.sameAs(options.pointerCells) {
					l.Error(techniqueName, " had the wrong pointer cells: ", step.PointerCells)
					continue
				}
			}
			switch options.targetSame {
			case _GROUP_ROW:
				if !step.TargetCells.SameRow() || step.TargetCells.Row() != options.targetGroup {
					l.Error("The target cells in the ", techniqueName, " were wrong row :", step.TargetCells.Row())
					continue
				}
			case _GROUP_BLOCK:
				if !step.TargetCells.SameBlock() || step.TargetCells.Block() != options.targetGroup {
					l.Error("The target cells in the ", techniqueName, " were wrong block :", step.TargetCells.Block())
					continue
				}
			case _GROUP_COL:
				if !step.TargetCells.SameCol() || step.TargetCells.Col() != options.targetGroup {
					l.Error("The target cells in the ", techniqueName, " were wrong col :", step.TargetCells.Col())
					continue
				}
			case _GROUP_NONE:
				//Do nothing
			default:
				l.Error("human solve technique helper error: unsupported group type: ", options.targetSame)
				continue
			}
			if options.targetNums != nil {
				if !step.TargetNums.SameContentAs(options.targetNums) {
					l.Error(techniqueName, " found the wrong numbers: ", step.TargetNums)
					continue
				}
			}
			if options.pointerNums != nil {
				if !step.PointerNums.SameContentAs(options.pointerNums) {
					l.Error(techniqueName, "found the wrong numbers:", step.PointerNums)
					continue
				}
			}
		} else if options.matchMode == solveTechniqueMatchModeAny {
			// any-mode: a single overlapping value per expectation suffices
			foundMatch := false
			if !reflect.DeepEqual(step.extra, options.extra) {
				l.Error("Extra did not match. Got", step.extra, "expected", options.extra)
				continue
			}
			if options.targetCells != nil {
				foundMatch = false
				for _, ref := range options.targetCells {
					for _, cell := range step.TargetCells {
						if ref == cell {
							//TODO: break out early
							foundMatch = true
						}
					}
				}
				if !foundMatch {
					l.Error(techniqueName, " had the wrong target cells: ", step.TargetCells)
					continue
				}
			}
			if options.pointerCells != nil {
				l.Error("Pointer cells in match mode any not yet supported.")
				continue
			}
			if options.targetSame != _GROUP_NONE {
				l.Error("Target Same in match mode any not yet supported.")
				continue
			}
			if options.targetNums != nil {
				foundMatch = false
				for _, targetNum := range options.targetNums {
					for _, num := range step.TargetNums {
						if targetNum == num {
							foundMatch = true
							//TODO: break early here.
						}
					}
				}
				if !foundMatch {
					l.Error(techniqueName, " had the wrong target nums: ", step.TargetNums)
					continue
				}
			}
			if options.pointerNums != nil {
				foundMatch = false
				for _, pointerNum := range options.pointerNums {
					for _, num := range step.PointerNums {
						if pointerNum == num {
							foundMatch = true
							//TODO: break early here
						}
					}
				}
				if !foundMatch {
					l.Error(techniqueName, " had the wrong pointer nums: ", step.PointerNums)
					continue
				}
			}
		}
		if options.description != "" {
			//Normalize the step so that the description will be stable for the test.
			step.normalize()
			description := solver.Description(step)
			if description != options.description {
				l.Error("Wrong description for ", techniqueName, ". Got:*", description, "* expected: *", options.description, "*")
				continue
			}
		} else if options.descriptions != nil {
			foundMatch := false
			step.normalize()
			description := solver.Description(step)
			for _, targetDescription := range options.descriptions {
				if description == targetDescription {
					foundMatch = true
				}
			}
			if !foundMatch {
				l.Error("No descriptions matched for ", techniqueName, ". Got:*", description)
				continue
			}
		}
		// in checkAllSteps mode, the first step that survives every check wins
		if options.checkAllSteps && l.Passed() {
			break
		}
	}
	if !l.Passed() {
		t.Error("No cells matched any of the options: ", options)
	}
	//TODO: we should do exhaustive testing of SolveStep application. We used to test it here, but as long as targetCells and targetNums are correct it should be fine.
}
|
/*
Copyright © 2022 SUSE LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"errors"
"fmt"
"net/url"
"os"
"strings"
rdconfig "github.com/rancher-sandbox/rancher-desktop/src/go/rdctl/pkg/config"
"github.com/rancher-sandbox/rancher-desktop/src/go/rdctl/pkg/shutdown"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
// shutdownSettingsStruct holds the flag values for the shutdown command.
type shutdownSettingsStruct struct {
	Verbose bool // enable trace-level logging
	WaitForShutdown bool // block until shutdown is confirmed
}

// commonShutdownSettings is populated from the command-line flags in init.
var commonShutdownSettings shutdownSettingsStruct
// shutdownCmd represents the shutdown command: it asks the running
// Rancher Desktop application (via doShutdown) to shut down and prints
// any response the API returned.
var shutdownCmd = &cobra.Command{
	Use: "shutdown",
	Short: "Shuts down the running Rancher Desktop application",
	Long: `Shuts down the running Rancher Desktop application.`,
	RunE: func(cmd *cobra.Command, args []string) error {
		if err := cobra.NoArgs(cmd, args); err != nil {
			return err
		}
		if commonShutdownSettings.Verbose {
			logrus.SetLevel(logrus.TraceLevel)
		}
		// errors past this point are operational, not usage errors
		cmd.SilenceUsage = true
		result, err := doShutdown(&commonShutdownSettings, shutdown.Shutdown)
		if err != nil {
			return err
		}
		if result != nil {
			fmt.Println(string(result))
		}
		return nil
	},
}
// init registers the shutdown command and its flags on the root command.
func init() {
	rootCmd.AddCommand(shutdownCmd)
	shutdownCmd.Flags().BoolVar(&commonShutdownSettings.Verbose, "verbose", false, "be verbose")
	shutdownCmd.Flags().BoolVar(&commonShutdownSettings.WaitForShutdown, "wait", true, "wait for shutdown to be confirmed")
}
// doShutdown sends the shutdown request to the Rancher Desktop API and
// then finishes the local shutdown sequence. A missing default config
// file or an unreachable server is treated as "not running" rather than
// as an error. Returns the API response body (may be nil) and any error.
func doShutdown(shutdownSettings *shutdownSettingsStruct, initiatingCommand shutdown.InitiatingCommand) ([]byte, error) {
	output, err := processRequestForUtility(doRequest("PUT", versionCommand("", "shutdown")))
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			if strings.Contains(err.Error(), rdconfig.DefaultConfigPath) {
				logrus.Debugf("Can't find default config file %s, assuming Rancher Desktop isn't running.\n", rdconfig.DefaultConfigPath)
				// It's probably not running, so shutdown is a no-op
				return nil, nil
			}
			return nil, err
		}
		// a url.Error means we couldn't reach the server at all
		urlError := new(url.Error)
		if errors.As(err, &urlError) {
			return []byte("Rancher Desktop is currently not running (or can't be shutdown via this command)."), nil
		}
		return nil, err
	}
	err = shutdown.FinishShutdown(shutdownSettings.WaitForShutdown, initiatingCommand)
	return output, err
}
|
package main
import (
"github.com/aws/aws-sdk-go/aws/session"
"fmt"
"os"
"log"
"github.com/mitchellh/cli"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/iam"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/aws/aws-sdk-go/service/route53"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/hashicorp/terraform/terraform"
"github.com/hashicorp/terraform/builtin/providers/aws"
"github.com/hashicorp/terraform/config"
"github.com/aws/aws-sdk-go/service/efs"
"github.com/aws/aws-sdk-go/service/kms"
"io/ioutil"
"flag"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/sts"
)
// main wires up the awsweeper CLI: it parses flags, creates an AWS
// session, initializes the terraform AWS provider plus the per-service
// API clients, and dispatches to the "wipe" command with the remaining
// arguments, exiting with that command's status.
func main() {
	app := "awsweeper"
	version := "0.1.0"
	// silence the default logger; output goes through the CLI's UI instead
	log.SetFlags(0)
	log.SetOutput(ioutil.Discard)
	versionFlag := flag.Bool("version", false, "Show version")
	helpFlag := flag.Bool("help", false, "Show help")
	dryRunFlag := flag.Bool("dry-run", false, "Don't delete anything, just show what would happen")
	forceDeleteFlag := flag.Bool("force", false, "Start deleting without asking for confirmation")
	profile := flag.String("profile", "", "Use a specific profile from your credential file")
	region := flag.String("region", "", "The region to use. Overrides config/env settings")
	outFileName := flag.String("output", "", "List deleted resources in yaml file")
	flag.Usage = func() { fmt.Println(Help()) }
	flag.Parse()
	if *versionFlag {
		fmt.Println(version)
		os.Exit(0)
	}
	if *helpFlag {
		fmt.Println(Help())
		os.Exit(0)
	}
	c := &cli.CLI{
		Name: app,
		Version: version,
		HelpFunc: BasicHelpFunc(app),
	}
	c.Args = append([]string{"wipe"}, flag.Args()...)
	sess := session.Must(session.NewSessionWithOptions(session.Options{
		SharedConfigState: session.SharedConfigEnable,
		Profile: *profile,
	}))
	if *region == "" {
		// fall back to the session's region
		// NOTE(review): sess.Config.Region could in principle be nil when no
		// region is configured anywhere; *region below would then panic —
		// confirm session defaults before relying on this.
		region = sess.Config.Region
	}
	p := initAwsProvider(*profile, *region)
	ui := &cli.BasicUi{
		Reader: os.Stdin,
		Writer: os.Stdout,
		ErrorWriter: os.Stderr,
	}
	client := &AWSClient{
		autoscalingconn: autoscaling.New(sess),
		ec2conn: ec2.New(sess),
		elbconn: elb.New(sess),
		r53conn: route53.New(sess),
		cfconn: cloudformation.New(sess),
		efsconn: efs.New(sess),
		iamconn: iam.New(sess),
		kmsconn: kms.New(sess),
		s3conn: s3.New(sess),
		stsconn: sts.New(sess),
	}
	c.Commands = map[string]cli.CommandFactory{
		"wipe": func() (cli.Command, error) {
			return &WipeCommand{
				Ui: &cli.ColoredUi{
					Ui: ui,
					OutputColor: cli.UiColorBlue,
				},
				client: client,
				provider: p,
				dryRun: *dryRunFlag,
				forceDelete: *forceDeleteFlag,
				outFileName: *outFileName,
			}, nil
		},
	}
	exitStatus, err := c.Run()
	if err != nil {
		log.Println(err)
	}
	os.Exit(exitStatus)
}
// Help returns the top-level usage text printed for --help and used as
// the flag package's usage output.
func Help() string {
	return `Usage: awsweeper [options] <config.yaml>
Delete AWS resources via a yaml configuration.
Options:
  --profile		Use a specific profile from your credential file
  --region		The region to use. Overrides config/env settings
  --dry-run		Don't delete anything, just show what would happen
  --force		Start deleting without asking for confirmation
  --output=file	Print infos about deleted resources to a yaml file
`
}
// BasicHelpFunc builds a cli.HelpFunc that ignores the registered command
// set and always returns the global usage text from Help.
func BasicHelpFunc(app string) cli.HelpFunc {
	return func(_ map[string]cli.CommandFactory) string {
		return Help()
	}
}
// initAwsProvider builds the terraform AWS resource provider, validates
// and configures it with the given profile and region, and exits the
// process on any validation or configuration failure.
func initAwsProvider(profile string, region string) *terraform.ResourceProvider {
	p := aws.Provider()
	cfg := map[string]interface{}{
		"region": region,
		"profile": profile,
	}
	rc, err := config.NewRawConfig(cfg)
	if err != nil {
		fmt.Printf("bad: %s\n", err)
		os.Exit(1)
	}
	conf := terraform.NewResourceConfig(rc)
	// warnings are reported but non-fatal; errors abort
	warns, errs := p.Validate(conf)
	if len(warns) > 0 {
		fmt.Printf("warnings: %s\n", warns)
	}
	if len(errs) > 0 {
		fmt.Printf("errors: %s\n", errs)
		os.Exit(1)
	}
	if err := p.Configure(conf); err != nil {
		fmt.Printf("err: %s\n", err)
		os.Exit(1)
	}
	return &p
}
|
package schemes
import "image/color"
// PBJ is a gradient color scheme from orange to purple.
// It is populated with a precomputed 8-bit RGBA ramp by init below.
var PBJ []color.Color
func init() {
PBJ = []color.Color{
color.RGBA{R: 0x29, G: 0xa, B: 0x59, A: 0xff},
color.RGBA{R: 0x29, G: 0xa, B: 0x59, A: 0xff},
color.RGBA{R: 0x2a, G: 0xa, B: 0x59, A: 0xff},
color.RGBA{R: 0x2a, G: 0xa, B: 0x59, A: 0xff},
color.RGBA{R: 0x2a, G: 0xa, B: 0x58, A: 0xff},
color.RGBA{R: 0x2b, G: 0xa, B: 0x58, A: 0xff},
color.RGBA{R: 0x2b, G: 0x9, B: 0x58, A: 0xff},
color.RGBA{R: 0x2b, G: 0x9, B: 0x58, A: 0xff},
color.RGBA{R: 0x2c, G: 0x9, B: 0x58, A: 0xff},
color.RGBA{R: 0x2c, G: 0x9, B: 0x58, A: 0xff},
color.RGBA{R: 0x2d, G: 0xa, B: 0x59, A: 0xff},
color.RGBA{R: 0x2e, G: 0xa, B: 0x58, A: 0xff},
color.RGBA{R: 0x2e, G: 0x9, B: 0x58, A: 0xff},
color.RGBA{R: 0x2f, G: 0x9, B: 0x58, A: 0xff},
color.RGBA{R: 0x2f, G: 0x9, B: 0x58, A: 0xff},
color.RGBA{R: 0x2f, G: 0x9, B: 0x58, A: 0xff},
color.RGBA{R: 0x30, G: 0x8, B: 0x58, A: 0xff},
color.RGBA{R: 0x30, G: 0x8, B: 0x57, A: 0xff},
color.RGBA{R: 0x31, G: 0x8, B: 0x57, A: 0xff},
color.RGBA{R: 0x31, G: 0x8, B: 0x57, A: 0xff},
color.RGBA{R: 0x31, G: 0x7, B: 0x57, A: 0xff},
color.RGBA{R: 0x32, G: 0x7, B: 0x57, A: 0xff},
color.RGBA{R: 0x32, G: 0x7, B: 0x57, A: 0xff},
color.RGBA{R: 0x33, G: 0x7, B: 0x56, A: 0xff},
color.RGBA{R: 0x33, G: 0x6, B: 0x56, A: 0xff},
color.RGBA{R: 0x35, G: 0x7, B: 0x56, A: 0xff},
color.RGBA{R: 0x35, G: 0x7, B: 0x56, A: 0xff},
color.RGBA{R: 0x36, G: 0x7, B: 0x56, A: 0xff},
color.RGBA{R: 0x36, G: 0x6, B: 0x55, A: 0xff},
color.RGBA{R: 0x37, G: 0x6, B: 0x55, A: 0xff},
color.RGBA{R: 0x37, G: 0x6, B: 0x55, A: 0xff},
color.RGBA{R: 0x38, G: 0x5, B: 0x55, A: 0xff},
color.RGBA{R: 0x38, G: 0x5, B: 0x55, A: 0xff},
color.RGBA{R: 0x39, G: 0x5, B: 0x54, A: 0xff},
color.RGBA{R: 0x39, G: 0x5, B: 0x54, A: 0xff},
color.RGBA{R: 0x3a, G: 0x4, B: 0x54, A: 0xff},
color.RGBA{R: 0x3b, G: 0x4, B: 0x54, A: 0xff},
color.RGBA{R: 0x3b, G: 0x5, B: 0x54, A: 0xff},
color.RGBA{R: 0x3c, G: 0x4, B: 0x54, A: 0xff},
color.RGBA{R: 0x3c, G: 0x4, B: 0x54, A: 0xff},
color.RGBA{R: 0x3d, G: 0x4, B: 0x54, A: 0xff},
color.RGBA{R: 0x3d, G: 0x4, B: 0x53, A: 0xff},
color.RGBA{R: 0x3e, G: 0x3, B: 0x53, A: 0xff},
color.RGBA{R: 0x3f, G: 0x3, B: 0x53, A: 0xff},
color.RGBA{R: 0x3f, G: 0x3, B: 0x53, A: 0xff},
color.RGBA{R: 0x40, G: 0x3, B: 0x52, A: 0xff},
color.RGBA{R: 0x40, G: 0x3, B: 0x52, A: 0xff},
color.RGBA{R: 0x41, G: 0x3, B: 0x52, A: 0xff},
color.RGBA{R: 0x42, G: 0x3, B: 0x52, A: 0xff},
color.RGBA{R: 0x43, G: 0x4, B: 0x52, A: 0xff},
color.RGBA{R: 0x44, G: 0x4, B: 0x52, A: 0xff},
color.RGBA{R: 0x45, G: 0x4, B: 0x52, A: 0xff},
color.RGBA{R: 0x45, G: 0x4, B: 0x51, A: 0xff},
color.RGBA{R: 0x46, G: 0x4, B: 0x51, A: 0xff},
color.RGBA{R: 0x47, G: 0x4, B: 0x51, A: 0xff},
color.RGBA{R: 0x47, G: 0x4, B: 0x50, A: 0xff},
color.RGBA{R: 0x48, G: 0x4, B: 0x50, A: 0xff},
color.RGBA{R: 0x49, G: 0x4, B: 0x50, A: 0xff},
color.RGBA{R: 0x49, G: 0x4, B: 0x4f, A: 0xff},
color.RGBA{R: 0x4b, G: 0x5, B: 0x50, A: 0xff},
color.RGBA{R: 0x4c, G: 0x5, B: 0x50, A: 0xff},
color.RGBA{R: 0x4d, G: 0x5, B: 0x4f, A: 0xff},
color.RGBA{R: 0x4d, G: 0x5, B: 0x4f, A: 0xff},
color.RGBA{R: 0x4e, G: 0x5, B: 0x4f, A: 0xff},
color.RGBA{R: 0x4f, G: 0x5, B: 0x4e, A: 0xff},
color.RGBA{R: 0x50, G: 0x5, B: 0x4e, A: 0xff},
color.RGBA{R: 0x50, G: 0x5, B: 0x4e, A: 0xff},
color.RGBA{R: 0x50, G: 0x5, B: 0x4d, A: 0xff},
color.RGBA{R: 0x51, G: 0x5, B: 0x4d, A: 0xff},
color.RGBA{R: 0x53, G: 0x6, B: 0x4c, A: 0xff},
color.RGBA{R: 0x53, G: 0x6, B: 0x4c, A: 0xff},
color.RGBA{R: 0x54, G: 0x6, B: 0x4c, A: 0xff},
color.RGBA{R: 0x55, G: 0x6, B: 0x4b, A: 0xff},
color.RGBA{R: 0x56, G: 0x6, B: 0x4b, A: 0xff},
color.RGBA{R: 0x57, G: 0x6, B: 0x4a, A: 0xff},
color.RGBA{R: 0x58, G: 0x6, B: 0x4a, A: 0xff},
color.RGBA{R: 0x58, G: 0x6, B: 0x49, A: 0xff},
color.RGBA{R: 0x59, G: 0x6, B: 0x49, A: 0xff},
color.RGBA{R: 0x5b, G: 0x7, B: 0x49, A: 0xff},
color.RGBA{R: 0x5c, G: 0x7, B: 0x49, A: 0xff},
color.RGBA{R: 0x5d, G: 0x7, B: 0x48, A: 0xff},
color.RGBA{R: 0x5e, G: 0x7, B: 0x48, A: 0xff},
color.RGBA{R: 0x5e, G: 0x7, B: 0x47, A: 0xff},
color.RGBA{R: 0x5f, G: 0x7, B: 0x47, A: 0xff},
color.RGBA{R: 0x60, G: 0x7, B: 0x46, A: 0xff},
color.RGBA{R: 0x60, G: 0x7, B: 0x46, A: 0xff},
color.RGBA{R: 0x61, G: 0x7, B: 0x45, A: 0xff},
color.RGBA{R: 0x63, G: 0x9, B: 0x46, A: 0xff},
color.RGBA{R: 0x64, G: 0x9, B: 0x45, A: 0xff},
color.RGBA{R: 0x65, G: 0xa, B: 0x45, A: 0xff},
color.RGBA{R: 0x66, G: 0xa, B: 0x44, A: 0xff},
color.RGBA{R: 0x67, G: 0xb, B: 0x43, A: 0xff},
color.RGBA{R: 0x68, G: 0xb, B: 0x43, A: 0xff},
color.RGBA{R: 0x69, G: 0xc, B: 0x42, A: 0xff},
color.RGBA{R: 0x6a, G: 0xd, B: 0x42, A: 0xff},
color.RGBA{R: 0x6b, G: 0xe, B: 0x42, A: 0xff},
color.RGBA{R: 0x6c, G: 0xf, B: 0x41, A: 0xff},
color.RGBA{R: 0x6d, G: 0x10, B: 0x40, A: 0xff},
color.RGBA{R: 0x6e, G: 0x10, B: 0x40, A: 0xff},
color.RGBA{R: 0x6f, G: 0x11, B: 0x3f, A: 0xff},
color.RGBA{R: 0x70, G: 0x12, B: 0x3e, A: 0xff},
color.RGBA{R: 0x71, G: 0x12, B: 0x3d, A: 0xff},
color.RGBA{R: 0x72, G: 0x13, B: 0x3d, A: 0xff},
color.RGBA{R: 0x73, G: 0x14, B: 0x3c, A: 0xff},
color.RGBA{R: 0x76, G: 0x16, B: 0x3c, A: 0xff},
color.RGBA{R: 0x77, G: 0x16, B: 0x3b, A: 0xff},
color.RGBA{R: 0x78, G: 0x16, B: 0x3a, A: 0xff},
color.RGBA{R: 0x78, G: 0x17, B: 0x3a, A: 0xff},
color.RGBA{R: 0x79, G: 0x18, B: 0x39, A: 0xff},
color.RGBA{R: 0x7a, G: 0x19, B: 0x38, A: 0xff},
color.RGBA{R: 0x7c, G: 0x1a, B: 0x37, A: 0xff},
color.RGBA{R: 0x7d, G: 0x1b, B: 0x36, A: 0xff},
color.RGBA{R: 0x7f, G: 0x1d, B: 0x36, A: 0xff},
color.RGBA{R: 0x80, G: 0x1e, B: 0x36, A: 0xff},
color.RGBA{R: 0x82, G: 0x1f, B: 0x35, A: 0xff},
color.RGBA{R: 0x83, G: 0x20, B: 0x34, A: 0xff},
color.RGBA{R: 0x84, G: 0x21, B: 0x33, A: 0xff},
color.RGBA{R: 0x85, G: 0x22, B: 0x32, A: 0xff},
color.RGBA{R: 0x86, G: 0x23, B: 0x31, A: 0xff},
color.RGBA{R: 0x87, G: 0x24, B: 0x30, A: 0xff},
color.RGBA{R: 0x89, G: 0x26, B: 0x30, A: 0xff},
color.RGBA{R: 0x8a, G: 0x27, B: 0x2f, A: 0xff},
color.RGBA{R: 0x8c, G: 0x28, B: 0x2e, A: 0xff},
color.RGBA{R: 0x8d, G: 0x29, B: 0x2e, A: 0xff},
color.RGBA{R: 0x8e, G: 0x2a, B: 0x2d, A: 0xff},
color.RGBA{R: 0x8f, G: 0x2a, B: 0x2c, A: 0xff},
color.RGBA{R: 0x90, G: 0x2b, B: 0x2b, A: 0xff},
color.RGBA{R: 0x91, G: 0x2c, B: 0x2a, A: 0xff},
color.RGBA{R: 0x92, G: 0x2d, B: 0x2a, A: 0xff},
color.RGBA{R: 0x95, G: 0x2f, B: 0x29, A: 0xff},
color.RGBA{R: 0x96, G: 0x30, B: 0x29, A: 0xff},
color.RGBA{R: 0x97, G: 0x31, B: 0x28, A: 0xff},
color.RGBA{R: 0x98, G: 0x32, B: 0x27, A: 0xff},
color.RGBA{R: 0x99, G: 0x33, B: 0x26, A: 0xff},
color.RGBA{R: 0x9a, G: 0x34, B: 0x26, A: 0xff},
color.RGBA{R: 0x9b, G: 0x35, B: 0x25, A: 0xff},
color.RGBA{R: 0x9d, G: 0x37, B: 0x24, A: 0xff},
color.RGBA{R: 0x9f, G: 0x39, B: 0x24, A: 0xff},
color.RGBA{R: 0xa0, G: 0x39, B: 0x23, A: 0xff},
color.RGBA{R: 0xa0, G: 0x3a, B: 0x22, A: 0xff},
color.RGBA{R: 0xa2, G: 0x3b, B: 0x21, A: 0xff},
color.RGBA{R: 0xa3, G: 0x3c, B: 0x21, A: 0xff},
color.RGBA{R: 0xa4, G: 0x3d, B: 0x20, A: 0xff},
color.RGBA{R: 0xa5, G: 0x3e, B: 0x1f, A: 0xff},
color.RGBA{R: 0xa7, G: 0x3f, B: 0x1e, A: 0xff},
color.RGBA{R: 0xa8, G: 0x41, B: 0x1e, A: 0xff},
color.RGBA{R: 0xa9, G: 0x42, B: 0x1d, A: 0xff},
color.RGBA{R: 0xaa, G: 0x43, B: 0x1d, A: 0xff},
color.RGBA{R: 0xac, G: 0x44, B: 0x1c, A: 0xff},
color.RGBA{R: 0xad, G: 0x45, B: 0x1b, A: 0xff},
color.RGBA{R: 0xae, G: 0x46, B: 0x1a, A: 0xff},
color.RGBA{R: 0xaf, G: 0x47, B: 0x1a, A: 0xff},
color.RGBA{R: 0xb0, G: 0x47, B: 0x19, A: 0xff},
color.RGBA{R: 0xb2, G: 0x49, B: 0x19, A: 0xff},
color.RGBA{R: 0xb3, G: 0x4a, B: 0x18, A: 0xff},
color.RGBA{R: 0xb4, G: 0x4b, B: 0x18, A: 0xff},
color.RGBA{R: 0xb5, G: 0x4c, B: 0x17, A: 0xff},
color.RGBA{R: 0xb6, G: 0x4d, B: 0x17, A: 0xff},
color.RGBA{R: 0xb7, G: 0x4e, B: 0x17, A: 0xff},
color.RGBA{R: 0xb8, G: 0x4f, B: 0x16, A: 0xff},
color.RGBA{R: 0xba, G: 0x50, B: 0x16, A: 0xff},
color.RGBA{R: 0xbb, G: 0x51, B: 0x15, A: 0xff},
color.RGBA{R: 0xbc, G: 0x52, B: 0x15, A: 0xff},
color.RGBA{R: 0xbd, G: 0x53, B: 0x15, A: 0xff},
color.RGBA{R: 0xbe, G: 0x53, B: 0x14, A: 0xff},
color.RGBA{R: 0xbf, G: 0x54, B: 0x14, A: 0xff},
color.RGBA{R: 0xc0, G: 0x55, B: 0x13, A: 0xff},
color.RGBA{R: 0xc0, G: 0x56, B: 0x13, A: 0xff},
color.RGBA{R: 0xc1, G: 0x57, B: 0x12, A: 0xff},
color.RGBA{R: 0xc2, G: 0x57, B: 0x12, A: 0xff},
color.RGBA{R: 0xc4, G: 0x59, B: 0x12, A: 0xff},
color.RGBA{R: 0xc4, G: 0x5a, B: 0x12, A: 0xff},
color.RGBA{R: 0xc5, G: 0x5a, B: 0x12, A: 0xff},
color.RGBA{R: 0xc6, G: 0x5a, B: 0x12, A: 0xff},
color.RGBA{R: 0xc7, G: 0x5b, B: 0x12, A: 0xff},
color.RGBA{R: 0xc8, G: 0x5c, B: 0x12, A: 0xff},
color.RGBA{R: 0xc9, G: 0x5d, B: 0x12, A: 0xff},
color.RGBA{R: 0xca, G: 0x5d, B: 0x12, A: 0xff},
color.RGBA{R: 0xcb, G: 0x5e, B: 0x12, A: 0xff},
color.RGBA{R: 0xcc, G: 0x60, B: 0x13, A: 0xff},
color.RGBA{R: 0xcc, G: 0x60, B: 0x13, A: 0xff},
color.RGBA{R: 0xcd, G: 0x61, B: 0x13, A: 0xff},
color.RGBA{R: 0xce, G: 0x62, B: 0x13, A: 0xff},
color.RGBA{R: 0xcf, G: 0x63, B: 0x13, A: 0xff},
color.RGBA{R: 0xd0, G: 0x63, B: 0x13, A: 0xff},
color.RGBA{R: 0xd1, G: 0x64, B: 0x13, A: 0xff},
color.RGBA{R: 0xd2, G: 0x64, B: 0x13, A: 0xff},
color.RGBA{R: 0xd3, G: 0x64, B: 0x13, A: 0xff},
color.RGBA{R: 0xd4, G: 0x66, B: 0x14, A: 0xff},
color.RGBA{R: 0xd5, G: 0x67, B: 0x14, A: 0xff},
color.RGBA{R: 0xd6, G: 0x67, B: 0x14, A: 0xff},
color.RGBA{R: 0xd6, G: 0x68, B: 0x14, A: 0xff},
color.RGBA{R: 0xd7, G: 0x69, B: 0x14, A: 0xff},
color.RGBA{R: 0xd7, G: 0x69, B: 0x14, A: 0xff},
color.RGBA{R: 0xd8, G: 0x6a, B: 0x14, A: 0xff},
color.RGBA{R: 0xd9, G: 0x6b, B: 0x14, A: 0xff},
color.RGBA{R: 0xda, G: 0x6b, B: 0x14, A: 0xff},
color.RGBA{R: 0xdb, G: 0x6c, B: 0x14, A: 0xff},
color.RGBA{R: 0xdc, G: 0x6d, B: 0x15, A: 0xff},
color.RGBA{R: 0xdd, G: 0x6d, B: 0x15, A: 0xff},
color.RGBA{R: 0xde, G: 0x6e, B: 0x15, A: 0xff},
color.RGBA{R: 0xde, G: 0x6f, B: 0x15, A: 0xff},
color.RGBA{R: 0xdf, G: 0x6f, B: 0x15, A: 0xff},
color.RGBA{R: 0xe0, G: 0x70, B: 0x15, A: 0xff},
color.RGBA{R: 0xe1, G: 0x71, B: 0x15, A: 0xff},
color.RGBA{R: 0xe2, G: 0x71, B: 0x15, A: 0xff},
color.RGBA{R: 0xe3, G: 0x72, B: 0x15, A: 0xff},
color.RGBA{R: 0xe3, G: 0x72, B: 0x15, A: 0xff},
color.RGBA{R: 0xe4, G: 0x73, B: 0x16, A: 0xff},
color.RGBA{R: 0xe5, G: 0x74, B: 0x16, A: 0xff},
color.RGBA{R: 0xe5, G: 0x74, B: 0x16, A: 0xff},
color.RGBA{R: 0xe6, G: 0x75, B: 0x16, A: 0xff},
color.RGBA{R: 0xe7, G: 0x75, B: 0x16, A: 0xff},
color.RGBA{R: 0xe7, G: 0x76, B: 0x16, A: 0xff},
color.RGBA{R: 0xe8, G: 0x77, B: 0x16, A: 0xff},
color.RGBA{R: 0xe9, G: 0x77, B: 0x16, A: 0xff},
color.RGBA{R: 0xea, G: 0x78, B: 0x16, A: 0xff},
color.RGBA{R: 0xea, G: 0x78, B: 0x16, A: 0xff},
color.RGBA{R: 0xeb, G: 0x79, B: 0x16, A: 0xff},
color.RGBA{R: 0xec, G: 0x79, B: 0x16, A: 0xff},
color.RGBA{R: 0xed, G: 0x7a, B: 0x17, A: 0xff},
color.RGBA{R: 0xed, G: 0x7a, B: 0x17, A: 0xff},
color.RGBA{R: 0xee, G: 0x7b, B: 0x17, A: 0xff},
color.RGBA{R: 0xef, G: 0x7c, B: 0x17, A: 0xff},
color.RGBA{R: 0xef, G: 0x7c, B: 0x17, A: 0xff},
color.RGBA{R: 0xf0, G: 0x7d, B: 0x17, A: 0xff},
color.RGBA{R: 0xf0, G: 0x7d, B: 0x17, A: 0xff},
color.RGBA{R: 0xf1, G: 0x7e, B: 0x17, A: 0xff},
color.RGBA{R: 0xf1, G: 0x7e, B: 0x17, A: 0xff},
color.RGBA{R: 0xf2, G: 0x7f, B: 0x17, A: 0xff},
color.RGBA{R: 0xf3, G: 0x7f, B: 0x17, A: 0xff},
color.RGBA{R: 0xf3, G: 0x80, B: 0x17, A: 0xff},
color.RGBA{R: 0xf4, G: 0x80, B: 0x18, A: 0xff},
color.RGBA{R: 0xf4, G: 0x80, B: 0x18, A: 0xff},
color.RGBA{R: 0xf5, G: 0x81, B: 0x18, A: 0xff},
color.RGBA{R: 0xf6, G: 0x81, B: 0x18, A: 0xff},
color.RGBA{R: 0xf6, G: 0x82, B: 0x18, A: 0xff},
color.RGBA{R: 0xf7, G: 0x82, B: 0x18, A: 0xff},
color.RGBA{R: 0xf7, G: 0x83, B: 0x18, A: 0xff},
color.RGBA{R: 0xf8, G: 0x83, B: 0x18, A: 0xff},
color.RGBA{R: 0xf9, G: 0x83, B: 0x18, A: 0xff},
color.RGBA{R: 0xf9, G: 0x84, B: 0x18, A: 0xff},
color.RGBA{R: 0xfa, G: 0x84, B: 0x18, A: 0xff},
color.RGBA{R: 0xfa, G: 0x85, B: 0x18, A: 0xff},
color.RGBA{R: 0xfa, G: 0x85, B: 0x18, A: 0xff},
color.RGBA{R: 0xfa, G: 0x85, B: 0x18, A: 0xff},
color.RGBA{R: 0xfb, G: 0x86, B: 0x18, A: 0xff},
color.RGBA{R: 0xfb, G: 0x86, B: 0x19, A: 0xff},
color.RGBA{R: 0xfc, G: 0x87, B: 0x19, A: 0xff},
color.RGBA{R: 0xfc, G: 0x87, B: 0x19, A: 0xff},
color.RGBA{R: 0xfd, G: 0x87, B: 0x19, A: 0xff},
color.RGBA{R: 0xfd, G: 0x88, B: 0x19, A: 0xff},
color.RGBA{R: 0xfd, G: 0x88, B: 0x19, A: 0xff},
color.RGBA{R: 0xfe, G: 0x88, B: 0x19, A: 0xff},
color.RGBA{R: 0xfe, G: 0x88, B: 0x19, A: 0xff},
color.RGBA{R: 0xff, G: 0x89, B: 0x19, A: 0xff},
}
}
|
package gouldian_test
import (
"encoding/json"
"errors"
"net/http"
"testing"
µ "github.com/fogfish/gouldian/v2"
"github.com/fogfish/gouldian/v2/mock"
"github.com/fogfish/it/v2"
)
// TestHTTP asserts that an endpoint built with the raw µ.HTTP combinator
// matches a GET request on /foo.
func TestHTTP(t *testing.T) {
	endpoint := mock.Endpoint(
		µ.HTTP(
			http.MethodGet,
			µ.URI(µ.Path("foo")),
		),
	)
	in := mock.Input(mock.URL("/foo"))
	it.Then(t).Should(
		it.Nil(endpoint(in)),
	)
}
// TestMethod checks every verb combinator against a request carrying the
// matching HTTP method; ANY must match any verb.
func TestMethod(t *testing.T) {
	for _, tc := range []struct {
		verb func(µ.Routable, ...µ.Endpoint) µ.Routable
		mock mock.Mock
	}{
		{µ.GET, mock.Method("GET")},
		{µ.PUT, mock.Method("PUT")},
		{µ.POST, mock.Method("POST")},
		{µ.DELETE, mock.Method("DELETE")},
		{µ.PATCH, mock.Method("PATCH")},
		{µ.ANY, mock.Method("GET")},
		{µ.ANY, mock.Method("PUT")},
	} {
		endpoint := mock.Endpoint(tc.verb(µ.URI(µ.Path("foo"))))
		in := mock.Input(tc.mock, mock.URL("/foo"))
		it.Then(t).Should(it.Nil(endpoint(in)))
	}
}
// TestMethodNoMatch checks that every concrete verb combinator rejects a
// request whose method differs.
func TestMethodNoMatch(t *testing.T) {
	for _, tc := range []struct {
		verb func(µ.Routable, ...µ.Endpoint) µ.Routable
		mock mock.Mock
	}{
		{µ.GET, mock.Method("OTHER")},
		{µ.PUT, mock.Method("OTHER")},
		{µ.POST, mock.Method("OTHER")},
		{µ.DELETE, mock.Method("OTHER")},
		{µ.PATCH, mock.Method("OTHER")},
	} {
		endpoint := mock.Endpoint(tc.verb(µ.URI(µ.Path("foo"))))
		in := mock.Input(tc.mock, mock.URL("/foo"))
		it.Then(t).ShouldNot(it.Nil(endpoint(in)))
	}
}
// TestBodyJson verifies that a JSON request body — supplied either as an
// encoded struct or as raw text — is decoded into the lens-bound field.
func TestBodyJson(t *testing.T) {
	type foobar struct {
		Foo string `json:"foo"`
		Bar int    `json:"bar"`
	}
	type request struct {
		FooBar foobar `content:"json"`
	}
	lens := µ.Optics1[request, foobar]()
	want := foobar{"foo1", 10}
	for _, in := range []*µ.Context{
		mock.Input(
			mock.Header("Content-Type", "application/json"),
			mock.JSON(foobar{"foo1", 10}),
		),
		mock.Input(
			mock.Header("Content-Type", "application/json"),
			mock.Text(`{"foo":"foo1","bar":10}`),
		),
	} {
		endpoint := mock.Endpoint(µ.GET(µ.URI(), µ.Body(lens)))
		var req request
		it.Then(t).Should(
			it.Nil(endpoint(in)),
			it.Nil(µ.FromContext(in, &req)),
			it.Equiv(req.FooBar, want),
		)
	}
}
// TestBodyJsonNoMatch feeds malformed JSON: the route itself still
// matches, but decoding the body via FromContext must fail.
func TestBodyJsonNoMatch(t *testing.T) {
	type foobar struct {
		Foo string `json:"foo"`
		Bar int    `json:"bar"`
	}
	type request struct {
		FooBar foobar `content:"json"`
	}
	lens := µ.Optics1[request, foobar]()
	in := mock.Input(
		mock.Header("Content-Type", "application/json"),
		mock.Text(`{"foo:"foo1,"bar":10}`),
	)
	endpoint := mock.Endpoint(µ.GET(µ.URI(), µ.Body(lens)))
	var req request
	it.Then(t).
		Should(it.Nil(endpoint(in))).
		ShouldNot(it.Nil(µ.FromContext(in, &req)))
}
// TestBodyForm decodes an application/x-www-form-urlencoded body into
// the lens-bound struct.
func TestBodyForm(t *testing.T) {
	type foobar struct {
		Foo string `json:"foo"`
		Bar int    `json:"bar"`
	}
	type request struct {
		FooBar foobar `content:"form"`
	}
	lens := µ.Optics1[request, foobar]()
	in := mock.Input(
		mock.Header("Content-Type", "application/x-www-form-urlencoded"),
		mock.Text("foo=foo1&bar=10"),
	)
	endpoint := mock.Endpoint(µ.GET(µ.URI(), µ.Body(lens)))
	var req request
	it.Then(t).Should(
		it.Nil(endpoint(in)),
		it.Nil(µ.FromContext(in, &req)),
		it.Equiv(req.FooBar, foobar{"foo1", 10}),
	)
}
// TestBodyFormNoMatch feeds a body that is not valid form encoding: the
// route matches, but decoding through FromContext must fail.
func TestBodyFormNoMatch(t *testing.T) {
	type foobar struct {
		Foo string `json:"foo"`
		Bar int    `json:"bar"`
	}
	type request struct {
		FooBar foobar `content:"form"`
	}
	lens := µ.Optics1[request, foobar]()
	in := mock.Input(
		mock.Header("Content-Type", "application/x-www-form-urlencoded"),
		mock.Text("foobar"),
	)
	endpoint := mock.Endpoint(µ.GET(µ.URI(), µ.Body(lens)))
	var req request
	it.Then(t).
		Should(it.Nil(endpoint(in))).
		ShouldNot(it.Nil(µ.FromContext(in, &req)))
}
// TestBodyText binds a plain-text body verbatim to a string field.
func TestBodyText(t *testing.T) {
	type request struct {
		FooBar string
	}
	lens := µ.Optics1[request, string]()
	in := mock.Input(
		mock.Header("Content-Type", "text/plain"),
		mock.Text("foobar"),
	)
	endpoint := mock.Endpoint(µ.GET(µ.URI(), µ.Body(lens)))
	var req request
	it.Then(t).Should(
		it.Nil(endpoint(in)),
		it.Nil(µ.FromContext(in, &req)),
		it.Equiv(req.FooBar, "foobar"),
	)
}
// TestFMapSuccess checks that FMap decodes the second path segment into
// T and that the handler's output carries it back in the response body.
func TestFMapSuccess(t *testing.T) {
	type T struct{ A string }
	a := µ.Optics1[T, string]()
	endpoint := mock.Endpoint(
		µ.GET(
			µ.URI(µ.Path("foo"), µ.Path(a)),
			µ.FMap(func(ctx *µ.Context, t *T) error {
				out := µ.NewOutput(http.StatusOK)
				out.Body = t.A
				return out
			}),
		),
	)
	err := endpoint(mock.Input(mock.URL("/foo/bar")))
	it.Then(t).Should(
		mock.CheckOutput(err, "bar"),
	)
}
// TestFMapFailure checks that a failing FMap handler surfaces its status
// code (401) to the caller.
func TestFMapFailure(t *testing.T) {
	type T struct{ A string }
	a := µ.Optics1[T, string]()
	endpoint := mock.Endpoint(
		µ.GET(
			µ.URI(µ.Path("foo"), µ.Path(a)),
			µ.FMap(func(*µ.Context, *T) error {
				out := µ.NewOutput(http.StatusUnauthorized)
				out.SetIssue(errors.New(""))
				return out
			}),
		),
	)
	err := endpoint(mock.Input(mock.URL("/foo/bar")))
	it.Then(t).Should(
		mock.CheckStatusCode(err, http.StatusUnauthorized),
	)
}
// TestMapSuccess checks that Map serializes the value it returns as the
// JSON response body.
func TestMapSuccess(t *testing.T) {
	type T struct{ A string }
	a := µ.Optics1[T, string]()
	endpoint := mock.Endpoint(
		µ.GET(
			µ.URI(µ.Path("foo"), µ.Path(a)),
			µ.Map(func(_ *µ.Context, v *T) (*T, error) { return v, nil }),
		),
	)
	err := endpoint(mock.Input(mock.URL("/foo/bar")))
	it.Then(t).Should(
		mock.CheckOutput(err, `{"A":"bar"}`),
	)
}
// TestContextFree documents the context recycling contract: after Free
// the same context must no longer satisfy the endpoint until re-armed.
func TestContextFree(t *testing.T) {
	foo := mock.Endpoint(µ.GET(µ.URI(µ.Path("foo"))))
	req := mock.Input(mock.URL("/foo"))
	err := foo(req)
	it.Then(t).Should(it.Nil(err))
	// Releasing the context resets its matching state; evaluating the
	// endpoint against the freed context is expected to fail.
	req.Free()
	err = foo(req)
	it.Then(t).ShouldNot(it.Nil(err))
}
// TestOutputFree checks that Free resets a pooled Output: headers and
// body set before the call must read back empty afterwards.
func TestOutputFree(t *testing.T) {
	out := µ.NewOutput(200)
	out.SetHeader("X-Foo", "bar")
	out.Body = "test"
	it.Then(t).Should(
		it.Equal(out.Status, 200),
		it.Equal(out.GetHeader("X-Foo"), "bar"),
		it.Equal(out.Body, "test"),
	)
	// Free returns the output to the pool and must clear mutable state.
	out.Free()
	it.Then(t).Should(
		it.Equal(out.GetHeader("X-Foo"), ""),
		it.Equal(out.Body, ""),
	)
}
// TestHandlerSuccess: a matching route invokes the attached handler and
// yields its output.
func TestHandlerSuccess(t *testing.T) {
	endpoint := mock.Endpoint(
		µ.GET(µ.URI(µ.Path("foo")), mock.Output(http.StatusOK, "bar")),
	)
	it.Then(t).Should(
		mock.CheckOutput(endpoint(mock.Input(mock.URL("/foo"))), "bar"),
	)
}
// TestHandler2Success: an Endpoints union dispatches to whichever route
// matches the request path.
func TestHandler2Success(t *testing.T) {
	foo := mock.Endpoint(
		µ.GET(µ.URI(µ.Path("foo")), mock.Output(http.StatusOK, "bar")),
	)
	bar := mock.Endpoint(
		µ.GET(µ.URI(µ.Path("bar")), mock.Output(http.StatusOK, "foo")),
	)
	err := µ.Endpoints{foo, bar}.Or(mock.Input(mock.URL("/foo")))
	it.Then(t).Should(
		mock.CheckOutput(err, "bar"),
	)
}
// TestHandlerFailure: a handler that returns a failure output surfaces
// its status code (401) to the caller.
func TestHandlerFailure(t *testing.T) {
	endpoint := mock.Endpoint(
		µ.GET(
			µ.URI(µ.Path("foo")),
			func(*µ.Context) error {
				out := µ.NewOutput(http.StatusUnauthorized)
				out.SetIssue(errors.New(""))
				return out
			},
		),
	)
	err := endpoint(mock.Input(mock.URL("/foo")))
	it.Then(t).Should(
		mock.CheckStatusCode(err, http.StatusUnauthorized),
	)
}
// TestMapFailure checks that a failing Map handler surfaces its status
// code (401) to the caller.
func TestMapFailure(t *testing.T) {
	type T struct{ A string }
	a := µ.Optics1[T, string]()
	endpoint := mock.Endpoint(
		µ.GET(
			µ.URI(µ.Path("foo"), µ.Path(a)),
			µ.Map(func(*µ.Context, *T) (*T, error) {
				out := µ.NewOutput(http.StatusUnauthorized)
				out.SetIssue(errors.New(""))
				return nil, out
			}),
		),
	)
	err := endpoint(mock.Input(mock.URL("/foo/bar")))
	it.Then(t).Should(
		mock.CheckStatusCode(err, http.StatusUnauthorized),
	)
}
// TestBodyLeak is a regression test: consecutive requests through the
// same (pooled) endpoint must not see each other's decoded bodies. Each
// input is transformed independently and the response must reflect only
// that request's payload.
func TestBodyLeak(t *testing.T) {
	type Pair struct {
		Key int    `json:"key,omitempty"`
		Val string `json:"val,omitempty"`
	}
	type Item struct {
		Seq []Pair `json:"seq,omitempty"`
	}
	type request struct {
		Item Item
	}
	lens := µ.Optics1[request, Item]()
	endpoint := func() µ.Routable {
		return µ.GET(
			µ.URI(),
			µ.Body(lens),
			func(ctx *µ.Context) error {
				var req request
				if err := µ.FromContext(ctx, &req); err != nil {
					return err
				}
				// Assign 1-based keys to entries that arrived without one.
				// If state leaked between requests, entries from earlier
				// payloads would show up here.
				seq := []Pair{}
				for key, val := range req.Item.Seq {
					if val.Key == 0 {
						seq = append(seq, Pair{Key: key + 1, Val: val.Val})
					}
				}
				req.Item = Item{Seq: seq}
				out := µ.NewOutput(http.StatusOK)
				val, _ := json.Marshal(req.Item)
				out.Body = string(val)
				return out
			},
		)
	}
	foo := mock.Endpoint(endpoint())
	// Map of request payload -> expected response body.
	for val, expect := range map[string]string{
		"{\"seq\":[{\"val\":\"a\"},{\"val\":\"b\"}]}": "{\"seq\":[{\"key\":1,\"val\":\"a\"},{\"key\":2,\"val\":\"b\"}]}",
		"{\"seq\":[{\"val\":\"c\"}]}": "{\"seq\":[{\"key\":1,\"val\":\"c\"}]}",
		"{\"seq\":[{\"val\":\"d\"},{\"val\":\"e\"},{\"val\":\"f\"}]}": "{\"seq\":[{\"key\":1,\"val\":\"d\"},{\"key\":2,\"val\":\"e\"},{\"key\":3,\"val\":\"f\"}]}",
	} {
		req := mock.Input(
			mock.Method("GET"),
			mock.Header("Content-Type", "application/json"),
			mock.Text(val),
		)
		out := foo(req)
		it.Then(t).Should(
			it.Equal(out.Error(), expect),
		)
	}
}
|
package functions
import (
"sort"
)
// Sort works similar to sort.SliceType(). However, unlike sort.SliceType the
// slice returned will be reallocated as to not modify the input slice.
//
// See Reverse() and AreSorted().
func (ss SliceType) Sort() SliceType {
	// Zero or one element is sorted by definition; returning the input
	// avoids the copy.
	if len(ss) < 2 {
		return ss
	}
	out := append(make(SliceType, 0, len(ss)), ss...)
	sort.Slice(out, func(a, b int) bool {
		return out[a] < out[b]
	})
	return out
}
|
/*
Task
The input consists of a JSON object, where every value is an object (possibly empty), representing a directory structure. The output must be a list of the corresponding root-to-leaf paths.
Inspired by this comment on StackOverflow.
Input specifications
You can assume that the input always contains a JSON object.
The input can be an empty JSON object ({}); in this case the output must be an empty list.
You can assume that the names/keys contain only printable ASCII characters, and they do not contain \0, \, /, ", ', nor `.
You can assume each JSON object does not contain duplicate names/keys.
Input format
The input can be:
a string;
a dictionary or an associative array in a language of your choice;
a list or array of tuples, where each tuple contains the name/key and the value (which is itself a list of tuples).
Output specifications
There is no need to escape any character.
You can use as directory separator either / or \, but you cannot have a mixed use of both (e.g. a/b/c and a\b\c are both valid, but a/b\c and a\b/c are not).
Each path can have a leading and/or trailing directory separator (e.g. a/b, /a/b, a/b/, and /a/b/ are equally valid).
If you output a newline-separated list, the output can have a trailing newline.
The paths must be in the same order of the input.
Test cases
Input 1:
{
"animal": {
"cat": {"Persian": {}, "British_Shorthair": {}},
"dog": {"Pug": {}, "Pitbull": {}}
},
"vehicle": {
"car": {"Mercedes": {}, "BMW": {}}
}
}
Output 1:
animal/cat/Persian
animal/cat/British_Shorthair
animal/dog/Pug
animal/dog/Pitbull
vehicle/car/Mercedes
vehicle/car/BMW
Input 2
{
"bin": {
"ls": {}
},
"home": {},
"usr": {
"bin": {
"ls": {}
},
"include": {
"sys": {}
},
"share": {}
}
}
Output 2:
/bin/ls
/home
/usr/bin/ls
/usr/include/sys
/usr/share
*/
package main
import (
"encoding/json"
"fmt"
"log"
"path"
"sort"
)
func main() {
test(data1)
test(data2)
}
// test prints the raw JSON followed by its converted path list; the
// program aborts on a parse failure.
func test(data string) {
	fmt.Printf("%s\n", data)
	paths, err := convert(data)
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range paths {
		fmt.Println(p)
	}
}
// convert parses a JSON directory tree and returns its root-to-leaf
// paths in sorted order.
func convert(data string) ([]string, error) {
	var root object
	if err := json.Unmarshal([]byte(data), &root); err != nil {
		return nil, err
	}
	var paths []string
	walk(&paths, root, "")
	sort.Strings(paths)
	return paths, nil
}
// walk appends to result the root-to-leaf path of every leaf reachable
// from obj; name is the path accumulated so far.
func walk(result *[]string, obj object, name string) {
	// A leaf (empty object) terminates a path. The root itself (empty
	// name) yields no entry.
	if len(obj) == 0 && name != "" {
		*result = append(*result, name)
		return
	}
	for label, child := range obj {
		walk(result, child, path.Join(name, label))
	}
}
type object map[string]object
// data1 is the first example from the task statement (nested categories;
// every leaf is an empty object).
const data1 = `
{
"animal": {
"cat": {"Persian": {}, "British_Shorthair": {}},
"dog": {"Pug": {}, "Pitbull": {}}
},
"vehicle": {
"car": {"Mercedes": {}, "BMW": {}}
}
}
`

// data2 is the second example from the task statement (a filesystem-like
// tree with some empty directories).
const data2 = `
{
"bin": {
"ls": {}
},
"home": {},
"usr": {
"bin": {
"ls": {}
},
"include": {
"sys": {}
},
"share": {}
}
}
`
|
package service
import (
"Seaman/model"
"github.com/go-xorm/xorm"
"github.com/kataras/iris/v12"
)
/**
 * Administrator service.
 * Following the standard development pattern, the features of each entity
 * are declared behind an interface for the controller layer to call.
 */
type SecurityService interface {
	// GetByAdminNameAndPassword looks an administrator up by account name
	// and password. On a match it returns the entity and true; otherwise
	// the zero entity and false.
	GetByAdminNameAndPassword(username, password string) (model.TplUserT, bool)
	// GetByCurrentUserId fetches the administrator with the given id.
	GetByCurrentUserId(currentUserId int64) (model.TplUserT, bool)
	// GetAdminCount returns the total number of administrators.
	GetAdminCount() (int64, error)
	// SaveAvatarImg stores the avatar file name for the given admin id and
	// reports whether the update succeeded.
	SaveAvatarImg(adminId int64, fileName string) bool
	// GetAdminList returns up to limit administrators starting at offset.
	GetAdminList(offset, limit int) []*model.TplUserT
}
// NewSecurityService wires a SecurityService implementation to the given
// xorm engine.
func NewSecurityService(db *xorm.Engine) SecurityService {
	svc := &securityService{engine: db}
	return svc
}
/**
 * Concrete implementation of the administrator service.
 */
type securityService struct {
	engine *xorm.Engine // shared database handle
}
// GetAdminCount returns the total number of administrator rows.
func (ac *securityService) GetAdminCount() (int64, error) {
	count, err := ac.engine.Count(new(model.TplUserT))
	if err != nil {
		// The original panicked here, which made the subsequent
		// `return 0, err` unreachable and crashed the caller on a DB
		// hiccup; propagate the error instead, as the signature intends.
		return 0, err
	}
	return count, nil
}
// GetByAdminNameAndPassword looks an administrator up by account name and
// password. On a match it returns the entity and true; on a query error
// or no match it returns the (zero) entity and false.
func (ac *securityService) GetByAdminNameAndPassword(username, password string) (model.TplUserT, bool) {
	var user model.TplUserT
	// xorm's Get reports (found, err); the original discarded both and
	// relied on user.Id staying zero. Check the error explicitly so a DB
	// failure is not silently reported as "wrong credentials".
	has, err := ac.engine.Where(" account = ? and password = ? ", username, password).Get(&user)
	if err != nil {
		iris.New().Logger().Error(err.Error())
		return user, false
	}
	return user, has && user.Id != 0
}
// GetByCurrentUserId fetches the administrator identified by
// currentUserId; it returns the entity and true when found.
func (ac *securityService) GetByCurrentUserId(currentUserId int64) (model.TplUserT, bool) {
	var user model.TplUserT
	// The original discarded xorm's (found, err) result pair; surface a
	// query failure instead of treating it as "not found".
	has, err := ac.engine.Id(currentUserId).Get(&user)
	if err != nil {
		iris.New().Logger().Error(err.Error())
		return user, false
	}
	return user, has && user.Id != 0
}
// SaveAvatarImg stores the avatar file name for the given admin id and
// reports whether the update succeeded.
func (ac *securityService) SaveAvatarImg(adminId int64, fileName string) bool {
	// NOTE(review): the file name is written to the BackUp field while the
	// update is restricted to the " avatar " column — confirm the column
	// mapping of model.TplUserT matches this intent.
	user := model.TplUserT{BackUp: fileName}
	_, err := ac.engine.Id(adminId).Cols(" avatar ").Update(&user)
	// Bug fix: the original returned `err != nil`, i.e. it reported
	// success exactly when the update FAILED. Success means no error.
	return err == nil
}
// GetAdminList returns up to limit administrators starting at offset
// (offset = number of rows to skip). It returns nil when the query fails.
func (ac *securityService) GetAdminList(offset, limit int) []*model.TplUserT {
	var adminList []*model.TplUserT
	if err := ac.engine.Limit(limit, offset).Find(&adminList); err != nil {
		iris.New().Logger().Error(err.Error())
		// The original panicked after logging, which made `return nil`
		// unreachable and crashed the caller; log and return nil so the
		// caller can treat it as an empty result.
		return nil
	}
	return adminList
}
|
package aggregation
import (
"github.com/emicklei/go-restful"
api "github.com/emicklei/go-restful-openapi"
. "grm-service/dbcentral/pg"
. "grm-service/util"
"titan-statistics/dbcentral/etcd"
"titan-statistics/dbcentral/pg"
. "titan-statistics/types"
)
// AggrSvc bundles the storage backends and directories used by the
// aggregation REST service.
type AggrSvc struct {
	SysDB       *pg.SystemDB    // system database handle
	MetaDB      *pg.MetaDB      // metadata database handle
	DynamicDB   *etcd.DynamicDB // dynamic configuration stored in etcd
	DataDir     string          // data directory path
	ConfigDir   string          // configuration directory path
	// DataIdConns maps an identifier to its connection settings —
	// presumably keyed by data id; confirm against callers.
	DataIdConns map[string]*ConnConfig
}
// WebService creates a new service that can handle REST requests for
// resources. Routes are registered under /aggr and produce JSON.
func (s AggrSvc) WebService() *restful.WebService {
	ws := new(restful.WebService)
	ws.Path("/aggr").
		//Consumes(restful.MIME_JSON, restful.MIME_JSON).
		// The original passed MIME_JSON twice; once is enough.
		Produces(restful.MIME_JSON)
	tags := []string{TR("aggregation info manage")}
	// Aggregation criteria per data type: set and get.
	ws.Route(ws.POST("/data_type/{type}").To(s.SetAggr).
		Doc(TR("aggregation by data type")).
		Param(ws.PathParameter("type", "data type name").DataType("string")).
		Metadata(api.KeyOpenAPITags, tags).
		Reads(TypeAggr{}))
	ws.Route(ws.GET("/data_type/{type}").To(s.GetAggr).
		Doc(TR("aggregation by data type")).
		Param(ws.PathParameter("type", "data type name").DataType("string")).
		Metadata(api.KeyOpenAPITags, tags).
		Writes(TypeAggr{}))
	// List of values under this type's aggregation criteria (disabled).
	// ws.Route(ws.GET("/data_type/{type}/field/{field}/stat").To(s.StatByAggr).
	// Doc(TR("aggregation by data type")).
	// Param(ws.PathParameter("type", "data type name").DataType("string")).
	// Param(ws.PathParameter("field", "stat filed").DataType("string")).
	// Metadata(api.KeyOpenAPITags, tags).
	// Writes(DistinctValues{}))
	return ws
}
|
package model
import (
"time"
"github.com/google/uuid"
"gorm.io/gorm"
)
// User is the persisted account record. The json:"-" tags keep internal
// and sensitive fields (id, password, timestamps) out of API responses.
type User struct {
	ID        string         `json:"-" gorm:"primaryKey"`
	Name      string         `json:"name,omitempty"`
	Email     string         `json:"email,omitempty" gorm:"type:varchar(100);unique_index"`
	Gender    string         `json:"gender"`
	Password  string         `json:"-"`
	CreatedAt time.Time      `json:"-"`
	UpdatedAt time.Time      `json:"-"`
	DeletedAt gorm.DeletedAt `json:"-" sql:"index"`
}
// BeforeCreate is a GORM hook that assigns a fresh UUID primary key
// before the row is inserted. An ID set explicitly by the caller is kept
// (the original unconditionally overwrote it, which broke deterministic
// inserts in tests and migrations).
func (i *User) BeforeCreate(tx *gorm.DB) error {
	if i.ID == "" {
		i.ID = uuid.New().String()
	}
	return nil
}
|
package main
import (
"bytes"
"context"
"crypto/sha256"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/gorilla/websocket"
"github.com/stretchr/testify/assert"
)
// TestSuitePC is an end-to-end suite for the remote-PC websocket
// controller: it registers a PC, rejects a duplicate registration, and
// checks authenticated vs. unauthenticated websocket connects.
// It requires a MongoDB instance on localhost:27017.
func TestSuitePC(t *testing.T) {
	var (
		key           = "fc58161e6b0da8e0cae8248f40141165"
		adminUser     = fmt.Sprintf("%x", sha256.Sum256([]byte("admin")))
		adminPassword = fmt.Sprintf("%x", sha256.Sum256([]byte("admin")))
		pcUsername    = fmt.Sprintf("%x", sha256.Sum256([]byte("username")))
		pcPassword    = fmt.Sprintf("%x", sha256.Sum256([]byte("passwd")))
	)
	client, err := setupMongodb("localhost:27017")
	assert.Nil(t, err)
	// Start from a clean database so the registration subtests are
	// deterministic.
	client.Database("test_remote_pc").Drop(context.TODO())
	wsController := NewWsController("admin", "admin", "localhost:27017", "test_remote_pc")
	server := httptest.NewServer(wsController.routes())
	defer server.Close()
	t.Run("RegisterNewPC", func(t *testing.T) {
		url := server.URL + "/create_pc/" + key
		var jsonStr = []byte(fmt.Sprintf(`{
	"username": "%s",
	"password": "%s",
	"key": "%s"
	}`, pcUsername, pcPassword, key))
		req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
		// Bug fix: the original dereferenced req before ever checking this
		// error, risking a nil-pointer panic instead of a test failure.
		assert.Nil(t, err)
		authHeader := http.Header{"X-Username": []string{adminUser}, "X-Password": []string{adminPassword}}
		req.Header = authHeader
		req.Header.Set("Content-Type", "application/json")
		client := &http.Client{}
		resp, err := client.Do(req)
		assert.Nil(t, err)
		defer resp.Body.Close()
		assert.Equal(t, http.StatusCreated, resp.StatusCode)
	})
	t.Run("FailToRegisterPcAlreadyRegistered", func(t *testing.T) {
		url := server.URL + "/create_pc/" + key
		var jsonStr = []byte(fmt.Sprintf(`{
	"username": "%s",
	"password": "%s",
	"key": "%s"
	}`, pcUsername, pcPassword, key))
		req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
		assert.Nil(t, err)
		authHeader := http.Header{"X-Username": []string{adminUser}, "X-Password": []string{adminPassword}}
		req.Header = authHeader
		req.Header.Set("Content-Type", "application/json")
		client := &http.Client{}
		resp, err := client.Do(req)
		assert.Nil(t, err)
		defer resp.Body.Close()
		assert.Equal(t, http.StatusBadRequest, resp.StatusCode)
	})
	t.Run("CreateAuthenticatedConnection", func(t *testing.T) {
		url := "ws" + strings.TrimPrefix(server.URL, "http") + "/connect/" + key
		authHeader := http.Header{"X-Username": []string{pcUsername}, "X-Password": []string{pcPassword}}
		wsPcConn, response, err := websocket.DefaultDialer.Dial(url, authHeader)
		assert.Equal(t, http.StatusSwitchingProtocols, response.StatusCode)
		assert.Nil(t, err)
		defer wsPcConn.Close()
	})
	t.Run("CreateUnauthenticatedConnectionFail", func(t *testing.T) {
		url := "ws" + strings.TrimPrefix(server.URL, "http") + "/connect/" + key
		// No auth headers: the handshake must be rejected with 403.
		_, response, err := websocket.DefaultDialer.Dial(url, nil)
		assert.Equal(t, http.StatusForbidden, response.StatusCode)
		assert.NotNil(t, err)
	})
}
|
package main
import (
"container/heap"
"fmt"
)
// 1091. Shortest Path in Binary Matrix
// In an N x N grid each cell is either empty (0) or blocked (1). A clear
// path of length k from the top-left to the bottom-right consists of
// cells C_1..C_k where consecutive cells are 8-directionally adjacent,
// C_1 is (0,0), C_k is (N-1,N-1), and every visited cell holds 0.
// Return the length of the shortest clear path, or -1 if none exists.
// Constraints:
// 1 <= grid.length == grid[0].length <= 100
// grid[i][j] is 0 or 1
// https://leetcode-cn.com/problems/shortest-path-in-binary-matrix/
// main exercises the A* variant on four sample grids; the expected
// answers are noted before each call.
func main() {
	// 4
	fmt.Println(shortestPathBinaryMatrix2([][]int{{0, 0, 0}, {1, 1, 0}, {1, 1, 0}}))
	// 6
	fmt.Println(shortestPathBinaryMatrix2([][]int{{0, 1, 0, 1, 0}, {1, 0, 0, 0, 1}, {0, 0, 1, 1, 1}, {0, 0, 0, 0, 0}, {1, 0, 1, 0, 0}}))
	//10
	fmt.Println(shortestPathBinaryMatrix2([][]int{{0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 1, 1, 0, 0, 0, 0, 0, 0}, {1, 0, 0, 1, 0, 0, 0, 0, 0}, {0, 1, 0, 0, 1, 1, 0, 0, 1}, {0, 0, 1, 0, 0, 1, 0, 0, 1}, {0, 1, 0, 1, 0, 0, 1, 1, 0}, {0, 0, 0, 0, 0, 1, 0, 0, 0}, {0, 1, 0, 1, 0, 0, 1, 0, 0}, {0, 1, 1, 0, 0, 0, 0, 1, 0}}))
	// 7
	fmt.Println(shortestPathBinaryMatrix2([][]int{{0, 0, 0, 0, 1, 1}, {0, 1, 0, 0, 1, 0}, {1, 1, 0, 1, 0, 0}, {0, 1, 0, 0, 1, 1}, {0, 1, 0, 0, 0, 1}, {0, 0, 1, 0, 0, 0}}))
}
// Approach 1: BFS.
// around lists the eight neighbor offsets (all diagonal and orthogonal
// directions) shared by both search variants.
var around = [8][2]int{{-1, -1}, {-1, 0}, {-1, 1}, {0, -1}, {0, 1}, {1, 1}, {1, 0}, {1, -1}}
// shortestPathBinaryMatrix returns the number of cells on the shortest
// 8-directionally connected path of zeros from (0,0) to (n-1,n-1), or -1
// when no such path exists.
//
// Plain level-order BFS. Instead of a separate visited array, visited
// cells are marked by writing 2 into grid, so the input is modified.
func shortestPathBinaryMatrix(grid [][]int) int {
	n := len(grid)
	switch {
	case grid[0][0] == 1 || grid[n-1][n-1] == 1:
		// Either endpoint blocked: no path at all.
		return -1
	case n <= 2:
		// With free corners, a 1x1 or 2x2 grid is trivially solvable and
		// the path length equals n.
		return n
	}
	dirs := [8][2]int{{-1, -1}, {-1, 0}, {-1, 1}, {0, -1}, {0, 1}, {1, 1}, {1, 0}, {1, -1}}
	frontier := [][2]int{{0, 0}}
	grid[0][0] = 2
	level := 1
	for len(frontier) > 0 {
		level++
		var next [][2]int
		for _, cur := range frontier {
			for _, d := range dirs {
				x, y := cur[0]+d[0], cur[1]+d[1]
				if x < 0 || x >= n || y < 0 || y >= n || grid[x][y] != 0 {
					continue
				}
				if x == n-1 && y == n-1 {
					return level
				}
				grid[x][y] = 2
				next = append(next, [2]int{x, y})
			}
		}
		frontier = next
	}
	return -1
}
// Approach 2: A* search.
// A node's priority is its heuristic estimate to the goal plus the
// distance travelled so far; a min-heap (container/heap) serves as the
// priority queue. The original comment describes the heuristic as the
// Manhattan distance, but heuristicHelper actually returns the larger
// coordinate gap (Chebyshev distance) — the admissible bound for 8-way
// movement. The result is the shortest path length, or -1 if none.
func shortestPathBinaryMatrix2(grid [][]int) (minDist int) {
	n := len(grid)
	if grid[0][0] == 1 || grid[n-1][n-1] == 1 {
		// Either endpoint blocked: unreachable.
		return -1
	} else if n <= 2 {
		// Tiny grid with free corners: path length equals n.
		return n
	}
	var pq priorityQueue
	maxPos := n - 1
	pq = append(pq, node{x: 0, y: 0})
	// dist maps a cell to the cell-count of the best known path from the
	// origin; the origin itself counts, hence 1.
	dist := make(map[[2]int]int, n*n)
	dist[[2]int{0, 0}] = 1
	for len(pq) > 0 {
		cur := heap.Pop(&pq).(node)
		if grid[cur.x][cur.y] == 2 {
			// Already finalized through a cheaper queue entry.
			continue
		}
		if cur.x == maxPos && cur.y == maxPos {
			return dist[[2]int{maxPos, maxPos}]
		}
		// Mark visited in-place instead of keeping a separate set.
		grid[cur.x][cur.y] = 2
		for _, diff := range around {
			x, y := cur.x+diff[0], cur.y+diff[1]
			if x >= 0 && x < n && y >= 0 && y < n && grid[x][y] != 1 {
				heap.Push(&pq, node{x, y, heuristicHelper(x, y, maxPos) + dist[[2]int{cur.x, cur.y}] + 1})
				// Pruning: a cell may be reachable along several paths;
				// record only the shortest distance seen so far.
				if dist[[2]int{x, y}] == 0 || dist[[2]int{cur.x, cur.y}]+1 < dist[[2]int{x, y}] {
					dist[[2]int{x, y}] = dist[[2]int{cur.x, cur.y}] + 1
				}
			}
		}
	}
	return -1
}
// priorityQueue is a min-heap of nodes ordered by priority.
type priorityQueue []node

// node is a grid cell plus its A* priority (heuristic + travelled
// distance).
type node struct {
	x, y     int
	priority int
}
// Len, Less, Swap, Push and Pop implement heap.Interface; Less orders by
// ascending priority, making this a min-heap.
func (pq priorityQueue) Len() int            { return len(pq) }
func (pq priorityQueue) Less(i, j int) bool  { return pq[i].priority < pq[j].priority }
func (pq priorityQueue) Swap(i, j int)       { pq[i], pq[j] = pq[j], pq[i] }
func (pq *priorityQueue) Push(x interface{}) { *pq = append(*pq, x.(node)) }
func (pq *priorityQueue) Pop() interface{} {
	// heap.Pop has already moved the minimum to the end; detach it.
	last := (*pq)[len(*pq)-1]
	*pq = (*pq)[:len(*pq)-1]
	return last
}
// heuristicHelper estimates the remaining distance from (i, j) to the
// goal (maxPos, maxPos) as the larger of the two coordinate gaps — the
// Chebyshev distance, a lower bound under 8-way movement. Smaller is
// better.
func heuristicHelper(i, j, maxPos int) int {
	dx, dy := maxPos-i, maxPos-j
	if dx > dy {
		return dx
	}
	return dy
}
// getMax returns the larger of a and b.
func getMax(a, b int) int {
	if b >= a {
		return b
	}
	return a
}
|
package services
import (
Auth "LivingPointAPI/authorization/authorization"
"LivingPointAPI/clients"
DB "LivingPointAPI/database/database"
JWT "LivingPointAPI/utils/authentication"
"context"
"log"
"strconv"
"time"
jwtGo "github.com/dgrijalva/jwt-go"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// Authorization implements the gRPC authorization service: LogIn,
// LogOut and Register. It is stateless; all state lives in the DB
// client.
type Authorization struct{}
// LogIn authenticates a user by login/password, issues a 24h JWT,
// persists it on the user record, and returns it with the user id.
// Authentication failures are reported in the response Message; only
// infrastructure failures yield a gRPC error.
func (s *Authorization) LogIn(ctx context.Context, in *Auth.LogInRequest) (*Auth.LogInResponse, error) {
	var (
		login    string
		password string
	)
	clients.Db.Open()
	clients.Db.New()
	defer clients.Db.Close()
	jwt := JWT.New()
	login = in.GetLogin()
	password = in.GetPassword()
	if len(login) == 0 || len(password) == 0 {
		return &Auth.LogInResponse{Token: ""}, nil
	}
	// Derive the DB deadline from the caller's context so upstream
	// cancellation is honoured (the original used context.Background(),
	// discarding the incoming ctx).
	dbCtx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	q := make(map[string]string)
	q["login"] = login
	rGet, err := clients.Db.Client.Get(dbCtx, &DB.TableRequest{HasQueries: true, Queries: q, Table: "users"})
	if err != nil {
		// The original log.Fatalf-ed here, killing the whole server
		// process on a storage hiccup; log and return an error instead.
		log.Printf("[error] GetUsers: %v", err)
		return nil, grpc.Errorf(codes.Internal, "GetUsers failed")
	}
	log.Printf("GetUsers: %s", rGet.Fields)
	if len(rGet.Fields) == 0 {
		return &Auth.LogInResponse{Token: "", Message: "Undefiend user"}, nil
	}
	if rGet.Fields[0].Map["Password"] != password {
		return &Auth.LogInResponse{Token: "", Message: "Incorrect password"}, nil
	}
	token, err := jwt.NewToken(map[string]interface{}{
		"id":  rGet.Fields[0].Map["ID"],
		"exp": time.Now().Add(time.Hour * 24).Unix(),
	})
	if err != nil {
		return &Auth.LogInResponse{Token: "", Message: err.Error()}, nil
	}
	q = make(map[string]string)
	q["Token"] = token
	q["ID"] = rGet.Fields[0].Map["ID"]
	rEdit, err := clients.Db.Client.Edit(dbCtx, &DB.Request{Table: "users", Data: q})
	if err != nil {
		log.Printf("[error] EditUsers: %v", err)
		return nil, grpc.Errorf(codes.Internal, "EditUsers failed")
	}
	log.Printf("EditUsers: %s", rEdit.Message)
	id, err := strconv.Atoi(q["ID"])
	if err != nil {
		// The original ignored this error, so a non-numeric id silently
		// became 0; make the condition visible in the logs.
		log.Printf("[error] parse user id %q: %v", q["ID"], err)
	}
	return &Auth.LogInResponse{Token: token, Message: "ok", Id: int64(id)}, nil
}
// LogOut verifies the presented JWT and clears the stored token on the
// user record, invalidating the session.
func (s *Authorization) LogOut(ctx context.Context, in *Auth.Request) (*Auth.Response, error) {
	clients.Db.Open()
	clients.Db.New()
	defer clients.Db.Close()
	jwt := JWT.New()
	claims, err := jwt.VerifyToken(in.GetToken())
	if err != nil {
		return nil, grpc.Errorf(codes.PermissionDenied, "Unauthorized")
	}
	// Guard both type assertions: the original asserted unconditionally,
	// so a structurally valid token with unexpected claims would panic
	// the handler instead of being rejected.
	mapClaims, ok := claims.(jwtGo.MapClaims)
	if !ok {
		return nil, grpc.Errorf(codes.PermissionDenied, "Unauthorized")
	}
	tokenClaims := map[string]interface{}(mapClaims)
	id, ok := tokenClaims["id"].(string)
	if !ok {
		return nil, grpc.Errorf(codes.PermissionDenied, "Unauthorized")
	}
	// Honour the caller's context for the DB deadline (the original used
	// context.Background()).
	dbCtx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	data := make(map[string]string)
	data["Token"] = ""
	data["ID"] = id
	r, err := clients.Db.Client.Edit(dbCtx, &DB.Request{Table: "users", Data: data})
	if err != nil {
		// Previously log.Fatalf — it terminated the whole process on a
		// storage error; report it to the caller instead.
		log.Printf("[error] EditUsers: %v", err)
		return nil, grpc.Errorf(codes.Internal, "EditUsers failed")
	}
	log.Printf("EditUsers: %s", r.Message)
	return &Auth.Response{Message: id}, nil
}
// Register creates a new user account, issues a 24h JWT, stores it on
// the record and returns it. A duplicate login is reported in the
// response Message; infrastructure failures yield a gRPC error.
func (s *Authorization) Register(ctx context.Context, in *Auth.RegisterRequest) (*Auth.LogInResponse, error) {
	jwt := JWT.New()
	clients.Db.Open()
	clients.Db.New()
	defer clients.Db.Close()
	// One timeout context derived from the caller is enough; the
	// original created two independent Background-based contexts.
	dbCtx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	data := make(map[string]string)
	data["Login"] = in.GetLogin()
	data["Password"] = in.GetPassword()
	rAdd, err := clients.Db.Client.Add(dbCtx, &DB.Request{Table: "users", Data: data})
	if err != nil {
		// The original log.Fatalf-ed here, killing the whole server
		// process on a storage error; report it to the caller instead.
		log.Printf("[error] AddUser: %v", err)
		return nil, grpc.Errorf(codes.Internal, "AddUser failed")
	}
	if rAdd.Message == "0" {
		return &Auth.LogInResponse{Token: "", Message: "Dublicate login"}, nil
	}
	log.Printf("AddUser: %s", rAdd.Message)
	token, err := jwt.NewToken(map[string]interface{}{
		"id":  rAdd.Message,
		"exp": time.Now().Add(time.Hour * 24).Unix(),
	})
	if err != nil {
		return &Auth.LogInResponse{Token: "", Message: err.Error()}, nil
	}
	data = make(map[string]string)
	data["Token"] = token
	data["ID"] = rAdd.Message
	rEdit, err := clients.Db.Client.Edit(dbCtx, &DB.Request{Table: "users", Data: data})
	if err != nil {
		log.Printf("[error] EditUsers: %v", err)
		return nil, grpc.Errorf(codes.Internal, "EditUsers failed")
	}
	log.Printf("EditUsers: %s", rEdit.Message)
	return &Auth.LogInResponse{Token: token, Message: "ok"}, nil
}
|
package optionsgen_test
import (
"testing"
goplvalidator "github.com/go-playground/validator/v10"
testcase "github.com/kazhuravlev/options-gen/options-gen/testdata/case-10-global-override"
"github.com/kazhuravlev/options-gen/pkg/validator"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestOptionsWithOverridenValidator verifies that a globally overridden
// validator instance is used by generated options: the custom "child" rule
// accepts values below 14 and rejects everything from 14 up.
func TestOptionsWithOverridenValidator(t *testing.T) {
	override := goplvalidator.New()
	err := override.RegisterValidation("child", func(fl goplvalidator.FieldLevel) bool {
		return fl.Field().Int() < 14
	})
	require.NoError(t, err)

	// Swap in the custom validator and restore the previous one afterwards.
	prev := validator.GetValidatorFor(nil)
	validator.Set(override)
	t.Cleanup(func() { validator.Set(prev) })

	t.Run("valid options", func(t *testing.T) {
		assert.NoError(t, testcase.NewOptions(100, 13).Validate())
	})
	t.Run("invalid options", func(t *testing.T) {
		assert.Error(t, testcase.NewOptions(100, 14).Validate())
	})
}
|
package resourcetypes
import (
"encoding/json"
"os"
"github.com/shwetha-pingala/HyperledgerProject/InvoiveProject/go-api/hyperledger"
"github.com/shwetha-pingala/HyperledgerProject/InvoiveProject/go-api/models"
)
// Index queries the "resource_types" chaincode on "mainchannel" for all
// active resource types and unmarshals the JSON payload into the result.
// The MSP id comes from HYPERLEDGER_MSP_ID, defaulting to "ibm".
func Index(clients *hyperledger.Clients) (resourcetypes *models.ResourceTypes, err error) {
	resourcetypes = new(models.ResourceTypes)

	mspID := os.Getenv("HYPERLEDGER_MSP_ID")
	if mspID == "" {
		mspID = "ibm"
	}

	selector := [][]byte{
		[]byte("{\"selector\":{ \"active\": { \"$eq\":true } }}"),
	}
	res, err := clients.Query(mspID, "mainchannel", "resource_types", "index", selector)
	if err != nil {
		return resourcetypes, err
	}

	err = json.Unmarshal(res, resourcetypes)
	return resourcetypes, err
}
|
package oop1
// Number1 abstracts comparison of a value against an int.
type Number1 interface{
	// Equal reports whether the receiver equals i.
	Equal(i int) bool
	// LessThan reports whether the receiver is strictly less than i.
	LessThan(i int) bool
	// MoreThan reports whether the receiver is strictly greater than i.
	MoreThan(i int) bool
}
// Copyright 2019-2023 The sakuracloud_exporter Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package platform
import (
"context"
"time"
"github.com/sacloud/iaas-api-go"
"github.com/sacloud/iaas-api-go/types"
)
// SIMClient is the subset of SIM operations the exporter needs.
type SIMClient interface {
	// Find lists SIMs visible to the caller.
	Find(ctx context.Context) ([]*iaas.SIM, error)
	// GetNetworkOperatorConfig returns the per-carrier enablement settings of a SIM.
	GetNetworkOperatorConfig(ctx context.Context, id types.ID) ([]*iaas.SIMNetworkOperatorConfig, error)
	// MonitorTraffic returns a single link-activity monitor value up to time end.
	MonitorTraffic(ctx context.Context, id types.ID, end time.Time) (*iaas.MonitorLinkValue, error)
}
// getSIMClient wraps the raw SIM API operator in the exporter's SIMClient interface.
func getSIMClient(caller iaas.APICaller) SIMClient {
	return &simClient{client: iaas.NewSIMOp(caller)}
}
// simClient is the iaas-backed implementation of SIMClient.
type simClient struct {
	client iaas.SIMAPI // underlying SIM API operator
}
// Find lists all SIMs, including carrier status details ("Status.sim").
// Count is set high to fetch everything in one page.
func (c *simClient) Find(ctx context.Context) ([]*iaas.SIM, error) {
	res, err := c.client.Find(ctx, &iaas.FindCondition{
		Include: []string{"*", "Status.sim"},
		Count:   10000, // effectively "no paging" for realistic fleet sizes
	})
	if err != nil {
		// Idiom fix: return nil on error instead of a separately declared,
		// always-empty results slice.
		return nil, err
	}
	return res.SIMs, nil
}
// GetNetworkOperatorConfig returns the per-carrier enablement settings for
// the SIM identified by id. Thin delegation to the underlying API client.
func (c *simClient) GetNetworkOperatorConfig(ctx context.Context, id types.ID) ([]*iaas.SIMNetworkOperatorConfig, error) {
	return c.client.GetNetworkOperator(ctx, id)
}
// MonitorTraffic fetches link-activity monitor values for the SIM up to end
// and collapses them into a single representative sample via monitorLinkValue.
func (c *simClient) MonitorTraffic(ctx context.Context, id types.ID, end time.Time) (*iaas.MonitorLinkValue, error) {
	monitored, err := c.client.MonitorSIM(ctx, id, monitorCondition(end))
	if err != nil {
		return nil, err
	}
	return monitorLinkValue(monitored.Values), nil
}
|
package main
import (
"os/signal"
"context"
"syscall"
"flag"
"log"
"os"
"github.com/GoogleCloudPlatform/cloud-builders-community/windows-builder/builder/builder"
)
// Command-line configuration for the Windows builder. Either all of
// hostname/username/password are given (reuse an existing host) or a fresh
// GCE instance is created from the remaining settings.
var (
	hostname         = flag.String("hostname", "", "Hostname of remote Windows server")
	username         = flag.String("username", "", "Username on remote Windows server")
	password         = flag.String("password", os.Getenv("PASSWORD"), "Password on remote Windows server")
	command          = flag.String("command", "", "Command to run on remote Windows server")
	notCopyWorkspace = flag.Bool("not-copy-workspace", false, "If copy workspace or not")
	workspacePath    = flag.String("workspace-path", "/workspace", "The directory to copy data from")
	workspaceBucket  = flag.String("workspace-bucket", "", "The bucket to copy the directory to. Defaults to {project-id}_cloudbuild")
	image            = flag.String("image", "windows-cloud/global/images/windows-server-2019-dc-for-containers-v20191210", "Windows image to start the server from")
	network          = flag.String("network", "default", "The VPC name to use when creating the Windows server")
	subnetwork       = flag.String("subnetwork", "default", "The Subnetwork name to use when creating the Windows server")
	region           = flag.String("region", "us-central1", "The region name to use when creating the Windows server")
	zone             = flag.String("zone", "us-central1-f", "The zone name to use when creating the Windows server")
	labels           = flag.String("labels", "", "List of label KEY=VALUE pairs separated by comma to add when creating the Windows server")
	machineType      = flag.String("machineType", "", "The machine type to use when creating the Windows server")
	preemptible      = flag.Bool("preemptible", false, "If instance running the Windows server should be preemptible or not")
	diskSizeGb       = flag.Int64("diskSizeGb", 50, "The disk size to use when creating the Windows server")
	diskType         = flag.String("diskType", "", "The disk type to use when creating the Windows server")
	commandTimeout   = flag.Int("commandTimeout", 5, "The command run timeout in minutes")
	copyTimeout      = flag.Int("copyTimeout", 5, "The workspace copy timeout in minutes")
	serviceAccount   = flag.String("serviceAccount", "default", "The service account to use when creating the Windows server")
	// Fix: help text said "eparated".
	tags             = flag.String("tags", "", "List of strings separated by comma to add when creating the Windows server")
	useInternalNet   = flag.Bool("use-internal-network", false, "Communicate with Windows server over the internal network")
	createExternalIP = flag.Bool("create-external-ip", false, "Create an external IP address when using internal network")
)
// main drives one build on a remote Windows machine: obtain a server (an
// existing host when hostname/username/password are all set, otherwise a
// newly provisioned GCE instance), optionally copy the local workspace over,
// run the requested command, then delete the instance. Every failure path
// also tears the instance down before exiting with status 1.
func main() {
	log.Print("Starting Windows builder")
	flag.Parse()
	var r *builder.Remote
	var s *builder.Server
	var bs *builder.BuilderServer
	// Connect to server
	if (*hostname != "") && (*username != "") && (*password != "") {
		// All three credentials given: reuse an existing Windows host.
		r = &builder.Remote{
			Hostname: hostname,
			Username: username,
			Password: password,
		}
		log.Printf("Connecting to existing host %s", *r.Hostname)
	} else {
		// Otherwise create a fresh instance from the flag configuration.
		ctx := context.Background()
		bs = &builder.BuilderServer{
			ImageUrl: image,
			VPC: network,
			Subnet: subnetwork,
			Region: region,
			Zone: zone,
			Labels: labels,
			MachineType: machineType,
			Preemptible: preemptible,
			DiskSizeGb: diskSizeGb,
			DiskType: diskType,
			ServiceAccount: serviceAccount,
			Tags: tags,
			UseInternalNet: useInternalNet,
			CreateExternalIP: createExternalIP,
		}
		s = builder.NewServer(ctx, bs)
		r = &s.Remote
		// Make sure the (billable) instance is deleted even when the build
		// is cancelled by a signal.
		log.Print("Setting up termination signal handler")
		sigsChannel := make(chan os.Signal, 1)
		signal.Notify(sigsChannel, syscall.SIGTERM, syscall.SIGINT, syscall.SIGQUIT)
		go func() {
			sig := <-sigsChannel
			log.Printf("Signal %+v received, terminating", sig)
			deleteInstanceAndExit(s, bs, 1)
		}()
	}
	log.Print("Waiting for server to become available")
	err := r.Wait()
	if err != nil {
		log.Printf("Error connecting to server: %+v", err)
		deleteInstanceAndExit(s, bs, 1)
	}
	r.BucketName = workspaceBucket
	// Copy workspace to remote machine
	if !*notCopyWorkspace {
		log.Print("Copying workspace")
		err = r.Copy(*workspacePath, *copyTimeout)
		if err != nil {
			log.Printf("Error copying workspace: %+v", err)
			deleteInstanceAndExit(s, bs, 1)
		}
	}
	// Execute on remote
	log.Printf("Executing command %s", *command)
	err = r.Run(*command, *commandTimeout)
	if err != nil {
		log.Printf("Error executing command: %+v", err)
		deleteInstanceAndExit(s, bs, 1)
	}
	// Shut down server if started
	deleteInstanceAndExit(s, bs, 0)
}
// deleteInstanceAndExit tears down the created instance (when one exists)
// and terminates the process with exitCode. A failed teardown aborts via
// log.Fatalf, i.e. with exit status 1 regardless of exitCode.
func deleteInstanceAndExit(s *builder.Server, bs *builder.BuilderServer, exitCode int) {
	if s == nil {
		// Nothing was provisioned by us; just exit.
		os.Exit(exitCode)
	}
	if err := s.DeleteInstance(bs); err != nil {
		log.Fatalf("Failed to shut down instance: %+v", err)
	}
	log.Print("Instance shut down successfully")
	os.Exit(exitCode)
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package temptable
import (
"bytes"
"context"
"math"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/infoschema"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/store/driver/txn"
"github.com/pingcap/tidb/tablecodec"
"golang.org/x/exp/maps"
)
var (
	// tableWithIDPrefixLen is the byte length of an encoded table prefix
	// ("t" + 8-byte table id); keys shorter than this cannot carry a table id.
	tableWithIDPrefixLen = len(tablecodec.EncodeTablePrefix(1))
)
// TemporaryTableSnapshotInterceptor implements kv.SnapshotInterceptor.
// It redirects reads of temporary-table keys to session-local data while
// passing all other keys through to the underlying snapshot.
type TemporaryTableSnapshotInterceptor struct {
	is infoschema.InfoSchema // schema used to recognize temporary tables
	sessionData kv.Retriever // session-local storage; may be nil
}
// SessionSnapshotInterceptor creates a snapshot interceptor for temporary
// table data fetch. It returns nil when the schema holds no temporary
// tables, so no interception is installed in the common case.
func SessionSnapshotInterceptor(sctx sessionctx.Context, is infoschema.InfoSchema) kv.SnapshotInterceptor {
	if is.HasTemporaryTable() {
		return NewTemporaryTableSnapshotInterceptor(is, getSessionData(sctx))
	}
	return nil
}
// NewTemporaryTableSnapshotInterceptor creates a new
// TemporaryTableSnapshotInterceptor from the schema and the session-local
// retriever (sessionData may be nil when the session wrote nothing).
func NewTemporaryTableSnapshotInterceptor(is infoschema.InfoSchema, sessionData kv.Retriever) *TemporaryTableSnapshotInterceptor {
	return &TemporaryTableSnapshotInterceptor{
		is:          is,
		sessionData: sessionData,
	}
}
// OnGet intercepts Get operation for Snapshot. Keys belonging to a known
// temporary table are answered from session data; everything else is
// delegated to the underlying snapshot.
func (i *TemporaryTableSnapshotInterceptor) OnGet(ctx context.Context, snap kv.Snapshot, k kv.Key) ([]byte, error) {
	tblID, ok := getKeyAccessedTableID(k)
	if !ok {
		return snap.Get(ctx, k)
	}
	tblInfo, ok := i.temporaryTableInfoByID(tblID)
	if !ok {
		return snap.Get(ctx, k)
	}
	return getSessionKey(ctx, tblInfo, i.sessionData, k)
}
// getSessionKey reads key k of a temporary table from session-local data.
//
// Rules encoded below:
//   - normal (non-temporary) tables must never be served from session data;
//   - a nil sessionData or a global temporary table yields kv.ErrNotExist;
//   - a present-but-empty value is also reported as kv.ErrNotExist
//     (iterTable's comment suggests empty values mark deletions — confirm).
func getSessionKey(ctx context.Context, tblInfo *model.TableInfo, sessionData kv.Retriever, k kv.Key) ([]byte, error) {
	if tblInfo.TempTableType == model.TempTableNone {
		return nil, errors.New("Cannot get normal table key from session")
	}
	if sessionData == nil || tblInfo.TempTableType == model.TempTableGlobal {
		return nil, kv.ErrNotExist
	}
	val, err := sessionData.Get(ctx, k)
	// Empty value: treat the key as absent.
	if err == nil && len(val) == 0 {
		return nil, kv.ErrNotExist
	}
	return val, err
}
// OnBatchGet intercepts BatchGet operation for Snapshot. Temporary-table
// keys are answered from session data; the remaining keys are forwarded to
// the snapshot and the two result sets are merged, with session values
// winning on overlap.
func (i *TemporaryTableSnapshotInterceptor) OnBatchGet(ctx context.Context, snap kv.Snapshot, keys []kv.Key) (map[string][]byte, error) {
	keys, result, err := i.batchGetTemporaryTableKeys(ctx, keys)
	if err != nil {
		return nil, err
	}
	if len(keys) > 0 {
		snapResult, err := snap.BatchGet(ctx, keys)
		if err != nil {
			return nil, err
		}
		if len(snapResult) > 0 {
			// Overlay session values onto the snapshot results so session
			// data takes precedence.
			maps.Copy(snapResult, result)
			result = snapResult
		}
	}
	if result == nil {
		// Callers expect a non-nil (possibly empty) map.
		result = make(map[string][]byte)
	}
	return result, nil
}
// batchGetTemporaryTableKeys splits keys into those that must be served by
// the underlying snapshot (snapKeys) and values found in session data
// (result, keyed by the raw key bytes). Temporary-table keys absent from
// session data are dropped entirely; they have no value.
func (i *TemporaryTableSnapshotInterceptor) batchGetTemporaryTableKeys(ctx context.Context, keys []kv.Key) (snapKeys []kv.Key, result map[string][]byte, err error) {
	for _, k := range keys {
		tblID, ok := getKeyAccessedTableID(k)
		if !ok {
			snapKeys = append(snapKeys, k)
			continue
		}
		tblInfo, ok := i.temporaryTableInfoByID(tblID)
		if !ok {
			snapKeys = append(snapKeys, k)
			continue
		}
		// NOTE(review): this := declares a loop-scoped err shadowing the
		// named result; the named err is never assigned, so the final return
		// always carries err == nil (fatal errors return early above/below).
		val, err := getSessionKey(ctx, tblInfo, i.sessionData, k)
		if kv.ErrNotExist.Equal(err) {
			continue
		}
		if err != nil {
			return nil, nil, err
		}
		if result == nil {
			result = make(map[string][]byte)
		}
		result[string(k)] = val
	}
	return snapKeys, result, err
}
// OnIter intercepts Iter operation for Snapshot. A range entirely outside
// table data goes straight to the snapshot; a range confined to a single
// table is handled per-table; anything else falls back to a union iterator
// over session data and the snapshot.
func (i *TemporaryTableSnapshotInterceptor) OnIter(snap kv.Snapshot, k kv.Key, upperBound kv.Key) (kv.Iterator, error) {
	if notTableRange(k, upperBound) {
		return snap.Iter(k, upperBound)
	}
	if tblID, ok := getRangeAccessedTableID(k, upperBound); ok {
		return i.iterTable(tblID, snap, k, upperBound)
	}
	return createUnionIter(i.sessionData, snap, k, upperBound, false)
}
// OnIterReverse intercepts IterReverse operation for Snapshot.
// Unlike OnIter there is no single-table fast path: the lower bound is
// always nil here, so the range can never be confined to one table.
func (i *TemporaryTableSnapshotInterceptor) OnIterReverse(snap kv.Snapshot, k kv.Key, lowerBound kv.Key) (kv.Iterator, error) {
	if notTableRange(nil, k) {
		// scan range has no intersect with table data
		return snap.IterReverse(k, lowerBound)
	}
	// lower bound always be nil here, so the range cannot be located in one table
	return createUnionIter(i.sessionData, snap, lowerBound, k, true)
}
// iterTable iterates keys inside a single table's range. Non-temporary
// tables read from the snapshot; global temporary tables (or a missing
// session buffer) read as empty; local temporary tables read from session
// data via a union iterator so empty values are filtered out.
func (i *TemporaryTableSnapshotInterceptor) iterTable(tblID int64, snap kv.Snapshot, k, upperBound kv.Key) (kv.Iterator, error) {
	tblInfo, ok := i.temporaryTableInfoByID(tblID)
	if !ok {
		return snap.Iter(k, upperBound)
	}
	if tblInfo.TempTableType == model.TempTableGlobal || i.sessionData == nil {
		return &kv.EmptyIterator{}, nil
	}
	// still need union iter to filter out empty value in session data
	return createUnionIter(i.sessionData, nil, k, upperBound, false)
}
// temporaryTableInfoByID returns the meta of the table identified by tblID
// when the schema knows it AND it is a temporary table; otherwise (nil, false).
func (i *TemporaryTableSnapshotInterceptor) temporaryTableInfoByID(tblID int64) (*model.TableInfo, bool) {
	tbl, ok := i.is.TableByID(tblID)
	if !ok {
		return nil, false
	}
	meta := tbl.Meta()
	if meta.TempTableType == model.TempTableNone {
		return nil, false
	}
	return meta, true
}
// createUnionIter builds an iterator over the union of session data and the
// snapshot for [k, upperBound) (reversed when reverse is true). Session
// entries shadow snapshot entries. A nil snap contributes an empty iterator;
// a nil sessionData short-circuits to the snapshot iterator alone. Every
// already-opened iterator is closed before returning an error.
func createUnionIter(sessionData kv.Retriever, snap kv.Snapshot, k, upperBound kv.Key, reverse bool) (iter kv.Iterator, err error) {
	var snapIter kv.Iterator
	if snap == nil {
		snapIter = &kv.EmptyIterator{}
	} else {
		if reverse {
			snapIter, err = snap.IterReverse(upperBound, k)
		} else {
			snapIter, err = snap.Iter(k, upperBound)
		}
	}
	if err != nil {
		return nil, err
	}
	if sessionData == nil {
		return snapIter, nil
	}
	var sessionIter kv.Iterator
	if reverse {
		sessionIter, err = sessionData.IterReverse(upperBound, k)
	} else {
		sessionIter, err = sessionData.Iter(k, upperBound)
	}
	if err != nil {
		// Don't leak the snapshot iterator when the session one fails.
		snapIter.Close()
		return nil, err
	}
	iter, err = txn.NewUnionIter(sessionIter, snapIter, reverse)
	if err != nil {
		// NewUnionIter did not take ownership; close both.
		snapIter.Close()
		sessionIter.Close()
	}
	return iter, err
}
// getRangeAccessedTableID reports the table id when [startKey, endKey)
// touches exactly one table: the start key must decode to a table id and the
// end key must either share that table's prefix or be exactly the next
// table's prefix (the exclusive upper bound of the table range).
func getRangeAccessedTableID(startKey, endKey kv.Key) (int64, bool) {
	tblID, ok := getKeyAccessedTableID(startKey)
	if !ok {
		return 0, false
	}
	sameTable := bytes.HasPrefix(endKey, tablecodec.EncodeTablePrefix(tblID)) ||
		bytes.Equal(endKey, tablecodec.EncodeTablePrefix(tblID+1))
	if !sameTable {
		return 0, false
	}
	return tblID, true
}
// getKeyAccessedTableID extracts the table id encoded in key k. It reports
// false for keys without the table prefix, keys too short to hold an id, and
// sentinel ids (non-positive or MaxInt64).
func getKeyAccessedTableID(k kv.Key) (int64, bool) {
	if !bytes.HasPrefix(k, tablecodec.TablePrefix()) || len(k) < tableWithIDPrefixLen {
		return 0, false
	}
	tblID := tablecodec.DecodeTableID(k)
	if tblID <= 0 || tblID == math.MaxInt64 {
		return 0, false
	}
	return tblID, true
}
// notTableRange reports whether [k, upperBound) cannot contain table keys.
// Precedence note: this reads as
//   (k > tblPrefix && !HasPrefix(k, tblPrefix)) || (upperBound < tblPrefix),
// i.e. the range starts after all table keys or ends before them.
func notTableRange(k, upperBound kv.Key) bool {
	tblPrefix := tablecodec.TablePrefix()
	return bytes.Compare(k, tblPrefix) > 0 && !bytes.HasPrefix(k, tblPrefix) ||
		len(upperBound) > 0 && bytes.Compare(upperBound, tblPrefix) < 0
}
|
package server
import (
"net/http"
"go.ua-ecm.com/chaki/tasks"
"github.com/labstack/echo"
)
// getTasks responds with the sanitized task configuration as JSON.
func (s *Server) getTasks(c echo.Context) error {
	return c.JSON(http.StatusOK, s.tasksConfig.Sanitize())
}
// runTaskResponseStatement holds the rows produced by one executed statement.
type runTaskResponseStatement struct {
	Data []map[string]interface{} `json:"data,omitempty"`
}
// runTaskResponse is the JSON body returned by runTask: one entry per
// executed statement.
type runTaskResponse struct {
	Statements []runTaskResponseStatement `json:"statements,omitempty"`
}
// runTask executes the named task with the request's "data" payload and
// returns the statement results as JSON. Unknown result types are logged
// and answered with an empty response.
func (s *Server) runTask(c echo.Context) error {
	log := c.Logger()
	req := &struct {
		Data map[string]interface{} `json:"data"`
	}{}
	if err := c.Bind(req); err != nil {
		return err
	}
	name := c.Param("name")
	log.Infof("running task %s", name)
	result, err := s.tasksConfig.Run(name, req.Data)
	if err != nil {
		return err
	}
	var resp *runTaskResponse
	switch t := result.(type) {
	case *tasks.DBTaskResult:
		// Fix: use the typed switch variable directly instead of re-asserting
		// result a second time.
		resp = &runTaskResponse{
			Statements: make([]runTaskResponseStatement, len(t.Statements)),
		}
		for i, sr := range t.Statements {
			resp.Statements[i].Data = sr.Data
		}
	default:
		log.Warnf("unexpected result type %T", t)
	}
	if resp == nil {
		resp = &runTaskResponse{}
	}
	return c.JSON(http.StatusOK, resp)
}
|
package Problem0506
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
// tcs is testcase slice: each entry pairs the score input with the expected
// relative-rank strings (top three get medals, the rest their rank number).
var tcs = []struct {
	nums []int
	ans []string
}{
	{
		[]int{0, 4, 3, 2, 1},
		[]string{"5", "Gold Medal", "Silver Medal", "Bronze Medal", "4"},
	},
	{
		[]int{5, 4, 3, 2, 1},
		[]string{"Gold Medal", "Silver Medal", "Bronze Medal", "4", "5"},
	},
	// more test cases may be added here
}
// Test_findRelativeRanks checks findRelativeRanks against every case in tcs.
func Test_findRelativeRanks(t *testing.T) {
	ast := assert.New(t)
	for _, tc := range tcs {
		fmt.Printf("~~%v~~\n", tc)
		got := findRelativeRanks(tc.nums)
		ast.Equal(tc.ans, got, "输入:%v", tc)
	}
}
// Benchmark_findRelativeRanks measures findRelativeRanks over all testcases.
func Benchmark_findRelativeRanks(b *testing.B) {
	for n := 0; n < b.N; n++ {
		for _, tc := range tcs {
			_ = findRelativeRanks(tc.nums)
		}
	}
}
|
package main
/*
Sorting slices using pkg sort
*/
import (
"fmt"
"sort"
)
// main demonstrates in-place slice sorting with the sort package: print both
// slices, sort them, print them again.
func main() {
	names := []string{"John", "Paul", "George", "Ringo"}
	numbers := []int{3, 7, 2, 4, 1, 5, 8, 6}

	fmt.Println("_________UNSORTED_________")
	fmt.Println(names)
	fmt.Println(numbers)

	fmt.Println("__________SORTED__________")
	sort.Strings(names)
	sort.Ints(numbers)
	fmt.Println(names)
	fmt.Println(numbers)
}
package main
import "testing"
// TestBinarySearch exercises BinarySearch over empty, single-element and
// multi-element slices, with targets below, inside, and above the range.
func TestBinarySearch(t *testing.T) {
	cases := []struct {
		array    []int
		target   int
		expected int
	}{
		{[]int{}, 1, -1},
		{[]int{-1}, -1, 0},
		{[]int{-1, 1}, 1, 1},
		{[]int{-1, 1, 2, 3}, 3, 3},
		{[]int{-1, 1, 2, 3}, 4, -1},
		{[]int{-1, 1, 2, 3}, -4, -1},
		{[]int{-1, 1, 2, 3, 7}, -4, -1},
		{[]int{-1, 1, 2, 3, 7}, 9, -1},
	}
	for _, c := range cases {
		assert(c.array, c.target, c.expected, t)
	}
}
// assert fails the test when BinarySearch(array, target) != expected.
func assert(array []int, target int, expected int, t *testing.T) {
	if x := BinarySearch(&array, target); x != expected {
		// Fix: the original format string had three verbs but only two
		// arguments, so the actual result was never printed.
		t.Errorf("BinarySearch(array, %v) = %v not %v", target, x, expected)
	}
}
|
package interactor
import (
"fmt"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/sqlite"
"time"
)
// ReturnRes is a struct with a user-facing message string and an error.
type ReturnRes struct {
	Msg string // message shown to the requesting user
	Err error  // underlying error, nil on success
	// Code uint
}
// Reservation is the gorm schema for a single room reservation slot.
type Reservation struct {
	gorm.Model
	StartTime time.Time // start of the reserved time slot
	UserSlackID string  // Slack id of the reserving user
	RoomID string       // room identifier (currently always "1" — see SetProcessing)
}
// User is the gorm schema linking a Slack user to their reservations
// (Reservation.UserSlackID references User.SlackID).
type User struct {
	gorm.Model
	SlackID string
	Reservations []Reservation `gorm:"foreignkey:UserSlackID;association_foreignkey:SlackID"`
}
// helpString is the usage text returned for malformed requests.
var helpString string = `Type in <operation> <arg1> <arg2> ...
	- get
	- set [time]
		- [time]:
			time in the format yyyy-mm-ddThh:mm:ss-08:00
	- cancel [reservationID]`
// returnHelp is the canned "show usage" response.
var returnHelp ReturnRes = ReturnRes{Msg: helpString, Err: nil}
// #TODO: investigate other db options
// DB is the gorm database handle shared by every function in this package.
var DB *gorm.DB
// SetDB is a function that sets value to DB variable and auto-migrates the
// Reservation and User schemas.
func SetDB(db *gorm.DB) {
	DB = db
	DB.AutoMigrate(&Reservation{}, &User{})
}
// GetProcessing is function that process get requests: it returns every
// reservation stored in the database (nil slice when there are none).
func GetProcessing() []Reservation {
	var reservations []Reservation
	DB.Find(&reservations)
	// response := fmt.Sprintf("%v", reservations)
	return reservations
}
// SetProcessing is function that process set requests: it reserves the t1
// slot in room "1" for userID, unless a reservation already exists there.
func SetProcessing(t1 time.Time, userID string) ReturnRes {
	var reservation Reservation
	var resMsg string
	newRes := Reservation{StartTime: t1, UserSlackID: userID, RoomID: "1"}
	// NOTE(review): gorm ignores zero-value fields in a struct condition, so
	// UserSlackID: "" does not constrain the lookup — this appears to match
	// any user's reservation at t1 in room "1"; confirm that is intended.
	DB.Where(Reservation{StartTime: t1, UserSlackID: "", RoomID: "1"}).First(&reservation)
	fmt.Println("look up reservations", reservation)
	if reservation == (Reservation{}) {
		// Nothing found: create the reservation and report success.
		fmt.Println("no records")
		DB.Create(&newRes)
		fmt.Printf("%+v\n", newRes)
		// db.Where("StartTime = ?", t1).First(&reservation)
		// fmt.Println(reservation)
		resMsg = "Success. Reserved " + t1.Format(time.RFC3339)
		return ReturnRes{Msg: resMsg, Err: nil}
	}
	resMsg = "Failure. Time slot taken already."
	return ReturnRes{Msg: resMsg, Err: nil}
}
// CancelProcessing deletes the reservation with the given id, but only when
// it belongs to the requesting user; otherwise it reports a mismatch.
func CancelProcessing(reservationID string, userID string) ReturnRes {
	var toDelete Reservation
	DB.First(&toDelete, reservationID)
	if toDelete.UserSlackID != userID {
		// DB.Where(Reservation{StartTime: time.Time{}, UserSlackID: userID , RoomID: "1"}).Delete(Reservation{})
		return ReturnRes{Msg: "Slack ID mismatch. Unable to cancel reservation.", Err: nil}
	}
	DB.Delete(&toDelete)
	return ReturnRes{Msg: "Reservations canceled", Err: nil}
}
// var db *gorm.DB = sqldb.SetupDB()
// GetParsing handles a "get" request: args and userID are ignored and the
// formatted list of all reservations is returned.
func GetParsing(args []string, userID string) ReturnRes {
	return ReturnRes{Msg: fmt.Sprintf("%v", GetProcessing()), Err: nil}
}
// SetParsing validates a "set" request and creates the reservation.
// It expects exactly one RFC3339 timestamp argument; the start time is
// truncated to a 30-minute slot before reserving.
func SetParsing(args []string, userID string) ReturnRes {
	if len(args) != 1 {
		return returnHelp
	}
	parsed, err := time.Parse(time.RFC3339, args[0])
	if err != nil {
		return ReturnRes{Msg: helpString, Err: err}
	}
	return SetProcessing(parsed.Truncate(30*time.Minute), userID)
}
// CancelParsing validates a "cancel" request (exactly one reservation-id
// argument) and forwards it to CancelProcessing.
func CancelParsing(args []string, userID string) ReturnRes {
	if len(args) != 1 {
		return returnHelp
	}
	return CancelProcessing(args[0], userID)
}
|
package controllers
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/GoPex/caretaker/bindings"
"github.com/GoPex/caretaker/helpers"
)
// GetPing is the handler for the GET /info/ping route.
// It responds with a "pong" JSON message, confirming the server is alive.
func GetPing(c *gin.Context) {
	c.JSON(http.StatusOK, bindings.PingResponse{Pong: "OK"})
}
// GetStatus is a handler for the GET /info/status route.
// It responds with the status of the server as a JSON message.
// NOTE(review): the status is hard-coded to "OK" here — the docker host
// status mentioned previously is not actually queried.
func GetStatus(c *gin.Context) {
	c.JSON(http.StatusOK,
		bindings.StatusResponse{Status: "OK"},
	)
}
// GetVersion is a handler for the GET /info/version route. It responds with
// a JSON message carrying the application version (helpers.AppVersion).
func GetVersion(c *gin.Context) {
	c.JSON(http.StatusOK,
		bindings.VersionResponse{Version: helpers.AppVersion},
	)
}
|
// This file is subject to a 1-clause BSD license.
// Its contents can be found in the enclosed LICENSE file.
package evdev
import "unsafe"
// Absolute events describe absolute changes in a property.
// For example, a touchpad may emit coordinates for a touch location.
// A few codes have special meanings:
//
// AbsDistance is used to describe the distance of a tool
// from an interaction surface. This event should only be emitted
// while the tool is hovering, meaning in close proximity to the
// device and while the value of the BtnTouch code is 0.
// If the input device may be used freely in three dimensions,
// consider AbsZ instead.
//
// AbsMt<name> is used to describe multitouch input events.
// Absolute-axis event codes. Values mirror the Linux input event codes for
// EvAbsolute events (gaps in the numbering are intentional and match the
// kernel's ABS_* values).
const (
	AbsX = 0x00
	AbsY = 0x01
	AbsZ = 0x02
	AbsRX = 0x03
	AbsRY = 0x04
	AbsRZ = 0x05
	AbsThrottle = 0x06
	AbsRudder = 0x07
	AbsWheel = 0x08
	AbsGas = 0x09
	AbsBrake = 0x0a
	AbsHat0X = 0x10
	AbsHat0Y = 0x11
	AbsHat1X = 0x12
	AbsHat1Y = 0x13
	AbsHat2X = 0x14
	AbsHat2Y = 0x15
	AbsHat3X = 0x16
	AbsHat3Y = 0x17
	AbsPressure = 0x18
	AbsDistance = 0x19
	AbsTiltX = 0x1a
	AbsTiltY = 0x1b
	AbsToolWidth = 0x1c
	AbsVolume = 0x20
	AbsMisc = 0x28
	AbsMTSlot = 0x2f // MT slot being modified
	AbsMTTouchMajor = 0x30 // Major axis of touching ellipse
	AbsMTTouchMinor = 0x31 // Minor axis (omit if circular)
	AbsMTWidthMajor = 0x32 // Major axis of approaching ellipse
	AbsMTWidthMinor = 0x33 // Minor axis (omit if circular)
	AbsMTOrientation = 0x34 // Ellipse orientation
	AbsMTPositionX = 0x35 // Center X touch position
	AbsMTPositionY = 0x36 // Center Y touch position
	AbsMTToolTYPE = 0x37 // Type of touching device. NOTE(review): name breaks the AbsMTXxx convention (should be AbsMTToolType); kept for API compatibility.
	AbsMTBlobId = 0x38 // Group a set of packets as a blob
	AbsMTTrackingId = 0x39 // Unique ID of initiated contact
	AbsMTPressure = 0x3a // Pressure on contact area
	AbsMTDistance = 0x3b // Contact hover distance
	AbsMTToolX = 0x3c // Center X tool position
	AbsMTToolY = 0x3d // Center Y tool position
	AbsMax = 0x3f
	AbsCount = AbsMax + 1
)
// AbsInfo provides information for a specific absolute axis.
// This applies to devices which support EvAbsolute events.
// The layout mirrors the kernel's struct input_absinfo (it is filled via
// ioctl in Device.AbsoluteInfo), so field meanings follow the Linux input
// API — marked below where the original comments looked off.
type AbsInfo struct {
	Value int32 // Current value of the axis,
	Minimum int32 // Lower limit of axis.
	Maximum int32 // Upper limit of axis.
	Fuzz int32 // Noise filter: deviations smaller than this are discarded (per Linux input_absinfo — confirm).
	Flat int32 // Size of the 'flat' (dead-zone) section.
	Resolution int32 // Axis resolution; per Linux input_absinfo this is units/mm, not an error size — verify.
}
// AbsoluteAxes returns a bitfield indicating which absolute axes are
// supported by the device.
//
// This is only applicable to devices with EvAbsolute event support.
// The ioctl's error result is discarded; on failure the bitset stays zeroed.
func (d *Device) AbsoluteAxes() Bitset {
	bs := NewBitset(AbsMax)
	buf := bs.Bytes()
	ioctl(d.fd.Fd(), _EVIOCGBIT(EvAbsolute, len(buf)), unsafe.Pointer(&buf[0]))
	return bs
}
// AbsoluteInfo provides state information for one absolute axis.
// If you want the global state for a device, you have to call
// the function for each axis present on the device.
// See Device.AbsoluteAxes() for details on how find them.
//
// This is only applicable to devices with EvAbsolute event support.
// The ioctl's error result is discarded; on failure the zero AbsInfo is returned.
func (d *Device) AbsoluteInfo(axis int) AbsInfo {
	var abs AbsInfo
	ioctl(d.fd.Fd(), _EVIOCGABS(axis), unsafe.Pointer(&abs))
	return abs
}
|
package msgHandler
import (
"bytes"
"encoding/hex"
"fmt"
"github.com/op/go-logging"
"sync"
)
// CheckFork tracks, per block-group id and block height, which peers
// reported which previous-block hashes, to detect chain forks.
// The embedded RWMutex guards the fork map.
type CheckFork struct {
	sync.RWMutex
	fork map[uint64]map[uint64][]*PeerHash // Bgid -> height -> reported hashes
	Logger *logging.Logger
}
// PeerHash records one peer's reported previous-block hash.
type PeerHash struct {
	peerid uint64 // reporting peer id
	preHash []byte // previous-block hash as reported by the peer
}
// NewCheckFork creates a CheckFork with an empty fork table.
// NOTE(review): Logger is left nil here; methods such as addFork call
// cfh.Logger.Infof, so the caller must assign Logger before use — confirm.
func NewCheckFork() *CheckFork {
	return &CheckFork{
		fork: make(map[uint64]map[uint64][]*PeerHash),
	}
}
// isExists reports whether fork bookkeeping exists for the block group Bgid.
func (cfh *CheckFork) isExists(Bgid uint64) bool {
	cfh.RWMutex.RLock()
	defer cfh.RWMutex.RUnlock()
	_, found := cfh.fork[Bgid]
	return found
}
// initFork allocates an empty height-to-peer-hash table for block group Bgid.
func (cfh *CheckFork) initFork(Bgid uint64) {
	cfh.RWMutex.Lock()
	defer cfh.RWMutex.Unlock()
	cfh.fork[Bgid] = make(map[uint64][]*PeerHash)
}
// isDuplicate reports whether peerid already submitted a hash for
// (Bgid, height). A nil entry in the stored list is reported as an error.
func (cfh *CheckFork) isDuplicate(Bgid uint64, height uint64, peerid uint64) (bool, error) {
	cfh.RWMutex.RLock()
	defer cfh.RWMutex.RUnlock()
	entries, ok := cfh.fork[Bgid][height]
	if !ok {
		return false, nil
	}
	for _, entry := range entries {
		if entry == nil {
			return false, fmt.Errorf("Bgid %d, height %d store nil", Bgid, height)
		}
		if entry.peerid == peerid {
			return true, nil
		}
	}
	return false, nil
}
// addFork appends peerHash to the list recorded for (Bgid, height).
// A nil peerHash is rejected with an error.
func (cfh *CheckFork) addFork(Bgid uint64, height uint64, peerHash *PeerHash) error {
	if peerHash == nil {
		return fmt.Errorf("peerHash is nil ")
	}
	cfh.Lock()
	defer cfh.Unlock()
	fork := cfh.fork[Bgid]
	cfh.Logger.Infof("addFork before Bgid %d, height %d len %d ", Bgid, height, len(fork[height]))
	fork[height] = append(fork[height], peerHash)
	cfh.fork[Bgid] = fork
	return nil
}
// clean removes fork bookkeeping for every block-group id <= Bgid.
// Deleting while ranging over a map is safe in Go.
func (cfh *CheckFork) clean(Bgid uint64) {
	cfh.RWMutex.Lock()
	defer cfh.RWMutex.Unlock()
	// Idiom fix: `for bgid, _ := range` is flagged by vet/gofmt; the value
	// is unused.
	for bgid := range cfh.fork {
		if bgid <= Bgid {
			delete(cfh.fork, bgid)
		}
	}
}
// isCountToTarget checks whether at least targetCount peers reported preHash
// for (Bgid, height). On success it returns (true, peer ids that agreed);
// below the threshold it returns (false, nil). Nil stored entries are
// skipped with a warning.
func (cfh *CheckFork) isCountToTarget(Bgid uint64, height uint64, preHash []byte, targetCount int) (bool, []uint64) {
	cfh.RWMutex.RLock()
	defer cfh.RWMutex.RUnlock()
	fork := cfh.fork[Bgid]
	//if len(fork[height]) < targetCount {
	//	return false
	//}
	//count := 0
	peers := make([]uint64, 0)
	cfh.Logger.Infof("Bgid %d, height %d preHash len %d ",
		Bgid, height, len(fork[height]))
	for index, ph := range fork[height] {
		if ph == nil {
			cfh.Logger.Warningf("Bgid %d, height %d store nil", Bgid, height)
			continue
		}
		cfh.Logger.Infof("index %d Bgid %d, height %d hash store %s new %s", index, Bgid, height,
			hex.EncodeToString(ph.preHash), hex.EncodeToString(preHash))
		// Collect every peer whose reported hash matches the candidate.
		if bytes.Equal(preHash, ph.preHash) {
			//count++
			peers = append(peers, ph.peerid)
		}
	}
	if len(peers) < targetCount {
		cfh.Logger.Infof("Bgid %d, height %d preHash %s count %d target %d",
			Bgid, height, hex.EncodeToString(preHash), len(peers), targetCount)
		return false, nil
	}
	return true, peers
}
|
// +build spi,!i2c
package main
import (
// Modules
_ "github.com/djthorpe/gopi-hw/sys/spi"
)
const (
	// MODULE_NAME selects the SPI transport variant of the BME280 sensor
	// module (this file builds only with the "spi" tag and without "i2c").
	MODULE_NAME = "sensors/bme280/spi"
)
|
package main
import (
"fmt"
"math/rand"
"net/http"
"time"
)
// result is an omikuji (fortune) outcome index in [0, 5].
type result int

// value maps the numeric result to its Japanese fortune label.
// Indices outside [0, 5] yield the empty string, matching the original
// switch's default behavior.
func (r result) value() string {
	labels := []string{"大吉", "中吉", "小吉", "吉", "凶", "大凶"}
	if r < 0 || int(r) >= len(labels) {
		return ""
	}
	return labels[r]
}

// OmikujiServer is server
type OmikujiServer struct{}

// ServeHTTP prints the "msg" form value to stdout and writes a random
// fortune label to the response.
func (o OmikujiServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	fmt.Println(r.FormValue("msg"))
	// Bug fix: the header name was misspelled "Contents-Type"; clients only
	// recognize "Content-Type".
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	fmt.Fprint(w, result(rand.Intn(6)).value())
}
// main seeds the RNG and serves the omikuji endpoint on :8080.
// "/" deliberately answers 500; "/omikuji" returns a random fortune.
func main() {
	rand.Seed(time.Now().UnixNano())
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		http.Error(w, "internal server error", http.StatusInternalServerError)
	})
	http.Handle("/omikuji", &OmikujiServer{})
	// Error from ListenAndServe is ignored; the process just exits.
	http.ListenAndServe(":8080", nil)
}
|
package agent
import (
"net/http"
"github.com/Sirupsen/logrus"
"github.com/bryanl/dolb/service"
"github.com/gorilla/mux"
)
// UpstreamDeleteHandler removes the upstream named in the URL from the named
// service. It answers 204 on success and 404 (with the error as body) when
// the deletion fails.
func UpstreamDeleteHandler(c interface{}, r *http.Request) service.Response {
	config := c.(*Config)
	vars := mux.Vars(r)
	svcName, uName := vars["service"], vars["upstream"]

	sm := config.ServiceManagerFactory(config)
	if err := sm.DeleteUpstream(svcName, uName); err != nil {
		config.GetLogger().WithError(err).WithFields(logrus.Fields{
			"service-name": svcName,
			"upstream-id":  uName,
		}).Error("could not delete upstream")
		return service.Response{Body: err, Status: 404}
	}
	return service.Response{Status: 204}
}
|
package public
import (
"context"
"tpay_backend/merchantapi/internal/common"
"tpay_backend/model"
"tpay_backend/utils"
"tpay_backend/merchantapi/internal/svc"
"tpay_backend/merchantapi/internal/types"
"github.com/tal-tech/go-zero/core/logx"
)
// RechargeLogic carries per-request state for merchant recharge handling.
type RechargeLogic struct {
	logx.Logger
	ctx context.Context
	svcCtx *svc.ServiceContext
	userId int64 // id of the merchant issuing the request
}
// NewRechargeLogic builds a RechargeLogic bound to the request context,
// service context and the authenticated merchant id.
func NewRechargeLogic(ctx context.Context, svcCtx *svc.ServiceContext, userId int64) RechargeLogic {
	return RechargeLogic{
		Logger: logx.WithContext(ctx),
		ctx:    ctx,
		svcCtx: svcCtx,
		userId: userId,
	}
}
// Recharge creates a pending recharge order for the merchant.
//
// Validation: the amount must be positive; the merchant must exist; the
// platform receiving bank card must exist and be enabled; the card's daily
// receiving limit must not be exceeded; and the card currency must match the
// merchant currency. On success a MerchantRechargeOrder row is inserted with
// pending status.
func (l *RechargeLogic) Recharge(req types.RechargeReq) (*types.RechargeReply, error) {
	if req.Amount <= 0 {
		l.Errorf("充值金额不允许为0")
		return nil, common.NewCodeError(common.AmountFail)
	}
	// Look up the merchant issuing the request.
	merchant, err := model.NewMerchantModel(l.svcCtx.DbEngine).FindOneById(l.userId)
	if err != nil {
		l.Errorf("查询商户信息失败,userId=%v,err=%v", l.userId, err)
		return nil, common.NewCodeError(common.ApplyFail)
	}
	// Confirm the platform receiving bank card exists and is usable.
	platformBankCard, err := model.NewPlatformBankCardModel(l.svcCtx.DbEngine).FindOneById(req.BankCardId)
	if err != nil {
		l.Errorf("查询平台收款卡信息失败,BankCardId=%v,err=%v", req.BankCardId, err)
		return nil, common.NewCodeError(common.HankCardNotExist)
	}
	if platformBankCard == nil {
		l.Errorf("查询平台收款卡信息为空, BankCardId=%v", req.BankCardId)
		return nil, common.NewCodeError(common.HankCardNotExist)
	}
	if platformBankCard.Status != model.PlatformBankCardEnable {
		l.Errorf("平台收款卡状态为禁用, BankCardId=%v", req.BankCardId)
		return nil, common.NewCodeError(common.HankCardNotExist)
	}
	// Reject when amount + today's received total exceeds the card's daily limit.
	if platformBankCard.MaxAmount < platformBankCard.TodayReceived+req.Amount {
		// Bug fix: was l.Error with printf-style arguments, which never
		// expands the %v placeholders; Errorf formats them.
		l.Errorf("平台收款卡已超出当天的收款额度, MaxAmount[%v], TodayReceived[%v], Amount[%v]", platformBankCard.MaxAmount, platformBankCard.TodayReceived, req.Amount)
		return nil, common.NewCodeError(common.BankCardMaxAmountLacking)
	}
	if merchant.Currency != platformBankCard.Currency {
		l.Errorf("平台收款卡币种与商户币种不一致, merchant.Currency=%v, platformBankCard.Currency=%v ", merchant.Currency, platformBankCard.Currency)
		return nil, common.NewCodeError(common.HankCardNotExist)
	}
	d := &model.MerchantRechargeOrder{
		OrderNo:            utils.GetDailyId(),                    // order number
		OrderAmount:        req.Amount,                            // order amount
		MerchantId:         l.userId,                              // merchant id
		OrderStatus:        model.RechargeOrderStatusPending,      // order status
		RechargeRemark:     req.Remark,                            // recharge remark
		PlatformBankCardId: req.BankCardId,                        // platform receiving card id
		BankName:           platformBankCard.BankName,             // receiving bank
		CardNumber:         platformBankCard.CardNumber,           // receiving card number
		PayeeName:          platformBankCard.AccountName,          // payee name
		BranchName:         platformBankCard.BranchName,           // branch name
		Currency:           merchant.Currency,                     // currency
	}
	if err := model.NewMerchantRechargeOrderModel(l.svcCtx.DbEngine).Insert(d); err != nil {
		l.Errorf("插入充值订单失败,d=[%+v] err=[%v]", d, err)
		return nil, common.NewCodeError(common.RechargeFailed)
	}
	return &types.RechargeReply{}, nil
}
|
package models
import (
"encoding/json"
"newproject/database"
"github.com/jinzhu/gorm"
)
// Product model
//
// Product is a catalog item persisted through gorm. The embedded
// gorm.Model supplies ID, CreatedAt, UpdatedAt and DeletedAt
// (soft-delete) columns.
type Product struct {
	gorm.Model
	SKU string `json:"sku" gorm:"type:varchar(20);unique_index"` // stock-keeping unit; unique index at the DB level
	CategoryID uint `json:"categoryID"` // presumably references Category.ID — no gorm association is declared here, verify against schema
	Name string `json:"name"`
	Description string `json:"description"`
	Price float32 `json:"price"` // list price
	DiscountPrice float32 `json:"discountPrice"` // discounted price; relation to Price is not enforced here
	Inventory int `json:"inventory"` // units on hand
	InInventory bool `json:"inInventory"` // availability flag; kept in sync with Inventory by callers — TODO confirm
}
// Category defines product categories
//
// Categories form a tree via the Parent field. The embedded gorm.Model
// supplies ID, CreatedAt, UpdatedAt and DeletedAt (soft-delete) columns.
type Category struct {
	gorm.Model
	Name string `json:"name"`
	Parent uint `json:"parent"` // ID of the parent category; presumably 0 for a top-level category — TODO confirm
}
// Delete removes the Category row whose primary key is id. Because
// Category embeds gorm.Model (DeletedAt), gorm performs a soft delete.
func (c Category) Delete(id uint) {
	c.ID = id
	database.DB.Delete(&c)
}
// All loads and returns every Product row. The result is always a
// non-nil []Product (so it JSON-encodes as [] when empty, not null).
func (p Product) All() interface{} {
	all := make([]Product, 0)
	database.DB.Find(&all)
	return all
}
// Create unmarshals data (a JSON-encoded Product) and inserts it.
// It returns the inserted Product, or an error if decoding or the
// INSERT fails.
//
// Fix: the original ignored the error from db.Create, so a failed
// INSERT (e.g. duplicate SKU on the unique index) was reported to the
// caller as success with a never-persisted record.
func (p Product) Create(data []byte) (interface{}, error) {
	if err := json.Unmarshal(data, &p); err != nil {
		return nil, err
	}
	// Surface DB failures instead of silently returning a non-persisted row.
	if err := database.DB.Create(&p).Error; err != nil {
		return nil, err
	}
	return p, nil
}
// Find loads the Product with the given primary key. If no row matches,
// the zero-value Product is returned — gorm's not-found error is not
// surfaced by this interface.
func (p Product) Find(id int) interface{} {
	database.DB.First(&p, id)
	return p
}
// Update applies the JSON-encoded fields in data to the Product with
// the given primary key and returns the updated record.
//
// Fixes: the original dropped all DB errors — updating a missing id
// silently returned a zero-value Product as if it had succeeded. Now a
// not-found (or any query failure) is returned to the caller.
func (p Product) Update(id int, data []byte) (interface{}, error) {
	update := Product{}
	if err := json.Unmarshal(data, &update); err != nil {
		return nil, err
	}
	db := database.DB
	if err := db.First(&p, id).Error; err != nil {
		// Row not found or query failed — report it instead of
		// pretending an empty Product was updated.
		return nil, err
	}
	if err := db.Model(&p).Updates(update).Error; err != nil {
		return nil, err
	}
	return p, nil
}
// Delete removes the Product row whose primary key is id. Because
// Product embeds gorm.Model (DeletedAt), gorm performs a soft delete.
func (p Product) Delete(id uint) {
	p.ID = id
	database.DB.Delete(&p)
}
// All loads and returns every Category row. The result is always a
// non-nil []Category (so it JSON-encodes as [] when empty, not null).
func (c Category) All() interface{} {
	all := make([]Category, 0)
	database.DB.Find(&all)
	return all
}
// Create unmarshals data (a JSON-encoded Category) and inserts it.
// It returns the inserted Category, or an error if decoding or the
// INSERT fails.
//
// Fix: the original ignored the error from db.Create, reporting a
// failed INSERT to the caller as success.
func (c Category) Create(data []byte) (interface{}, error) {
	if err := json.Unmarshal(data, &c); err != nil {
		return nil, err
	}
	// Surface DB failures instead of silently returning a non-persisted row.
	if err := database.DB.Create(&c).Error; err != nil {
		return nil, err
	}
	return c, nil
}
// Find loads the Category with the given primary key. If no row matches,
// the zero-value Category is returned — gorm's not-found error is not
// surfaced by this interface.
func (c Category) Find(id int) interface{} {
	database.DB.First(&c, id)
	return c
}
// Update applies the JSON-encoded fields in data to the Category with
// the given primary key and returns the updated record.
//
// Fixes: the original dropped all DB errors — updating a missing id
// silently returned a zero-value Category as if it had succeeded. Now a
// not-found (or any query failure) is returned to the caller.
func (c Category) Update(id int, data []byte) (interface{}, error) {
	update := Category{}
	if err := json.Unmarshal(data, &update); err != nil {
		return nil, err
	}
	db := database.DB
	if err := db.First(&c, id).Error; err != nil {
		// Row not found or query failed — report it instead of
		// pretending an empty Category was updated.
		return nil, err
	}
	if err := db.Model(&c).Updates(update).Error; err != nil {
		return nil, err
	}
	return c, nil
}
|
package skpsilk
// silk/src/SKP_Silk_scale_vector.c
// scale_vector32_Q26_lshift_18 multiplies the first dataSize elements of
// data1 by the fixed-point constant gain_Q26, in place. The product is
// formed in 64 bits and shifted right by 8, so a Q0 input with a Q26
// gain yields a Q18 result.
func scale_vector32_Q26_lshift_18(data1 []int32, gain_Q26 int32, dataSize int) {
	for idx := range data1[:dataSize] {
		product := int64(data1[idx]) * int64(gain_Q26)
		data1[idx] = int32(product >> 8) // OUTPUT: Q18
	}
}
|
package point2
import "testing"
func TestPoint2_String(t *testing.T) {
p := Point2{}
if p.String() != "(0.000000,0.000000)" {
t.Log(p)
t.Fail()
}
} |
package main
import (
"fmt"
"log"
"github.com/shanghuiyang/rpi-devices/dev"
"github.com/stianeikeland/go-rpio"
)
const (
p18 = 18
)
// main drives an SG90 servo on GPIO pin 18: it repeatedly prompts for an
// angle on stdin and rolls the servo to it. The loop runs until the
// process is killed.
//
// Fixes: removed the unreachable log.Printf("quit") after the infinite
// loop (flagged by go vet), dropped the redundant return after
// log.Fatalf (Fatalf already exits), and drained the input line after a
// failed Scanf so one bad token no longer spins the loop forever.
func main() {
	if err := rpio.Open(); err != nil {
		log.Fatalf("failed to open rpio, error: %v", err)
	}
	defer rpio.Close()

	sg := dev.NewSG90(p18)
	var angle int
	for {
		fmt.Printf(">>angle: ")
		if n, err := fmt.Scanf("%d", &angle); n != 1 || err != nil {
			log.Printf("invalid angle, error: %v", err)
			// Consume the rest of the line; otherwise the unread
			// token makes the next Scanf fail immediately too.
			fmt.Scanln()
			continue
		}
		sg.Roll(angle)
	}
}
|
package iirepo_stage
import (
"os"
"path/filepath"
)
// Walk will locate the ii repo stage for ‘path’ and call ‘fn’ for each staged file in deterministic order
// based on lexical ordering (filepath.Walk visits entries lexically).
func Walk(path string, fn func(relstagedpath string) error) error {
	stagepath, err := Locate(path)
	if err != nil {
		return err
	}
	w := walker{BasePath: stagepath, Func: fn}
	return filepath.Walk(stagepath, w.WalkFunc)
}
// walker bundles the stage root with the callback to invoke for each
// staged file.
type walker struct {
	BasePath string
	Func     func(relstagedpath string) error
}

// WalkFunc adapts walker to filepath.WalkFunc: it rewrites each visited
// path relative to BasePath, skips the root itself ("."), and forwards
// every other entry to Func.
func (w walker) WalkFunc(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	}

	rel, relErr := filepath.Rel(w.BasePath, path)
	if relErr != nil {
		return relErr
	}

	// The stage root is not itself a staged file.
	if rel == "." {
		return nil
	}

	return w.Func(rel)
}
|
package hat_test
import (
"bytes"
"io"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/require"
"go.coder.com/hat"
"go.coder.com/hat/asshat"
)
// TestT exercises the hat test-helper package against a local echo
// server (the handler copies the request body straight back to the
// response).
func TestT(tt *testing.T) {
	// Echo server: response body == request body.
	s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
		io.Copy(rw, req.Body)
	}))
	defer s.Close()
	t := hat.New(tt, s.URL)
	t.Run("Run Creates deep copy", func(dt *hat.T) {
		// Mutating the subtest's URL must not leak into the parent —
		// presumably hat.T.Run deep-copies the URL; verify against hat docs.
		dt.URL.Path = "testing"
		require.NotEqual(t, dt.URL, t.URL)
	})
	t.Run("RunURL Creates deep copy, and appends to URL", func(t *hat.T) {
		// RunPath should append "/deeper" to the server URL for the
		// subtest while leaving the parent's URL untouched.
		t.RunPath("/deeper", func(dt *hat.T) {
			require.Equal(t, s.URL+"/deeper", dt.URL.String())
			require.NotEqual(t, dt.URL, t.URL)
		})
	})
	t.Run("PersistentOpt", func(t *hat.T) {
		// A fresh hat.T with a persistent Body opt: every request it
		// sends should carry exp, and the echo server sends it back.
		pt := hat.New(tt, s.URL)
		exp := []byte("Hello World!")
		pt.AddPersistentOpts(hat.Body(bytes.NewBuffer(exp)))
		// Opt is attached by persistent opts
		pt.Get().Send(t).Assert(t, asshat.BodyEqual(exp))
	})
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.