text stringlengths 11 4.05M |
|---|
package pulsar
import (
"fmt"
"time"
"github.com/batchcorp/plumber-schemas/build/go/protos/opts"
"github.com/pkg/errors"
"github.com/batchcorp/plumber-schemas/build/go/protos/records"
"github.com/batchcorp/plumber/printer"
)
// DisplayMessage will parse a Read record and print (pretty) output to STDOUT
func (p *Pulsar) DisplayMessage(cliOpts *opts.CLIOptions, msg *records.ReadRecord) error {
	if err := validateReadRecord(msg); err != nil {
		return errors.Wrap(err, "unable to validate read record")
	}

	record := msg.GetPulsar()

	// Fixed metadata rows first, followed by any per-message properties.
	rows := make([][]string, 0, 8+len(record.Properties))
	rows = append(rows,
		[]string{"Message ID", record.Id},
		[]string{"Key", record.Key},
		[]string{"Topic", record.Topic},
		[]string{"Redelivery Count", fmt.Sprintf("%d", record.RedeliveryCount)},
		[]string{"Event Time", record.EventTime},
		[]string{"Is Replicated", fmt.Sprintf("%t", record.IsReplicated)},
		[]string{"Ordering Key", record.OrderingKey},
		[]string{"Producer Name", record.ProducerName},
	)
	for key, value := range record.Properties {
		rows = append(rows, []string{key, value})
	}

	printer.PrintTable(cliOpts, msg.Num, time.Unix(msg.ReceivedAtUnixTsUtc, 0), msg.Payload, rows)

	return nil
}
// DisplayError will parse an Error record and print (pretty) output to STDOUT
func (p *Pulsar) DisplayError(msg *records.ErrorRecord) error {
	// Delegate to the shared plumber error printer.
	printer.DefaultDisplayError(msg)

	return nil
}
// validateReadRecord ensures msg is non-nil and carries a pulsar payload
// with a non-nil value before we try to render it.
func validateReadRecord(msg *records.ReadRecord) error {
	switch {
	case msg == nil:
		return errors.New("msg cannot be nil")
	case msg.GetPulsar() == nil:
		return errors.New("message cannot be nil")
	case msg.GetPulsar().Value == nil:
		return errors.New("message value cannot be nil")
	}

	return nil
}
|
package server
import (
"net/http"
"github.com/r3labs/sse"
"github.com/gorilla/mux"
)
// Server represents a HTTP(S) proxy
type Server struct {
	router *mux.Router  // local (non-proxied) routes, e.g. console endpoints
	http   *http.Server // underlying listener, built in NewServer
	cache  *Cache       // response cache backing the proxy
	events *sse.Server  // server-sent-events hub; NOTE(review): not set in NewServer — confirm it is initialized elsewhere (mountConsole?)
}
// NewServer returns a new HTTP(S) proxy instance
func NewServer(addr string, cachePath string) (*Server, error) {
	cache, err := NewCache(cachePath)
	if err != nil {
		return nil, err
	}

	srv := &Server{
		router: mux.NewRouter(),
		cache:  cache,
	}
	srv.mountConsole()

	// The server itself is the top-level handler so it can decide
	// between proxying and local routes per request.
	srv.http = &http.Server{
		Addr:    addr,
		Handler: srv,
	}

	return srv, nil
}
// Start begins the HTTP proxy listening
func (s *Server) Start() error {
	// Blocks until the listener fails or the server is closed.
	return s.http.ListenAndServe()
}
// Stop shuts down the HTTP server
func (s *Server) Stop() error {
	// Close terminates immediately (no graceful connection drain).
	return s.http.Close()
}
// ServeHTTP dispatches every incoming request: CONNECT tunnels and
// absolute-URL requests are proxied, everything else is a local route.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	switch {
	case r.Method == http.MethodConnect:
		// Proxy CONNECT request (e.g. HTTPS, WebSocket)
		s.proxyCONNECT(w, r)
	case r.URL.Host != "":
		// Proxy HTTP request
		s.proxyHTTP(w, r)
	default:
		// Local request
		s.router.ServeHTTP(w, r)
	}
}
|
//go:generate reform
package front
// Special is the reform model for the cc_special table.
//reform:cc_special
type Special struct {
	ID   uint   `reform:"special_id,pk"` // primary key
	Name string `reform:"special_name"`  // display name
	Pos  int64  `reform:"pos"`           // sort position
}
//reform:cc_product_special
//type ProductSpecial struct {
// ID uint `reform:"id,pk"`
// SpecialID uint `reform:"special_id"`
// ProductID uint `reform:"product_id"`
//}
|
/*
* Copyright 2018- The Pixie Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package controllers_test
import (
"context"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"px.dev/pixie/src/api/proto/cloudpb"
"px.dev/pixie/src/cloud/api/controllers"
"px.dev/pixie/src/cloud/api/controllers/testutils"
"px.dev/pixie/src/cloud/artifact_tracker/artifacttrackerpb"
"px.dev/pixie/src/shared/artifacts/versionspb"
)
// TestArtifactTracker_GetArtifactList verifies that the cloud-facing
// GetArtifactList endpoint forwards the request to the artifact tracker
// service and converts the versionspb response into cloudpb types,
// including nested artifact mirrors and empty-slice defaults.
func TestArtifactTracker_GetArtifactList(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	_, mockClients, cleanup := testutils.CreateTestAPIEnv(t)
	defer cleanup()
	ctx := context.Background()
	// Expect exactly one pass-through call to the backing service, with
	// the request translated to artifacttrackerpb types.
	mockClients.MockArtifact.EXPECT().GetArtifactList(gomock.Any(),
		&artifacttrackerpb.GetArtifactListRequest{
			ArtifactName: "cli",
			Limit:        1,
			ArtifactType: versionspb.AT_LINUX_AMD64,
		}).
		Return(&versionspb.ArtifactSet{
			Name: "cli",
			Artifact: []*versionspb.Artifact{
				{
					VersionStr: "test",
				},
				{
					VersionStr: "test2",
					AvailableArtifactMirrors: []*versionspb.ArtifactMirrors{
						{
							ArtifactType: versionspb.AT_LINUX_AMD64,
							SHA256:       "abcd",
							URLs: []string{
								"url1",
								"url2",
							},
						},
					},
				},
			},
		}, nil)
	artifactTrackerServer := &controllers.ArtifactTrackerServer{
		ArtifactTrackerClient: mockClients.MockArtifact,
	}
	resp, err := artifactTrackerServer.GetArtifactList(ctx, &cloudpb.GetArtifactListRequest{
		ArtifactName: "cli",
		Limit:        1,
		ArtifactType: cloudpb.AT_LINUX_AMD64,
	})
	require.NoError(t, err)
	assert.Equal(t, "cli", resp.Name)
	assert.Equal(t, 2, len(resp.Artifact))
	// Note: artifacts with no mirrors must come back with empty (not nil)
	// slices for AvailableArtifacts / AvailableArtifactMirrors.
	expectedArtifacts := []*cloudpb.Artifact{
		{
			VersionStr:               "test",
			AvailableArtifacts:       []cloudpb.ArtifactType{},
			AvailableArtifactMirrors: []*cloudpb.ArtifactMirrors{},
		},
		{
			VersionStr:         "test2",
			AvailableArtifacts: []cloudpb.ArtifactType{},
			AvailableArtifactMirrors: []*cloudpb.ArtifactMirrors{
				{
					ArtifactType: cloudpb.AT_LINUX_AMD64,
					SHA256:       "abcd",
					URLs: []string{
						"url1",
						"url2",
					},
				},
			},
		},
	}
	assert.Equal(t, expectedArtifacts, resp.Artifact)
}
// TestArtifactTracker_GetDownloadLink verifies that GetDownloadLink
// forwards the request to the artifact tracker service and copies the
// returned URL and SHA256 into the cloudpb response.
func TestArtifactTracker_GetDownloadLink(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	_, mockClients, cleanup := testutils.CreateTestAPIEnv(t)
	defer cleanup()
	ctx := context.Background()
	// Expect the single translated pass-through call.
	mockClients.MockArtifact.EXPECT().GetDownloadLink(gomock.Any(),
		&artifacttrackerpb.GetDownloadLinkRequest{
			ArtifactName: "cli",
			VersionStr:   "version",
			ArtifactType: versionspb.AT_LINUX_AMD64,
		}).
		Return(&artifacttrackerpb.GetDownloadLinkResponse{
			Url:    "http://localhost",
			SHA256: "sha",
		}, nil)
	artifactTrackerServer := &controllers.ArtifactTrackerServer{
		ArtifactTrackerClient: mockClients.MockArtifact,
	}
	resp, err := artifactTrackerServer.GetDownloadLink(ctx, &cloudpb.GetDownloadLinkRequest{
		ArtifactName: "cli",
		VersionStr:   "version",
		ArtifactType: cloudpb.AT_LINUX_AMD64,
	})
	require.NoError(t, err)
	assert.Equal(t, "http://localhost", resp.Url)
	assert.Equal(t, "sha", resp.SHA256)
}
|
package back_track
import (
"fmt"
"testing"
)
// TestGenerateBrackets checks that GenerateBrackets(3) produces all valid
// combinations of 3 bracket pairs. The original test only printed the
// results and asserted nothing; it now fails when the count is wrong.
func TestGenerateBrackets(t *testing.T) {
	// The 5 valid combinations of 3 bracket pairs (Catalan number C_3 = 5):
	// ((()))  (()())  (())()  ()(())  ()()()
	got := GenerateBrackets(3)
	if len(got) != 5 {
		t.Fatalf("GenerateBrackets(3) returned %d combinations, want 5", len(got))
	}
	for _, v := range got {
		fmt.Println(v)
	}
}
|
package main
import (
	"io"
	"log"
	"net/http"
)
// DogHandler is a Handler
type DogHandler int
func (d DogHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) {
io.WriteString(res, "Snopp Doggy Dogg")
}
// --------
// CatHandler is a Handler
type CatHandler int
func (c CatHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) {
io.WriteString(res, "Pussy Pussy Cat Dolls")
}
// --------
// main registers the demo handlers and serves on :8080.
func main() {
	var dog DogHandler
	var cat CatHandler

	mux := http.NewServeMux()
	mux.Handle("/", dog)
	mux.Handle("/cat", cat) // matches only the exact path /cat
	// mux.Handle("/cat/", cat) // trailing slash would register the whole /cat/... subtree
	// run tests, it's the best way to understand the differences

	// ListenAndServe only returns on failure; the original discarded the
	// error, so a bind failure exited silently with status 0.
	log.Fatal(http.ListenAndServe(":8080", mux))
}
/*
if route ends in slash
/h/ it includes anything beneath
if route ends in no-slash
/h
it only includes that
*/
|
package mysqldb
import (
"context"
"fmt"
"time"
)
// Client is one API-client record (backed by the `client` table).
type Client struct {
	ClientID       string `gorm:"primary_key"`            // primary key of the client table
	SecretKey      string `gorm:"column:secret_key"`      // client secret key
	Name           string `gorm:"column:name"`            // client display name
	Zone           string `gorm:"column:zone"`            // client zone/region
	CustomizedCode string `gorm:"column:customized_code"` // customization code
	Remark         string `gorm:"column:remark"`          // free-form remark
	Usage          string `gorm:"column:usage"`           // intended usage
	CreatedAt      time.Time  // record creation timestamp
	UpdatedAt      time.Time  // last update timestamp
	DeletedAt      *time.Time // soft-delete marker; nil means the row is live
}
// TableName tells gorm which table backs the Client model.
func (c Client) TableName() string {
	const tableName = "client"
	return tableName
}
// FindClientByClientID looks up a single Client row by its client_id.
func (db *DbClient) FindClientByClientID(ctx context.Context, clientID string) (*Client, error) {
	client := &Client{}
	err := db.GetDB(ctx).First(client, "( client_id = ? ) ", clientID).Error
	if err != nil {
		return nil, err
	}
	return client, nil
}
// SafeCloseDB closes the database connection, logging (rather than
// propagating) any close error.
func (db *DbClient) SafeCloseDB(ctx context.Context) {
	if err := db.GetDB(ctx).Close(); err != nil {
		fmt.Println("Closing DB error:", err)
	}
}
|
package main
import (
"os"
"runtime"
"strings"
"syscall"
"strconv"
"time"
"golang.org/x/net/context"
"bazil.org/fuse"
"bazil.org/fuse/fs"
)
// Cache/validity tuning knobs.
const (
	dirCacheTime   = 10 * time.Second // how long a cached directory listing stays fresh
	statCacheTime  = 1 * time.Second  // how long a cached stat result stays fresh
	attrValidTime  = 1 * time.Minute  // attribute validity advertised to the kernel
	entryValidTime = 1 * time.Minute  // entry validity advertised to the kernel
)
// WebdavFS is the FUSE filesystem backed by a WebDAV server.
type WebdavFS struct {
	Uid       uint32      // uid reported for every file
	Gid       uint32      // gid reported for every file
	Mode      uint32      // base permission bits; masked and defaulted in NewFS
	dirMode   os.FileMode // derived directory mode (execute bits added, see NewFS)
	fileMode  os.FileMode // derived file mode (execute bits stripped)
	blockSize uint32      // Attr.BlockSize; forced to 0 on darwin (see NewFS)
	root      *Node       // root node of the tree
}
// Package-level singletons, both assigned by NewFS: the mounted
// filesystem and the WebDAV client it talks to.
var FS *WebdavFS
var dav *DavClient
// attrSet reports whether flag f is present in the Setattr valid-mask v.
func attrSet(v fuse.SetattrValid, f fuse.SetattrValid) bool {
	return v&f != 0
}
// flagSet reports whether open flag f is present in v.
func flagSet(v fuse.OpenFlags, f fuse.OpenFlags) bool {
	return v&f != 0
}
func getCMtime(c time.Time, m time.Time) (rc time.Time, rm time.Time) {
rc = c
rm = m
if m.IsZero() {
rm = time.Now()
}
if c.IsZero() {
rc = rm
}
return
}
// NewFS wires up the package singletons (dav, FS) from the given client
// and config, derives file/dir permission modes from config.Mode, and
// returns FS. Note it mutates package-level state, so there is at most
// one mount per process.
func NewFS(d *DavClient, config WebdavFS) *WebdavFS {
	if trace(T_FUSE) {
		tPrintf("NewFS %s", tJson(config))
	}
	dav = d
	FS = &config
	FS.root = rootNode
	if FS.Mode == 0 {
		FS.Mode = 0700
	}
	// Only the permission bits of Mode are meaningful.
	FS.Mode = FS.Mode & 0777
	// Files never carry execute bits.
	FS.fileMode = os.FileMode(FS.Mode &^ uint32(0111))
	FS.dirMode = os.FileMode(FS.Mode)
	// Directories get the search (execute) bit for every class that has
	// any access at all.
	if FS.dirMode & 0007 > 0 {
		FS.dirMode |= 0001
	}
	if FS.dirMode & 0070 > 0 {
		FS.dirMode |= 0010
	}
	if FS.dirMode & 0700 > 0 {
		FS.dirMode |= 0100
	}
	FS.dirMode |= os.ModeDir
	FS.blockSize = 4096
	if runtime.GOOS == "darwin" {
		// if we set this on osxfuse, _all_ I/O will
		// be limited to FS.blockSize bytes.
		FS.blockSize = 0
	}
	return FS
}
// Root returns the root node of the mounted filesystem.
func (fs *WebdavFS) Root() (fs.Node, error) {
	return fs.root, nil
}
// Statfs reports filesystem usage by asking the WebDAV server for its
// RFC 4331 quota properties. Servers that report nothing yield all-ones
// ("unknown") block counts.
func (fs *WebdavFS) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) (err error) {
	if trace(T_FUSE) {
		tPrintf("%d Statfs()", req.Header.ID)
		defer func() {
			if err != nil {
				tPrintf("%d Statfs(): %v",req.Header.ID, err)
			} else {
				tPrintf("%d Statfs(): %v", req.Header.ID, resp)
			}
		}()
	}
	wanted := []string{ "quota-available-bytes", "quota-used-bytes" }
	props, err := dav.PropFind("/", 0, wanted)
	if err != nil {
		return
	}
	// Default both counters to "unknown" (all bits set).
	negOne := int64(-1)
	total := uint64(negOne)
	free := uint64(negOne)
	if len(props) == 1 {
		// Parse errors are deliberately ignored; missing/garbled values
		// simply leave the counters at 0 and are skipped below.
		spaceUsed, _ := strconv.ParseUint(props[0].SpaceUsed, 10, 64)
		spaceFree, _ := strconv.ParseUint(props[0].SpaceFree, 10, 64)
		if spaceUsed > 0 || spaceFree > 0 {
			// Convert bytes to 4 KiB blocks, rounding up.
			used := (spaceUsed + 4095) / 4096
			free = (spaceFree + 4095) / 4096
			if free > 0 {
				total = used + free
			}
		}
	}
	data := fuse.StatfsResponse{
		Blocks: total,
		Bfree: free,
		Bavail: free,
		Bsize: 4096,
		Frsize: 4096,
		Namelen: 255,
	}
	*resp = data
	return
}
// Mkdir creates a remote collection (MKCOL) and, on success, inserts a
// matching directory node into the local cache.
func (nd *Node) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (ret fs.Node, err error) {
	if trace(T_FUSE) {
		tPrintf("%d Mkdir(%s)", req.Header.ID, req.Name)
		defer func() {
			if err != nil {
				tPrintf("%d Mkdir(%s): %v", req.Header.ID, req.Name, err)
			} else {
				tPrintf("%d Mkdir OK", req.Header.ID)
			}
		}()
	}
	nd.incMetaRefThenLock(req.Header.ID)
	path := joinPath(nd.getPath(), req.Name)
	// Drop the lock across the network round-trip.
	nd.Unlock()
	err = dav.Mkcol(addSlash(path))
	nd.Lock()
	if err == nil {
		// Seed the cache entry with "now" timestamps.
		now := time.Now()
		nn := Dnode{
			Name: req.Name,
			Mtime: now,
			Ctime: now,
			IsDir: true,
		}
		n := nd.addNode(nn, true)
		ret = n
	}
	nd.decMetaRef()
	nd.Unlock()
	return
}
// Rename moves OldName (under nd) to NewName (under destDir) via a
// WebDAV MOVE. The two directories may be nested inside each other, so
// meta-refs are acquired in a retry loop until the lock set is stable.
func (nd *Node) Rename(ctx context.Context, req *fuse.RenameRequest, destDir fs.Node) (err error) {
	if trace(T_FUSE) {
		tPrintf("%d Rename(%s, %s)", req.Header.ID, req.OldName, req.NewName)
		defer func() {
			if err != nil {
				tPrintf("%d Rename(%s, %s): %v", req.Header.ID, req.OldName, req.NewName, err)
			} else {
				tPrintf("%d Rename OK", req.Header.ID)
			}
		}()
	}
	var lock1, lock2 *Node
	var oldPath, newPath string
	destNode := destDir.(*Node)
	first := true
	// Check if paths overlap. If so, only lock the
	// shortest path. If not, lock both.
	//
	// Need to do this in a loop, every time checking if this
	// condition still holds after both paths are locked.
	nd.Lock()
	for {
		srcDirPath := nd.getPath()
		dstDirPath := destNode.getPath()
		oldPath = joinPath(srcDirPath, req.OldName)
		newPath = joinPath(dstDirPath, req.NewName)
		var newLock1, newLock2 *Node
		if srcDirPath == dstDirPath {
			// Same directory: one meta-ref suffices.
			newLock1 = nd
		} else if strings.HasPrefix(srcDirPath, dstDirPath) {
			// Destination is an ancestor of the source.
			newLock1 = destNode
		} else if strings.HasPrefix(dstDirPath, srcDirPath) {
			// Source is an ancestor of the destination.
			newLock1 = nd
		} else {
			// Disjoint subtrees: take both.
			newLock1 = nd
			newLock2 = destNode
		}
		if !first {
			// Lock set unchanged since the last pass — stable, done.
			if lock1 == newLock1 && lock2 == newLock2 {
				break
			}
			// Tree moved under us; release and retry with the new set.
			lock1.decMetaRef()
			if lock2 != nil {
				lock2.decMetaRef()
			}
		}
		first = false
		lock1, lock2 = newLock1, newLock2
		lock1.incMetaRef(req.Header.ID)
		if lock2 != nil {
			lock2.incMetaRef(req.Header.ID)
		}
	}
	isDir := false
	node := nd.getNode(req.OldName)
	if node == nil {
		// don't have the source node cached- need to
		// find out if it's a dir or not, so stat.
		nd.Unlock()
		var dnode Dnode
		dnode, err = dav.Stat(oldPath)
		isDir = dnode.IsDir
	} else {
		isDir = node.IsDir
		nd.Unlock()
	}
	if err == nil {
		// Collections need trailing slashes in WebDAV URLs.
		if isDir {
			oldPath = addSlash(oldPath)
			newPath = addSlash(newPath)
		}
		err = dav.Move(oldPath, newPath)
	}
	nd.Lock()
	if err == nil {
		// Mirror the server-side move in the local cache.
		nd.moveNode(destNode, req.OldName, req.NewName)
	}
	lock1.decMetaRef()
	if lock2 != nil {
		lock2.decMetaRef()
	}
	nd.Unlock()
	return
}
// Remove unlinks a file or removes a directory. It first PROPFINDs the
// target so the kind matches the request (rmdir vs unlink) and the
// directory is empty, then issues DELETE and drops the cached node.
func (nd *Node) Remove(ctx context.Context, req *fuse.RemoveRequest) (err error) {
	if trace(T_FUSE) {
		tPrintf("%d Remove(%s)", req.Header.ID, req.Name)
		defer func() {
			if err != nil {
				tPrintf("%d Remove(%s): %v", req.Header.ID, req.Name, err)
			} else {
				tPrintf("%d Remove OK", req.Header.ID)
			}
		}()
	}
	nd.incMetaRefThenLock(req.Header.ID)
	path := joinPath(nd.getPath(), req.Name)
	nd.Unlock()
	// Depth-1 PROPFIND: exactly one entry for a plain file or empty dir,
	// more when a directory still has children.
	props, err := dav.PropFindWithRedirect(path, 1, nil)
	if err == nil {
		if len(props) != 1 {
			if req.Dir {
				err = fuse.Errno(syscall.ENOTEMPTY)
			} else {
				// Multiple entries for a plain file is unexpected.
				err = fuse.EIO
			}
		}
		if err == nil {
			isDir := false
			if props[0].ResourceType == "collection" {
				isDir = true
			}
			// rmdir of a file / unlink of a directory are both errors.
			if req.Dir && !isDir {
				err = fuse.Errno(syscall.ENOTDIR)
			}
			if !req.Dir && isDir {
				err = fuse.Errno(syscall.EISDIR)
			}
		}
	}
	if err == nil {
		if req.Dir {
			path = addSlash(path)
		}
		err = dav.Delete(path)
	}
	nd.Lock()
	if err == nil {
		nd.delNode(req.Name)
	}
	nd.decMetaRef()
	nd.Unlock()
	return
}
// Attr implements the basic fs.Node interface by delegating to Getattr.
// should not be called if Getattr exists.
func (nd *Node) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
	req := &fuse.GetattrRequest{}
	resp := &fuse.GetattrResponse{}
	err = nd.Getattr(ctx, req, resp)
	*attr = resp.Attr
	return err
}
// Getattr returns the node's attributes, re-statting the server when the
// cached stat info is stale. A file/dir kind mismatch between cache and
// server invalidates the node and returns ESTALE.
func (nd *Node) Getattr(ctx context.Context, req *fuse.GetattrRequest, resp *fuse.GetattrResponse) (err error) {
	if trace(T_FUSE) {
		tPrintf("%d Getattr(%s)", req.Header.ID, nd.Name)
		defer func() {
			if err != nil {
				tPrintf("%d Getattr(%s): %v", req.Header.ID, nd.Name, err)
			} else {
				tPrintf("%d Getattr(%s): %v", req.Header.ID, nd.Name, tJson(resp))
			}
		}()
	}
	if nd.Deleted {
		err = fuse.Errno(syscall.ESTALE)
		return
	}
	nd.incIoRef(req.Header.ID)
	dnode := nd.Dnode
	if !nd.statInfoFresh() {
		path := nd.getPath()
		if nd.IsDir {
			path = addSlash(path)
		}
		dnode, err = dav.Stat(path)
		if err == nil {
			nd.statInfoTouch()
		}
	}
	if err == nil {
		// Sanity check.
		if nd.Name != "" && dnode.IsDir != nd.IsDir {
			nd.invalidateThisNode()
			err = fuse.Errno(syscall.ESTALE)
		} else {
			// All well, build fuse.Attr.
			nd.Dnode = dnode
			mode := FS.fileMode
			ctime, mtime := getCMtime(nd.Ctime, nd.Mtime)
			atime := nd.Atime
			if atime.IsZero() {
				// atime is not tracked remotely; fall back to mtime.
				atime = mtime
			}
			if nd.IsDir {
				mode = FS.dirMode
			}
			if nd.IsLink {
				mode = os.ModeSymlink | 0777
			}
			resp.Attr = fuse.Attr{
				Valid: attrValidTime,
				Inode: nd.Inode,
				Size: nd.Size,
				Blocks: (nd.Size + 511) / 512, // 512-byte blocks, rounded up
				Atime: atime,
				Mtime: mtime,
				Ctime: ctime,
				Crtime: ctime,
				Mode: mode,
				Nlink: 1,
				Uid: FS.Uid,
				Gid: FS.Gid,
				BlockSize: FS.blockSize,
			}
		}
	}
	nd.decIoRef()
	return
}
// Lookup resolves a name in this directory: a cached child with fresh
// stat info is returned directly, otherwise the entry is stat'ed on the
// server and added to the cache.
//
// BUG FIX: the original called nd.decIoRef() explicitly before the final
// return while ALSO having `defer nd.decIoRef()` registered, releasing
// the I/O ref twice on the stat path. The explicit call is removed; the
// deferred one releases the ref on every path.
func (nd *Node) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.LookupResponse) (rn fs.Node, err error) {
	if trace(T_FUSE) {
		tPrintf("%d Lookup(%s)", req.Header.ID, req.Name)
		defer func() {
			if err != nil {
				tPrintf("%d Lookup(%s): %v", req.Header.ID, req.Name, err)
			} else {
				tPrintf("%d Lookup(%s): OK", req.Header.ID, req.Name)
			}
		}()
	}
	nd.incIoRef(req.Header.ID)
	defer nd.decIoRef()
	// do we have a recent entry available?
	nd.Lock()
	nn := nd.getNode(req.Name)
	valid := nn != nil && nn.statInfoFresh()
	nd.Unlock()
	if valid {
		rn = nn
		return
	}
	// need to call stat
	path := joinPath(nd.getPath(), req.Name)
	dnode, err := dav.Stat(path)
	if err == nil {
		node := nd.addNode(dnode, true)
		rn = node
	}
	return
}
// ReadDirAll lists the directory from the WebDAV server, refreshes the
// cached child nodes, and invalidates children the server no longer
// reports.
//
// Fixes: the entry trace label said "ReaddirAll" while the exit traces
// said "ReadDirAll" (now consistent), and gofmt violations
// (parenthesized conditions, `tp =fuse.DT_Dir`) are cleaned up.
func (nd *Node) ReadDirAll(ctx context.Context) (dd []fuse.Dirent, err error) {
	if trace(T_FUSE) {
		tPrintf("- ReadDirAll(%s)", nd.Name)
		defer func() {
			if err != nil {
				tPrintf("- ReadDirAll(%s): %v", nd.Name, err)
			} else {
				tPrintf("- ReadDirAll(%s): %d entries", nd.Name, len(dd))
			}
		}()
	}
	nd.incIoRef(0)
	defer nd.decIoRef()
	path := nd.getPath()
	dirs, err := dav.Readdir(path, true)
	if err != nil {
		return
	}
	nd.Lock()
	defer nd.Unlock()
	seen := map[string]bool{}
	for _, d := range dirs {
		// "" / "." refer to the directory itself; reuse its inode
		// instead of caching a child node.
		ino := nd.Inode
		if d.Name != "" && d.Name != "." {
			nn := nd.addNode(d, false)
			ino = nn.Inode
		}
		tp := fuse.DT_File
		if d.IsDir {
			tp = fuse.DT_Dir
		}
		if d.IsLink {
			tp = fuse.DT_Link
		}
		dd = append(dd, fuse.Dirent{
			Name:  d.Name,
			Inode: ino,
			Type:  tp,
		})
		seen[d.Name] = true
	}
	// Drop cached children that vanished on the server.
	for _, x := range nd.Child {
		if !seen[x.Name] {
			x.invalidateThisNode()
		}
	}
	return
}
// Create creates req.Name on the server (PUT, honoring O_TRUNC and
// O_EXCL), stats the new file, and returns the cached node as both the
// fs.Node and the open handle.
//
// BUG FIX: `dnode, err := dav.Stat(path)` shadowed the named return
// inside the if-block, so a failed stat still returned (nil, nil, nil)
// — a "successful" create with no node and no handle. The stat error
// now propagates through the outer err.
func (nd *Node) Create(ctx context.Context, req *fuse.CreateRequest, resp *fuse.CreateResponse) (node fs.Node, handle fs.Handle, err error) {
	nd.incMetaRefThenLock(req.Header.ID)
	path := nd.getPath()
	nd.Unlock()
	trunc := flagSet(req.Flags, fuse.OpenTruncate)
	read := req.Flags.IsReadWrite() || req.Flags.IsReadOnly()
	write := req.Flags.IsReadWrite() || req.Flags.IsWriteOnly()
	excl := flagSet(req.Flags, fuse.OpenExclusive)
	if trace(T_FUSE) {
		tPrintf("%d Create(%s): trunc=%v read=%v write=%v excl=%v",
			req.Header.ID, req.Name, trunc, read, write, excl)
		defer func() {
			if err != nil {
				tPrintf("%d Create(%s): %v", req.Header.ID, req.Name, err)
			} else {
				tPrintf("%d Create(%s): OK", req.Header.ID, req.Name)
			}
		}()
	}
	path = joinPath(path, req.Name)
	created := false
	if trunc {
		// A simple put with no body creates and truncates the
		// file if it's not there.
		created, err = dav.Put(path, []byte{}, true, excl)
	} else {
		// A Put-Range at offset 0 with an empty body
		// creates the file if not present, but doesn't
		// truncate it.
		created, err = dav.PutRange(path, []byte{}, 0, true, excl)
	}
	if err == nil && excl && !created {
		err = fuse.EEXIST
	}
	if err == nil {
		var dnode Dnode
		dnode, err = dav.Stat(path)
		if err == nil {
			n := nd.addNode(dnode, true)
			node = n
			handle = n
		} else {
			// Stat after create failed; drop any stale cache entry.
			nd.invalidateNode(req.Name)
		}
	}
	nd.Lock()
	nd.decMetaRef()
	nd.Unlock()
	return
}
// Forget drops the node from the local cache when the kernel forgets it.
func (nd *Node) Forget() {
	if trace(T_FUSE) {
		tPrintf("Forget(%s)", nd.Name)
	}
	// XXX FIXME add some sanity checks here-
	// see if refcnt == 0, subdirs are gone
	nd.Lock()
	defer nd.Unlock()
	nd.forgetNode()
}
// ftruncate resizes the remote file: size 0 is an empty PUT, growing is
// done by writing a single zero byte at offset size-1, and shrinking to
// a non-zero size cannot be expressed in WebDAV, so it returns ERANGE.
func (nd *Node) ftruncate(ctx context.Context, size uint64, id fuse.RequestID) (err error) {
	nd.incMetaRefThenLock(id)
	path := nd.getPath()
	nd.Unlock()
	if size == 0 {
		// Only bother when there is something to discard.
		if nd.Size > 0 {
			_, err = dav.Put(path, []byte{}, false, false)
		}
	} else if size > nd.Size {
		// Extend by writing one byte at the new last offset.
		_, err = dav.PutRange(path, []byte{0}, int64(size - 1), false, false)
	} else if size != nd.Size {
		// Shrink to non-zero size: unsupported.
		err = fuse.ERANGE
	}
	nd.Lock()
	if err == nil {
		nd.Size = size
	}
	nd.decMetaRef()
	nd.Unlock()
	return
}
// Setattr handles attribute changes from the kernel. Supported: size
// (via ftruncate) and atime (accepted but not persisted). mtime is only
// "accepted" when it is approximately unchanged; chmod/chown and the
// other unsupported attributes return EPERM.
//
// Fixes: the invalid-attribute trace used the malformed verb "$s"
// (leaving one stray argument), and Blocks was computed as Size/512
// (truncating) while Getattr rounds up — both now match Getattr.
func (nd *Node) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) (err error) {
	if trace(T_FUSE) {
		tPrintf("%d Setattr(%s, %s)", req.Header.ID, nd.Name, tJson(req))
		defer func() {
			if err != nil {
				tPrintf("%d Setattr(%s): %v", req.Header.ID, nd.Name, err)
			} else {
				tPrintf("%d Setattr(%s): OK", req.Header.ID, nd.Name)
			}
		}()
	}
	if nd.Deleted {
		err = fuse.Errno(syscall.ESTALE)
		return
	}
	// Attributes that cannot be stored on a WebDAV server.
	invalid := fuse.SetattrMode | fuse.SetattrUid | fuse.SetattrGid |
		fuse.SetattrBkuptime | fuse.SetattrCrtime | fuse.SetattrChgtime |
		fuse.SetattrFlags | fuse.SetattrHandle
	v := req.Valid
	if attrSet(v, invalid) {
		if trace(T_FUSE) {
			tPrintf("%d Setattr(%s): invalid attributes (mode %d, invalid %d)",
				req.Header.ID, nd.Name, v, invalid)
		}
		return fuse.EPERM
	}
	if attrSet(v, fuse.SetattrSize) {
		err = nd.ftruncate(ctx, req.Size, req.Header.ID)
		if err != nil {
			return
		}
	}
	nd.Lock()
	defer nd.Unlock()
	// fake setting mtime if it is roughly unchanged.
	if attrSet(v, fuse.SetattrMtime) {
		if nd.LastStat.Add(time.Second).Before(time.Now()) ||
			req.Mtime.Before(nd.Mtime.Add(-500*time.Millisecond)) ||
			req.Mtime.After(nd.Mtime.Add(500*time.Millisecond)) {
			return fuse.EPERM
		}
	}
	// atime .. we allow it, but it's not saved.
	if attrSet(v, fuse.SetattrAtime) {
		nd.Atime = req.Atime
	}
	if attrSet(v, fuse.SetattrLockOwner) {
		// we ignore this for now, it's for mandatory locking
		// http://www.mail-archive.com/git-commits-head@vger.kernel.org/msg27852.html
	}
	mode := FS.fileMode
	if nd.IsDir {
		mode = FS.dirMode
	}
	if nd.IsLink {
		mode = os.ModeSymlink | 0777
	}
	ctime, mtime := getCMtime(nd.Ctime, nd.Mtime)
	atime := nd.Atime
	if atime.IsZero() {
		atime = mtime
	}
	attr := fuse.Attr{
		Valid:  attrValidTime,
		Inode:  nd.Inode,
		Size:   nd.Size,
		Blocks: (nd.Size + 511) / 512, // rounded up, matching Getattr
		Atime:  atime,
		Mtime:  mtime,
		Ctime:  ctime,
		Crtime: ctime,
		Mode:   mode,
		Nlink:  1,
		Uid:    FS.Uid,
		Gid:    FS.Gid,
		// NOTE(review): Getattr reports FS.blockSize here; confirm
		// whether the hard-coded 4096 is intentional.
		BlockSize: 4096,
	}
	resp.Attr = attr
	return
}
// Readlink returns the symlink target, or EINVAL for non-links.
func (nf *Node) Readlink(ctx context.Context, req *fuse.ReadlinkRequest) (string, error) {
	if nf.IsLink {
		return nf.Target, nil
	}
	return "", fuse.Errno(syscall.EINVAL)
}
// Fsync is effectively a no-op: writes go straight to the WebDAV server
// (see Write), so there is nothing buffered locally to flush. Deleted
// nodes still report ESTALE.
func (nf *Node) Fsync(ctx context.Context, req *fuse.FsyncRequest) (err error) {
	if trace(T_FUSE) {
		tPrintf("%d Fsync(%s)", req.Header.ID, nf.Name)
		defer func() {
			if err != nil {
				tPrintf("%d Fsync(%s): %v", req.Header.ID, nf.Name, err)
			}
		}()
	}
	if nf.Deleted {
		return fuse.Errno(syscall.ESTALE)
	}
	return nil
}
// Read satisfies a kernel read with a ranged GET against the server,
// clamping the requested window to the cached file size.
func (nf *Node) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) (err error) {
	if trace(T_FUSE) {
		tPrintf("%d Read(%s, %d, %d)", req.Header.ID, nf.Name, req.Offset, req.Size)
		defer func() {
			if err != nil {
				tPrintf("%d Read(%s): %v", req.Header.ID, nf.Name, err)
			} else {
				tPrintf("%d Read(%s): %d bytes", req.Header.ID, nf.Name, len(resp.Data))
			}
		}()
	}
	if nf.Deleted {
		err = fuse.Errno(syscall.ESTALE)
		return
	}
	nf.incIoRef(req.Header.ID)
	defer nf.decIoRef()
	nf.Lock()
	toRead := int64(nf.Size) - req.Offset
	nf.Unlock()
	if toRead <= 0 {
		// Read at or past EOF: empty result.
		resp.Data = []byte{}
		return
	}
	if toRead > int64(req.Size) {
		toRead = int64(req.Size)
	}
	path := nf.getPath()
	data, err := dav.GetRange(path, req.Offset, int(toRead))
	if err == nil {
		resp.Data = data
	}
	return
}
// Write pushes the data to the server with a ranged PUT and, on success,
// grows the cached file size when the write extended the file.
func (nf *Node) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) (err error) {
	if trace(T_FUSE) {
		tPrintf("%d Write(%s, %d, %d)", req.Header.ID, nf.Name, req.Offset, len(req.Data))
		defer func() {
			if err != nil {
				tPrintf("%d Write(%s): %v", req.Header.ID, nf.Name, err)
			} else {
				tPrintf("%d Write(%s): %d bytes", req.Header.ID, nf.Name, len(req.Data))
			}
		}()
	}
	if nf.Deleted {
		err = fuse.Errno(syscall.ESTALE)
		return
	}
	if len(req.Data) == 0 {
		// Nothing to send.
		resp.Size = 0
		return
	}
	nf.incIoRef(req.Header.ID)
	path := nf.getPath()
	_, err = dav.PutRange(path, req.Data, req.Offset, false, false)
	if err == nil {
		resp.Size = len(req.Data)
		sz := uint64(req.Offset) + uint64(len(req.Data))
		nf.Lock()
		if sz > nf.Size {
			nf.Size = sz
		}
		nf.Unlock()
	}
	nf.decIoRef()
	return
}
// Open re-stats the file to decide whether the kernel's page cache is
// still valid (OpenKeepCache when size and mtime are unchanged) and
// returns the node itself as the handle. Directories are returned
// immediately without a stat.
func (nf *Node) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.OpenResponse) (handle fs.Handle, err error) {
	trunc := flagSet(req.Flags, fuse.OpenTruncate)
	read := req.Flags.IsReadWrite() || req.Flags.IsReadOnly()
	write := req.Flags.IsReadWrite() || req.Flags.IsWriteOnly()
	if trace(T_FUSE) {
		tPrintf("%d Open(%s): trunc=%v read=%v write=%v", req.Header.ID, nf.Name, trunc, read, write)
		defer func() {
			if err != nil {
				tPrintf("%d Open(%s): %v", req.Header.ID, nf.Name, err)
			} else {
				tPrintf("%d Open(%s): OK", req.Header.ID, nf.Name)
			}
		}()
	}
	if nf.IsDir {
		handle = nf
		return
	}
	nf.incIoRef(req.Header.ID)
	path := nf.getPath()
	// See if kernel cache is still valid.
	dnode, err := dav.Stat(path)
	if err == nil {
		nf.Lock()
		nf.Dnode = dnode
		nf.statInfoTouch()
		if dnode.Size == nf.Size && dnode.Mtime == nf.Mtime {
			resp.Flags = fuse.OpenKeepCache
		}
		nf.Unlock()
		// This is actually not called, truncating is
		// done by calling Setattr with 0 size.
		if trunc {
			_, err = dav.Put(path, []byte{}, false, false)
			if err == nil {
				// NOTE(review): nf.Size is updated here without holding
				// the lock — confirm this path is truly unreachable.
				nf.Size = 0
			}
		}
	}
	nf.decIoRef()
	if err == nil {
		handle = nf
	}
	return
}
|
package api
import (
"context"
"strings"
"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go/log"
"github.com/pkg/errors"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/gitserver"
store "github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/stores/dbstore"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/stores/lsifstore"
"github.com/sourcegraph/sourcegraph/internal/observation"
)
// NumAncestors is the number of ancestors to query from gitserver when trying to find the closest
// ancestor we have data for. Setting this value too low (relative to a repository's commit rate)
// will cause requests for an unknown commit return too few results; setting this value too high
// will raise the latency of requests for an unknown commit.
//
// TODO(efritz) - make adjustable
const NumAncestors = 100
// FindClosestDumps returns the set of dumps that can most accurately answer code intelligence
// queries for the given path. If exactPath is true, then only dumps that definitely contain the
// exact document path are returned. Otherwise, dumps containing any document for which the given
// path is a prefix are returned. These dump IDs should be subsequently passed to invocations of
// Definitions, References, and Hover.
func (api *CodeIntelAPI) FindClosestDumps(ctx context.Context, repositoryID int, commit, path string, exactPath bool, indexer string) (_ []store.Dump, err error) {
	ctx, endObservation := api.operations.findClosestDumps.With(ctx, &err, observation.Args{LogFields: []log.Field{
		log.Int("repositoryID", repositoryID),
		log.String("commit", commit),
		log.String("path", path),
		log.Bool("exactPath", exactPath),
		log.String("indexer", indexer),
	}})
	defer endObservation(1, observation.Args{})
	candidates, err := api.inferClosestUploads(ctx, repositoryID, commit, path, exactPath, indexer)
	if err != nil {
		return nil, err
	}
	// Memoizes gitserver CommitExists answers within this call.
	commitExistenceCache := map[string]bool{}
	var dumps []store.Dump
	for _, dump := range candidates {
		// We've already determined the target commit doesn't exist
		if exists, ok := commitExistenceCache[dump.Commit]; ok && !exists {
			continue
		}
		// TODO(efritz) - ensure there's a valid document path
		// for the other condition. This should probably look like
		// an additional parameter on the following exists query.
		if exactPath {
			pathExists, err := api.lsifStore.Exists(ctx, dump.ID, strings.TrimPrefix(path, dump.Root))
			if err != nil {
				if err == lsifstore.ErrNotFound {
					// NOTE(review): a missing bundle aborts the whole
					// request with an empty (nil, nil) result rather
					// than skipping this candidate — confirm intended.
					log15.Warn("Bundle does not exist")
					return nil, nil
				}
				return nil, errors.Wrap(err, "lsifStore.BundleClient")
			}
			if !pathExists {
				continue
			}
		}
		// Candidate dumps for other commits must reference commits that
		// still exist in the repository.
		if dump.Commit != commit {
			commitExists, err := api.gitserverClient.CommitExists(ctx, dump.RepositoryID, dump.Commit)
			if err != nil {
				return nil, errors.Wrap(err, "gitserverClient.CommitExists")
			}
			// Cache result as we're likely to have multiple
			// dumps per commit if there are overlapping roots.
			commitExistenceCache[dump.Commit] = commitExists
			if !commitExists {
				continue
			}
		}
		dumps = append(dumps, dump)
	}
	return dumps, nil
}
// inferClosestUploads will return the set of visible uploads for the given commit. If this commit is
// newer than our last refresh of the lsif_nearest_uploads table for this repository, then we will mark
// the repository as dirty and quickly approximate the correct set of visible uploads.
//
// Because updating the entire commit graph is a blocking, expensive, and lock-guarded process, we want
// to only do that in the background and do something cheap in latency-sensitive paths. To construct an
// approximate result, we query gitserver for a (relatively small) set of ancestors for the given commit,
// correlate that with the upload data we have for those commits, and re-run the visibility algorithm over
// the graph. This will not always produce the full set of visible commits - some responses may not contain
// all results while a subsequent request made after the lsif_nearest_uploads has been updated to include
// this commit will.
//
// TODO(efritz) - show an indication in the GraphQL response and the UI that this repo is refreshing.
// inferClosestUploads resolves the candidate dumps for a commit. Known
// commits use the precomputed lsif_nearest_uploads data; unknown commits
// in known repositories are approximated from a small gitserver commit
// graph fragment, after which the repository is marked dirty so the full
// graph is recalculated in the background. Unknown repositories yield
// (nil, nil).
func (api *CodeIntelAPI) inferClosestUploads(ctx context.Context, repositoryID int, commit, path string, exactPath bool, indexer string) ([]store.Dump, error) {
	commitExists, err := api.dbStore.HasCommit(ctx, repositoryID, commit)
	if err != nil {
		return nil, errors.Wrap(err, "store.HasCommit")
	}
	if commitExists {
		// The parameters exactPath and rootMustEnclosePath align here: if we're looking for dumps
		// that can answer queries for a directory (e.g. diagnostics), we want any dump that happens
		// to intersect the target directory. If we're looking for dumps that can answer queries for
		// a single file, then we need a dump with a root that properly encloses that file.
		dumps, err := api.dbStore.FindClosestDumps(ctx, repositoryID, commit, path, exactPath, indexer)
		if err != nil {
			return nil, errors.Wrap(err, "store.FindClosestDumps")
		}
		return dumps, nil
	}
	repositoryExists, err := api.dbStore.HasRepository(ctx, repositoryID)
	if err != nil {
		return nil, errors.Wrap(err, "store.HasRepository")
	}
	if !repositoryExists {
		// TODO(efritz) - differentiate this error in the GraphQL response/UI
		return nil, nil
	}
	// Fetch a bounded ancestor fragment around the unknown commit and
	// run the visibility computation over just that fragment.
	graph, err := api.gitserverClient.CommitGraph(ctx, repositoryID, gitserver.CommitGraphOptions{
		Commit: commit,
		Limit:  NumAncestors,
	})
	if err != nil {
		return nil, err
	}
	dumps, err := api.dbStore.FindClosestDumpsFromGraphFragment(ctx, repositoryID, commit, path, exactPath, indexer, graph)
	if err != nil {
		return nil, err
	}
	// Queue the full (expensive) commit-graph refresh in the background.
	if err := api.dbStore.MarkRepositoryAsDirty(ctx, repositoryID); err != nil {
		return nil, errors.Wrap(err, "store.MarkRepositoryAsDirty")
	}
	return dumps, nil
}
|
package texthash
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/ibraimgm/jolly-crane/rest"
)
// service is the package-wide TextHash service, backed by an in-memory
// repository (state does not survive a restart).
var service = NewService(NewInMemRepository())

// controller wires the texthash endpoints onto a gin router.
type controller struct{}
// SetupRoutes registers the texthash endpoints on the given router.
// (Closure parameters are named ctx to avoid shadowing the receiver c.)
func (c *controller) SetupRoutes(router gin.IRouter) {
	// Create a hash from a JSON body.
	router.POST("/hash", func(ctx *gin.Context) {
		input := &TextHash{}
		var err error
		if err = ctx.ShouldBindJSON(input); err != nil {
			ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		if input, err = service.Create(input); err != nil {
			ctx.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		ctx.JSON(http.StatusOK, input)
	})

	// Fetch a single entry by its hash.
	router.GET("/hashes/:hash", func(ctx *gin.Context) {
		found := service.FindByHash(ctx.Param("hash"))
		if found == nil {
			ctx.JSON(http.StatusNotFound, gin.H{"error": "Not found."})
			return
		}
		ctx.JSON(http.StatusOK, found)
	})

	// List every stored entry.
	router.GET("/hashes", func(ctx *gin.Context) {
		ctx.JSON(http.StatusOK, service.FindAll())
	})

	// Seed a few fixed tokens to make manual testing easier.
	router.GET("/demo", func(ctx *gin.Context) {
		for _, token := range []string{"frodo", "sam", "merry", "pippin"} {
			service.Create(&TextHash{Token: token})
		}
		ctx.JSON(http.StatusOK, gin.H{"message": "Done."})
	})
}
// Controller returns the controller needed to set up the routes of the
// main endpoint.
func Controller() rest.Controller {
	ctrl := &controller{}
	return ctrl
}
|
package config
// StapelImageInterface extends ImageInterface with stapel-specific
// accessors implemented by stapel image configurations.
type StapelImageInterface interface {
	ImageInterface
	ImageBaseConfig() *StapelImageBase // underlying base-image configuration
	IsArtifact() bool                  // whether this image is an artifact
	imports() []*Import                // import directives (package-internal)
}
|
package migrator
import (
"fmt"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/kinesis"
"github.com/aws/aws-sdk-go/service/swf"
. "github.com/sclasen/swfsm/log"
. "github.com/sclasen/swfsm/sugar"
)
// TypesMigrator is composed of a DomainMigrator, a WorkflowTypeMigrator and an ActivityTypeMigrator.
// Nil fields are lazily replaced with zero-value migrators in Migrate.
type TypesMigrator struct {
	DomainMigrator       *DomainMigrator       // migrates the SWF domain itself
	WorkflowTypeMigrator *WorkflowTypeMigrator // migrates workflow type registrations
	ActivityTypeMigrator *ActivityTypeMigrator // migrates activity type registrations
	StreamMigrator       *StreamMigrator       // migrates Kinesis streams
}
type SWFOps interface {
DeprecateActivityType(req *swf.DeprecateActivityTypeInput) (*swf.DeprecateActivityTypeOutput, error)
DeprecateDomain(req *swf.DeprecateDomainInput) (*swf.DeprecateDomainOutput, error)
DeprecateWorkflowType(req *swf.DeprecateWorkflowTypeInput) (*swf.DeprecateWorkflowTypeOutput, error)
DescribeActivityType(req *swf.DescribeActivityTypeInput) (*swf.DescribeActivityTypeOutput, error)
DescribeDomain(req *swf.DescribeDomainInput) (*swf.DescribeDomainOutput, error)
DescribeWorkflowExecution(req *swf.DescribeWorkflowExecutionInput) (*swf.DescribeWorkflowExecutionOutput, error)
DescribeWorkflowType(req *swf.DescribeWorkflowTypeInput) (*swf.DescribeWorkflowTypeOutput, error)
RegisterActivityType(req *swf.RegisterActivityTypeInput) (*swf.RegisterActivityTypeOutput, error)
RegisterDomain(req *swf.RegisterDomainInput) (*swf.RegisterDomainOutput, error)
RegisterWorkflowType(req *swf.RegisterWorkflowTypeInput) (*swf.RegisterWorkflowTypeOutput, error)
}
type KinesisOps interface {
CreateStream(req *kinesis.CreateStreamInput) (*kinesis.CreateStreamOutput, error)
DescribeStream(req *kinesis.DescribeStreamInput) (*kinesis.DescribeStreamOutput, error)
}
// Migrate runs Migrate on the underlying DomainMigrator, a WorkflowTypeMigrator and ActivityTypeMigrator.
func (t *TypesMigrator) Migrate() {
	// Lazily replace nil components with empty migrators so every step
	// below is safe to invoke.
	if t.DomainMigrator == nil {
		t.DomainMigrator = new(DomainMigrator)
	}
	if t.WorkflowTypeMigrator == nil {
		t.WorkflowTypeMigrator = new(WorkflowTypeMigrator)
	}
	if t.ActivityTypeMigrator == nil {
		t.ActivityTypeMigrator = new(ActivityTypeMigrator)
	}
	if t.StreamMigrator == nil {
		t.StreamMigrator = new(StreamMigrator)
	}
	// Domains must exist before types and streams are migrated; the
	// remaining migrations are independent and run in parallel.
	t.DomainMigrator.Migrate()
	ParallelMigrate(
		t.WorkflowTypeMigrator.Migrate,
		t.ActivityTypeMigrator.Migrate,
		t.StreamMigrator.Migrate,
	)
}
// ParallelMigrate runs each migrator function in its own goroutine and
// waits for all of them to finish. If any migrator panics, the
// recovered value is re-raised via Log.Panicf.
func ParallelMigrate(migrators ...func()) {
	// Buffer both channels with capacity for every migrator so each
	// goroutine can always deliver its result without blocking. The
	// previous unbuffered fail channel leaked the goroutine of any
	// migrator that panicked after the first failure was consumed.
	fail := make(chan interface{}, len(migrators))
	done := make(chan struct{}, len(migrators))
	for _, m := range migrators {
		migrator := m // capture ref for goroutine
		go func() {
			defer func() {
				if r := recover(); r != nil {
					fail <- r
				}
			}()
			migrator()
			done <- struct{}{}
		}()
	}
	// Collect exactly one result per migrator; the first failure panics.
	for range migrators {
		select {
		case <-done:
		case e := <-fail:
			Log.Panicf("migrator failed: %v", e)
		}
	}
}
// DomainMigrator will register or deprecate the configured domains as required.
type DomainMigrator struct {
	// RegisteredDomains are asserted to exist (and be registered).
	RegisteredDomains []swf.RegisterDomainInput
	// DeprecatedDomains are asserted to be deprecated.
	DeprecatedDomains []swf.DeprecateDomainInput
	// Client performs the SWF calls.
	Client SWFOps
}
// Migrate asserts that DeprecatedDomains are deprecated or deprecates them, then asserts that RegisteredDomains are registered or registers them.
func (d *DomainMigrator) Migrate() { //add parallel migrations to all Migrate!
	for _, dep := range d.DeprecatedDomains {
		if !d.isDeprecated(dep.Name) {
			d.deprecate(dep)
			Log.Printf("action=migrate at=deprecate-domain domain=%s status=deprecated", LS(dep.Name))
			continue
		}
		Log.Printf("action=migrate at=deprecate-domain domain=%s status=previously-deprecated", LS(dep.Name))
	}
	for _, reg := range d.RegisteredDomains {
		if !d.isRegisteredNotDeprecated(reg) {
			d.register(reg)
			Log.Printf("action=migrate at=register-domain domain=%s status=registered", LS(reg.Name))
			continue
		}
		Log.Printf("action=migrate at=register-domain domain=%s status=previously-registered", LS(reg.Name))
	}
}
// isRegisteredNotDeprecated reports whether the domain described by rd
// exists and is in REGISTERED state. An unknown-resource error means
// "not registered"; any other error panics.
func (d *DomainMigrator) isRegisteredNotDeprecated(rd swf.RegisterDomainInput) bool {
	desc, err := d.describe(rd.Name)
	if err != nil {
		ae, ok := err.(awserr.Error)
		if ok && ae.Code() == ErrorTypeUnknownResourceFault {
			return false
		}
		panicWithError(err)
	}
	return *desc.DomainInfo.Status == swf.RegistrationStatusRegistered
}

// register registers the domain; already-exists errors are ignored so
// the operation is idempotent, anything else panics.
func (d *DomainMigrator) register(rd swf.RegisterDomainInput) {
	if _, err := d.Client.RegisterDomain(&rd); err != nil {
		ae, ok := err.(awserr.Error)
		if ok && ae.Code() == ErrorTypeDomainAlreadyExistsFault {
			return
		}
		panicWithError(err)
	}
}

// isDeprecated reports whether the domain exists and is DEPRECATED.
// Any describe error is logged and treated as "not deprecated".
func (d *DomainMigrator) isDeprecated(domain *string) bool {
	desc, err := d.describe(domain)
	if err == nil {
		return *desc.DomainInfo.Status == swf.RegistrationStatusDeprecated
	}
	Log.Printf("action=migrate at=is-dep domain=%s error=%q", LS(domain), err.Error())
	return false
}

// deprecate deprecates the domain, panicking on any error.
func (d *DomainMigrator) deprecate(dd swf.DeprecateDomainInput) {
	if _, err := d.Client.DeprecateDomain(&dd); err != nil {
		panicWithError(err)
	}
}

// describe fetches the domain description from SWF.
func (d *DomainMigrator) describe(domain *string) (*swf.DescribeDomainOutput, error) {
	req := swf.DescribeDomainInput{Name: domain}
	resp, err := d.Client.DescribeDomain(&req)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// WorkflowTypeMigrator will register or deprecate the configured workflow types as required.
type WorkflowTypeMigrator struct {
	// RegisteredWorkflowTypes are asserted to exist (and be registered).
	RegisteredWorkflowTypes []swf.RegisterWorkflowTypeInput
	// DeprecatedWorkflowTypes are asserted to be deprecated.
	DeprecatedWorkflowTypes []swf.DeprecateWorkflowTypeInput
	// Client performs the SWF calls.
	Client SWFOps
}
// Migrate asserts that DeprecatedWorkflowTypes are deprecated or deprecates them, then asserts that RegisteredWorkflowTypes are registered or registers them.
func (w *WorkflowTypeMigrator) Migrate() {
	for _, dd := range w.DeprecatedWorkflowTypes {
		if w.isDeprecated(dd.Domain, dd.WorkflowType.Name, dd.WorkflowType.Version) {
			Log.Printf("action=migrate at=deprecate-workflow domain=%s workflow=%s version=%s status=previously-deprecated", LS(dd.Domain), LS(dd.WorkflowType.Name), LS(dd.WorkflowType.Version))
		} else {
			w.deprecate(dd)
			// status fixed from "deprecate" to "deprecated" so the log
			// vocabulary matches the domain and activity migrators.
			Log.Printf("action=migrate at=deprecate-workflow domain=%s workflow=%s version=%s status=deprecated", LS(dd.Domain), LS(dd.WorkflowType.Name), LS(dd.WorkflowType.Version))
		}
	}
	for _, r := range w.RegisteredWorkflowTypes {
		if w.isRegisteredNotDeprecated(r) {
			Log.Printf("action=migrate at=register-workflow domain=%s workflow=%s version=%s status=previously-registered", LS(r.Domain), LS(r.Name), LS(r.Version))
		} else {
			w.register(r)
			Log.Printf("action=migrate at=register-workflow domain=%s workflow=%s version=%s status=registered", LS(r.Domain), LS(r.Name), LS(r.Version))
		}
	}
}
// isRegisteredNotDeprecated reports whether the workflow type exists in
// REGISTERED state. An unknown-resource error means "not registered";
// any other error panics.
func (w *WorkflowTypeMigrator) isRegisteredNotDeprecated(rd swf.RegisterWorkflowTypeInput) bool {
	desc, err := w.describe(rd.Domain, rd.Name, rd.Version)
	if err != nil {
		ae, ok := err.(awserr.Error)
		if ok && ae.Code() == ErrorTypeUnknownResourceFault {
			return false
		}
		panicWithError(err)
	}
	return *desc.TypeInfo.Status == swf.RegistrationStatusRegistered
}

// register registers the workflow type; already-exists errors are
// ignored so the operation is idempotent, anything else panics.
func (w *WorkflowTypeMigrator) register(rd swf.RegisterWorkflowTypeInput) {
	if _, err := w.Client.RegisterWorkflowType(&rd); err != nil {
		ae, ok := err.(awserr.Error)
		if ok && ae.Code() == ErrorTypeAlreadyExistsFault {
			return
		}
		panicWithError(err)
	}
}

// isDeprecated reports whether the workflow type exists and is
// DEPRECATED. Any describe error is logged and treated as "not deprecated".
func (w *WorkflowTypeMigrator) isDeprecated(domain *string, name *string, version *string) bool {
	desc, err := w.describe(domain, name, version)
	if err == nil {
		return *desc.TypeInfo.Status == swf.RegistrationStatusDeprecated
	}
	Log.Printf("action=migrate at=is-dep domain=%s workflow=%s version=%s error=%q", LS(domain), LS(name), LS(version), err.Error())
	return false
}

// deprecate deprecates the workflow type, panicking on any error.
func (w *WorkflowTypeMigrator) deprecate(dd swf.DeprecateWorkflowTypeInput) {
	if _, err := w.Client.DeprecateWorkflowType(&dd); err != nil {
		panicWithError(err)
	}
}

// describe fetches the workflow type description from SWF.
func (w *WorkflowTypeMigrator) describe(domain *string, name *string, version *string) (*swf.DescribeWorkflowTypeOutput, error) {
	req := swf.DescribeWorkflowTypeInput{Domain: domain, WorkflowType: &swf.WorkflowType{Name: name, Version: version}}
	resp, err := w.Client.DescribeWorkflowType(&req)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// ActivityTypeMigrator will register or deprecate the configured activity types as required.
type ActivityTypeMigrator struct {
	// RegisteredActivityTypes are asserted to exist (and be registered).
	RegisteredActivityTypes []swf.RegisterActivityTypeInput
	// DeprecatedActivityTypes are asserted to be deprecated.
	DeprecatedActivityTypes []swf.DeprecateActivityTypeInput
	// Client performs the SWF calls.
	Client SWFOps
}
// Migrate asserts that DeprecatedActivityTypes are deprecated or deprecates them, then asserts that RegisteredActivityTypes are registered or registers them.
func (a *ActivityTypeMigrator) Migrate() {
	for _, d := range a.DeprecatedActivityTypes {
		if a.isDeprecated(d.Domain, d.ActivityType.Name, d.ActivityType.Version) {
			Log.Printf("action=migrate at=deprecate-activity domain=%s activity=%s version=%s status=previously-deprecated", LS(d.Domain), LS(d.ActivityType.Name), LS(d.ActivityType.Version))
		} else {
			a.deprecate(d)
			// Fixed log key typo: "at=depreacate-activity" -> "at=deprecate-activity",
			// so both branches emit the same searchable key.
			Log.Printf("action=migrate at=deprecate-activity domain=%s activity=%s version=%s status=deprecated", LS(d.Domain), LS(d.ActivityType.Name), LS(d.ActivityType.Version))
		}
	}
	for _, r := range a.RegisteredActivityTypes {
		if a.isRegisteredNotDeprecated(r) {
			Log.Printf("action=migrate at=register-activity domain=%s activity=%s version=%s status=previously-registered", LS(r.Domain), LS(r.Name), LS(r.Version))
		} else {
			a.register(r)
			Log.Printf("action=migrate at=register-activity domain=%s activity=%s version=%s status=registered", LS(r.Domain), LS(r.Name), LS(r.Version))
		}
	}
}
// isRegisteredNotDeprecated reports whether the activity type exists in
// REGISTERED state. An unknown-resource error means "not registered";
// any other error panics.
func (a *ActivityTypeMigrator) isRegisteredNotDeprecated(rd swf.RegisterActivityTypeInput) bool {
	desc, err := a.describe(rd.Domain, rd.Name, rd.Version)
	if err != nil {
		ae, ok := err.(awserr.Error)
		if ok && ae.Code() == ErrorTypeUnknownResourceFault {
			return false
		}
		panicWithError(err)
	}
	return *desc.TypeInfo.Status == swf.RegistrationStatusRegistered
}

// register registers the activity type; already-exists errors are
// ignored so the operation is idempotent, anything else panics.
func (a *ActivityTypeMigrator) register(rd swf.RegisterActivityTypeInput) {
	if _, err := a.Client.RegisterActivityType(&rd); err != nil {
		ae, ok := err.(awserr.Error)
		if ok && ae.Code() == ErrorTypeAlreadyExistsFault {
			return
		}
		panicWithError(err)
	}
}

// isDeprecated reports whether the activity type exists and is
// DEPRECATED. Any describe error is logged and treated as "not deprecated".
func (a *ActivityTypeMigrator) isDeprecated(domain *string, name *string, version *string) bool {
	desc, err := a.describe(domain, name, version)
	if err == nil {
		return *desc.TypeInfo.Status == swf.RegistrationStatusDeprecated
	}
	Log.Printf("action=migrate at=is-dep domain=%s activity=%s version=%s error=%q", LS(domain), LS(name), LS(version), err.Error())
	return false
}

// deprecate deprecates the activity type, panicking on any error.
func (a *ActivityTypeMigrator) deprecate(dd swf.DeprecateActivityTypeInput) {
	if _, err := a.Client.DeprecateActivityType(&dd); err != nil {
		panicWithError(err)
	}
}

// describe fetches the activity type description from SWF.
func (a *ActivityTypeMigrator) describe(domain *string, name *string, version *string) (*swf.DescribeActivityTypeOutput, error) {
	req := swf.DescribeActivityTypeInput{Domain: domain, ActivityType: &swf.ActivityType{Name: name, Version: version}}
	resp, err := a.Client.DescribeActivityType(&req)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// StreamMigrator will create any Kinesis Streams required.
type StreamMigrator struct {
	// Streams lists the streams that must exist after Migrate runs.
	Streams []kinesis.CreateStreamInput
	// Client performs the Kinesis calls.
	Client KinesisOps
	// Timeout is the maximum number of one-second polls awaitActive
	// performs while waiting for a stream to become active.
	Timeout int
}
// Migrate checks that the desired streams have been created, creates
// any that are missing, and waits for each to become active.
func (s *StreamMigrator) Migrate() {
	for _, st := range s.Streams {
		if !s.isCreated(st) {
			s.create(st)
			Log.Printf("action=migrate at=create-stream stream=%s status=created", LS(st.StreamName))
		} else {
			Log.Printf("action=migrate at=create-stream stream=%s status=previously-created", LS(st.StreamName))
		}
		s.awaitActive(st.StreamName, s.Timeout)
	}
}
// isCreated reports whether the stream already exists. A not-found
// error means "not created"; any other error panics.
func (s *StreamMigrator) isCreated(st kinesis.CreateStreamInput) bool {
	if _, err := s.describe(st); err != nil {
		ae, ok := err.(awserr.Error)
		if ok && ae.Code() == ErrorTypeStreamNotFound {
			return false
		}
		panicWithError(err)
	}
	return true
}
// create creates the stream. An already-exists error is ignored so the
// operation is idempotent; any other error panics, matching the
// behavior of the domain/type migrators. (Previously every error other
// than already-exists was silently swallowed, so Migrate would proceed
// to awaitActive on a stream that was never created.)
func (s *StreamMigrator) create(st kinesis.CreateStreamInput) {
	_, err := s.Client.CreateStream(&st)
	if err != nil {
		if ae, ok := err.(awserr.Error); ok && ae.Code() == ErrorTypeStreamAlreadyExists {
			return
		}
		panicWithError(err)
	}
}
// describe fetches the stream description from Kinesis.
func (s *StreamMigrator) describe(st kinesis.CreateStreamInput) (*kinesis.DescribeStreamOutput, error) {
	resp, err := s.Client.DescribeStream(&kinesis.DescribeStreamInput{
		StreamName: st.StreamName,
	})
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// awaitActive polls the stream description once per second until the
// stream reaches ACTIVE status. It panics if describing the stream
// fails or if the stream is still not active after atMostSeconds polls.
func (s *StreamMigrator) awaitActive(stream *string, atMostSeconds int) {
	waited := 0
	status := kinesis.StreamStatusCreating
	for status != kinesis.StreamStatusActive {
		desc, err := s.Client.DescribeStream(&kinesis.DescribeStreamInput{
			StreamName: stream,
		})
		if err != nil {
			Log.Printf("component=kinesis-migrator fn=awaitActive at=describe-error error=%q", err)
			panicWithError(err)
		}
		Log.Printf("component=kinesis-migrator fn=awaitActive stream=%s at=describe status=%s", *stream, *desc.StreamDescription.StreamStatus)
		status = *desc.StreamDescription.StreamStatus
		if status == kinesis.StreamStatusActive {
			// Stop immediately once active. Previously the loop slept a
			// further second and applied the timeout check even after the
			// stream was active, so a zero/low Timeout panicked spuriously.
			break
		}
		time.Sleep(1 * time.Second)
		waited++
		if waited >= atMostSeconds {
			Log.Printf("component=kinesis-migrator fn=awaitActive stream=%s at=error error=exceeded-max-wait", *stream)
			panic("waited too long")
		}
	}
}
// panicWithError panics with a formatted message for AWS request
// failures, and with the raw error for everything else.
func panicWithError(err error) {
	ae, ok := err.(awserr.RequestFailure)
	if !ok {
		panic(err)
	}
	panic(fmt.Sprintf("aws error while migrating type=%q message=%q code=%d request-id=%q", ae.Code(), ae.Message(), ae.StatusCode(), ae.RequestID()))
}
|
/*
* @lc app=leetcode.cn id=445 lang=golang
*
* [445] 两数相加 II
*/
// @lc code=start
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
// package leetcode
//
//
// type ListNode struct{
// Val int
// Next *ListNode
// }
// addTwoNumbers_ recursively adds two equal-length lists digit by
// digit, storing the result in place in l1. It returns the carry out
// of the most significant processed digit and the (possibly rewired)
// head node.
func addTwoNumbers_(l1 *ListNode, l2 *ListNode) (int, *ListNode) {
	if l1 == nil || l2 == nil {
		return 0, nil
	}
	carry, rest := addTwoNumbers_(l1.Next, l2.Next)
	l1.Next = rest
	sum := carry + l1.Val + l2.Val
	l1.Val = sum % 10
	return sum / 10, l1
}
// addTwoNumbers adds two numbers stored most-significant-digit-first in
// linked lists. The shorter list is padded with leading zeros so both
// lists have equal length, then the recursive helper performs the
// addition; a final non-zero carry becomes a new head node.
func addTwoNumbers(l1 *ListNode, l2 *ListNode) *ListNode {
	length := func(n *ListNode) int {
		count := 0
		for ; n != nil; n = n.Next {
			count++
		}
		return count
	}
	pad := func(head *ListNode, zeros int) *ListNode {
		for ; zeros > 0; zeros-- {
			head = &ListNode{Val: 0, Next: head}
		}
		return head
	}
	head1, head2 := l1, l2
	len1, len2 := length(l1), length(l2)
	if len1 >= len2 {
		head2 = pad(head2, len1-len2)
	} else {
		head1 = pad(head1, len2-len1)
	}
	carry, head := addTwoNumbers_(head1, head2)
	if carry != 0 {
		head = &ListNode{Val: carry, Next: head}
	}
	return head
}
// @lc code=end
|
package main

import "sync"
// ConcurrencySafeMap is a map from string keys to byte slices that is
// safe for concurrent use by multiple goroutines.
//
// The previous channel-based implementation was broken: Get blocked
// until a concurrent Add happened to send on the shared channel, a
// background mux goroutine raced Get for that channel value, and
// Remove accessed the map with no synchronization at all (a data
// race). A mutex-guarded map provides the intended semantics.
type ConcurrencySafeMap struct {
	mu sync.RWMutex
	m  map[string][]byte
}

// NewConcurrencySafeMap returns an empty, ready-to-use map.
func NewConcurrencySafeMap() *ConcurrencySafeMap {
	return &ConcurrencySafeMap{
		m: make(map[string][]byte),
	}
}

// Add stores value under key, replacing any previous value.
func (cm *ConcurrencySafeMap) Add(key string, value []byte) {
	cm.mu.Lock()
	defer cm.mu.Unlock()
	cm.m[key] = value
}

// Get returns the value stored under key and whether it was present.
func (cm *ConcurrencySafeMap) Get(key string) ([]byte, bool) {
	cm.mu.RLock()
	defer cm.mu.RUnlock()
	b, ok := cm.m[key]
	if !ok {
		return nil, false
	}
	return b, true
}

// Remove deletes key from the map; it is a no-op if key is absent.
func (cm *ConcurrencySafeMap) Remove(key string) {
	cm.mu.Lock()
	defer cm.mu.Unlock()
	delete(cm.m, key)
}
|
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package i18n
import (
"fmt"
"os"
"strings"
"path"
"github.com/leonelquinteros/gotext"
"github.com/pkg/errors"
)
// loadSystemLanguage returns the supported translation language named
// by the LANG environment variable, falling back to defaultLanguage.
func loadSystemLanguage() string {
	lang := os.Getenv("LANG")
	if lang == "" {
		return defaultLanguage
	}
	// POSIX locale names usually follow the ll_CC.encoding syntax; only
	// the ll_CC prefix selects a translation.
	locale := strings.SplitN(lang, ".", 2)[0]
	if _, ok := supportedTranslations[locale]; ok {
		return locale
	}
	return defaultLanguage
}
// LoadTranslations loads translation files and sets the locale to
// the system locale. It should be called by the main program.
func LoadTranslations() (*gotext.Locale, error) {
	lang := loadSystemLanguage()
	SetLanguage(lang)
	dir := path.Join(defaultLocalDir, lang, defaultMessageDir)
	// gotext reads both the compiled (.mo) and source (.po) catalogs
	// for the domain from dir.
	translationFiles := []string{
		path.Join(dir, fmt.Sprintf("%s.mo", defaultDomain)),
		path.Join(dir, fmt.Sprintf("%s.po", defaultDomain)),
	}
	// Create the translation directory on first run.
	if _, err := os.Stat(dir); os.IsNotExist(err) {
		if err := os.MkdirAll(dir, 0700); err != nil {
			return nil, err
		}
	}
	// Write each missing catalog to disk from the embedded assets.
	// NOTE(review): Asset is presumably generated (bindata-style) —
	// confirm the asset names match these on-disk paths.
	for _, file := range translationFiles {
		if _, err := os.Stat(file); os.IsNotExist(err) {
			data, err := Asset(file)
			if err != nil {
				return nil, err
			}
			err = os.WriteFile(file, data, 0600)
			if err != nil {
				return nil, err
			}
		}
	}
	locale := gotext.NewLocale(defaultLocalDir, lang)
	// Initialize only fails for a nil locale, so its error is ignored here.
	Initialize(locale)
	return locale, nil
}
// Initialize is the translation initialization function shared by the main program and package.
func Initialize(locale *gotext.Locale) error {
	if locale != nil {
		locale.AddDomain(defaultDomain)
		return nil
	}
	return errors.New("Initialize expected locale but got nil")
}
// SetLanguage sets the program's current locale. If the language is not
// supported, then the default locale is used.
func SetLanguage(language string) {
if _, ok := supportedTranslations[language]; ok {
gotext.SetLanguage(language)
return
}
gotext.SetLanguage(defaultLanguage)
}
// GetLanguage queries the program's current locale.
func GetLanguage() string {
	return gotext.GetLanguage()
}

// Translator is a wrapper over gotext's Locale and provides interface to
// translate text string and produce translated error
type Translator struct {
	// Locale holds the loaded catalog; when nil the T/NT methods fall
	// back to plain fmt formatting of the message id.
	Locale *gotext.Locale
}
// T translates a text string, based on GNU's gettext library.
func (t *Translator) T(msgid string, vars ...interface{}) string {
	if t.Locale != nil {
		return t.Locale.GetD(defaultDomain, msgid, vars...)
	}
	// No locale loaded: fall back to plain formatting of the msgid.
	return fmt.Sprintf(msgid, vars...)
}
// NT translates a text string into the appropriate plural form, based on GNU's gettext library.
func (t *Translator) NT(msgid, msgidPlural string, n int, vars ...interface{}) string {
	if t.Locale != nil {
		return t.Locale.GetND(defaultDomain, msgid, msgidPlural, n, vars...)
	}
	// No locale loaded: fall back to formatting the plural msgid.
	return fmt.Sprintf(msgidPlural, vars...)
}
// Errorf produces an error with a translated error string.
func (t *Translator) Errorf(msgid string, vars ...interface{}) error {
	msg := t.T(msgid, vars...)
	return errors.New(msg)
}

// NErrorf produces an error with a translated error string in the appropriate plural form.
func (t *Translator) NErrorf(msgid, msgidPlural string, n int, vars ...interface{}) error {
	msg := t.NT(msgid, msgidPlural, n, vars...)
	return errors.New(msg)
}
|
/*
* Copyright © 2018-2022 Software AG, Darmstadt, Germany and/or its licensors
*
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package adatypes
import (
"bytes"
"encoding/binary"
"encoding/hex"
"fmt"
"math" //"encoding/binary"
"regexp"
"strconv"
"strings"
)
// ConstantIndicator marks the spot in a rewritten search expression
// where an extracted constant is referenced, e.g. #{1} for the first
// extracted constant.
const ConstantIndicator = "#"
// comparator enumerates the comparison operators usable in a search.
type comparator int

const (
	// EQ Equals value comparisons
	EQ comparator = iota
	// LT Less than comparisons
	LT
	// LE Less equal comparisons
	LE
	// GT Greater than comparisons
	GT
	// GE Greater equal comparisons
	GE
	// NE Not equal comparison
	NE
	// NONE No comparison (Not needed)
	NONE
)

// String returns the mnemonic of the comparator, the empty string for
// NONE, and "UNKNOWN" for any undefined value.
func (comp comparator) String() string {
	names := map[comparator]string{
		EQ: "EQ", LT: "LT", LE: "LE",
		GT: "GT", GE: "GE", NE: "NE",
		NONE: "",
	}
	if name, ok := names[comp]; ok {
		return name
	}
	return "UNKNOWN"
}
// logicBound enumerates the logical operators that can join search terms.
type logicBound int

const (
	// EMPTY Empty (not needed)
	EMPTY logicBound = iota
	// AND AND logic
	AND
	// OR Adabas OR logic
	OR
	// MOR Adabas OR logic with same descriptor
	MOR
	// RANGE Range for a value
	RANGE
	// NOT NOT logic
	NOT
)

// logicString maps each logicBound, in declaration order, to its display name.
var logicString = []string{"EMPTY", "AND", "OR", "MOR", "RANGE", "NOT"}

// String returns the display name of the logic operator.
func (logic logicBound) String() string {
	return logicString[int(logic)]
}

// logicAdabas maps each logicBound, in declaration order, to its
// Adabas search-buffer operator token.
var logicAdabas = []string{"", ",D", ",R", ",O", ",S", ",N"}

// sb returns the Adabas search-buffer representation of the operator.
func (logic logicBound) sb() string {
	return logicAdabas[int(logic)]
}
// ISearchNode interface for adding search tree or nodes into tree
type ISearchNode interface {
	// addNode attaches a child logic node.
	addNode(*SearchNode)
	// addValue attaches a leaf search value.
	addValue(*SearchValue)
	String() string
	// Platform returns the platform the search is built for.
	Platform() *Platform
}

// SearchInfo structure containing search parameters
type SearchInfo struct {
	// search is the expression with quoted constants replaced by #{n}.
	search string
	// constants holds the extracted quoted constants, 1-indexed via #{n}.
	constants []string
	platform  *Platform
	Definition *Definition
	// NeedSearch reports whether a real search call (vs. a pure
	// descriptor read) is required.
	NeedSearch bool
}

// SearchTree tree entry point
type SearchTree struct {
	platform *Platform
	// node is the root logic node; when nil, value holds a single term.
	node *SearchNode
	// value is the single search term of a one-term tree.
	value *SearchValue
	// uniqueDescriptors collects descriptor names used for ordering.
	uniqueDescriptors []string
}
// String provide string of search tree
func (tree *SearchTree) String() string {
	if tree.node == nil {
		return fmt.Sprintf("Tree by value: %s", tree.value.String())
	}
	return fmt.Sprintf("Tree by node: \n%s", tree.node.String())
}
// SearchBuffer returns search buffer of the search tree
func (tree *SearchTree) SearchBuffer() []byte {
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Create search buffer ...")
	}
	var buf bytes.Buffer
	switch {
	case tree.node != nil:
		tree.node.searchBuffer(&buf)
	default:
		tree.value.searchBuffer(&buf)
	}
	// Adabas search buffers are terminated by a period.
	buf.WriteRune('.')
	return buf.Bytes()
}
// ValueBuffer returns value buffer of the search tree
func (tree *SearchTree) ValueBuffer(buffer *bytes.Buffer) {
	// Trees rooted in a logic node delegate to the node.
	if tree.node != nil {
		tree.node.valueBuffer(buffer)
		return
	}
	// Single-value tree: serialize the value through a store helper.
	var intBuffer []byte
	helper := NewHelper(intBuffer, math.MaxInt8, endian())
	helper.search = true
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Tree value value buffer %s", tree.value.value.String())
	}
	tree.value.value.StoreBuffer(helper, nil)
	// Mainframe equality stores the value twice — apparently because the
	// ,S (range) search form used there needs both bounds; see the
	// matching duplication in SearchValue.searchBuffer.
	if tree.platform.IsMainframe() && tree.value.comp == EQ {
		_ = tree.value.value.StoreBuffer(helper, nil)
	}
	buffer.Write(helper.buffer)
	//buffer.Write(tree.value.value.Bytes())
}
// addNode installs node as the tree's root logic node and propagates
// the tree's platform to it.
func (tree *SearchTree) addNode(node *SearchNode) {
	node.platform = tree.platform
	tree.node = node
}

// addValue installs value as the tree's single search term and
// propagates the tree's platform to it.
func (tree *SearchTree) addValue(value *SearchValue) {
	value.platform = tree.platform
	tree.value = value
}
// OrderBy provide list of descriptor names for this search
// (collected previously by evaluateDescriptors).
func (tree *SearchTree) OrderBy() []string {
	return tree.uniqueDescriptors
}
// evaluateDescriptors splits fields into descriptors (collected into
// tree.uniqueDescriptors) and plain fields. It returns true when a real
// search call is needed: any non-descriptor field, or anything other
// than exactly one unique descriptor.
func (tree *SearchTree) evaluateDescriptors(fields map[string]bool) bool {
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Evaluate node descriptors")
	}
	needSearch := false
	for name, isDescriptor := range fields {
		if !isDescriptor {
			needSearch = true
			continue
		}
		tree.uniqueDescriptors = append(tree.uniqueDescriptors, name)
	}
	return needSearch || len(tree.uniqueDescriptors) != 1
}
// Platform returns current os platform
func (tree *SearchTree) Platform() *Platform {
	return tree.platform
}
// SearchFields provide list of field names for this search
func (tree *SearchTree) SearchFields() []string {
	var unique []string
	if tree.node == nil {
		// Single-value tree: at most one (descriptor) field name.
		if name := tree.value.orderBy(); name != "" {
			unique = append(unique, name)
		}
		return unique
	}
	// Deduplicate the node's field names, preserving first-seen order.
	seen := make(map[string]bool)
	for _, name := range tree.node.searchFields() {
		if seen[name] {
			continue
		}
		seen[name] = true
		unique = append(unique, name)
	}
	return unique
}
// SearchNode node entry in the searchtree
type SearchNode struct {
	platform *Platform
	// nodes are child logic nodes joined by logic.
	nodes []*SearchNode
	// values are leaf search terms joined by logic.
	values []*SearchValue
	// logic is the operator (AND/OR/MOR/RANGE/NOT) joining the children.
	logic logicBound
}
// addNode appends childNode to this node's children, propagating the
// node's platform.
func (node *SearchNode) addNode(childNode *SearchNode) {
	childNode.platform = node.platform
	node.nodes = append(node.nodes, childNode)
}

// addValue appends value to this node's leaf terms, propagating the
// node's platform.
func (node *SearchNode) addValue(value *SearchValue) {
	value.platform = node.platform
	node.values = append(node.values, value)
}
// String renders the node and its children for debugging.
func (node *SearchNode) String() string {
	var buffer bytes.Buffer
	if node == nil {
		return "ERROR nil node"
	}
	buffer.WriteString(" Nodes: " + node.logic.String() + "\n")
	for i, v := range node.values {
		buffer.WriteString(fmt.Sprintf(" Values: %d:%s", i, node.logic.String()))
		// NOTE(review): from the second entry on, the logic string is
		// written a second time here — possibly unintended, but the
		// output format is preserved as-is since it is debug-only.
		if i > 0 {
			buffer.WriteString(node.logic.String())
		}
		buffer.WriteString(fmt.Sprintf(" -> %d. value = %s\n", i, v.String()))
	}
	for i, n := range node.nodes {
		buffer.WriteString(fmt.Sprintf(" SubNode: %d:%s", i, node.logic.String()))
		if i > 0 {
			buffer.WriteString(node.logic.String())
		}
		buffer.WriteString(fmt.Sprintf(" \n-> %d. node = %s\n", i, n.String()))
	}
	buffer.WriteString(node.logic.String() + " end\n")
	return buffer.String()
}

// searchBuffer appends this node's search-buffer fragments to buffer.
// For AND/OR nodes the first child node is emitted before this node's
// own values; the remaining children follow after. Each value fragment
// is joined with the node's Adabas operator token (logic.sb()).
func (node *SearchNode) searchBuffer(buffer *bytes.Buffer) {
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Before node %s in %s", buffer.String(), node.logic.String())
	}
	// Emit the first child of AND/OR ahead of this node's values so the
	// operator joining below applies between that child and the values.
	if len(node.nodes) > 0 && (node.logic == AND || node.logic == OR) {
		node.nodes[0].searchBuffer(buffer)
	}
	for _, v := range node.values {
		// Prefix the operator only between fragments, never at the start.
		if buffer.Len() > 0 {
			buffer.WriteString(node.logic.sb())
		}
		v.searchBuffer(buffer)
	}
	for i, n := range node.nodes {
		// The first AND/OR child was already emitted above.
		if i > 0 || !(node.logic == AND || node.logic == OR) {
			// if buffer.Len() > 0 {
			// 	buffer.WriteString(n.logic.sb())
			// }
			n.searchBuffer(buffer)
		}
	}
	if Central.IsDebugLevel() {
		Central.Log.Debugf("After node %s in %s", buffer.String(), node.logic.String())
	}
}

// valueBuffer appends the serialized search values of this node (and
// its children) to buffer, mirroring the traversal order used by
// searchBuffer so search and value buffers stay aligned.
func (node *SearchNode) valueBuffer(buffer *bytes.Buffer) {
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Tree Node value buffer")
		Central.Log.Debugf("Values %d", len(node.values))
	}
	// Same first-child-first ordering as searchBuffer.
	if len(node.nodes) > 0 && (node.logic == AND || node.logic == OR) {
		node.nodes[0].valueBuffer(buffer)
	}
	for i, v := range node.values {
		var intBuffer []byte
		helper := NewHelper(intBuffer, math.MaxInt8, endian())
		helper.search = true
		if Central.IsDebugLevel() {
			Central.Log.Debugf("Tree value value buffer %s", v.value.String())
		}
		err := v.value.StoreBuffer(helper, nil)
		if Central.IsDebugLevel() {
			Central.Log.Debugf("Error store buffer: %v", err)
		}
		// Mainframe equality stores the value twice; see the matching
		// duplication in SearchValue.searchBuffer (the ,S range form).
		if node.platform.IsMainframe() && v.comp == EQ {
			err = v.value.StoreBuffer(helper, nil)
			if Central.IsDebugLevel() {
				Central.Log.Debugf("Error store buffer (MF): %v", err)
			}
		}
		buffer.Write(helper.buffer)
		if Central.IsDebugLevel() {
			Central.Log.Debugf("%d Len buffer %d", i, buffer.Len())
		}
	}
	for i, n := range node.nodes {
		if i > 0 || !(node.logic == AND || node.logic == OR) {
			n.valueBuffer(buffer)
		}
	}
}
// searchFields returns the field names referenced by this node and all
// of its children, child nodes first; duplicates are not removed here.
func (node *SearchNode) searchFields() []string {
	var fields []string
	for _, child := range node.nodes {
		fields = append(fields, child.searchFields()...)
	}
	for _, v := range node.values {
		if name := v.searchFields(); name != "" {
			fields = append(fields, name)
		}
	}
	return fields
}
// Platform returns current os platform
func (node *SearchNode) Platform() *Platform {
	return node.platform
}

// SearchValue value endpoint
type SearchValue struct {
	platform *Platform
	// field is the field name as written in the search expression.
	field string
	// adaType is the Adabas type of the referenced field.
	adaType IAdaType
	// value is the parsed comparison value.
	value IAdaValue
	// comp is the comparison operator applied to value.
	comp comparator
}
// String shows the current value of the search value
func (value *SearchValue) String() string {
	switch {
	case value == nil:
		return "nil"
	case value.value == nil:
		return fmt.Sprintf("%s %s undefined", value.field, value.comp.String())
	default:
		return fmt.Sprintf("%s %s %s(%d)", value.field, value.comp.String(),
			value.value.String(), value.value.Type().Length())
	}
}
// Platform returns current os platform
func (value *SearchValue) Platform() *Platform {
	return value.platform
}
// orderBy returns the field's type name when the field is a descriptor
// (or special descriptor) usable for ordering, and "" otherwise.
func (value *SearchValue) orderBy() string {
	debug := Central.IsDebugLevel()
	if debug {
		Central.Log.Debugf("Order by %s", value.adaType.Name())
	}
	fieldType := value.value.Type()
	if !fieldType.IsOption(FieldOptionDE) && !fieldType.IsSpecialDescriptor() {
		if debug {
			Central.Log.Debugf("Not a descriptor %s %T", value.adaType.Name(), value.value)
		}
		return ""
	}
	if debug {
		Central.Log.Debugf("Found descriptor %s", value.adaType.Name())
	}
	return fieldType.Name()
}
// searchFields returns the name of the field type this value refers to.
func (value *SearchValue) searchFields() string {
	return value.value.Type().Name()
}

// searchBuffer appends this value's search-buffer fragment to buffer,
// followed by the comparison operator when one is set.
func (value *SearchValue) searchBuffer(buffer *bytes.Buffer) {
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Before value %s", buffer.String())
	}
	// Remember where this value's fragment starts in the buffer.
	// NOTE(review): the +1 skips one separator byte when the buffer is
	// non-empty — this relies on FormatBuffer's output shape; confirm.
	curLen := buffer.Len()
	if curLen > 0 {
		curLen++
	}
	value.value.FormatBuffer(buffer, &BufferOption{StoreCall: true})
	if value.comp != NONE {
		if value.platform.IsMainframe() && value.comp == EQ {
			// Mainframe expresses equality as an ,S range by repeating the
			// field fragment just written; the value buffer stores the
			// value twice to match (see valueBuffer).
			buffer.WriteString(",S," + buffer.String()[curLen:])
		} else {
			buffer.WriteByte(',')
			buffer.WriteString(value.comp.String())
		}
	}
	if Central.IsDebugLevel() {
		Central.Log.Debugf("After value %s", buffer.String())
	}
}
// checkComparator maps a comparison-operator literal to its comparator
// constant; NONE is returned for unrecognized operators.
func checkComparator(comp string) comparator {
	operators := map[string]comparator{
		"=":  EQ,
		"==": EQ,
		"!=": NE,
		"<>": NE,
		"<=": LE,
		">=": GE,
		"<":  LT,
		">":  GT,
	}
	if c, ok := operators[comp]; ok {
		return c
	}
	return NONE
}
// NewSearchInfo new search info base to create search tree.
// It scans the search expression for single-quoted constants, moves
// each into searchInfo.constants, and replaces it in the expression
// with a #{n} placeholder (1-indexed), so later parsing never has to
// cope with quotes or escaped characters inside the expression.
func NewSearchInfo(platform *Platform, search string) *SearchInfo {
	searchInfo := SearchInfo{platform: platform, NeedSearch: false}
	searchString := search
	searchWithConstants := searchString
	debug := Central.IsDebugLevel()
	if debug {
		Central.Log.Debugf("Search constants: %s", searchWithConstants)
	}
	index := 1
	startConstants := strings.IndexByte(searchWithConstants, '\'')
	/*
	 * Extract constant values out of string. Please leave it at String
	 * because
	 * charset problems on different charsets
	 */
	for startConstants != -1 {
		endConstants := startConstants
		partStartConstants := startConstants
		searchWithConstants = searchString[partStartConstants+1:]
		if debug {
			Central.Log.Debugf("for: %s", searchWithConstants)
		}
		// Advance endConstants to the closing quote, skipping quotes that
		// are escaped with a backslash.
		for {
			if debug {
				Central.Log.Debugf("loop: %s", searchWithConstants)
			}
			endConstants = partStartConstants + strings.IndexByte(searchWithConstants, '\'') + 1
			searchWithConstants = searchWithConstants[endConstants-startConstants:]
			partStartConstants = endConstants
			if debug {
				Central.Log.Debugf("start: %d end:%d rest=%s", startConstants, endConstants, searchWithConstants)
				Central.Log.Debugf("Check %d", endConstants-startConstants-1)
			}
			if searchString[endConstants-1] != '\\' {
				break
			}
		}
		if debug {
			Central.Log.Debugf("after for: %s", searchWithConstants)
			Central.Log.Debugf("[%d,%d]",
				startConstants, endConstants)
		}
		searchWithConstants = searchString
		if debug {
			Central.Log.Debugf("Constant %s [%d,%d]", searchWithConstants[startConstants+1:endConstants],
				startConstants, endConstants)
		}
		// Register the constant (with escape backslashes stripped) and
		// splice the #{n} placeholder into the expression.
		constant := searchString[startConstants+1 : endConstants]
		constant = strings.Replace(constant, "\\\\", "", -1)
		if debug {
			Central.Log.Debugf("Register constant: %s", constant)
		}
		searchInfo.constants = append(searchInfo.constants, constant)
		searchString = searchWithConstants[0:startConstants] + ConstantIndicator + "{" + strconv.Itoa(index) + "}" + searchWithConstants[endConstants+1:]
		if debug {
			Central.Log.Debugf("New search constants: %s", searchString)
		}
		index++
		// Find the next unescaped opening quote, if any.
		for {
			startConstants = strings.IndexByte(searchString, '\'')
			if debug {
				Central.Log.Debugf("Current index: %d", startConstants)
			}
			if !(startConstants > 0 && searchString[startConstants-1] == '\\') {
				break
			}
		}
	}
	searchInfo.search = searchString
	if debug {
		Central.Log.Debugf("Result search formel: %s and %#v", searchString, searchInfo)
	}
	// return new SearchInfo(searchWithConstants,
	// constants.toArray(new String[0]))
	return &searchInfo
}
// GenerateTree generate tree search information
func (searchInfo *SearchInfo) GenerateTree() (*SearchTree, error) {
	Central.Log.Debugf("Generate search tree: %#v", searchInfo)
	tree := &SearchTree{platform: searchInfo.platform}
	fields := make(map[string]bool)
	if err := searchInfo.extractBinding(tree, searchInfo.search, fields); err != nil {
		return nil, err
	}
	// evaluateDescriptors must always run (it collects the descriptor
	// list); it may only switch NeedSearch from false to true.
	searchNeeded := tree.evaluateDescriptors(fields)
	if !searchInfo.NeedSearch {
		searchInfo.NeedSearch = searchNeeded
	}
	Central.Log.Debugf("Need search call: %v", searchInfo.NeedSearch)
	return tree, nil
}
// extractBinding recursively splits bind on AND/OR connectives, builds
// the corresponding SearchNode hierarchy under parentNode, and records
// in fields each referenced field name mapped to whether it is a
// descriptor. Leaf terms are delegated to extractComparator.
func (searchInfo *SearchInfo) extractBinding(parentNode ISearchNode, bind string, fields map[string]bool) (err error) {
	var node *SearchNode
	Central.Log.Debugf("Extract binding of: %s in parent Node: %s", bind, parentNode.String())
	// AND binds more loosely here: try AND first, then OR.
	binds := regexp.MustCompile(" AND | and ").Split(bind, -1)
	if len(binds) > 1 {
		Central.Log.Debugf("Found AND binds: %d", len(binds))
		node = &SearchNode{logic: AND, platform: parentNode.Platform()}
		searchInfo.NeedSearch = true
	} else {
		Central.Log.Debugf("Check or bindings")
		binds = regexp.MustCompile(" OR | or ").Split(bind, -1)
		if len(binds) > 1 {
			Central.Log.Debugf("Found OR binds: %d", len(binds))
			node = &SearchNode{logic: OR, platform: parentNode.Platform()}
			searchInfo.NeedSearch = true
		}
	}
	if node != nil {
		Central.Log.Debugf("Go through nodes")
		parentNode.addNode(node)
		subFields := make(map[string]bool)
		for _, bind := range binds {
			Central.Log.Debugf("Go through bind: %s", bind)
			err = searchInfo.extractBinding(node, bind, subFields)
			if err != nil {
				return
			}
		}
		// An OR over a single field becomes MOR (Adabas OR on the same
		// descriptor).
		if node.logic == OR && len(subFields) == 1 {
			node.logic = MOR
		}
		// Propagate the subtree's fields to the caller's map.
		for k, v := range subFields {
			fields[k] = v
		}
	} else {
		// No connective: bind is a single comparison term.
		Central.Log.Debugf("Go through value bind: %s", bind)
		err = searchInfo.extractComparator(bind, parentNode, fields)
		if err != nil {
			return
		}
	}
	return
}
// extractComparator parses a single comparison term (field <op> value),
// resolves the field's Adabas type and attaches the resulting value or
// range node to the given parent node. Descriptor usage is recorded in
// fields (field name -> is descriptor/special descriptor).
func (searchInfo *SearchInfo) extractComparator(search string, node ISearchNode, fields map[string]bool) (err error) {
	Central.Log.Debugf("Extract comparator %s", search)
	parameter := regexp.MustCompile("!=|=|<=|>=|<>|<|>").Split(search, -1)
	field := parameter[0]
	value := parameter[len(parameter)-1]
	lowerLevel := &SearchValue{field: field, platform: node.Platform()}
	Central.Log.Debugf("Field: %s Value: %s from %v", lowerLevel.field, value, parameter)
	/* Check for range information */
	if regexp.MustCompile(`^[\[\(].*:.*[\]\)]$`).MatchString(value) {
		/* Found range definition, will add lower and upper limit */
		Central.Log.Debugf("Range found")
		rangeNode := &SearchNode{logic: RANGE, platform: node.Platform()}
		/*
		 * Check for lower level and upper level comparator
		 * Mainframe don't like comparator in range
		 */
		var minimumRange comparator
		var maximumRange comparator
		if searchInfo.platform.IsMainframe() {
			minimumRange = NONE
			maximumRange = NONE
		} else {
			// '[' includes the lower bound, '(' excludes it.
			if value[0] == '[' {
				minimumRange = GE
			} else {
				minimumRange = GT
			}
			// ']' includes the upper bound, ')' excludes it.
			if value[len(value)-1] == ']' {
				maximumRange = LE
			} else {
				maximumRange = LT
			}
		}
		lowerLevel.comp = minimumRange
		/* Generate lower level value */
		columnIndex := strings.IndexByte(value, ':')
		startValue := value[1:columnIndex]
		Central.Log.Debugf("Search range start value %s %v", startValue, minimumRange)
		err = searchInfo.searchFieldValue(lowerLevel, startValue)
		if err != nil {
			return
		}
		rangeNode.addValue(lowerLevel)
		fields[lowerLevel.adaType.Name()] = lowerLevel.adaType.IsSpecialDescriptor() || lowerLevel.adaType.IsOption(FieldOptionDE)
		/* Generate upper level value */
		upperLevel := &SearchValue{field: strings.TrimSpace(field), comp: maximumRange, platform: node.Platform()}
		endValue := value[columnIndex+1 : len(value)-1]
		// Fixed: previously logged startValue here instead of endValue.
		Central.Log.Debugf("Search range end value: %s", endValue)
		err = searchInfo.searchFieldValue(upperLevel, endValue)
		if err != nil {
			return
		}
		/* On mainframe add NOT operator to exclude ranges */
		if searchInfo.platform.IsMainframe() {
			searchInfo.NeedSearch = true
			var notLowerLevel *SearchValue
			if value[0] == '(' {
				notLowerLevel = &SearchValue{field: strings.TrimSpace(field), comp: NONE, platform: node.Platform()}
				err = searchInfo.searchFieldValue(notLowerLevel, startValue)
				if err != nil {
					return
				}
				// Fixed: propagate the platform like every other node created here.
				notRangeNode := &SearchNode{logic: NOT, platform: node.Platform()}
				notRangeNode.addValue(notLowerLevel)
				rangeNode.addNode(notRangeNode)
			}
			if value[len(value)-1] == ')' {
				notUpperLevel := &SearchValue{field: strings.TrimSpace(field), comp: NONE, platform: node.Platform()}
				err = searchInfo.searchFieldValue(notUpperLevel, endValue)
				if err != nil {
					return
				}
				if notLowerLevel == nil {
					notRangeNode := &SearchNode{logic: NOT, platform: node.Platform()}
					notRangeNode.addValue(notUpperLevel)
					rangeNode.addNode(notRangeNode)
				} else {
					// Both bounds excluded: combine with AND and a not-equal check.
					notRangeNode := &SearchNode{logic: AND, platform: node.Platform()}
					notUpperLevel.comp = NE
					notRangeNode.addValue(notUpperLevel)
					rangeNode.addNode(notRangeNode)
				}
			}
		}
		rangeNode.addValue(upperLevel)
		node.addNode(rangeNode)
	} else {
		// No range, add common value with corresponding logic operator
		if len(field) > (len(search) - len(value)) {
			Central.Log.Debugf("FL %d sl=%d vl=%d", len(field),
				len(search), len(value))
			err = NewGenericError(170)
			return
		}
		// The comparator is whatever sits between field and value in the input.
		comparer := search[len(field) : len(search)-len(value)]
		Central.Log.Debugf("Comparer extracted: %s", comparer)
		lowerLevel.comp = checkComparator(comparer)
		if Central.IsDebugLevel() {
			Central.Log.Debugf("Search value: %#v", lowerLevel)
			Central.Log.Debugf("Value: %s", value)
		}
		err = searchInfo.searchFieldValue(lowerLevel, value)
		if err != nil {
			return
		}
		// A not-equal comparison forces a search call.
		if lowerLevel.comp == NE {
			searchInfo.NeedSearch = true
		}
		fields[lowerLevel.adaType.Name()] = lowerLevel.adaType.IsSpecialDescriptor() || lowerLevel.adaType.IsOption(FieldOptionDE)
		node.addValue(lowerLevel)
	}
	return
}
// searchFieldValue resolves the Adabas type for the search value's field
// name, copies it into the SearchValue (so search-specific length changes
// don't mutate the shared definition) and parses the given string value.
func (searchInfo *SearchInfo) searchFieldValue(searchValue *SearchValue, value string) (err error) {
	Central.Log.Debugf("Search for type %s", searchValue.field)
	adaType, xerr := searchInfo.Definition.SearchType(searchValue.field)
	if xerr != nil {
		// Fixed: previously logged the still-nil err instead of xerr.
		Central.Log.Debugf("Search error: %v", xerr)
		return xerr
	}
	// Work on a copy of the type instance.
	switch t := adaType.(type) {
	case *AdaType:
		xType := *t
		searchValue.adaType = &xType
	case *AdaSuperType:
		xType := *t
		searchValue.adaType = &xType
	default:
		// TODO(review): error code 0 looks like a placeholder — confirm.
		return NewGenericError(0)
	}
	if Central.IsDebugLevel() {
		Central.Log.Debugf("Search value type: %T (length=%d)", searchValue.adaType, searchValue.adaType.Length())
	}
	searchValue.value, err = searchValue.adaType.Value()
	if err != nil {
		return
	}
	return searchInfo.expandConstants(searchValue, value)
}
// expandConstants replaces every "#{n}" constant reference in value with
// the n-th (1-based) entry of searchInfo.constants and stores the result
// into searchValue.value. Plain values without a constant indicator are
// stored directly as strings; values mixing literal numeric/hex parts with
// constants are assembled as raw bytes and the value length is resized.
func (searchInfo *SearchInfo) expandConstants(searchValue *SearchValue, value string) (err error) {
	debug := Central.IsDebugLevel()
	if debug {
		Central.Log.Debugf("Expand constants %s", value)
	}
	expandedValue := value
	var buffer bytes.Buffer
	posIndicator := 0
	postIndicator := 0
	// Fast path: no constant indicator at all, keep the plain string value.
	if !strings.Contains(expandedValue, ConstantIndicator) {
		searchValue.value.SetStringValue(value)
		return
	}
	numPart := false
	for strings.Contains(expandedValue, ConstantIndicator) {
		if debug {
			Central.Log.Debugf("Work on expanded value %s", expandedValue)
		}
		// NOTE(review): if the indicator occurs without a following "{",
		// Index returns -1 and the slicing below misbehaves — presumably
		// inputs are always produced by the constant-extraction step; confirm.
		posIndicator = strings.Index(expandedValue, ConstantIndicator+"{")
		//posIndicator = strings.IndexByte(expandedValue, ConstantIndicator[0])
		constantString := expandedValue[posIndicator+2:]
		if debug {
			Central.Log.Debugf("Constant without indicator id: %s", constantString)
		}
		// Strip everything after the closing brace to isolate the index digits.
		constantString = regexp.MustCompile("}.*").ReplaceAllString(constantString, "")
		postIndicator = strings.IndexByte(expandedValue, '}') + 1
		if debug {
			Central.Log.Debugf("Constant id: %s pos=%d post=%d", constantString, posIndicator, postIndicator)
		}
		index, error := strconv.Atoi(constantString)
		if error != nil {
			err = error
			return
		}
		// Literal text in front of the constant is treated as a numeric part.
		if posIndicator > 0 {
			if debug {
				Central.Log.Debugf("Check numeric value %s", expandedValue[:posIndicator])
			}
			appendNumericValue(&buffer, expandedValue[:posIndicator])
			numPart = true
		}
		// Continue after the constant; constant indices are 1-based.
		expandedValue = expandedValue[postIndicator:]
		buffer.WriteString(searchInfo.constants[index-1])
		if debug {
			Central.Log.Debugf("Expand end=%s", expandedValue)
		}
	}
	if debug {
		// NOTE(review): this slices the original value, not expandedValue —
		// looks like it was meant to show the remaining tail; confirm.
		Central.Log.Debugf("Rest value=%s", value[postIndicator:])
	}
	// Trailing literal text after the last constant is also numeric.
	if expandedValue != "" {
		appendNumericValue(&buffer, expandedValue)
		numPart = true
	}
	if numPart {
		if debug {
			Central.Log.Debugf("Numeric part available ....")
		}
		// Raw byte content: resize the value to the assembled buffer size.
		searchValue.value.Type().SetLength(uint32(buffer.Len()))
		err = searchValue.value.SetValue(buffer.Bytes())
	} else {
		if debug {
			Central.Log.Debugf("No Numeric part available ....%s", string(expandedValue))
		}
		searchValue.value.Type().SetLength(uint32(buffer.Len()))
		searchValue.value.SetStringValue(buffer.String())
	}
	return
}
// appendNumericValue appends the binary representation of v to buffer.
// Values prefixed with "0x" are hex-decoded, with an optional "(n)" suffix
// repeating the decoded bytes n times; all other values are parsed as
// decimal integers and written little-endian with trailing zero bytes
// trimmed. An empty v appends nothing.
func appendNumericValue(buffer *bytes.Buffer, v string) {
	Central.Log.Debugf("Append numeric offset=%d v=%s\n", buffer.Len(), v)
	if v == "" {
		return
	}
	// Work on hexadecimal value
	if strings.HasPrefix(v, "0x") {
		multiplier := 1
		bm := strings.Index(v, "(")
		if bm > 0 {
			em := strings.Index(v, ")")
			Central.Log.Debugf("Multiplier %v", v[bm+1:em])
			var err error
			multiplier, err = strconv.Atoi(v[bm+1 : em])
			if err != nil {
				Central.Log.Debugf("Error multiplier %v", err)
				return
			}
		} else {
			// No multiplier: the hex digits run to the end of the string.
			bm = len(v)
		}
		Central.Log.Debugf("Range end %d %v", bm, v[2:bm])
		src := []byte(v[2:bm])
		Central.Log.Debugf("Append numeric %s\n", v[2:bm])
		dst := make([]byte, hex.DecodedLen(len(src)))
		n, err := hex.Decode(dst, src)
		if err != nil {
			// TODO(review): Fatal aborts the process on bad input; an error
			// return would be friendlier, but changes the signature.
			Central.Log.Fatal(err)
		}
		Central.Log.Debugf("Byte value %v\n", dst[:n])
		for i := 0; i < multiplier; i++ {
			buffer.Write(dst[:n])
		}
		return
	}
	// Decimal value, encoded little-endian.
	va, err := strconv.ParseInt(v, 10, 0)
	if err != nil {
		Central.Log.Fatal(err)
	}
	if va > math.MaxUint32 {
		// Fixed typo in the message ("greate then" -> "greater than").
		Central.Log.Fatal("value is greater than maximum")
		// TODO add error return
		return
	}
	if va > 0 {
		bs := make([]byte, 4)
		binary.LittleEndian.PutUint32(bs, uint32(va))
		// Trim trailing zero bytes so only significant bytes are appended.
		x := len(bs)
		for x > 0 && bs[x-1] == 0 {
			x--
		}
		buffer.Write(bs[:x])
		Central.Log.Debugf("Byte value -> offset=%d\n", buffer.Len())
	} else {
		buffer.WriteByte(0)
	}
}
// func (searchInfo *SearchInfo) extractBinarySearchNodeValue(value string, searchTreeNode *SearchValue) int {
// valuesTrimed := strings.TrimSpace(value)
// values := strings.Split(valuesTrimed, " ")
// var binaryValues [][]byte
// for _, part := range values {
// /* Check if parser constant found */
// if strings.Contains(part, ConstantIndicator) {
// var output bytes.Buffer
// restString := part
// Central.Log.Debugf("Work on part : %s", part)
// for {
// binaryInterpretation := false
// if regexp.MustCompile("^-?H#.*").MatchString(restString) {
// Central.Log.Debugf("Binary value found")
// binaryInterpretation = true
// }
// constantString := regexp.MustCompile(`[-H]*#\{`).ReplaceAllString(restString, "")
// constantString = regexp.MustCompile("}.*").ReplaceAllString(constantString, "")
// restString = regexp.MustCompile(`#\{[0-9]*\} *`).ReplaceAllString(restString, "")
// Central.Log.Debugf("Constant string : ",
// constantString)
// Central.Log.Debugf("Rest string : ", restString)
// intTrimed := strings.TrimSpace(constantString)
// index, err := strconv.Atoi(intTrimed)
// if err != nil {
// return -1
// }
// index--
// var binaryValue []byte
// if binaryInterpretation {
// binaryValue = []byte(searchInfo.constants[index])
// // } else {
// // // binaryValue = searchTreeNode.binaryValue( searchInfo.constants[index])
// }
// output.Write(binaryValue)
// if !strings.Contains(restString, ConstantIndicator) {
// break
// }
// }
// binaryValues = append(binaryValues, output.Bytes())
// // } else {
// // Central.Log.Debugf("Set value: ", value)
// // // binaryValues.add(searchTreeNode.binaryValue(part))
// }
// }
// // if len(values) > 1 {
// // Central.Log.Debugf("Print binary list: ")
// // // searchTreeNode.SetValue(binaryValues)
// // } else {
// // // searchTreeNode.SetValue(binaryValues.get(0))
// // }
// return 0
// }
|
package sandbox
import (
"unsafe"
"golang.org/x/sys/unix"
)
// setTimelimit applies a CPU-time limit in seconds to the process;
// exceeding it generates SIGXCPU.
func setTimelimit(pid int, timeLimit int64) error {
	rlimit := unix.Rlimit{
		Cur: uint64(timeLimit),
		Max: uint64(timeLimit),
	}
	return prLimit(pid, unix.RLIMIT_CPU, &rlimit)
}
// setMemLimit applies an address-space limit in bytes to the process;
// exceeding it generates SIGSEGV or ENOMEM.
func setMemLimit(pid int, memLimit int64) error {
	rlimit := unix.Rlimit{
		Cur: uint64(memLimit),
		Max: uint64(memLimit),
	}
	return prLimit(pid, unix.RLIMIT_AS, &rlimit)
}
// prLimit is the wrapper for the prlimit64 syscall, applying rlimit to
// the process identified by pid for the given resource.
func prLimit(pid int, limit uintptr, rlimit *unix.Rlimit) error {
	_, _, errno := unix.RawSyscall6(unix.SYS_PRLIMIT64,
		uintptr(pid),
		limit,
		uintptr(unsafe.Pointer(rlimit)),
		0, 0, 0)
	// Idiomatic early return instead of else-after-return.
	if errno != 0 {
		return errno
	}
	return nil
}
|
package csvreader
import (
"bytes"
)
// BOM_UTF8 is the UTF-8 byte order mark (EF BB BF).
var BOM_UTF8 = []byte{239, 187, 191}

// bomCheck strips a leading UTF-8 BOM from data, if present.
// Inputs shorter than the BOM (including nil) are returned unchanged;
// the previous version sliced data[:3] unconditionally and panicked on them.
func bomCheck(data []byte) []byte {
	if bytes.HasPrefix(data, BOM_UTF8) {
		return data[3:]
	}
	return data
}
|
package naive
import "testing"
// doc is a training document: a bag of words labeled with a class.
type doc struct {
	words []string
	class int
}
// check is one classification expectation: classifying words must yield
// one of classes, with the given tie flag.
type check struct {
	words []string
	classes []int
	tied bool
}
// test pairs training documents with the checks to run after training.
type test struct {
	docs []doc
	checks []check
}
// tests drives TestNaive: three single-class training docs over an
// overlapping vocabulary, plus checks including a deliberate tie on the
// shared word "beta".
var tests = []test{
	{
		docs: []doc{
			{
				words: []string{"alpha", "beta"},
				class: 0,
			}, {
				words: []string{"beta", "gamma"},
				class: 1,
			}, {
				words: []string{"gamma", "delta"},
				class: 2,
			},
		},
		checks: []check{
			{
				words: []string{"alpha"},
				classes: []int{0},
				tied: false,
			}, {
				words: []string{"alpha", "beta"},
				classes: []int{0},
				tied: false,
			}, {
				// "beta" appears in both class 0 and class 1 docs: a tie.
				words: []string{"beta"},
				classes: []int{0, 1},
				tied: true,
			}, {
				words: []string{"beta", "gamma"},
				classes: []int{1},
				tied: false,
			},
		},
	},
}
// TestNaive trains one classifier over each test's documents and then
// verifies Classify against every check.
func TestNaive(t *testing.T) {
	c := NewClassifier()
	for ti, tc := range tests {
		for _, d := range tc.docs {
			t.Logf("Train(%v, %d)", d.words, d.class)
			c.Train(d.words, d.class)
		}
		for ci, chk := range tc.checks {
			class, tied, scores := c.Classify(chk.words)
			// The returned class must be one of the acceptable classes.
			found := false
			for _, want := range chk.classes {
				if want == class {
					found = true
					break
				}
			}
			if !found {
				t.Errorf("Classify(%v) = %d (class), want value in %v; test %d, check %d",
					chk.words, class, chk.classes, ti, ci)
			} else if tied != chk.tied {
				t.Errorf("Classify(%v) = %t (tied), want value in %t; test %d, check %d",
					chk.words, tied, chk.tied, ti, ci)
			}
			t.Logf("Classify(%v) = %v (scores); test %d, check %d",
				chk.words, scores, ti, ci)
		}
	}
}
|
package main
/**
拥有最多糖果的孩子
给你一个数组 `candies` 和一个整数 `extraCandies` ,其中 `candies[i]` 代表第 `i` 个孩子拥有的糖果数目。
对每一个孩子,检查是否存在一种方案,将额外的 `extraCandies` 个糖果分配给孩子们之后,此孩子有 最多 的糖果。注意,允许有多个孩子同时拥有 最多 的糖果数目。
示例1:
```
输入:candies = [2,3,5,1,3], extraCandies = 3
输出:[true,true,true,false,true]
解释:
孩子 1 有 2 个糖果,如果他得到所有额外的糖果(3个),那么他总共有 5 个糖果,他将成为拥有最多糖果的孩子。
孩子 2 有 3 个糖果,如果他得到至少 2 个额外糖果,那么他将成为拥有最多糖果的孩子。
孩子 3 有 5 个糖果,他已经是拥有最多糖果的孩子。
孩子 4 有 1 个糖果,即使他得到所有额外的糖果,他也只有 4 个糖果,无法成为拥有糖果最多的孩子。
孩子 5 有 3 个糖果,如果他得到至少 2 个额外糖果,那么他将成为拥有最多糖果的孩子。
```
示例2:
```
输入:candies = [4,2,1,1,2], extraCandies = 1
输出:[true,false,false,false,false]
解释:只有 1 个额外糖果,所以不管额外糖果给谁,只有孩子 1 可以成为拥有糖果最多的孩子。
```
示例3:
```
输入:candies = [12,1,12], extraCandies = 10
输出:[true,false,true]
```
提示:
- `2 <= candies.length <= 100`
- `1 <= candies[i] <= 100`
- `1 <= extraCandies <= 50`
*/
// KidsWithCandies reports, for each kid, whether giving that kid all
// extraCandies would make their total the greatest (ties allowed).
// Assumes len(candies) >= 1, per the stated problem constraints.
func KidsWithCandies(candies []int, extraCandies int) []bool {
	// Find the current maximum (self-contained, no helper needed).
	maxVal := candies[0]
	for _, c := range candies[1:] {
		if c > maxVal {
			maxVal = c
		}
	}
	ans := make([]bool, len(candies))
	for i, c := range candies {
		// Direct boolean assignment replaces the if/else true/false pattern;
		// also fixes the "lenght" local-name typo.
		ans[i] = c+extraCandies >= maxVal
	}
	return ans
}
// max returns the larger of two ints.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
|
package main
import (
"fmt"
"os"
"os/signal"
"syscall"
"github.com/Bumbodosan/Gummi-Flying-Machine/bot"
"github.com/joho/godotenv"
)
// main loads configuration from the environment (optionally via .env),
// starts the Discord bot and blocks until a termination signal arrives.
func main() {
	b := &bot.Bot{}
	// Best effort: a missing .env file is fine as long as TOKEN is set
	// directly in the environment (checked below).
	godotenv.Load()
	if os.Getenv("TOKEN") == "" {
		fmt.Println("Missing token.")
		os.Exit(-1)
	}
	b.Token = os.Getenv("TOKEN")
	b.Prefix = os.Getenv("PREFIX")
	if b.Prefix == "" {
		b.Prefix = "!"
	}
	if err := b.Start(); err != nil {
		fmt.Println(err)
		os.Exit(-1)
	}
	fmt.Println("Gummi Flying Machine is in the skies! 🛩️")
	// Wait for a termination signal. SIGKILL cannot be caught or ignored,
	// so registering it was a no-op and has been removed.
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM)
	fmt.Println(<-ch, "Exiting...")
	if err := b.Stop(); err != nil {
		fmt.Println(err)
		os.Exit(-1)
	}
}
|
package gsysint
import "testing"
// TestMutex verifies a basic Lock/Unlock round trip on a fresh Mutex.
func TestMutex(t *testing.T) {
	l := &Mutex{}
	Lock(l)
	Unlock(l)
}
func BenchmarkMutexUncontended(b *testing.B) {
l := &Mutex{}
for i := 0; i < b.N; i ++ {
Lock(l)
Unlock(l)
}
} |
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import "github.com/oam-dev/kubevela/pkg/oam"
const (
	// KubeVelaName is the product name of KubeVela.
	KubeVelaName = "kubevela"
	// VelaCoreName is the name of the vela-core controller.
	VelaCoreName = "vela-core"
)
const (
	// DefaultKubeVelaReleaseName defines the default name of the KubeVela Helm release.
	DefaultKubeVelaReleaseName = "kubevela"
	// DefaultKubeVelaChartName defines the default chart name of KubeVela, this variable MUST align to the chart name of this repo
	DefaultKubeVelaChartName = "vela-core"
	// DefaultKubeVelaVersion defines the default version needed for KubeVela chart
	DefaultKubeVelaVersion = ">0.0.0-0"
	// DefaultEnvName defines the default environment name for Apps created by KubeVela
	DefaultEnvName = "default"
	// DefaultAppNamespace defines the default K8s namespace for Apps created by KubeVela
	DefaultAppNamespace = "default"
	// AutoDetectWorkloadDefinition defines the default workload type for ComponentDefinition which doesn't specify a workload
	AutoDetectWorkloadDefinition = "autodetects.core.oam.dev"
	// KubeVelaControllerDeployment defines the KubeVela controller's deployment name
	KubeVelaControllerDeployment = "kubevela-vela-core"
)
// DefaultKubeVelaNS defines the default KubeVela namespace in Kubernetes
var DefaultKubeVelaNS = "vela-system"
const (
	// AnnoDefinitionDescription is the annotation which describe what is the capability used for in a WorkloadDefinition/TraitDefinition Object
	AnnoDefinitionDescription = "definition.oam.dev/description"
	// AnnoDefinitionExampleURL is the annotation which describes the URL of usage examples of the capability; it is used during documentation generation.
	AnnoDefinitionExampleURL = "definition.oam.dev/example-url"
	// AnnoDefinitionAlias is the annotation for definition alias
	AnnoDefinitionAlias = "definition.oam.dev/alias"
	// AnnoDefinitionIcon is the annotation which describe the icon url
	AnnoDefinitionIcon = "definition.oam.dev/icon"
	// AnnoDefinitionAppliedWorkloads is the annotation which describe what is the workloads used for in a TraitDefinition Object
	AnnoDefinitionAppliedWorkloads = "definition.oam.dev/appliedWorkloads"
	// LabelDefinition is the label for definition
	LabelDefinition = "definition.oam.dev"
	// LabelDefinitionName is the label for definition name
	LabelDefinitionName = "definition.oam.dev/name"
	// LabelDefinitionDeprecated is the label which describe whether the capability is deprecated
	LabelDefinitionDeprecated = "custom.definition.oam.dev/deprecated"
	// LabelDefinitionHidden is the label which describe whether the capability is hidden by UI
	LabelDefinitionHidden = "custom.definition.oam.dev/ui-hidden"
	// LabelNodeRoleGateway gateway role of node
	LabelNodeRoleGateway = "node-role.kubernetes.io/gateway"
	// LabelNodeRoleWorker worker role of node
	LabelNodeRoleWorker = "node-role.kubernetes.io/worker"
	// AnnoIngressControllerHTTPSPort define ingress controller listen port for https
	AnnoIngressControllerHTTPSPort = "ingress.controller/https-port"
	// AnnoIngressControllerHTTPPort define ingress controller listen port for http
	AnnoIngressControllerHTTPPort = "ingress.controller/http-port"
	// AnnoIngressControllerHost defines the ingress controller's externally reachable host
	AnnoIngressControllerHost = "ingress.controller/host"
	// LabelConfigType is the label marked as the template that generated the config.
	LabelConfigType = "config.oam.dev/type"
	// LabelConfigCatalog is the label marked as the secret generated from the config.
	LabelConfigCatalog = "config.oam.dev/catalog"
	// LabelConfigSubType is the sub-type for a config type
	LabelConfigSubType = "config.oam.dev/sub-type"
	// LabelConfigProject is the label for config project
	LabelConfigProject = "config.oam.dev/project"
	// LabelConfigSyncToMultiCluster is the label to decide whether a config will be synchronized to multi-cluster
	LabelConfigSyncToMultiCluster = "config.oam.dev/multi-cluster"
	// LabelConfigIdentifier is the label for config identifier
	LabelConfigIdentifier = "config.oam.dev/identifier"
	// LabelConfigScope is the label for config scope
	LabelConfigScope = "config.oam.dev/scope"
	// AnnotationConfigSensitive is the annotation for the sensitization
	AnnotationConfigSensitive = "config.oam.dev/sensitive"
	// AnnotationConfigTemplateNamespace is the annotation for the template namespace
	AnnotationConfigTemplateNamespace = "config.oam.dev/template-namespace"
	// AnnotationConfigDescription is the annotation for config description
	AnnotationConfigDescription = "config.oam.dev/description"
	// AnnotationConfigAlias is the annotation for config alias
	AnnotationConfigAlias = "config.oam.dev/alias"
	// AnnotationConfigDistributionSpec is the annotation key of the application that distributes the configs
	AnnotationConfigDistributionSpec = "config.oam.dev/distribution-spec"
)
const (
	// StatusDeployed represents the App was deployed
	StatusDeployed = "Deployed"
	// StatusStaging represents the App was changed locally and its spec differs from the deployed one, or it is not deployed at all
	StatusStaging = "Staging"
)
// Config contains key/value pairs
type Config map[string]string
// EnvMeta stores the namespace for app environment
type EnvMeta struct {
	Name string `json:"name"` // environment name
	Namespace string `json:"namespace"` // backing K8s namespace
	Labels string `json:"labels"`
	Current string `json:"current"`
}
// CLI command grouping tags and category names.
const (
	// TagCommandType used for tag cli category
	TagCommandType = "commandType"
	// TagCommandOrder defines the order
	TagCommandOrder = "commandOrder"
	// TypeStart defines one category
	TypeStart = "Getting Started"
	// TypeApp defines one category
	TypeApp = "Managing Applications"
	// TypeCD defines workflow Management operations
	TypeCD = "Continuous Delivery"
	// TypeExtension defines one category
	TypeExtension = "Managing Extensions"
	// TypeSystem defines one category
	TypeSystem = "System Tools"
	// TypeAuxiliary defines auxiliary commands
	TypeAuxiliary = "Auxiliary Tools"
	// TypePlatform defines platform management commands
	TypePlatform = "Managing Platform"
	// TypeLegacy defines legacy commands
	TypeLegacy = "Legacy Commands"
)
// LabelArg is the argument `label` of a definition
const LabelArg = "label"
// DefaultFilterAnnots are annotations that won't pass to workload or trait
var DefaultFilterAnnots = []string{
	oam.AnnotationInplaceUpgrade,
	oam.AnnotationFilterLabelKeys,
	oam.AnnotationFilterAnnotationKeys,
	oam.AnnotationLastAppliedConfiguration,
}
// ConfigType is the type of config
type ConfigType string
const (
	// TerraformProvider is the config type for terraform provider
	TerraformProvider = "terraform-provider"
	// DexConnector is the config type for dex connector
	DexConnector = "dex-connector"
	// ImageRegistry is the config type for image registry
	ImageRegistry = "image-registry"
	// HelmRepository is the config type for Helm chart repository
	HelmRepository = "helm-repository"
	// CatalogConfigDistribution is the catalog type
	CatalogConfigDistribution = "config-distribution"
)
const (
	// TerraformComponentPrefix is the prefix of component type of terraform-xxx
	TerraformComponentPrefix = "terraform-"
	// ProviderAppPrefix is the prefix of the application to create a Terraform Provider
	ProviderAppPrefix = "config-terraform-provider"
	// ProviderNamespace is the namespace of Terraform Cloud Provider
	ProviderNamespace = "default"
	// VelaCoreConfig is to mark application, config and its secret or Terraform provider belong to a KubeVela config
	VelaCoreConfig = "velacore-config"
)
const (
	// LabelSourceOfTruth describes the source of this app
	LabelSourceOfTruth = "app.oam.dev/source-of-truth"
	// FromCR means the data source of truth is from k8s CR
	FromCR = "from-k8s-resource"
	// FromUX means the data source of truth is from velaux data store
	FromUX = "from-velaux"
	// FromInner means the data source of truth is from KubeVela inner usage
	// the configuration that don't want to be synced
	// the addon application should be synced, but set to readonly mode
	FromInner = "from-inner-system"
)
|
package util
import (
"fmt"
packetconfigv1 "github.com/packethost/cluster-api-provider-packet/pkg/apis/packetprovider/v1alpha1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/json"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
"sigs.k8s.io/yaml"
)
const (
	// machineUIDTag is the tag prefix identifying the machine UID on a device.
	machineUIDTag = "cluster-api-provider-packet:machine-uid"
	// clusterIDTag is the tag prefix identifying the owning cluster ID on a device.
	clusterIDTag = "cluster-api-provider-packet:cluster-id"
	// MasterTag marks a node with the Kubernetes master role.
	MasterTag = "kubernetes.io/role:master"
	// WorkerTag marks a node with the Kubernetes worker role.
	WorkerTag = "kubernetes.io/role:node"
	// ControlPort is the control-plane port (6443, the conventional kube-apiserver port).
	ControlPort = 6443
	// AnnotationUID is the annotation key carrying the machine UID.
	AnnotationUID = "cluster.k8s.io/machine-uid"
)
// MachineProviderFromProviderConfig decodes the raw provider spec into a
// PacketMachineProviderConfig.
func MachineProviderFromProviderConfig(providerConfig clusterv1.ProviderSpec) (*packetconfigv1.PacketMachineProviderConfig, error) {
	config := &packetconfigv1.PacketMachineProviderConfig{}
	if err := yaml.Unmarshal(providerConfig.Value.Raw, config); err != nil {
		return nil, err
	}
	return config, nil
}
// ClusterProviderFromProviderConfig decodes the raw provider spec into a
// PacketClusterProviderSpec.
func ClusterProviderFromProviderConfig(providerConfig clusterv1.ProviderSpec) (*packetconfigv1.PacketClusterProviderSpec, error) {
	config := &packetconfigv1.PacketClusterProviderSpec{}
	if err := yaml.Unmarshal(providerConfig.Value.Raw, config); err != nil {
		return nil, err
	}
	return config, nil
}
// ClusterProviderConfigFromProvider encodes the given cluster provider
// spec into a generic clusterv1.ProviderSpec.
func ClusterProviderConfigFromProvider(config *packetconfigv1.PacketClusterProviderSpec) (clusterv1.ProviderSpec, error) {
	raw, err := json.Marshal(config)
	if err != nil {
		return clusterv1.ProviderSpec{}, err
	}
	return clusterv1.ProviderSpec{
		Value: &runtime.RawExtension{Raw: raw},
	}, nil
}
// GenerateMachineTag builds the device tag embedding the given machine UID.
func GenerateMachineTag(ID string) string {
	return fmt.Sprintf("%s:%s", machineUIDTag, ID)
}
// GenerateClusterTag builds the device tag embedding the given cluster ID.
func GenerateClusterTag(ID string) string {
	return fmt.Sprintf("%s:%s", clusterIDTag, ID)
}
// ItemInList reports whether item occurs in list.
func ItemInList(list []string, item string) bool {
	for i := range list {
		if list[i] == item {
			return true
		}
	}
	return false
}
// ItemsInList reports whether every element of items appears in list.
func ItemsInList(list []string, items []string) bool {
	// Track which required items have been seen so far.
	seen := make(map[string]bool, len(items))
	for _, it := range items {
		seen[it] = false
	}
	// Flip each required item to true when it shows up in list.
	for _, candidate := range list {
		if _, required := seen[candidate]; required {
			seen[candidate] = true
		}
	}
	// All required items must have been found.
	for _, found := range seen {
		if !found {
			return false
		}
	}
	return true
}
|
package editor
import (
"github.com/gdamore/tcell"
"github.com/rivo/tview"
"strconv"
)
// GotoLine is a small modal primitive that reads a line number from the
// user and jumps the editor view to it.
type GotoLine struct {
	*tview.Box
	*Editor
	input string // digits typed so far
	res int // parsed target line (1-based user input)
}
// NewGotoLine returns a new go-to-line primitive bound to this editor.
// (Previous comment wrongly referred to "NewView ... view view".)
func (e *Editor) NewGotoLine() *GotoLine {
	return &GotoLine{
		Box: tview.NewBox().SetBorder(false),
		Editor: e,
	}
}
// Draw draws this primitive onto the screen: a bordered box with a
// "Line: " prompt followed by the digits typed so far.
func (g *GotoLine) Draw(screen tcell.Screen) {
	_, bg, _ := defaultStyle.Decompose()
	g.Box.SetBackgroundColor(bg).SetTitle(" Go to line ").SetTitleAlign(tview.AlignLeft).SetBorder(true).Draw(screen)
	offx := g.drawText(screen, "Line: ", 0, defaultStyle.Foreground(tcell.ColorLightCyan))
	// The returned offset after the input is not needed; discarding it
	// fixes the unused reassignment flagged by staticcheck (SA4006).
	g.drawText(screen, g.input, offx, defaultStyle.Background(tcell.ColorDarkCyan).Foreground(tcell.ColorYellow))
}
// drawText renders text starting at offsetX within the inner rect and
// returns the x offset just past the last drawn cell.
// NOTE(review): the range index x is a byte offset, not a rune count; for
// the ASCII prompt and digit input used here the two coincide, but
// non-ASCII text would mis-position — confirm before reusing elsewhere.
func (g *GotoLine) drawText(screen tcell.Screen, text string, offsetX int, style tcell.Style) (offset int) {
	for x, r := range text {
		offset = g.draw(screen, x+offsetX, r, style)
	}
	return offset
}
// draw puts one rune at column x of the inner rect and returns the next column.
func (g *GotoLine) draw(screen tcell.Screen, x int, r rune, style tcell.Style) int {
	left, top, _, _ := g.Box.GetInnerRect()
	screen.SetContent(left+x, top, r, nil, style)
	return x + 1
}
// InputHandler handles key events for the go-to-line dialog: digit runes
// are appended to the input, Backspace deletes, Esc cancels and Enter
// jumps to the entered (1-based) line.
func (g *GotoLine) InputHandler() func(event *tcell.EventKey, setFocus func(p tview.Primitive)) {
	return g.WrapInputHandler(func(key *tcell.EventKey, setFocus func(p tview.Primitive)) {
		switch key.Key() {
		case tcell.KeyEsc:
			// Cancel: hide the dialog and return focus to the editor view.
			g.pages.HidePage("gotoLine")
			g.focusView()
		case tcell.KeyRune:
			r := key.Rune()
			// Accept only digits (Atoi fails for anything else).
			_, e := strconv.Atoi(string(r))
			if e != nil {
				break
			}
			// Limit the typed number to 12 digits.
			if len(g.input) < 12 {
				g.input = g.input + string(r)
			}
		case tcell.KeyBackspace2:
			if len(g.input) > 0 {
				g.input = g.input[:len(g.input)-1]
			}
		case tcell.KeyEnter:
			i, e := strconv.Atoi(g.input)
			if e != nil {
				// Empty or invalid input: just close the dialog.
				g.pages.HidePage("gotoLine")
				g.focusView()
				return
			}
			g.res = i
			g.pages.HidePage("gotoLine")
			g.focusView()
			// GoToLine is 0-based; the user input is 1-based.
			g.view.dataView[g.view.curViewID].GoToLine(g.res - 1)
			g.input = ""
			g.res = 0
		}
	})
}
|
package disk
import (
"encoding/binary"
"math"
"os"
"strings"
)
// On-disk layout constants. "Sb" prefixes superblock fields (offsets and
// sizes in bytes), "Fat" the file allocation table, and "RootEntry" the
// root directory entry layout.
const (
	SbSig = "NEWFATFS"
	BlockSize = 4096
	SbSigSize = 8
	SbBlockCtOffset = 0x08
	SbBlockCtSize = 2
	SbRootDirIndOffset = 0x0A
	SbRootDirIndSize = 2
	SbDataStartIndOffset = 0x0C
	SbDataStartIndSize = 2
	SbDataBlockCtOffset = 0x0E
	SbDataBlockCtSize = 2
	SbFatBlockCtOffset = 0x10
	SbFatBlockCtSize = 1
	SbPaddSize = 4079
	SbPaddOffset = 0x11
	FatEoc = 0xFFFF // End-Of-Chain marker in the FAT
	FatEntrySize = 2
	FatEntryUnused = 0 // free FAT entry sentinel
	RootEntrySize = 32
	RootEntryFilenameSize = 16
	RootEntrySizeFieldSize = 4
	RootEntryStartBlockSize = 2
)
// Disk models a FAT-style filesystem backed by a single host file:
// a superblock, a FAT region, a root directory block, and data blocks.
type Disk struct {
	fd *os.File // file descriptor for disk file
	sig string // filesystem signature
	blockCt int // total disk blocks
	rootDirInd int // block index of the root directory
	dataStartInd int // disk block index of first data block
	dataBlockCt int // number of data blocks on disk
	fatBlockCt int // number of blocks used to store FAT
	open map[string]bool // map of all open files
}
// New makes a new disk backed by filename and initializes its filesystem.
// Scope: exported
func New(filename string, dataBlocks int) (Disk, error) {
	d, err := createDisk(filename, dataBlocks)
	if err != nil {
		return d, err
	}
	if err = d.initFS(); err != nil {
		// Don't leak the half-initialized backing file: close the
		// descriptor and remove the file before reporting the error.
		d.fd.Close()
		os.Remove(filename)
		return Disk{}, err
	}
	return d, nil
}
// Mount loads an existing disk file and returns the associated structure.
// Scope: exported
func Mount(filename string) (Disk, error) {
	if len(filename) == 0 {
		return Disk{}, InvalidFilenameError(unknown)
	}
	// Open disk file; fd is nil on error, so there is nothing to close.
	fd, err := os.Open(filename)
	if err != nil {
		return Disk{}, err
	}
	// Initialize the open-file map: previously it was left nil, so the
	// first Create/Open on a mounted disk panicked writing to a nil map.
	d := Disk{fd: fd, open: make(map[string]bool)}
	if err = d.readSuperblock(); err != nil {
		fd.Close()
		return Disk{}, err
	}
	return d, nil
}
// Create allocates a FAT chain and a root directory entry for a new
// file and returns its handle, already marked open.
func (d *Disk) Create(filename string) (File, error) {
	// Claim a free data block chain in the FAT.
	blockInd, err := d.initFatChain()
	if err != nil {
		return File{}, err
	}
	// Record the file in the root directory.
	rootInd, err := d.initRootEntry(filename, blockInd)
	if err != nil {
		return File{}, err
	}
	// Mark the file as open.
	d.open[filename] = true
	f := File{
		name: filename,
		disk: d,
		desc: rootInd,
	}
	return f, nil
}
// Open opens the file with the given filename, if not already open.
// Returns: (File structure, any error that occurred)
func (d *Disk) Open(filename string) (File, error) {
	if d.checkIsOpen(filename) {
		return File{}, FileAlreadyInUseError(unknown)
	}
	f := File{
		name: filename,
		disk: d,
	}
	// Populate descriptor/size fields from the root directory entry.
	if err := d.loadRootEntry(&f); err != nil {
		return File{}, err
	}
	// Only mark the file open once loading succeeded.
	d.open[filename] = true
	return f, nil
}
// createDisk instantiates a new disk struct and creates the backing file.
// Scope: internal
func createDisk(filename string, dataBlocks int) (Disk, error) {
	if filename == "" {
		return Disk{}, InvalidFilenameError(unknown)
	}
	fd, err := os.Create(filename)
	if err != nil {
		os.Remove(filename)
		return Disk{}, err
	}
	d := Disk{
		fd: fd,
		dataBlockCt: dataBlocks,
		open: make(map[string]bool),
	}
	return d, nil
}
// initFS zero-fills the entire disk file and writes the superblock.
// Scope: internal
func (d *Disk) initFS() error {
	// FAT size in blocks, then total = superblock + root dir + FAT + data.
	fatBlks := int(math.Ceil((FatEntrySize * float64(d.dataBlockCt)) / BlockSize))
	totalBlks := 2 + fatBlks + d.dataBlockCt
	if _, err := d.fd.Write(make([]byte, totalBlks*BlockSize)); err != nil {
		return err
	}
	return d.initSuperblock()
}
// Initializes the superblock, called by initFS(): computes the disk
// geometry, encodes it little-endian into a BlockSize buffer and writes
// it at offset 0.
// Scope: internal
func (d *Disk) initSuperblock() error {
	// (2 bytes per FAT Entry) * (Num FAT Entries) / (Num bytes per block)
	numFatBlks := int(math.Ceil((FatEntrySize * float64(d.dataBlockCt)) / BlockSize))
	// 1 block for superblock + 1 block for root directory + FAT + data
	numBlks := 2 + numFatBlks + d.dataBlockCt
	// initialize superblock byte slice and extract subslices for each section
	superblock := make([]byte, BlockSize)
	sig := superblock[:SbSigSize]
	blockCt := superblock[SbBlockCtOffset:(SbBlockCtOffset + SbBlockCtSize)]
	rootDirInd := superblock[SbRootDirIndOffset:(SbRootDirIndOffset + SbRootDirIndSize)]
	dataStartInd := superblock[SbDataStartIndOffset:(SbDataStartIndOffset + SbDataStartIndSize)]
	dataBlockCt := superblock[SbDataBlockCtOffset:(SbDataBlockCtOffset + SbDataBlockCtSize)]
	fatBlockCt := superblock[SbFatBlockCtOffset:(SbFatBlockCtOffset + SbFatBlockCtSize)]
	// calculate values and store in disk structure
	// Layout: block 0 superblock, blocks 1..numFatBlks FAT,
	// then root directory, then data.
	d.sig = SbSig
	d.blockCt = numBlks
	d.rootDirInd = 1 + numFatBlks
	d.dataStartInd = 2 + numFatBlks
	d.fatBlockCt = numFatBlks
	// write data to each subslice (multi-byte fields are little-endian)
	copy(sig, d.sig)
	binary.LittleEndian.PutUint16(blockCt, uint16(d.blockCt))
	binary.LittleEndian.PutUint16(rootDirInd, uint16(d.rootDirInd))
	binary.LittleEndian.PutUint16(dataStartInd, uint16(d.dataStartInd))
	binary.LittleEndian.PutUint16(dataBlockCt, uint16(d.dataBlockCt))
	fatBlockCt[0] = byte(d.fatBlockCt)
	// write byte slice to beginning of disk file
	var offset int64 = 0
	_, err := d.fd.WriteAt(superblock, offset)
	if err != nil {
		return err
	}
	return nil
}
// readSuperblock loads the filesystem metadata from block 0 into the Disk
// struct, mirroring the layout written by initSuperblock.
func (d *Disk) readSuperblock() error {
	block := make([]byte, BlockSize)
	if _, err := d.fd.ReadAt(block, 0); err != nil {
		return err
	}
	// decode each field from its fixed offset (little-endian)
	d.sig = string(block[:SbSigSize])
	d.blockCt = int(binary.LittleEndian.Uint16(block[SbBlockCtOffset : SbBlockCtOffset+SbBlockCtSize]))
	d.rootDirInd = int(binary.LittleEndian.Uint16(block[SbRootDirIndOffset : SbRootDirIndOffset+SbRootDirIndSize]))
	d.dataStartInd = int(binary.LittleEndian.Uint16(block[SbDataStartIndOffset : SbDataStartIndOffset+SbDataStartIndSize]))
	d.dataBlockCt = int(binary.LittleEndian.Uint16(block[SbDataBlockCtOffset : SbDataBlockCtOffset+SbDataBlockCtSize]))
	d.fatBlockCt = int(block[SbFatBlockCtOffset])
	return nil
}
// initFatChain locates the first free FAT entry, writes the End-Of-Chain
// value to it, and returns its byte index within the FAT region
// (NOTE(review): this is a byte offset, not an entry index — callers may
// need i/FatEntrySize; confirm against usage).
// Returns FullDiskError when every entry is in use.
//
// Fix: the original discarded the errors from ReadAt/WriteAt, so an I/O
// failure could silently corrupt or mis-report the FAT state.
func (d *Disk) initFatChain() (int, error) {
	fatBuff := make([]byte, d.fatBlockCt*BlockSize)
	offset := int64(BlockSize) // the FAT begins right after the superblock
	if _, err := d.fd.ReadAt(fatBuff, offset); err != nil {
		return 0, err
	}
	for i := 0; i < len(fatBuff); i += FatEntrySize {
		fatEntry := fatBuff[i : i+FatEntrySize]
		fatVal := binary.LittleEndian.Uint16(fatEntry)
		// find unused fat entry (i.e. has value 0)
		if fatVal == FatEntryUnused {
			binary.LittleEndian.PutUint16(fatEntry, FatEoc)
			if _, err := d.fd.WriteAt(fatBuff, offset); err != nil {
				return 0, err
			}
			return i, nil
		}
	}
	return 0, FullDiskError{}
}
// initRootEntry writes a new root directory entry for the specified file, if space is available
// Returns: (byte index of entry in directory, any error encountered)
// Scope: Internal
//
// Fixes: (1) the original referenced the undefined identifier `unknown`
// (compile error) — the filename is now passed to FileAlreadyExistsError;
// (2) the duplicate check compared the NUL-padded raw name bytes against
// the unpadded filename, which could never match — padding is now trimmed.
func (d *Disk) initRootEntry(filename string, startBlock int) (int, error) {
	rootBuff := make([]byte, BlockSize)
	offset := int64(d.rootDirInd * BlockSize)
	if _, err := d.fd.ReadAt(rootBuff, offset); err != nil {
		return 0, err
	}
	for i := 0; i < len(rootBuff); i += RootEntrySize {
		rootEntry := rootBuff[i : i+RootEntrySize]
		name := rootEntry[:RootEntryFilenameSize]
		// check if entry is empty (i.e. name is null)
		if name[0] == 0 {
			// set filename
			copy(name, filename)
			// set first data block
			dtBlkOffset := RootEntryFilenameSize + RootEntrySizeFieldSize
			first := rootEntry[dtBlkOffset : dtBlkOffset+RootEntryStartBlockSize]
			binary.LittleEndian.PutUint16(first, uint16(startBlock))
			// write back to disk
			if _, err := d.fd.WriteAt(rootBuff, offset); err != nil {
				return 0, err
			}
			return i, nil
		}
		// check if filename already exists; stored names are NUL-padded
		if strings.Trim(string(name), "\x00") == filename {
			return 0, FileAlreadyExistsError(filename)
		}
	}
	return 0, RootDirFullError{}
}
// checkIsOpen reports whether filename is currently tracked as open.
func (d *Disk) checkIsOpen(filename string) bool {
	if isOpen, tracked := d.open[filename]; tracked {
		return isOpen
	}
	return false
}
func (d *Disk) loadRootEntry(file *File) error {
if file == nil {
return CustomError{"File structure nil"}
}
if len(file.name) == 0 {
return CustomError{"Filename empty"}
}
// extract root directory
rootBuff := make([]byte, BlockSize)
rootOffset := int64(d.rootDirInd*BlockSize)
d.fd.ReadAt(rootBuff, rootOffset)
// find root entry for filename and load values into struct
for i := 0; i < len(rootBuff); i += RootEntrySize {
entry := rootBuff[i : i+RootEntrySize]
nameBuilder := strings.Builder{}
nameBuilder.Write(entry[:RootEntryFilenameSize])
// remove excess null characters
name := strings.Trim(nameBuilder.String(), "\x00")
// determine if current entry file name matches query
if 0 == strings.Compare(name, file.name) {
dtBlkOffset := RootEntryFilenameSize+RootEntrySizeFieldSize
size := entry[RootEntryFilenameSize : dtBlkOffset]
file.size = int(binary.LittleEndian.Uint32(size))
dtBlk := entry[dtBlkOffset : dtBlkOffset+RootEntryStartBlockSize]
file.desc = int(binary.LittleEndian.Uint16(dtBlk))
return nil
}
}
return FileNotFoundError{file.name}
} |
package inmemory_test
import (
"github.com/Tinee/go-graphql-chat/inmemory"
)
// Client wraps inmemory.Client for use in tests, adding reset and
// fixture-loading helpers.
type Client struct {
	*inmemory.Client
}
// NewClient returns a test Client wrapping a fresh inmemory.Client.
func NewClient() *Client {
	inner := inmemory.NewClient()
	return &Client{inner}
}
// Reset discards all state by replacing the wrapped client with a new one.
func (c *Client) Reset() {
	c.Client = inmemory.NewClient()
}
// FillWithMockData seeds the client from the fixed fixture file
// "mock_data.json" (relative to the test's working directory).
func (c *Client) FillWithMockData() {
	c.Client.FillWithMockData("mock_data.json")
}
|
package cooker
import (
"context"
"fmt"
"sync"
"time"
"github.com/ProfessorMc/Recipe/spoilers/appliance"
"github.com/ProfessorMc/Recipe/spoilers/dish"
)
// SuperHeatOMatic is a toy oven appliance that cooks dishes concurrently
// through a pool of handler goroutines.
type SuperHeatOMatic struct {
	hasPower    bool    // set via SetPower; required before TurnOn succeeds
	isOn        bool    // toggled by TurnOn/TurnOff
	currentTemp float32 // NOTE(review): never read or written in this file
	currentDish chan *dish.Dish // dishes queued for cooking
	completedDish chan *dish.Dish // dishes finished by a handler
	ctx    context.Context    // cancelled by TurnOff to stop the handlers
	cancel context.CancelFunc // cancels ctx
}
// NewSuperHeatOMatic builds an appliance with `capacity` concurrent dish
// handlers (and matching channel buffers) and starts the handlers
// immediately.
func NewSuperHeatOMatic(capacity int) *SuperHeatOMatic {
	ctx, cancel := context.WithCancel(context.Background())
	h := &SuperHeatOMatic{
		ctx:           ctx,
		cancel:        cancel,
		currentDish:   make(chan *dish.Dish, capacity),
		completedDish: make(chan *dish.Dish, capacity),
	}
	h.startDishHandlers(capacity)
	return h
}
// SetPower plugs the appliance in (or out); TurnOn requires power first.
func (h *SuperHeatOMatic) SetPower(power bool) {
	h.hasPower = power
}
// TurnOn switches the appliance on; it fails unless SetPower(true) was
// called first.
func (h *SuperHeatOMatic) TurnOn() error {
	if h.hasPower {
		h.isOn = true
		return nil
	}
	return appliance.BuildApplianceError("Not Plugged In", h)
}
// TurnOff switches the appliance off and cancels the handler context,
// which signals every dish handler to shut down.
func (h *SuperHeatOMatic) TurnOff() error {
	h.isOn = false
	h.cancel()
	return nil
}
// IsOn reports whether the appliance is currently switched on.
func (h SuperHeatOMatic) IsOn() bool {
	return h.isOn
}
// GetName returns the fixed display name of the appliance.
func (SuperHeatOMatic) GetName() string {
	return "Super - Heat-O-Matic"
}
// GetBrand returns the fixed brand name of the appliance.
func (SuperHeatOMatic) GetBrand() string {
	return "Brandly"
}
// CookDish cooks a single dish synchronously by delegating to
// CookDishAsync and blocking until it completes.
//
// Fix: the WaitGroup counter must be incremented before the goroutine
// starts. The original never called wg.Add, so the deferred wg.Done in
// CookDishAsync panicked ("negative WaitGroup counter") and wg.Wait could
// return before the goroutine ran, racing on err.
func (h *SuperHeatOMatic) CookDish(d *dish.Dish) error {
	var wg sync.WaitGroup
	var err error
	wg.Add(1)
	go func() {
		err = h.CookDishAsync(d, &wg)
	}()
	wg.Wait() // Add/Done/Wait also order the write to err before the read below
	return err
}
// CookDishAsync queues d for cooking and blocks until a completed dish is
// available. The caller must call wg.Add(1) before invoking this method;
// wg.Done is always signalled on return, including the not-on error path.
// NOTE(review): the value received from completedDish is discarded and is
// not guaranteed to be d itself when multiple dishes cook concurrently —
// confirm this is acceptable.
func (h *SuperHeatOMatic) CookDishAsync(d *dish.Dish, wg *sync.WaitGroup) error {
	defer wg.Done()
	if !h.isOn {
		return appliance.BuildApplianceError("appliance isn't on", h)
	}
	// Push dish to current dish channel
	h.currentDish <- d
	// Wait for completed dish to finish
	<-h.completedDish
	return nil
}
// preheatOven simulates a fixed five-second preheat to temp.
func (h *SuperHeatOMatic) preheatOven(temp float32) {
	fmt.Printf("Preheating %s to %v degrees\n", h.GetName(), temp)
	time.Sleep(5 * time.Second)
}
// startDishHandlers launches `capacity` worker goroutines, waiting for
// each one to signal readiness before launching the next.
func (h *SuperHeatOMatic) startDishHandlers(capacity int) {
	fmt.Printf("[Appliance %s] Starting Dish Handlers.\n", h.GetName())
	for worker := 0; worker < capacity; worker++ {
		ready := make(chan struct{})
		go h.dishHandler(worker, ready)
		<-ready
	}
}
// dishHandler is the worker loop: it preheats, cooks, and publishes each
// dish taken from currentDish until the appliance context is cancelled.
// `started` is closed once the handler is running so the starter can wait.
//
// Fix: the ctx.Done case must return. A closed Done channel stays ready,
// so without the return the select spun forever after TurnOff, reprinting
// the shutdown message in a busy loop.
func (h *SuperHeatOMatic) dishHandler(handlerNumber int, started chan struct{}) {
	fmt.Printf("[Appliance %s] Starting Handler %d.\n", h.GetName(), handlerNumber)
	close(started)
	for {
		select {
		case newDish := <-h.currentDish:
			fmt.Printf("[Appliance %s Handler %d] Handling Dish: %s\n", h.GetName(), handlerNumber, newDish.String())
			h.preheatOven(newDish.GetCookTemp())
			fmt.Printf("[Appliance %s Handler %d] Cooking Dish: %s\n", h.GetName(), handlerNumber, newDish.String())
			<-time.After(newDish.GetCookTime())
			newDish.SetTemperature(newDish.GetCookTemp())
			fmt.Printf("[Appliance %s Handler %d] Dish Complete: %s\n", h.GetName(), handlerNumber, newDish.String())
			h.completedDish <- newDish
		case <-h.ctx.Done():
			fmt.Printf("[Appliance %s Handler %d] Shutting Down Handler.\n", h.GetName(), handlerNumber)
			return
		}
	}
}
|
package externalservices
import (
"encoding/xml"
"errors"
"fmt"
"hash/fnv"
"io/ioutil"
"net/http"
"poliskarta/api/structs"
)
// CallPoliceRSSGetAll fetches the area's police RSS feed and returns up
// to numEvents events, enriched with area info and API links.
func CallPoliceRSSGetAll(area structs.Area, numEvents int) (structs.PoliceEvents, error) {
	httpResponse, httpErr := http.Get(area.RssURL)
	//If we get http-error when calling the police-RSS
	if httpErr != nil {
		fmt.Println(httpErr.Error())
		return structs.PoliceEvents{}, errors.New("Communication error with polisen.se")
	}
	defer httpResponse.Body.Close()
	xmlResponse, ioErr := ioutil.ReadAll(httpResponse.Body)
	//If we get error while reading the response body
	if ioErr != nil {
		return structs.PoliceEvents{}, ioErr
	}
	policeEvents := policeXMLtoStructs(xmlResponse)
	//If the rss-URL is faulty, we will get a 200 OK response, and the only way to know if it IS faulty is
	//that the policeEvents-struct is empty
	if len(policeEvents.Events) < 1 {
		return structs.PoliceEvents{}, errors.New("Communication error with polisen.se (might be a faulty rss-URL)")
	}
	limitNumOfPoliceEvents(&policeEvents, numEvents)
	addAreaInfoToResponse(&policeEvents, area)
	addEventLinks(&policeEvents, area)
	// idiom fix: return an explicit nil error rather than a zero `var err error`
	return policeEvents, nil
}
// CallPoliceRSSGetSingle Returns a PoliceEvents instead of PoliceEvent because we want to be able to reuse filter functions
// which only accepts PoliceEvents
// It fetches the area's feed, then narrows the result to the event whose
// hash-based ID matches eventID.
func CallPoliceRSSGetSingle(area structs.Area, eventID uint32) (structs.PoliceEvents, error) {
	httpResponse, httpErr := http.Get(area.RssURL)
	if httpErr != nil {
		fmt.Println(httpErr.Error())
		return structs.PoliceEvents{}, errors.New("Communication error with polisen.se")
	}
	defer httpResponse.Body.Close()
	xmlResponse, ioErr := ioutil.ReadAll(httpResponse.Body)
	if ioErr != nil {
		return structs.PoliceEvents{}, ioErr
	}
	//Get police events
	policeEvents := policeXMLtoStructs(xmlResponse)
	//If the rss-URL is faulty, we will get a 200 OK response, and the only way to know if it IS faulty is
	//that the policeEvents-struct is empty
	if len(policeEvents.Events) < 1 {
		return structs.PoliceEvents{}, errors.New("Communication error with polisen.se (might be a faulty rss-URL)")
	}
	//Check if eventID is found among the events
	// NOTE(review): when the ID is not found, eventsSingle is empty but the
	// enrichment calls below still run and the (events, err) pair is
	// returned together — callers must check the error, not just the slice.
	eventsSingle, idNotFoundErr := findEvent(eventID, policeEvents)
	//Add area-value to event
	addAreaToEvents(area, &eventsSingle)
	//Has to be added because mainfilter checks if this info is for "stockholm", and acts accordingly
	addAreaInfoToResponse(&eventsSingle, area)
	addEventLinks(&eventsSingle, area)
	return eventsSingle, idNotFoundErr
}
// policeXMLtoStructs unmarshals the police RSS XML and stamps each event
// with a stable hash-based ID.
// NOTE(review): the xml.Unmarshal error is discarded — malformed XML
// yields an empty struct, which callers detect via len(Events); confirm
// this is the intended contract.
func policeXMLtoStructs(policeRSSxml []byte) structs.PoliceEvents {
	var policeEvents structs.PoliceEvents
	xml.Unmarshal(policeRSSxml, &policeEvents)
	addHashAsID(&policeEvents)
	return policeEvents
}
// limitNumOfPoliceEvents truncates the event list to numEvents entries,
// capped at 50.
//
// Fix: the cap branch assigned 500 instead of 50, contradicting both the
// comment and the function's purpose; the redundant copy-then-assign-back
// dance is also removed (truncating through the pointer is equivalent).
func limitNumOfPoliceEvents(policeEvents *structs.PoliceEvents, numEvents int) {
	//Limit maximum num of events to 50
	if numEvents > 50 {
		numEvents = 50
	}
	//Limit number of events to requested amount
	if numEvents < len(policeEvents.Events) {
		policeEvents.Events = policeEvents.Events[:numEvents]
	}
}
// addHashAsID assigns every event an ID derived from the 32-bit FNV-1
// hash of its event URL, so IDs stay stable across fetches.
func addHashAsID(policeEvents *structs.PoliceEvents) {
	for i := range policeEvents.Events {
		hasher := fnv.New32()
		hasher.Write([]byte(policeEvents.Events[i].PoliceEventURL))
		policeEvents.Events[i].ID = hasher.Sum32()
	}
}
// addEventLinks attaches a "self" API link and an "origin" polisen.se
// link to every event.
func addEventLinks(policeEvents *structs.PoliceEvents, area structs.Area) {
	for i := range policeEvents.Events {
		ev := &policeEvents.Events[i]
		self := structs.Link{"self", fmt.Sprintf(structs.APIURL+"areas/%v/%d", area.Value, ev.ID)}
		origin := structs.Link{"origin", ev.PoliceEventURL}
		ev.Links = append(ev.Links, self, origin)
	}
}
// findEvent returns a PoliceEvents holding only the event whose ID equals
// eventID, or an IdNotFoundError when no event matches.
func findEvent(eventID uint32, policeEvents structs.PoliceEvents) (structs.PoliceEvents, error) {
	for _, candidate := range policeEvents.Events {
		if candidate.ID != eventID {
			continue
		}
		match := structs.PoliceEvents{}
		match.Events = append(match.Events, candidate)
		return match, nil
	}
	return structs.PoliceEvents{}, &structs.IdNotFoundError{fmt.Sprintf("%d didn't match any events", eventID)}
}
// addAreaToEvents stamps every event with the area it belongs to.
func addAreaToEvents(area structs.Area, policeEvents *structs.PoliceEvents) {
	for i := range policeEvents.Events {
		policeEvents.Events[i].Area = &structs.PoliceEventArea{area.Name, area.Value, area.Links}
	}
}
// addAreaInfoToResponse copies the area's display metadata (name, value,
// map coordinates/zoom, links) onto the top-level response struct.
func addAreaInfoToResponse(policeEvents *structs.PoliceEvents, area structs.Area) {
	policeEvents.Name = area.Name
	policeEvents.Value = area.Value
	policeEvents.Latitude = area.Latitude
	policeEvents.Longitude = area.Longitude
	policeEvents.GoogleZoomLevel = area.GoogleZoomLevel
	policeEvents.Links = area.Links
}
|
package swan
import (
"errors"
"github.com/dimuls/swan/classifier"
"github.com/dimuls/swan/postgres"
"github.com/dimuls/swan/web"
)
// Service ties the application together; it currently owns only the web
// server, which in turn holds the storage and classifier dependencies.
type Service struct {
	webServer *web.Server
}
// NewService wires together the postgres storage (running migrations),
// the classifier client, and the web server.
func NewService(
	postgresStorageURI string,
	classifierAPIURI string,
	webServerBindAddr string,
	webServerDebug bool,
) (*Service, error) {
	storage, err := postgres.NewStorage(postgresStorageURI)
	if err != nil {
		return nil, errors.New("failed to create postgres storage: " +
			err.Error())
	}
	if err = storage.Migrate(); err != nil {
		return nil, errors.New("failed to migrate postgres storage: " +
			err.Error())
	}
	clf := classifier.NewClient(classifierAPIURI)
	// TODO: implement sms and email senders
	sender := dummySender{}
	return &Service{
		webServer: web.NewServer(webServerBindAddr, storage, sender, sender, clf, webServerDebug),
	}, nil
}
// Start runs the web server; it blocks until the server stops or fails.
func (s *Service) Start() error {
	return s.webServer.Start()
}
// Stop shuts the web server down.
func (s *Service) Stop() {
	s.webServer.Stop()
}
// dummySender is a no-op stand-in for the real SMS and email senders
// (see the TODO in NewService).
type dummySender struct{}

// SendSMS discards the message and always reports success.
func (dummySender) SendSMS(phone string, text string) error {
	return nil
}

// SendEmail discards the message and always reports success.
func (dummySender) SendEmail(email string, text string) error {
	return nil
}
|
package testutil
import (
"github.com/nsqio/go-nsq"
"time"
)
// NSQTestDelegate is a struct used in unit tests to capture
// NSQ messages and actions. The interface we're mocking is
// the MessageDelegate interface defined here:
// https://github.com/nsqio/go-nsq/blob/master/delegates.go#L35
type NSQTestDelegate struct {
	Message   *nsq.Message  // last message a callback received
	Delay     time.Duration // requeue delay captured by OnRequeue
	Backoff   bool          // backoff flag captured by OnRequeue
	Operation string        // last callback invoked: "finish", "requeue" or "touch"
}
// NewNSQTestDelegate returns a pointer to a new, zero-valued NSQTestDelegate.
func NewNSQTestDelegate() *NSQTestDelegate {
	return new(NSQTestDelegate)
}
// OnFinish records that Finish() was called with message.
func (delegate *NSQTestDelegate) OnFinish(message *nsq.Message) {
	delegate.Operation = "finish"
	delegate.Message = message
}
// OnRequeue records that Requeue() was called, capturing the delay and
// backoff flag alongside the message.
func (delegate *NSQTestDelegate) OnRequeue(message *nsq.Message, delay time.Duration, backoff bool) {
	delegate.Operation = "requeue"
	delegate.Message = message
	delegate.Delay = delay
	delegate.Backoff = backoff
}
// OnTouch records that Touch() was called with message.
func (delegate *NSQTestDelegate) OnTouch(message *nsq.Message) {
	delegate.Operation = "touch"
	delegate.Message = message
}
|
package cloud
import (
"context"
"net/http"
"strings"
"github.com/devspace-cloud/devspace/pkg/devspace/cloud/client"
"github.com/devspace-cloud/devspace/pkg/util/log"
"github.com/devspace-cloud/devspace/pkg/util/survey"
"github.com/pkg/errors"
)
// LoginEndpoint is the cloud endpoint that will log you in (CLI flow).
const LoginEndpoint = "/login?cli=true"

// LoginSuccessEndpoint is the url the browser is redirected to after a
// successful login (see startServer's /key handler).
const LoginSuccessEndpoint = "/login-success"

// TokenEndpoint is the endpoint where to get a token from.
const TokenEndpoint = "/auth/token"
// Login logs the user into DevSpace Cloud.
//
// Normal flow: a local callback server is started, the browser is opened
// on the login page, and the access key arrives on keyChannel.
// Fallback flow (browser cannot be opened): the user pastes an access key
// manually, which is validated immediately via GetSpaces.
// In both flows the callback server is shut down and keyChannel closed
// before returning.
func (p *provider) Login() error {
	var (
		url        = p.Host + LoginEndpoint
		ctx        = context.Background()
		keyChannel = make(chan string)
	)
	var key string
	server := startServer(p.Host+LoginSuccessEndpoint, keyChannel, p.log)
	err := p.browser.Run(url)
	if err != nil {
		// Browser failed to open: fall back to manual key entry.
		p.log.Infof("Unable to open web browser for login page.\n\n Please follow these instructions for manually loggin in:\n\n 1. Open this URL in a browser: %s\n 2. After logging in, click the 'Create Key' button\n 3. Enter a key name (e.g. my-key) and click 'Create Access Key'\n 4. Copy the generated key from the input field", p.Host+"/settings/access-keys")
		key, err = p.log.Question(&survey.QuestionOptions{
			Question:   "5. Enter the access key here:",
			IsPassword: true,
		})
		if err != nil {
			close(keyChannel)
			server.Shutdown(ctx)
			return err
		}
		key = strings.TrimSpace(key)
		p.log.WriteString("\n")

		// Check if we got access
		p.Key = key
		if p.client == nil {
			p.client = client.NewClient(p.Name, p.Host, key, "", p.loader)
		}
		// note: := deliberately shadows the outer err inside this block
		_, err := p.client.GetSpaces()
		if err != nil {
			server.Shutdown(ctx)
			close(keyChannel)
			return errors.Wrap(err, "login")
		}
	} else {
		p.log.Infof("If the browser does not open automatically, please navigate to %s", url)
		p.log.StartWait("Logging into cloud provider...")
		defer p.log.StopWait()
		// Block until the local callback server receives the key.
		key = <-keyChannel
	}
	err = server.Shutdown(ctx)
	if err != nil {
		return err
	}
	close(keyChannel)
	// Persist the key and build the authenticated client.
	p.Key = key
	p.client = client.NewClient(p.Name, p.Host, key, p.Token, p.loader)
	return nil
}
// startServer spins up a local HTTP server on :25853 that receives the
// access key from the browser-based login flow: its /key handler pushes
// the key onto keyChannel and redirects the browser to redirectURI.
// The caller is responsible for calling Shutdown on the returned server.
//
// Fix: the original swallowed every ListenAndServe error in an empty
// if-branch; only http.ErrServerClosed (the caller's Shutdown) is
// expected, so anything else is now surfaced as a warning.
func startServer(redirectURI string, keyChannel chan string, log log.Logger) *http.Server {
	srv := &http.Server{Addr: ":25853"}
	http.HandleFunc("/key", func(w http.ResponseWriter, r *http.Request) {
		keys, ok := r.URL.Query()["key"]
		if !ok || len(keys[0]) == 0 {
			log.Warn("Login: the key used to login is not valid")
			return
		}
		keyChannel <- keys[0]
		http.Redirect(w, r, redirectURI, http.StatusSeeOther)
	})
	go func() {
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Warn("Login: local callback server failed: " + err.Error())
		}
	}()
	// returning reference so caller can call Shutdown()
	return srv
}
|
// Package sms provides an XMPP component (XEP-0114) which acts as a
// gateway or proxy between XMPP and SMS. It allows you to send and
// receive SMS messages as if they were XMPP messages. This lets you
// interact with the SMS network using your favorite XMPP client.
//
// Many users will be satisfied to run the sms-over-xmpp command with
// an appropriate configuration file. This Go package is intended for
// those who want greater control over their SMS gateway or who wish
// to incorporate the XMPP component into existing Go code.
package sms
|
package decoder
import (
"encoding/json"
"errors"
"time"
"github.com/Tanibox/tania-core/src/assets/domain"
"github.com/mitchellh/mapstructure"
)
// MaterialEventWrapper aliases EventWrapper so material history rows can
// provide their own JSON unmarshalling into concrete domain event types.
type MaterialEventWrapper EventWrapper
// UnmarshalJSON decodes the generic event envelope, then re-decodes its
// EventData payload into the concrete domain event named by EventName,
// applying the UID/time/material-type decode hooks.
// Unknown event names are silently ignored and leave EventData nil,
// matching the original behavior.
//
// Fix: the eight identical Decode/error-handling stanzas are collapsed
// into a shared `decode` helper; each case keeps storing the event VALUE
// (not a pointer) in w.EventData, exactly as before.
func (w *MaterialEventWrapper) UnmarshalJSON(b []byte) error {
	wrapper := EventWrapper{}
	if err := json.Unmarshal(b, &wrapper); err != nil {
		return err
	}
	mapped, ok := wrapper.EventData.(map[string]interface{})
	if !ok {
		return errors.New("error type assertion")
	}
	f := mapstructure.ComposeDecodeHookFunc(
		UIDHook(),
		TimeHook(time.RFC3339),
		MaterialTypeHook(),
	)
	// decode re-maps the raw payload into target using the hooks above.
	decode := func(target interface{}) error {
		_, err := Decode(f, &mapped, target)
		return err
	}
	switch wrapper.EventName {
	case "MaterialCreated":
		e := domain.MaterialCreated{}
		if err := decode(&e); err != nil {
			return err
		}
		w.EventData = e
	case "MaterialNameChanged":
		e := domain.MaterialNameChanged{}
		if err := decode(&e); err != nil {
			return err
		}
		w.EventData = e
	case "MaterialPriceChanged":
		e := domain.MaterialPriceChanged{}
		if err := decode(&e); err != nil {
			return err
		}
		w.EventData = e
	case "MaterialQuantityChanged":
		e := domain.MaterialQuantityChanged{}
		if err := decode(&e); err != nil {
			return err
		}
		w.EventData = e
	case "MaterialTypeChanged":
		e := domain.MaterialTypeChanged{}
		if err := decode(&e); err != nil {
			return err
		}
		w.EventData = e
	case "MaterialExpirationDateChanged":
		e := domain.MaterialExpirationDateChanged{}
		if err := decode(&e); err != nil {
			return err
		}
		w.EventData = e
	case "MaterialNotesChanged":
		e := domain.MaterialNotesChanged{}
		if err := decode(&e); err != nil {
			return err
		}
		w.EventData = e
	case "MaterialProducedByChanged":
		e := domain.MaterialProducedByChanged{}
		if err := decode(&e); err != nil {
			return err
		}
		w.EventData = e
	}
	return nil
}
|
package main
import "github.com/TomWeek/hellogo/service"
// main delegates to the service package's greeting.
func main() {
	service.PrintHello()
}
|
package util
import (
"io/ioutil"
"os"
)
// Dir wraps a filesystem path with simple directory helpers.
type Dir struct {
	FilePath string // path the helper methods operate on
}
// GetFileList returns the directory listing of FilePath.
// Read errors are ignored; on failure the result is nil.
func (d *Dir) GetFileList() []os.FileInfo {
	entries, _ := ioutil.ReadDir(d.FilePath)
	return entries
}
// IsDir reports whether FilePath exists and is a directory.
func (d *Dir) IsDir() bool {
	info, err := os.Stat(d.FilePath)
	return err == nil && info.IsDir()
}
// CreateDir creates FilePath and any missing parents with mode 0777.
// Returns false when creation fails.
func (d *Dir) CreateDir() bool {
	return os.MkdirAll(d.FilePath, 0777) == nil
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"bytes"
"encoding/binary"
"encoding/gob"
"errors"
"flag"
"github.com/boltdb/bolt"
"log"
"time"
)
// Sentinel errors and the fixed bolt bucket/key names.
// NOTE(review): Go convention would name these errBucketDoesNotExist /
// errKeyDoesNotExist; left as-is because they are referenced elsewhere.
var (
	bucketDoesNotExistError = errors.New("bucket does not exist")
	keyDoesNotExist         = errors.New("key does not exist")
	bucketName              = []byte("bucket")
	keyName                 = []byte("key")
)

// db is the process-wide bolt handle, opened in main.
var db *bolt.DB
// TODO(evg): review it
// weatherInfo is one weather-station sample, gob-encoded in bolt and
// served as JSON by the HTTP handlers.
type weatherInfo struct {
	ID        uint64 // bolt bucket sequence number, assigned on insert
	TimeStamp int64  // Unix TimeStamp (set via time.Now().UnixNano() in dataHandler)
	Temp      float64
	Humidity  float64
	Pressure  float64
	WindSpeed float64
	WindDirection string
	Rainfall  float64
}
// TODO(evg): remove it?
// String renders only the temperature, for quick debug output.
func (w *weatherInfo) String() string {
	return fmt.Sprintf("Temp: %v", w.Temp)
}
// Serialize gob-encodes the sample for storage in bolt.
func (w *weatherInfo) Serialize() ([]byte, error) {
	buffer := bytes.Buffer{}
	if err := gob.NewEncoder(&buffer).Encode(w); err != nil {
		return nil, err
	}
	return buffer.Bytes(), nil
}
// Deserialize gob-decodes data into the receiver.
func (w *weatherInfo) Deserialize(data []byte) error {
	return gob.NewDecoder(bytes.NewBuffer(data)).Decode(w)
}
// dataHandler serves the full history of weather samples.
//
// GET  /data : reads every value from the bolt bucket, decodes each into
//              a weatherInfo, and responds with a JSON array (CORS: *).
// POST /data : decodes a weatherInfo JSON body, stamps it with the next
//              bucket sequence ID and the current time, and persists it.
//
// Errors are printed to stdout and the handler returns silently —
// NOTE(review): no HTTP error status is written, so clients cannot
// distinguish failures from empty results; confirm this is intended.
func dataHandler(resp http.ResponseWriter, req *http.Request) {
	switch req.Method {
	case "GET":
		dataSlice := make([][]byte, 0)
		err := db.View(func(tx *bolt.Tx) error {
			b := tx.Bucket(bucketName)
			if b == nil {
				return bucketDoesNotExistError
			}
			// Copy each value: bolt buffers are only valid inside the
			// transaction, so they must not escape View without a copy.
			return b.ForEach(func(key, value []byte) error {
				data := make([]byte, len(value))
				copy(data, value)
				dataSlice = append(dataSlice, data)
				return nil
			})
		})
		if err != nil {
			fmt.Printf("db's view error: %v\n", err)
			return
		}
		// Decode every raw record outside the transaction.
		weatherInfoSlice := make([]*weatherInfo, len(dataSlice))
		for i, data := range dataSlice {
			weatherInfo := &weatherInfo{}
			if err := weatherInfo.Deserialize(data); err != nil {
				fmt.Println(err)
				return
			}
			weatherInfoSlice[i] = weatherInfo
		}
		jsonText, err := json.Marshal(weatherInfoSlice)
		if err != nil {
			fmt.Println(err)
			return
		}
		resp.Header().Set("Access-Control-Allow-Origin", "*")
		if _, err := resp.Write(jsonText); err != nil {
			fmt.Println(err)
		}
	case "POST":
		data, err := ioutil.ReadAll(req.Body)
		if err != nil {
			fmt.Printf("can't read data: %v\n", err)
			return
		}
		weatherInfo := weatherInfo{}
		if err := json.Unmarshal(data, &weatherInfo); err != nil {
			fmt.Printf("can't parse data: %v\n", err)
			return
		}
		err = db.Update(func(tx *bolt.Tx) error {
			b, err := tx.CreateBucketIfNotExists(bucketName)
			if err != nil {
				return err
			}
			// Server-side fields override whatever the client sent.
			id, _ := b.NextSequence()
			weatherInfo.ID = id
			weatherInfo.TimeStamp = time.Now().UnixNano()
			data, err := weatherInfo.Serialize()
			if err != nil {
				return err
			}
			return b.Put(itob(id), data)
		})
		if err != nil {
			fmt.Printf("can't write data: %v\n", err)
			return
		}
	}
}
// dataLastHandler serves the most recent weather sample.
//
// GET /data/last : reads the last key/value in the bucket (cursor order is
// key order; keys are big-endian sequence numbers, so last == newest),
// decodes it, and responds with a single JSON object (CORS: *).
// Errors are printed to stdout and no HTTP error status is written —
// NOTE(review): same silent-failure contract as dataHandler; confirm.
func dataLastHandler(resp http.ResponseWriter, req *http.Request) {
	switch req.Method {
	case "GET":
		var data []byte
		err := db.View(func(tx *bolt.Tx) error {
			b := tx.Bucket(bucketName)
			if b == nil {
				return bucketDoesNotExistError
			}
			_, value := b.Cursor().Last()
			if value == nil {
				return keyDoesNotExist
			}
			// Copy: bolt buffers are only valid inside the transaction.
			data = make([]byte, len(value))
			copy(data, value)
			return nil
		})
		if err != nil {
			fmt.Printf("db's view error: %v\n", err)
			return
		}
		weatherInfo := weatherInfo{}
		if err := weatherInfo.Deserialize(data); err != nil {
			fmt.Println(err)
			return
		}
		jsonText, err := json.Marshal(weatherInfo)
		if err != nil {
			fmt.Println(err)
			return
		}
		resp.Header().Set("Access-Control-Allow-Origin", "*")
		if _, err := resp.Write(jsonText); err != nil {
			fmt.Println(err)
		}
	}
}
// itob returns an 8-byte big endian representation of v.
// (The redundant uint64(v) conversion — v is already uint64 — is removed.)
func itob(v uint64) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, v)
	return b
}
// main opens the bolt database and serves the weather endpoints.
// Flags:
//   -listen  host:port to serve on (default 0.0.0.0:9000)
//   -dbpath  path to the bolt database file (default data.db)
func main() {
	listenAddr := flag.String("listen", "0.0.0.0:9000", "address of http server, format: host:port")
	dbPath := flag.String("dbpath", "data.db", "absolute path to database, example: /tmp/my.db")
	flag.Parse()
	var err error
	db, err = bolt.Open(*dbPath, 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	http.HandleFunc("/data", dataHandler)
	http.HandleFunc("/data/last", dataLastHandler)
	// Fix: the original discarded ListenAndServe's error, so startup
	// failures (e.g. port already in use) were silent. Logged (not Fatal)
	// so the deferred db.Close still runs.
	if err := http.ListenAndServe(*listenAddr, nil); err != nil {
		log.Println(err)
	}
}
|
// Copyright 2014 William H. St. Clair
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema
import (
"bytes"
"encoding/gob"
"encoding/json"
"fmt"
"io"
)
// RandomProvider supplies a deterministic pseudo-random float for a given
// 64-bit index, letting callers draw from disjoint streams via offsets.
type RandomProvider interface {
	Get(int64) float64
}
// TransformF maps a list of terms to a new list of terms.
type TransformF func([]string) []string

// Transform is a registered, named transform factory.
type Transform struct {
	Name        string
	Description string
	// Instance builds the concrete transform function from its arguments.
	Instance func(map[string]interface{}) (TransformF, error)
}

// TransformI is a transform instance as configured in a schema document;
// Fn is resolved from the registry at hydrate time and never serialized.
type TransformI struct {
	Name      string                 `json:"function"`
	Arguments map[string]interface{} `json:"arguments,omitempty"`
	Fn        TransformF             `json:"-"`
}
// hydrate looks the transform up by name in the Transforms registry and
// instantiates its function with the stored arguments.
func (ti *TransformI) hydrate() error {
	for _, candidate := range Transforms {
		if candidate.Name != ti.Name {
			continue
		}
		fn, err := candidate.Instance(ti.Arguments)
		ti.Fn = fn
		return err
	}
	return fmt.Errorf("function not found: %s", ti.Name)
}
// Field describes one signature dimension: the record attributes it draws
// from, the transform pipeline applied to them, and the classifier that
// turns terms into signature contributions (rebuilt at hydrate time).
type Field struct {
	Comment    string        `json:"comment"`
	Attrs      []string      `json:"attrs"`
	Transforms []*TransformI `json:"transforms"`
	Classifier Classifier    `json:"-"`
}
// hydrate installs a default classifier when none is set and resolves
// every transform's function.
//
// Fix: the original assigned each t.hydrate() result to the same err, so
// a successful later transform silently overwrote an earlier failure and
// the function could return nil despite a broken pipeline. It now stops
// at, and returns, the first error.
func (d *Field) hydrate() (err error) {
	if d.Classifier == nil {
		d.Classifier = NewTfIdfClassifier()
	}
	for _, t := range d.Transforms {
		if err = t.hydrate(); err != nil {
			return err
		}
	}
	return nil
}
// Load populates the field from JSON and hydrates it.
//
// Fix: the original ignored hydrate()'s return value, so a field with an
// unresolvable transform loaded "successfully" and later crashed on a nil
// Fn; the hydrate error is now propagated.
func (d *Field) Load(data []byte) (err error) {
	err = json.Unmarshal(data, &d)
	if err != nil {
		return
	}
	return d.hydrate()
}
// xform runs term through the field's transform pipeline in order.
func (d *Field) xform(term string) []string {
	terms := []string{term}
	for _, t := range d.Transforms {
		terms = t.Fn(terms)
	}
	return terms
}
// pick concatenates the record's configured attributes into one string
// and transforms it into terms.
func (d *Field) pick(record map[string]string) []string {
	var combined bytes.Buffer
	for _, attr := range d.Attrs {
		combined.WriteString(record[attr])
	}
	return d.xform(combined.String())
}
// Learn feeds every term picked from record into the classifier.
func (d *Field) Learn(record map[string]string) {
	for _, term := range d.pick(record) {
		d.Classifier.Learn(term)
	}
}
// Signature computes an n-dimensional signature for record by summing the
// classifier signatures of every picked term. offset seeds the random
// provider so different fields draw from disjoint random streams.
// On error the partial sum accumulated so far is returned alongside it.
func (d *Field) Signature(record map[string]string, n int, r RandomProvider, offset int64) (s []float64, err error) {
	sig := make([]float64, n)
	for _, t := range d.pick(record) {
		s, err = d.Classifier.Signature(t, n, r, offset)
		if err != nil {
			return sig, err
		}
		// element-wise accumulate this term's contribution
		for i, v := range s {
			sig[i] += v
		}
	}
	return sig, nil
}
// Schema defines the signature configuration: how many hashes are drawn
// in total, how many bits form one chunk, and the contributing fields.
type Schema struct {
	HashCount int      `json:"hash_count"` // total hash dimensions
	Width     int      `json:"chunk_size"` // bits folded into one uint32 chunk
	Fields    []*Field `json:"fields"`
}
// SignatureLen returns the number of signature chunks (HashCount / Width).
func (s *Schema) SignatureLen() int {
	return s.HashCount / s.Width
}
// ChunkBits returns the number of bits per signature chunk.
func (s *Schema) ChunkBits() int {
	return s.Width
}
// LoadJSON populates the schema from JSON and re-hydrates its fields.
func (s *Schema) LoadJSON(data []byte) error {
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	s.hydrate()
	return nil
}
// Hyd is the exported entry point for re-hydrating a schema (e.g. after
// decoding it outside this package); it simply delegates to hydrate.
func (s *Schema) Hyd() {
	s.hydrate()
}
// hydrate rebuilds the non-serialized parts (classifiers, transform
// functions) of every field.
// NOTE(review): Field.hydrate's error is discarded here, leaving a field
// with a nil transform Fn on failure — confirm this is acceptable.
func (s *Schema) hydrate() {
	for _, d := range s.Fields {
		d.hydrate()
	}
}
// Learn consumes records from c until it closes, training every field on
// each record.
func (s *Schema) Learn(c chan map[string]string) {
	for record := range c {
		for _, field := range s.Fields {
			field.Learn(record)
		}
	}
}
// LearnRecords consumes *Record values from c until it closes, training
// every field on each record's attribute map.
func (s *Schema) LearnRecords(c chan *Record) {
	for record := range c {
		for _, field := range s.Fields {
			field.Learn(record.Attrs)
		}
	}
}
// Sign produces the record's signature: each field contributes a
// HashCount-dimensional real-valued signature (drawn from a disjoint
// region of r via the running offset o), and the summed values are then
// quantized by sign into HashCount/Width chunks of Width bits each.
func (s *Schema) Sign(record map[string]string, r RandomProvider) ([]uint32, error) {
	var raw [][]float64
	var signatures []uint32
	o := int64(0)
	for _, d := range s.Fields {
		sig, err := d.Signature(record, s.HashCount, r, o)
		if err != nil {
			return nil, err
		}
		raw = append(raw, sig)
		// advance so the next field uses a disjoint random stream
		o += int64(d.Classifier.Dimension() * s.HashCount)
	}
	chunks := s.HashCount / s.Width
	for i := 0; i < chunks; i++ {
		var chunk uint32
		for j := 0; j < s.Width; j++ {
			// sum this hash dimension across all fields
			sum := 0.0
			for _, v := range raw {
				sum += v[(i*s.Width)+j]
			}
			// sign quantization: non-negative sum sets the bit
			if sum >= 0.0 {
				chunk |= (1 << uint(j))
			}
		}
		signatures = append(signatures, chunk)
	}
	return signatures, nil
}
// Save gob-encodes the schema to w.
func (s *Schema) Save(w io.Writer) error {
	return gob.NewEncoder(w).Encode(s)
}
// Load gob-decodes the schema from r and re-hydrates its fields.
// Note: hydrate runs even when decoding failed, as in the original.
func (s *Schema) Load(r io.Reader) (err error) {
	err = gob.NewDecoder(r).Decode(s)
	s.hydrate()
	return
}
|
package main
import "fmt"
// main demonstrates fixed-size arrays: zero values, index assignment,
// and the binary (%b) representation of each element.
func main() {
	var nums [10]int
	fmt.Println("length: ", len(nums))
	fmt.Println(nums)
	for i := range nums {
		nums[i] = i
	}
	for _, n := range nums {
		fmt.Printf("%v - %T - %b \n", n, n, n)
	}
}
// length: 10
// [0 0 0 0 0 0 0 0 0 0]
// 0 - int - 0
// 1 - int - 1
// 2 - int - 10
// 3 - int - 11
// 4 - int - 100
// 5 - int - 101
// 6 - int - 110
// 7 - int - 111
// 8 - int - 1000
// 9 - int - 1001
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package application
import (
"context"
"sync"
"github.com/kubevela/workflow/pkg/cue/packages"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/client"
monitorContext "github.com/kubevela/pkg/monitor/context"
pkgmulticluster "github.com/kubevela/pkg/multicluster"
terraformtypes "github.com/oam-dev/terraform-controller/api/types"
terraforv1beta1 "github.com/oam-dev/terraform-controller/api/v1beta1"
terraforv1beta2 "github.com/oam-dev/terraform-controller/api/v1beta2"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/apis/types"
"github.com/oam-dev/kubevela/pkg/appfile"
"github.com/oam-dev/kubevela/pkg/monitor/metrics"
"github.com/oam-dev/kubevela/pkg/multicluster"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/oam/util"
"github.com/oam-dev/kubevela/pkg/resourcekeeper"
)
// AppHandler handles application reconcile
type AppHandler struct {
	client.Client
	pd             *packages.PackageDiscover
	app            *v1beta1.Application
	currentAppRev  *v1beta1.ApplicationRevision
	latestAppRev   *v1beta1.ApplicationRevision
	resourceKeeper resourcekeeper.ResourceKeeper
	isNewRevision  bool
	currentRevHash string
	// accumulated reconcile results, guarded by mu where noted on the
	// accessor methods
	services         []common.ApplicationComponentStatus
	appliedResources []common.ClusterObjectReference
	deletedResources []common.ClusterObjectReference
	mu               sync.Mutex
}
// NewAppHandler create new app handler
// When ctx is a monitorContext.Context, a forked sub-context records the
// handler-creation duration metric. Note that `ctx, ok := ctx.(...)` only
// shadows ctx inside the if block; the original ctx is used afterwards.
func NewAppHandler(ctx context.Context, r *Reconciler, app *v1beta1.Application) (*AppHandler, error) {
	if ctx, ok := ctx.(monitorContext.Context); ok {
		subCtx := ctx.Fork("create-app-handler", monitorContext.DurationMetric(func(v float64) {
			metrics.AppReconcileStageDurationHistogram.WithLabelValues("create-app-handler").Observe(v)
		}))
		defer subCtx.Commit("finish create appHandler")
	}
	resourceHandler, err := resourcekeeper.NewResourceKeeper(ctx, r.Client, app)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to create resourceKeeper")
	}
	return &AppHandler{
		Client:         r.Client,
		pd:             r.pd,
		app:            app,
		resourceKeeper: resourceHandler,
	}, nil
}
// Dispatch apply manifests into k8s.
// Each manifest is tagged with the target cluster, applied through the
// resourceKeeper, and recorded in appliedResources.
func (h *AppHandler) Dispatch(ctx context.Context, cluster string, owner string, manifests ...*unstructured.Unstructured) error {
	manifests = multicluster.ResourcesWithClusterName(cluster, manifests...)
	if err := h.resourceKeeper.Dispatch(ctx, manifests, nil); err != nil {
		return err
	}
	for _, mf := range manifests {
		if mf == nil {
			continue
		}
		// Prefer the cluster recorded on the manifest itself.
		// NOTE(review): this overwrites the shared `cluster` variable, so
		// later manifests without their own cluster annotation inherit the
		// previous manifest's cluster — confirm this carry-over is intended.
		if oam.GetCluster(mf) != "" {
			cluster = oam.GetCluster(mf)
		}
		ref := common.ClusterObjectReference{
			Cluster: cluster,
			Creator: owner,
			ObjectReference: corev1.ObjectReference{
				Name:       mf.GetName(),
				Namespace:  mf.GetNamespace(),
				Kind:       mf.GetKind(),
				APIVersion: mf.GetAPIVersion(),
			},
		}
		h.addAppliedResource(false, ref)
	}
	return nil
}
// Delete removes the manifest from the target cluster via the
// resourceKeeper and records it in the handler's deleted-resource list.
func (h *AppHandler) Delete(ctx context.Context, cluster string, owner string, manifest *unstructured.Unstructured) error {
	tagged := multicluster.ResourcesWithClusterName(cluster, manifest)
	if err := h.resourceKeeper.Delete(ctx, tagged); err != nil {
		return err
	}
	h.deleteAppliedResource(common.ClusterObjectReference{
		Cluster: cluster,
		Creator: owner,
		ObjectReference: corev1.ObjectReference{
			Name:       manifest.GetName(),
			Namespace:  manifest.GetNamespace(),
			Kind:       manifest.GetKind(),
			APIVersion: manifest.GetAPIVersion(),
		},
	})
	return nil
}
// addAppliedResource records applied resources, guarded by h.mu.
// When previous is true, a ref that was recorded as deleted is treated as
// resurrected: it is removed from deletedResources and processing stops.
// Otherwise the ref is appended to appliedResources unless already there.
// NOTE(review): the early `return` abandons any remaining refs in the
// same call once one resurrected ref is found — confirm that is intended.
// (The original header claimed single-threaded reconcile needs no locker,
// yet the method locks h.mu; the lock is kept.)
func (h *AppHandler) addAppliedResource(previous bool, refs ...common.ClusterObjectReference) {
	h.mu.Lock()
	defer h.mu.Unlock()
	for _, ref := range refs {
		if previous {
			for i, deleted := range h.deletedResources {
				if deleted.Equal(ref) {
					h.deletedResources = removeResources(h.deletedResources, i)
					return
				}
			}
		}
		// dedupe: only append refs not already recorded
		found := false
		for _, current := range h.appliedResources {
			if current.Equal(ref) {
				found = true
				break
			}
		}
		if !found {
			h.appliedResources = append(h.appliedResources, ref)
		}
	}
}
// deleteAppliedResource moves ref from the applied-resource list to the
// deleted-resource list. If ref was never applied it is still recorded in
// deletedResources (at most once).
// NOTE(review): unlike addAppliedResource this method does not take h.mu —
// presumably safe because reconcile is single-threaded, but worth confirming.
func (h *AppHandler) deleteAppliedResource(ref common.ClusterObjectReference) {
	delIndex := -1
	for i, current := range h.appliedResources {
		if current.Equal(ref) {
			delIndex = i
		}
	}
	if delIndex < 0 {
		// Not in the applied list: remember it as deleted exactly once.
		isDeleted := false
		for _, deleted := range h.deletedResources {
			if deleted.Equal(ref) {
				isDeleted = true
				break
			}
		}
		if !isDeleted {
			h.deletedResources = append(h.deletedResources, ref)
		}
	} else {
		h.appliedResources = removeResources(h.appliedResources, delIndex)
	}
}
// removeResources drops the element at index in O(1) by moving the final
// element into its slot and truncating the slice by one. The backing array is
// mutated and element order is not preserved.
func removeResources(elements []common.ClusterObjectReference, index int) []common.ClusterObjectReference {
	last := len(elements) - 1
	elements[index] = elements[last]
	return elements[:last]
}
// getServiceStatus get specified component status.
// Returns the tracked entry in h.services matching svc (by Equal) if one
// exists; otherwise returns svc unchanged.
func (h *AppHandler) getServiceStatus(svc common.ApplicationComponentStatus) common.ApplicationComponentStatus {
	for i := range h.services {
		current := h.services[i]
		if current.Equal(svc) {
			return current
		}
	}
	return svc
}
// addServiceStatus records the whole component status.
// reconcile run at single threaded. So there is no need to consider to use locker.
// When cover is true an existing entry for the same component is overwritten;
// otherwise the existing entry is kept as-is.
func (h *AppHandler) addServiceStatus(cover bool, svcs ...common.ApplicationComponentStatus) {
	h.mu.Lock()
	defer h.mu.Unlock()
	for _, svc := range svcs {
		found := false
		for i := range h.services {
			current := h.services[i]
			if current.Equal(svc) {
				if cover {
					h.services[i] = svc
				}
				found = true
				break
			}
		}
		if !found {
			h.services = append(h.services, svc)
		}
	}
}
// ProduceArtifacts will produce Application artifacts that will be saved in configMap.
// Delegates to createResourcesConfigMap for the current application revision.
func (h *AppHandler) ProduceArtifacts(ctx context.Context, comps []*types.ComponentManifest, policies []*unstructured.Unstructured) error {
	return h.createResourcesConfigMap(ctx, h.currentAppRev, comps, policies)
}
// collectTraitHealthStatus collect trait health status.
// It evaluates the trait's health and status-message expressions against a
// freshly built template context and returns the resulting trait status plus
// any auxiliary output resources found in that context.
func (h *AppHandler) collectTraitHealthStatus(comp *appfile.Component, tr *appfile.Trait, appRev *v1beta1.ApplicationRevision, overrideNamespace string) (common.ApplicationTraitStatus, []*unstructured.Unstructured, error) {
	// Restore the component context's original cluster when we return; the body
	// may rewrite it (to the local cluster) for ControlPlaneOnly traits.
	defer func(clusterName string) {
		comp.Ctx.SetCtx(pkgmulticluster.WithCluster(comp.Ctx.GetCtx(), clusterName))
	}(multicluster.ClusterNameInContext(comp.Ctx.GetCtx()))
	var (
		pCtx        = comp.Ctx
		appName     = appRev.Spec.Application.Name
		traitStatus = common.ApplicationTraitStatus{
			Type:    tr.Name,
			Healthy: true,
		}
		traitOverrideNamespace = overrideNamespace
		err                    error
	)
	// ControlPlaneOnly traits are evaluated in the app revision's namespace on
	// the local (control-plane) cluster.
	if tr.FullTemplate.TraitDefinition.Spec.ControlPlaneOnly {
		traitOverrideNamespace = appRev.GetNamespace()
		pCtx.SetCtx(pkgmulticluster.WithCluster(pCtx.GetCtx(), pkgmulticluster.Local))
	}
	_accessor := util.NewApplicationResourceNamespaceAccessor(h.app.Namespace, traitOverrideNamespace)
	templateContext, err := tr.GetTemplateContext(pCtx, h.Client, _accessor)
	if err != nil {
		return common.ApplicationTraitStatus{}, nil, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, get template context error", appName, comp.Name, tr.Name)
	}
	// A health-evaluation error counts as unhealthy rather than aborting.
	if ok, err := tr.EvalHealth(templateContext); !ok || err != nil {
		traitStatus.Healthy = false
	}
	traitStatus.Message, err = tr.EvalStatus(templateContext)
	if err != nil {
		return common.ApplicationTraitStatus{}, nil, errors.WithMessagef(err, "app=%s, comp=%s, trait=%s, evaluate status message error", appName, comp.Name, tr.Name)
	}
	return traitStatus, extractOutputs(templateContext), nil
}
// collectWorkloadHealthStatus collect workload health status.
// Terraform components are checked via their Configuration CR (falling back to
// the legacy v1beta1 CR when the v1beta2 one is absent); other components are
// checked by evaluating the component's health/status expressions, which also
// yields the rendered workload ("output") and auxiliary outputs.
func (h *AppHandler) collectWorkloadHealthStatus(ctx context.Context, comp *appfile.Component, appRev *v1beta1.ApplicationRevision, status *common.ApplicationComponentStatus, accessor util.NamespaceAccessor) (bool, *unstructured.Unstructured, []*unstructured.Unstructured, error) {
	var output *unstructured.Unstructured
	var outputs []*unstructured.Unstructured
	var (
		appName  = appRev.Spec.Application.Name
		isHealth = true
	)
	if comp.CapabilityCategory == types.TerraformCategory {
		var configuration terraforv1beta2.Configuration
		if err := h.Client.Get(ctx, client.ObjectKey{Name: comp.Name, Namespace: accessor.Namespace()}, &configuration); err != nil {
			if kerrors.IsNotFound(err) {
				// Fall back to the legacy v1beta1 Configuration CRD.
				var legacyConfiguration terraforv1beta1.Configuration
				if err := h.Client.Get(ctx, client.ObjectKey{Name: comp.Name, Namespace: accessor.Namespace()}, &legacyConfiguration); err != nil {
					return false, nil, nil, errors.WithMessagef(err, "app=%s, comp=%s, check health error", appName, comp.Name)
				}
				isHealth = setStatus(status, legacyConfiguration.Status.ObservedGeneration, legacyConfiguration.Generation,
					legacyConfiguration.GetLabels(), appRev.Name, legacyConfiguration.Status.Apply.State, legacyConfiguration.Status.Apply.Message)
			} else {
				return false, nil, nil, errors.WithMessagef(err, "app=%s, comp=%s, check health error", appName, comp.Name)
			}
		} else {
			isHealth = setStatus(status, configuration.Status.ObservedGeneration, configuration.Generation, configuration.GetLabels(),
				appRev.Name, configuration.Status.Apply.State, configuration.Status.Apply.Message)
		}
	} else {
		templateContext, err := comp.GetTemplateContext(comp.Ctx, h.Client, accessor)
		if err != nil {
			return false, nil, nil, errors.WithMessagef(err, "app=%s, comp=%s, get template context error", appName, comp.Name)
		}
		// A health-evaluation error counts as unhealthy rather than aborting.
		if ok, err := comp.EvalHealth(templateContext); !ok || err != nil {
			isHealth = false
		}
		status.Healthy = isHealth
		status.Message, err = comp.EvalStatus(templateContext)
		if err != nil {
			return false, nil, nil, errors.WithMessagef(err, "app=%s, comp=%s, evaluate workload status message error", appName, comp.Name)
		}
		output, outputs = extractOutputAndOutputs(templateContext)
	}
	return isHealth, output, outputs, nil
}
// nolint
// collectHealthStatus will collect health status of component, including component itself and traits.
// The component's tracked entry in h.services is refreshed; traits matched by
// any of traitFilters are skipped, and skipWorkload suppresses the workload
// check itself.
func (h *AppHandler) collectHealthStatus(ctx context.Context, comp *appfile.Component, appRev *v1beta1.ApplicationRevision, overrideNamespace string, skipWorkload bool, traitFilters ...TraitFilter) (*common.ApplicationComponentStatus, *unstructured.Unstructured, []*unstructured.Unstructured, bool, error) {
	output := new(unstructured.Unstructured)
	outputs := make([]*unstructured.Unstructured, 0)
	accessor := util.NewApplicationResourceNamespaceAccessor(h.app.Namespace, overrideNamespace)
	var (
		status = common.ApplicationComponentStatus{
			Name:               comp.Name,
			WorkloadDefinition: comp.FullTemplate.Reference.Definition,
			Healthy:            true,
			Namespace:          accessor.Namespace(),
			Cluster:            multicluster.ClusterNameInContext(ctx),
		}
		isHealth = true
		err      error
	)
	// Start from any previously recorded status for this component.
	status = h.getServiceStatus(status)
	if !skipWorkload {
		isHealth, output, outputs, err = h.collectWorkloadHealthStatus(ctx, comp, appRev, &status, accessor)
		if err != nil {
			return nil, nil, nil, false, err
		}
	}
	var traitStatusList []common.ApplicationTraitStatus
collectNext:
	for _, tr := range comp.Traits {
		for _, filter := range traitFilters {
			// If filtered out by one of the filters
			if filter(*tr) {
				continue collectNext
			}
		}
		traitStatus, _outputs, err := h.collectTraitHealthStatus(comp, tr, appRev, overrideNamespace)
		if err != nil {
			return nil, nil, nil, false, err
		}
		outputs = append(outputs, _outputs...)
		isHealth = isHealth && traitStatus.Healthy
		// Surface the first non-empty trait message when the workload had none.
		if status.Message == "" && traitStatus.Message != "" {
			status.Message = traitStatus.Message
		}
		traitStatusList = append(traitStatusList, traitStatus)
		// Drop the stale entry for this trait; the fresh one is appended below.
		var oldStatus []common.ApplicationTraitStatus
		for _, _trait := range status.Traits {
			if _trait.Type != tr.Name {
				oldStatus = append(oldStatus, _trait)
			}
		}
		status.Traits = oldStatus
	}
	status.Traits = append(status.Traits, traitStatusList...)
	h.addServiceStatus(true, status)
	return &status, output, outputs, isHealth, nil
}
// setStatus fills the component status from a Terraform Configuration's
// observed state and reports whether the component is healthy: the
// observation must belong to the latest generation and app revision, and the
// apply state must be Available.
func setStatus(status *common.ApplicationComponentStatus, observedGeneration, generation int64, labels map[string]string,
	appRevName string, state terraformtypes.ConfigurationState, message string) bool {
	status.Message = message
	// Stale if the controller observed an older generation.
	upToDate := observedGeneration == 0 || observedGeneration == generation
	// Use AppRevision to avoid getting the configuration before the patch.
	if rev, ok := labels[oam.LabelAppRevision]; ok && rev != appRevName {
		upToDate = false
	}
	healthy := upToDate && state == terraformtypes.Available
	status.Healthy = healthy
	return healthy
}
// ApplyPolicies will render policies into manifests from appfile and dispatch them
// Note the builtin policy like apply-once, shared-resource, etc. is not handled here.
func (h *AppHandler) ApplyPolicies(ctx context.Context, af *appfile.Appfile) error {
	// Time this stage when a monitoring context is available.
	if ctx, ok := ctx.(monitorContext.Context); ok {
		subCtx := ctx.Fork("apply-policies", monitorContext.DurationMetric(func(v float64) {
			metrics.AppReconcileStageDurationHistogram.WithLabelValues("apply-policies").Observe(v)
		}))
		defer subCtx.Commit("finish apply policies")
	}
	policyManifests, err := af.GeneratePolicyManifests(ctx)
	if err != nil {
		return errors.Wrapf(err, "failed to render policy manifests")
	}
	if len(policyManifests) > 0 {
		for _, policyManifest := range policyManifests {
			// Tag each policy object with its owning application.
			util.AddLabels(policyManifest, map[string]string{
				oam.LabelAppName:      h.app.GetName(),
				oam.LabelAppNamespace: h.app.GetNamespace(),
			})
		}
		if err = h.Dispatch(ctx, "", common.PolicyResourceCreator, policyManifests...); err != nil {
			return errors.Wrapf(err, "failed to dispatch policy manifests")
		}
	}
	return nil
}
// extractOutputAndOutputs pulls the rendered workload ("output") and the
// auxiliary resources ("outputs") out of a template evaluation context.
// When no "output" entry exists, an empty object (not nil) is returned.
func extractOutputAndOutputs(templateContext map[string]interface{}) (*unstructured.Unstructured, []*unstructured.Unstructured) {
	workload := new(unstructured.Unstructured)
	if raw, ok := templateContext["output"]; ok && raw != nil {
		workload = &unstructured.Unstructured{Object: raw.(map[string]interface{})}
	}
	return workload, extractOutputs(templateContext)
}
// extractOutputs collects the auxiliary resources stored under the "outputs"
// key of a template evaluation context; map iteration order makes the result
// order unspecified. Always returns a non-nil slice.
func extractOutputs(templateContext map[string]interface{}) []*unstructured.Unstructured {
	collected := make([]*unstructured.Unstructured, 0)
	raw, ok := templateContext["outputs"]
	if !ok || raw == nil {
		return collected
	}
	for _, entry := range raw.(map[string]interface{}) {
		collected = append(collected, &unstructured.Unstructured{Object: entry.(map[string]interface{})})
	}
	return collected
}
|
package main
import (
"fmt"
"strings"
)
// main scores each name in classList by its runs of consecutive consonants.
// Per name: start from total = 5; each time a consonant run ends (a vowel or
// 'y' is hit, or the name ends), add a bonus that grows with run length
// (1->+1, 2->+2, 3->+4, 4->+8, 7->+64 mid-word; end-of-word runs of 5+ all
// add +8). Per-name totals are printed and summed into final.
// NOTE(review): "isConstant" is presumably a typo for "isConsonant", and the
// two switch tables disagree (mid-word handles 7 but not 5/6; end-of-word has
// a default) — confirm against the intended scoring rules.
func main() {
	// const min = 5
	classList := []string{"Chruschtschov",
		"Hristo",
		"Nguyen",
		"Dmitry",
		"Madchen",
		"Fujiyama",
		"Connor"}
	total := 5
	count := 0
	final := 0
	isConstant := false
	for _, x := range classList {
		x = strings.ToLower(x)
		// fmt.Println(isConstant, "what is the con")
		for _, y := range x {
			// fmt.Println(string(y))
			if string(y) != "a" && string(y) != "e" && string(y) != "i" && string(y) != "o" && string(y) != "u" && string(y) != "y" && isConstant == true {
				// Continuing an existing consonant run.
				count++
				fmt.Println(string(y), "isConstant is already set to true")
				// fmt.Println(count, "isConstant")
			} else if string(y) != "a" && string(y) != "e" && string(y) != "i" && string(y) != "o" && string(y) != "u" && string(y) != "y" {
				// First consonant of a new run.
				count++
				isConstant = true
				fmt.Println(string(y), "isConstant is set to true")
				// fmt.Println(count, "isConstant")
			} else {
				// Vowel (or 'y'): close out the current run and award its bonus.
				fmt.Println(string(y), "is not a constant", "this is the count", count)
				isConstant = false
				switch count {
				case 1:
					//total += min
					total += 1
				case 2:
					// total += min
					total += 2
				case 3:
					// total += min
					total += 4
				case 4:
					// total += min
					total += 8
				case 7:
					total += 64
				}
				count = 0
			}
		}
		// Award the bonus for a consonant run that reaches the end of the name.
		if count > 0 {
			switch count {
			case 1:
				//total += min
				total += 1
			case 2:
				// total += min
				total += 2
			case 3:
				// total += min
				total += 4
			case 4:
				// total += min
				total += 8
			default:
				total += 8
			}
		}
		fmt.Println(total)
		// Reset per-name accumulators before the next name.
		isConstant = false
		final += total
		total = 5
		count = 0
	}
	fmt.Println(final)
}
|
/*
Copyright © 2022 SUSE LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package shutdown
import (
"fmt"
"os"
"os/exec"
"regexp"
"runtime"
"time"
"github.com/rancher-sandbox/rancher-desktop/src/go/rdctl/pkg/factoryreset"
"github.com/sirupsen/logrus"
)
// shutdownData carries the options for FinishShutdown; waitForShutdown
// controls whether processes are polled for graceful exit before being
// force-killed.
type shutdownData struct {
	waitForShutdown bool
}

// newShutdownData builds a shutdownData with the given wait behavior.
func newShutdownData(waitForShutdown bool) *shutdownData {
	return &shutdownData{waitForShutdown: waitForShutdown}
}
// FinishShutdown - common function used by both the shutdown and factory-reset commands
// to ensure rancher desktop is no longer running after sending it a shutdown command.
// On darwin/linux it first waits for (or kills) the qemu VM process, then the
// desktop app itself; on windows a single platform-specific check/kill is used.
// Returns an error for an unsupported GOOS or when a check/kill step fails.
func FinishShutdown(waitForShutdown bool) error {
	s := newShutdownData(waitForShutdown)
	switch runtime.GOOS {
	case "darwin":
		if err := s.waitForAppToDieOrKillIt(checkProcessQemu, pkillQemu, 15, 2, "qemu"); err != nil {
			return err
		}
		return s.waitForAppToDieOrKillIt(checkProcessDarwin, pkillDarwin, 5, 1, "the app")
	case "linux":
		if err := s.waitForAppToDieOrKillIt(checkProcessQemu, pkillQemu, 15, 2, "qemu"); err != nil {
			return err
		}
		return s.waitForAppToDieOrKillIt(checkProcessLinux, pkillLinux, 5, 1, "the app")
	case "windows":
		return s.waitForAppToDieOrKillIt(factoryreset.CheckProcessWindows, factoryreset.KillRancherDesktop, 15, 2, "the app")
	default:
		return fmt.Errorf("unhandled runtime: %s", runtime.GOOS)
	}
}
// waitForAppToDieOrKillIt polls checkFunc up to retryCount times (sleeping
// retryWait seconds between polls) waiting for the process described by
// operation to exit. If it is still alive afterwards — or if this shutdown
// was not asked to wait at all — killFunc is invoked to force-terminate it.
func (s *shutdownData) waitForAppToDieOrKillIt(checkFunc func() (bool, error), killFunc func() error, retryCount int, retryWait int, operation string) error {
	if s.waitForShutdown {
		for attempt := 0; attempt < retryCount; attempt++ {
			if attempt > 0 {
				logrus.Debugf("checking %s showed it's still running; sleeping %d seconds\n", operation, retryWait)
				time.Sleep(time.Duration(retryWait) * time.Second)
			}
			running, err := checkFunc()
			if err != nil {
				return fmt.Errorf("while checking %s, found error: %w", operation, err)
			}
			if !running {
				logrus.Debugf("%s is no longer running\n", operation)
				return nil
			}
		}
	}
	logrus.Debugf("About to force-kill %s\n", operation)
	return killFunc()
}
/**
 * checkProcessX function returns [true, nil] if it detects the app is still running, [false, X] otherwise
 * The Linux/macOS functions never return a non-nil error and that field can be ignored.
 * If the Windows function returns a non-nil error, we can't conclude whether the specified process is running
 */

// checkProcessDarwin reports whether the macOS Rancher Desktop app is running
// (pgrep -f against the app-bundle binary path).
func checkProcessDarwin() (bool, error) {
	return checkProcessLinuxLike("-f", "Contents/MacOS/Rancher Desktop"), nil
}
// checkProcessLinux reports whether a rancher-desktop process exists on Linux.
func checkProcessLinux() (bool, error) {
	return checkProcessLinuxLike("rancher-desktop"), nil
}
// pidListPattern matches pgrep output consisting solely of PIDs and
// whitespace. Compiled once at package init instead of on every call.
var pidListPattern = regexp.MustCompile(`\A[0-9\s]+\z`)

// checkProcessLinuxLike reports whether pgrep finds a process matching
// commandPattern. Any pgrep failure (including "no process matched", which
// pgrep signals via a non-zero exit) yields false.
func checkProcessLinuxLike(commandPattern ...string) bool {
	result, err := exec.Command("pgrep", commandPattern...).CombinedOutput()
	if err != nil {
		return false
	}
	return pidListPattern.Match(result)
}
// RancherDesktopQemuCommand - be specific to avoid killing other VM-based processes running qemu
const RancherDesktopQemuCommand = "lima/bin/qemu-system.*rancher-desktop/lima/[0-9]/diffdisk"

// checkProcessQemu reports whether the Rancher Desktop qemu VM is running.
func checkProcessQemu() (bool, error) {
	return checkProcessLinuxLike("-f", RancherDesktopQemuCommand), nil
}
// pkill runs the platform pkill binary with args, forwarding its output to
// this process's stdout/stderr. Exit codes 0 (killed something) and 1 (no
// matching process — it already exited) are both treated as success.
func pkill(args ...string) error {
	pkillBinary := "pkill"
	if runtime.GOOS == "darwin" {
		// Absolute path on macOS — presumably to avoid a shadowing binary on
		// PATH; confirm the motivation.
		pkillBinary = "/usr/bin/pkill"
	}
	cmd := exec.Command(pkillBinary, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	err := cmd.Run()
	if err != nil {
		if exitError, ok := err.(*exec.ExitError); ok {
			// don't throw an error if the process we are killing has already exited
			if exitCode := exitError.ExitCode(); exitCode == 0 || exitCode == 1 {
				return nil
			}
		}
		return fmt.Errorf("error running pkill: %w", err)
	}
	return nil
}
// pkillQemu force-kills (SIGKILL) the Rancher Desktop qemu VM process.
func pkillQemu() error {
	err := pkill("-9", "-f", RancherDesktopQemuCommand)
	if err != nil {
		return fmt.Errorf("failed to kill qemu: %w", err)
	}
	return nil
}
// pkillDarwin force-kills (SIGKILL) the macOS Rancher Desktop app by its
// app-bundle binary path.
func pkillDarwin() error {
	err := pkill("-9", "-a", "-l", "-f", "Contents/MacOS/Rancher Desktop")
	if err != nil {
		return fmt.Errorf("failed to kill Rancher Desktop: %w", err)
	}
	return nil
}
// pkillLinux force-kills (SIGKILL) the Linux rancher-desktop process.
func pkillLinux() error {
	err := pkill("-9", "rancher-desktop")
	if err != nil {
		return fmt.Errorf("failed to kill Rancher Desktop: %w", err)
	}
	return nil
}
|
package config
import (
"io/ioutil"
"os"
"github.com/galenguyer/retina/core"
"gopkg.in/yaml.v2"
)
// Config is the top-level retina configuration: the list of services to monitor.
type Config struct {
	Services []core.Service `yaml:"services"`
}
// Load reads and parses the YAML service configuration at path.
func Load(path string) (*Config, error) {
	return loadConfigFile(path)
}
// loadConfigFile reads the YAML file at path, expands ${VAR} environment
// references in its contents, and unmarshals the result into a Config.
func loadConfigFile(path string) (config *Config, err error) {
	raw, readErr := ioutil.ReadFile(path)
	if readErr != nil {
		return nil, readErr
	}
	expanded := []byte(os.ExpandEnv(string(raw)))
	if err = yaml.Unmarshal(expanded, &config); err != nil {
		return nil, err
	}
	return config, nil
}
|
package core
import (
"log"
"sync"
"time"
)
// HookManager schedules periodic callbacks ("hooks") and supports stopping
// them all via the embedded stopper interface.
type HookManager interface {
	stopper
	AddHook(time.Duration, func() error)
}
// NewHookManager returns an empty, ready-to-use HookManager.
func NewHookManager() HookManager {
	return &hookManager{}
}
// hook is one scheduled callback: cb runs every interval until a signal on
// stopCh; doneCh reports that the ticker goroutine has fully exited.
type hook struct {
	cb       func() error
	interval time.Duration
	stopCh   chan struct{}
	doneCh   chan struct{}
}

// hookManager implements HookManager; the embedded RWMutex guards hooks.
type hookManager struct {
	hooks []*hook
	RWMutex
}
// Stop signals every registered hook's goroutine to stop and waits for each
// to acknowledge on its doneCh before returning. Always returns nil.
// NOTE(review): hooks is read without taking the embedded lock — confirm Stop
// is never called concurrently with AddHook.
func (m *hookManager) Stop() error {
	var wg sync.WaitGroup
	for _, h := range m.hooks {
		wg.Add(1)
		go func(h *hook) {
			defer wg.Done()
			h.stopCh <- struct{}{}
			<-h.doneCh
		}(h)
	}
	wg.Wait()
	return nil
}
// AddHook registers cb to run every interval until Stop is called. A
// dedicated ticker goroutine services the hook; on a stop signal it stops the
// ticker, closes stopCh, and then announces completion on doneCh.
func (m *hookManager) AddHook(interval time.Duration, cb func() error) {
	m.Lock()
	defer m.Unlock()
	entry := &hook{
		cb:       cb,
		interval: interval,
		stopCh:   make(chan struct{}, 1),
		doneCh:   make(chan struct{}, 1),
	}
	m.hooks = append(m.hooks, entry)
	go func(h *hook) {
		ticker := time.NewTicker(h.interval)
	loop:
		for {
			select {
			case <-ticker.C:
				// Callback errors are logged, never fatal.
				if err := h.cb(); err != nil {
					log.Println(err)
				}
			case <-h.stopCh:
				ticker.Stop()
				close(h.stopCh)
				break loop
			}
		}
		h.doneCh <- struct{}{}
	}(entry)
}
|
package main
import (
"fmt"
"io"
"net/http"
"os"
"strings"
"./plugins/whats_for_lunch"
"github.com/daneharrigan/hipchat"
)
// plugin is implemented by bot plugins: each receives every room message and
// may send any number of replies on replyChan.
type plugin interface {
	ProcessMessage(msg *hipchat.Message, replyChan chan string)
}
// main connects the bot to HipChat using env-var credentials, joins the
// configured room, and fans incoming room messages out to every registered
// plugin; plugin replies are serialized back through replyChan.
func main() {
	user := os.Getenv("HIPCHAT_USERNAME")
	pass := os.Getenv("HIPCHAT_PASSWORD")
	resource := "bot"
	roomJid := os.Getenv("HIPCHAT_ROOM_JID")
	fullName := os.Getenv("HIPCHAT_FULL_NAME")
	plugins := []plugin{
		whats_for_lunch.New(),
	}
	client, err := hipchat.NewClient(user, pass, resource)
	if err != nil {
		fmt.Printf("client error: %s\n", err)
		return
	}
	// Health endpoint and XMPP keep-alive both run in the background.
	go statusCheck()
	go client.KeepAlive()
	client.Status("chat")
	client.Join(roomJid, fullName)
	replyChan := make(chan string)
	go reply(replyChan, client, roomJid, fullName)
	for {
		message := <-client.Messages()
		// Filter out own messages
		if !strings.HasSuffix(message.From, fmt.Sprint("/", fullName)) {
			fmt.Printf("Received message from %s: %s\n", message.From, message.Body)
			// Each plugin handles the message on its own goroutine.
			for _, plugin := range plugins {
				go plugin.ProcessMessage(message, replyChan)
			}
		}
	}
}
// reply forwards every message from replyChan into the chat room; running on
// a single goroutine serializes writes to the client.
func reply(replyChan chan string, client *hipchat.Client, roomJid string, fullName string) {
	for {
		msg := <-replyChan
		client.Say(roomJid, fullName, msg)
	}
}
// statusCheck serves a trivial health endpoint on :8080 that replies "OK".
// ListenAndServe blocks; if it ever returns, report the error instead of
// silently dropping it (the original discarded the return value).
func statusCheck() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		io.WriteString(w, "OK")
	})
	if err := http.ListenAndServe(":8080", nil); err != nil {
		fmt.Printf("status server error: %s\n", err)
	}
}
|
package main
import (
"io"
"os"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promlog"
kingpin "gopkg.in/alecthomas/kingpin.v2"
)
// setupLogger lazily initializes the package-level logger (declared elsewhere
// in this package) with the default level and an empty allowed format.
func setupLogger() {
	if logger == nil {
		logger = promlog.New(&promlog.Config{Level: &defaultLogLevel, Format: &promlog.AllowedFormat{}})
	}
}
// addMetricsFileFlag registers the required --metrics.file flag (a path to an
// existing file of prometheus text-format metrics) on command, storing the
// chosen path in target.
func addMetricsFileFlag(command *kingpin.CmdClause, target *string) {
	command.Flag("metrics.file", "Filename containing input metrics in prometheus export format.").
		Required().ExistingFileVar(target)
}
// loadSamplesFile opens filename and decodes all prometheus text-format
// samples from it. The file handle is closed before returning (the original
// leaked the open file).
func loadSamplesFile(filename string) ([]*model.Sample, error) {
	file, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	return readSamples(file)
}
// readSamples decodes all prometheus text-format metrics from reader into a
// flat list of samples, stamping each with the current time. Decoding stops
// cleanly at EOF; any other decode error aborts the whole read.
func readSamples(reader io.Reader) ([]*model.Sample, error) {
	dec := &expfmt.SampleDecoder{
		Dec: expfmt.NewDecoder(reader, expfmt.FmtText),
		Opts: &expfmt.DecodeOptions{
			Timestamp: model.Now(),
		},
	}
	var all model.Vector
	for {
		var smpls model.Vector
		err := dec.Decode(&smpls)
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
		all = append(all, smpls...)
	}
	return all, nil
}
|
package 字符串
import "fmt"
// freqAlphabets decodes a string in "alphabet to integer" mapping: tokens
// "1"-"9" map to 'a'-'i' and "10#"-"26#" map to 'j'-'z'. The input is scanned
// from the tail so a '#' immediately identifies a two-digit token.
func freqAlphabets(s string) string {
	table := getCharMap()
	decoded := make([]byte, 0)
	i := len(s) - 1
	for i >= 0 {
		if s[i] == '#' {
			// Two digits plus the trailing '#', e.g. "12#".
			decoded = append(decoded, table[s[i-2:i+1]])
			i -= 3
		} else {
			decoded = append(decoded, table[s[i:i+1]])
			i--
		}
	}
	// Tail-first scan produced the letters backwards.
	return string(reverse(decoded))
}

// getCharMap builds the token -> letter lookup table for both the one-digit
// ("1".."9") and two-digit-with-hash ("10#".."26#") encodings.
func getCharMap() map[string]byte {
	table := make(map[string]byte)
	for n := 1; n <= 9; n++ {
		table[fmt.Sprintf("%d", n)] = byte(n - 1 + 'a')
	}
	for n := 10; n <= 26; n++ {
		table[fmt.Sprintf("%d#", n)] = byte(n - 1 + 'a')
	}
	return table
}

// reverse flips array in place and returns it for call-chaining.
func reverse(array []byte) []byte {
	for left, right := 0, len(array)-1; left < right; left, right = left+1, right-1 {
		array[left], array[right] = array[right], array[left]
	}
	return array
}
/*
题目链接: https://leetcode-cn.com/problems/decrypt-string-from-alphabet-to-integer-mapping/
总结:
1. 这题反向遍历更好理解。
*/
|
package utils
import (
	"io/ioutil"
	"os"
	"reflect"
	"strings"
	"testing"
)
// Test_listDirectory verifies ListDirectory returns the names of files
// created inside a directory. The temp directory is now removed on exit so
// repeated runs don't accumulate tmp* directories (the original never cleaned
// up).
func Test_listDirectory(t *testing.T) {
	var files = []string{"test.conf"}
	dir, err := ioutil.TempDir(".", "tmp")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)
	if err := ioutil.WriteFile(strings.Join([]string{dir, "test.conf"}, "/"), []byte(""), 0777); err != nil {
		t.Fatal(err)
	}
	expectedFiles, err := ListDirectory(dir)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(expectedFiles, files) {
		t.Fatal("not match expected result")
	}
}
|
package leetcode
// moveZeroes compacts all non-zero elements of nums to the front, preserving
// their relative order, and pushes the zeros to the back — in place, O(n).
func moveZeroes(nums []int) {
	insert := 0
	for scan := range nums {
		if nums[scan] == 0 {
			continue
		}
		nums[insert], nums[scan] = nums[scan], nums[insert]
		insert++
	}
}
|
package caaa
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01000101 is the XML envelope for the caaa.010.001.01
// AcceptorReconciliationResponse message.
type Document01000101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:caaa.010.001.01 Document"`
	Message *AcceptorReconciliationResponseV01 `xml:"AccptrRcncltnRspn"`
}
// AddMessage allocates the envelope's message payload and returns it for population.
func (d *Document01000101) AddMessage() *AcceptorReconciliationResponseV01 {
	d.Message = new(AcceptorReconciliationResponseV01)
	return d.Message
}
// Scope
// The AcceptorReconciliationResponse message is sent by the acquirer to communicate to the card acceptor the totals of the card payment transaction performed for the reconciliation period. An agent never forwards the message.
// Usage
// The AcceptorReconciliationResponse message is used to compare the totals between a card acceptor and an acquirer for the reconciliation period.
type AcceptorReconciliationResponseV01 struct {
	// Reconciliation response message management information.
	Header *iso20022.Header1 `xml:"Hdr"`
	// Information related to the reconciliation response.
	ReconciliationResponse *iso20022.AcceptorReconciliationResponse1 `xml:"RcncltnRspn"`
	// Trailer of the message containing a MAC.
	SecurityTrailer *iso20022.ContentInformationType3 `xml:"SctyTrlr"`
}
// AddHeader allocates the message's header and returns it for population.
func (a *AcceptorReconciliationResponseV01) AddHeader() *iso20022.Header1 {
	a.Header = new(iso20022.Header1)
	return a.Header
}
// AddReconciliationResponse allocates the reconciliation response body and
// returns it for population.
func (a *AcceptorReconciliationResponseV01) AddReconciliationResponse() *iso20022.AcceptorReconciliationResponse1 {
	a.ReconciliationResponse = new(iso20022.AcceptorReconciliationResponse1)
	return a.ReconciliationResponse
}
// AddSecurityTrailer allocates the MAC trailer and returns it for population.
func (a *AcceptorReconciliationResponseV01) AddSecurityTrailer() *iso20022.ContentInformationType3 {
	a.SecurityTrailer = new(iso20022.ContentInformationType3)
	return a.SecurityTrailer
}
|
package cmd
import (
"context"
"database/sql"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
"github.com/garyburd/redigo/redis"
_ "github.com/lib/pq"
homedir "github.com/mitchellh/go-homedir"
"github.com/neelance/graphql-go"
"github.com/neelance/graphql-go/relay"
"github.com/s1gu/s1gu-lib/cache"
"github.com/s1gu/s1gu-lib/db"
"github.com/s1gu/s1gu_graphql/starwars"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
// Package-wide singletons populated by the init* helpers below and shared
// with the starwars resolvers.
var (
	logger    *log.Logger // JSON logger (see initLogger)
	dbPool    *sql.DB     // postgres connection pool (see initDB)
	cachePool *redis.Pool // redis connection pool (see initCache)
	cfgFile   string      // --config flag value
)
// RootCmd is the top-level cobra command: PreRun prints the effective
// configuration and kicks off DB/cache connections in the background; Run
// serves the GraphiQL page on "/" and the GraphQL API on "/query".
var RootCmd = &cobra.Command{
	Use:   "AuthS Api",
	Short: "AuthS Api",
	Long:  `AuthS website API, Provide data for AuthS FrontEnd`,
	PreRun: func(cmd *cobra.Command, args []string) {
		fmt.Println(`
AUTH SECURE SERVER
`)
		fmt.Println("AuthS running")
		fmt.Println("Version:", viper.GetString("app.version"))
		fmt.Println("App port:", viper.GetString("app.port"))
		fmt.Println("Host :", viper.GetString("database.host"))
		fmt.Println("DBName :", viper.GetString("database.name"))
		// Connections are established asynchronously; requests arriving before
		// they finish will see uninitialized pools.
		go initDB()
		go initCache()
	},
	Run: func(cmd *cobra.Command, args []string) {
		// handler graphql server for client
		http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Write(page)
		}))
		http.Handle("/query", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			next := &relay.Handler{Schema: starwars.Schema}
			// Strip the "Bearer " prefix and stash the raw token in the request
			// context for resolvers to read.
			// NOTE(review): a plain string context key can collide; the usual fix
			// is a package-private key type (requires coordinating with readers).
			authorization := r.Header.Get("Authorization")
			token := strings.Replace(authorization, "Bearer ", "", 1)
			ctx := context.WithValue(r.Context(), "AuthorizationToken", token)
			next.ServeHTTP(w, r.WithContext(ctx))
		}))
		// NOTE(review): ListenAndServe's error is discarded — a bind failure
		// exits silently.
		http.ListenAndServe(fmt.Sprintf(":%d", viper.GetInt("app.port")), nil)
	},
}
// page is the embedded GraphiQL client HTML served at "/"; it posts GraphQL
// queries back to the /query endpoint via fetch.
var page = []byte(`
<!DOCTYPE html>
<html>
<head>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/graphiql/0.10.2/graphiql.css" />
<script src="https://cdnjs.cloudflare.com/ajax/libs/fetch/1.1.0/fetch.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/react/15.5.4/react.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/react/15.5.4/react-dom.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/graphiql/0.10.2/graphiql.js"></script>
</head>
<body style="width: 100%; height: 100%; margin: 0; overflow: hidden;">
<div id="graphiql" style="height: 100vh;">Loading...</div>
<script>
function graphQLFetcher(graphQLParams) {
return fetch("/query", {
method: "post",
body: JSON.stringify(graphQLParams),
credentials: "include",
}).then(function (response) {
return response.text();
}).then(function (responseBody) {
try {
return JSON.parse(responseBody);
} catch (error) {
return responseBody;
}
});
}
ReactDOM.render(
React.createElement(GraphiQL, {fetcher: graphQLFetcher}),
document.getElementById("graphiql")
);
</script>
</body>
</html>
`)
// Execute runs the root command and exits non-zero on error.
func Execute() {
	if err := RootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}
// init wires up config/logger/schema initialization hooks and CLI flags.
func init() {
	cobra.OnInitialize(initConfig, initLogger, initGraphQLserver)
	RootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "config file (default is $HOME/.s1gu.config.toml)")
	RootCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
// initConfig locates and reads the TOML configuration: an explicit --config
// path wins; otherwise .s1gu.config is searched for in ".", $HOME, and a
// fixed developer path. Environment variables override file values.
func initConfig() {
	viper.SetConfigType("toml")
	if cfgFile != "" {
		viper.SetConfigFile(cfgFile)
	} else {
		home, err := homedir.Dir()
		if err != nil {
			panic(err)
		}
		viper.AddConfigPath(".")
		viper.AddConfigPath(home)
		// NOTE(review): hardcoded personal machine path — should be removed or
		// made configurable before shipping.
		viper.AddConfigPath("/Users/zzz/go/src/github.com/s1gu/exp-modem2phone")
		viper.SetConfigName(".s1gu.config")
	}
	viper.AutomaticEnv()
	if err := viper.ReadInConfig(); err == nil {
		fmt.Println("Using config file:", viper.ConfigFileUsed())
	}
}
// initDB opens the postgres pool from viper settings and shares it with the
// starwars resolvers; panics if the connection cannot be established.
func initDB() {
	dbOptions := db.DBOptions{
		Host:     viper.GetString("database.host"),
		Port:     viper.GetInt("database.port"),
		Username: viper.GetString("database.username"),
		Password: viper.GetString("database.password"),
		DBName:   viper.GetString("database.name"),
		SSLMode:  viper.GetString("database.sslmode"),
	}
	fmt.Println("DBptions:", dbOptions)
	dbConn, err := db.Connect(dbOptions)
	if err != nil {
		fmt.Println("Gagal konek", err)
		panic(err)
	}
	dbPool = dbConn
	starwars.DbPool = dbPool
}
// initCache builds the redis pool from viper settings and shares it with the
// starwars resolvers.
func initCache() {
	cacheOptions := cache.CacheOptions{
		Host:        viper.GetString("cache.host"),
		Port:        viper.GetInt("cache.port"),
		MaxIdle:     viper.GetInt("cache.max_idle"),
		IdleTimeout: viper.GetInt("cache.idle_timeout"),
		Enabled:     viper.GetBool("cache.enabled"),
	}
	pool := cache.Connect(cacheOptions)
	cachePool = pool
	starwars.CachePool = cachePool
}
// initLogger creates the shared JSON logger at Info level on stdout.
func initLogger() {
	logger = log.New()
	logger.Formatter = &log.JSONFormatter{}
	logger.Out = os.Stdout
	logger.Level = log.InfoLevel
	starwars.Logger = logger
}
// initGraphQLserver loads the starwars GraphQL schema from disk and parses it
// into the package-level schema used by the /query handler. Panics on any
// failure since the server cannot run without a schema. (Restructured to the
// idiomatic early-return form; the original put the success path in an else
// branch with an exported-looking local name.)
func initGraphQLserver() {
	// Schema for starwars
	schemaBytes, err := ioutil.ReadFile("starwars/schema.graphql")
	if err != nil {
		panic(err)
	}
	starwars.Schema = graphql.MustParseSchema(string(schemaBytes), &starwars.Resolver{})
}
|
package queue
import (
"github.com/google/go-cmp/cmp"
"testing"
)
// TestPush verifies Push appends nodes at the tail for both a non-empty and
// an initially empty queue. Fixed: the final comparison now uses the `got`
// snapshot instead of re-reading `s`, matching the failure message.
func TestPush(t *testing.T) {
	tests := []struct {
		in   func() *Queue
		add  []interface{}
		want func() *Queue
	}{
		{
			in: func() *Queue {
				n := &Node{1, nil}
				return &Queue{head: n, tail: n}
			},
			add: []interface{}{2},
			want: func() *Queue {
				t := &Node{2, nil}
				n := &Node{1, t}
				return &Queue{head: n, tail: t}
			},
		},
		{
			in: func() *Queue {
				return &Queue{head: nil, tail: nil}
			},
			add: []interface{}{3, 4, 5},
			want: func() *Queue {
				t := &Node{5, nil}
				n := &Node{3, &Node{4, t}}
				return &Queue{head: n, tail: t}
			},
		},
	}
	for _, test := range tests {
		s := *test.in()
		for _, add := range test.add {
			s.Push(add)
		}
		got := s
		want := *test.want()
		if !cmp.Equal(got, want) {
			t.Errorf("Push(%v) = %v, expected %v", test.add, got, want)
		}
	}
}
// TestPop verifies Pop removes the head node for a one-element queue (leaving
// it empty) and a three-element queue. Fixed: compare via the `got` snapshot
// rather than re-reading `s`, matching the failure message.
func TestPop(t *testing.T) {
	tests := []struct {
		in   func() *Queue
		want func() *Queue
	}{
		{
			in: func() *Queue {
				n := &Node{1, nil}
				return &Queue{head: n, tail: n}
			},
			want: func() *Queue {
				return &Queue{head: nil, tail: nil}
			},
		},
		{
			in: func() *Queue {
				t := &Node{5, nil}
				n := &Node{3, &Node{4, t}}
				return &Queue{head: n, tail: t}
			},
			want: func() *Queue {
				t := &Node{5, nil}
				n := &Node{4, t}
				return &Queue{head: n, tail: t}
			},
		},
	}
	for _, test := range tests {
		s := *test.in()
		s.Pop()
		got := s
		want := *test.want()
		if !cmp.Equal(got, want) {
			t.Errorf("Pop() = %v, expected %v", got, want)
		}
	}
}
func TestFront(t *testing.T) {
tests := []struct {
in func() *Queue
want interface{}
}{
{
in: func() *Queue {
var q Queue
q.Push(2)
q.Push(11)
return &q
},
want: 2,
},
{
in: func() *Queue {
var q Queue
q.Push("ey")
q.Push("ho")
q.Push("yo")
return &q
},
want: "ey",
},
}
for _, test := range tests {
in := *test.in()
got := in.Front()
if got != test.want {
t.Errorf("Top() = %v, expected %v", got, test.want)
}
}
} |
package server
import (
"encoding/json"
"fmt"
"net/http"
"github.com/Sirupsen/logrus"
"github.com/bryanl/dolb/dao"
"github.com/bryanl/dolb/pkg/app"
"github.com/bryanl/dolb/service"
)
// BootstrapClusterResponse is a bootstrap cluster response.
type BootstrapClusterResponse struct {
	// LoadBalancer describes the load balancer created for the cluster.
	LoadBalancer LoadBalancerResponse `json:"load_balancer"`
}
// LBCreateHandler is a http handler for creating a load balancer.
// It decodes a BootstrapConfig from the request body (422 on bad JSON),
// creates the load balancer (400 on failure), and replies 201 with the
// created resource.
func LBCreateHandler(c interface{}, r *http.Request) service.Response {
	config := c.(*Config)
	defer r.Body.Close()
	var bc app.BootstrapConfig
	err := json.NewDecoder(r.Body).Decode(&bc)
	if err != nil {
		return service.Response{Body: fmt.Errorf("could not decode json: %v", err), Status: 422}
	}
	lb, err := CreateLoadBalancer(bc, config)
	if err != nil {
		return service.Response{Body: err, Status: 400}
	}
	bcResp := BootstrapClusterResponse{
		LoadBalancer: NewLoadBalancerFromDAO(*lb, config.BaseDomain),
	}
	return service.Response{Body: bcResp, Status: http.StatusCreated}
}
// CreateLoadBalancer creates a load balancer.
// It validates the DigitalOcean token, persists the record, bootstraps the
// cluster via the configured ClusterOps, and registers the cluster ID in the
// KV store.
func CreateLoadBalancer(bc app.BootstrapConfig, config *Config) (*dao.LoadBalancer, error) {
	if bc.DigitalOceanToken == "" {
		return nil, fmt.Errorf("DigitalOcean token is required")
	}
	lb := config.DBSession.NewLoadBalancer()
	lb.Name = bc.Name
	lb.Region = bc.Region
	lb.DigitaloceanAccessToken = bc.DigitalOceanToken
	err := config.DBSession.SaveLoadBalancer(lb)
	if err != nil {
		return nil, err
	}
	co := config.ClusterOpsFactory()
	bo := &BootstrapOptions{
		Config:          config,
		LoadBalancer:    lb,
		BootstrapConfig: &bc,
	}
	err = co.Bootstrap(bo)
	if err != nil {
		config.GetLogger().WithError(err).Error("could not bootstrap cluster")
		return nil, err
	}
	// KVS registration failure is logged but not returned — presumably
	// best-effort; confirm callers can tolerate a missing kvs entry.
	_, err = config.KVS.Set("/dolb/clusters/"+lb.ID, lb.ID, nil)
	if err != nil {
		config.GetLogger().WithError(err).Error("could not create cluster in kvs")
	}
	config.GetLogger().WithFields(logrus.Fields{
		"cluster-name":   bc.Name,
		"cluster-region": bc.Region,
	}).Info("created load balancer")
	return lb, nil
}
|
package main
import (
"testing"
)
// TestLongestConsecutive verifies the longest-consecutive-sequence length.
// {100, 1, 300} contains no consecutive pair, so the expected answer is 1.
// (The original test invoked the function without asserting anything.)
func TestLongestConsecutive(t *testing.T) {
	if got, want := LongestConsecutive([]int{100, 1, 300}), 1; got != want {
		t.Errorf("LongestConsecutive() = %v, want %v", got, want)
	}
}
|
package pg
import (
"github.com/kyleconroy/sqlc/internal/sql/ast"
)
// CreateDomainStmt mirrors PostgreSQL's CREATE DOMAIN parse node.
type CreateDomainStmt struct {
	// Domainname holds the qualified name components of the new domain.
	Domainname *ast.List
	// TypeName is the underlying base type of the domain.
	TypeName *TypeName
	// CollClause is the optional COLLATE clause.
	CollClause *CollateClause
	// Constraints lists CHECK / NOT NULL constraints attached to the domain.
	Constraints *ast.List
}
// Pos returns the node's source position; this node type does not track
// position information, so it is always 0.
func (n *CreateDomainStmt) Pos() int {
	return 0
}
|
package domain
// UserFriendDomain maps one row of the user-friend relation table:
// a directed friendship edge from FromUserID to ToUserID.
type UserFriendDomain struct {
	Id         int `db:"ID, primarykey, autoincrement"`
	FromUserId int `db:"FromUserID"`
	ToUserId   int `db:"ToUserID"`
}
|
package database
import (
"context"
"errors"
"log"
"reflect"
"time"
"github.com/jmoiron/sqlx"
_ "github.com/mattn/go-sqlite3"
)
// Config holds the settings needed to open a database connection.
type Config struct {
	// DSN is the sqlite3 data source name (typically a file path).
	DSN string
}
// Open knows how to open a database connection based on the configuration.
// Note that sqlx.Open is lazy — it does not verify connectivity; pair it
// with StatusCheck before relying on the connection.
func Open(cfg Config) (*sqlx.DB, error) {
	return sqlx.Open("sqlite3", cfg.DSN)
}
// StatusCheck returns nil if it can successfully talk to the database. It
// returns a non-nil error otherwise.
func StatusCheck(ctx context.Context, db *sqlx.DB) error {
	// First check we can ping the database.
	var pingError error
	for attempts := 1; ; attempts++ {
		pingError = db.Ping()
		if pingError == nil {
			break
		}
		// Linear backoff: attempt N waits N*100ms before retrying.
		time.Sleep(time.Duration(attempts) * 100 * time.Millisecond)
		if ctx.Err() != nil {
			return ctx.Err()
		}
	}
	// Make sure we didn't timeout or be cancelled.
	if ctx.Err() != nil {
		return ctx.Err()
	}
	// Run a simple query to determine connectivity. Running this query forces a
	// round trip through the database.
	const q = `SELECT true`
	var tmp bool
	return db.QueryRowContext(ctx, q).Scan(&tmp)
}
// NamedExecContext executes a named statement against the database,
// discarding the result. The logger parameter is currently unused but kept
// so existing call sites continue to compile.
func NamedExecContext(ctx context.Context, log *log.Logger, db *sqlx.DB, query string, data interface{}) error {
	_, err := db.NamedExecContext(ctx, query, data)
	return err
}
// NamedQuerySlice is a helper function for executing queries that return a
// collection of data to be unmarshaled into a slice.
//
// dest must be a pointer to a slice; each row is struct-scanned into a new
// element appended to that slice.
func NamedQuerySlice(ctx context.Context, db sqlx.ExtContext, query string, data interface{}, dest interface{}) error {
	val := reflect.ValueOf(dest)
	if val.Kind() != reflect.Ptr || val.Elem().Kind() != reflect.Slice {
		return errors.New("must provide a pointer to a slice")
	}
	rows, err := sqlx.NamedQueryContext(ctx, db, query, data)
	if err != nil {
		return err
	}
	// Always release the rows (and their connection), including on the
	// early-return scan-error path, which previously leaked them.
	defer rows.Close()
	slice := val.Elem()
	for rows.Next() {
		v := reflect.New(slice.Type().Elem())
		if err := rows.StructScan(v.Interface()); err != nil {
			return err
		}
		slice.Set(reflect.Append(slice, v.Elem()))
	}
	// Surface any error encountered during iteration (previously ignored).
	return rows.Err()
}
|
package tool
// HashCode computes a Java String.hashCode-style hash of s:
// hash = hash*31 + byte for each byte, accumulated in a wrapping int32.
// The empty string hashes to 0.
//
// Cleanups vs the original (behavior unchanged): removed the no-op
// `hash |= 0`, the redundant `chr` temporary with its byte->rune->int
// round-trip, and the redundant empty-string early return.
func HashCode(s string) (hash int32) {
	for i := 0; i < len(s); i++ {
		// (hash << 5) - hash == hash * 31; int32 overflow wraps by design.
		hash = ((hash << 5) - hash) + int32(s[i])
	}
	return hash
}
|
package options
import "github.com/spf13/pflag"
// AppHealthOptions holds the command-line options for the app health check.
type AppHealthOptions struct {
	// JoinIp is the IP address to join (flag: --join).
	JoinIp string
	// Namespace is the target namespace (flag: --namespace).
	Namespace string
	// SvcName is the service name (flag: --svc_name).
	SvcName string
}
// NewAppHealthOptions returns an options struct with every field at its
// default (the empty string — identical to the zero value, so no explicit
// field initialization is needed).
func NewAppHealthOptions() *AppHealthOptions {
	return &AppHealthOptions{}
}
// Validate checks the options for invalid combinations. There are currently
// no validation rules, so it always returns nil.
func (o *AppHealthOptions) Validate() []error {
	return nil
}
// AddFlags registers the health-check flags on the given flag set, binding
// each flag to the corresponding field. A nil receiver is tolerated and
// registers nothing.
func (o *AppHealthOptions) AddFlags(fs *pflag.FlagSet) {
	if o == nil {
		return
	}
	fs.StringVar(&o.JoinIp, "join", o.JoinIp, "Join IP")
	fs.StringVar(&o.Namespace, "namespace", o.Namespace, "")
	fs.StringVar(&o.SvcName, "svc_name", o.SvcName, "")
}
|
/*
Package qu is a simple executor service. You add jobs to a queue, then run them concurrently with a configurable
amount of concurrency.
*/
package qu
import (
"sync"
"time"
"github.com/ecnepsnai/qu/atomic"
)
// Queue describes a queue of jobs
type Queue struct {
	// Done is set to true after Run has executed every queued job.
	Done bool
	// jobs and payloads are parallel slices: payloads[i] is the argument
	// passed to jobs[i] when it runs.
	jobs     []func(payload interface{})
	payloads []interface{}
}
// Add will add a new job to the queue. When the job is run it will be called with the value of payload.
// The job will not be invoked until queue.Run is called.
//
// NOTE(review): there is no locking here — presumably Add must not be called
// concurrently with Run or with other Adds; confirm with the package docs.
func (q *Queue) Add(job func(payload interface{}), payload interface{}) {
	q.jobs = append(q.jobs, job)
	q.payloads = append(q.payloads, payload)
}
// Run will begin to execute all of the jobs in the queue, running each job concurrently up-to the specified number of
// threads. Run will block until all jobs have completed. After this, the Done property on the queue will be true.
//
// Jobs may not be executed in the same order that they were added. If any jobs panics, the panic will bubble up to
// here.
func (q *Queue) Run(threads int) {
	// Count of jobs currently executing; shared with the worker goroutines.
	runningJobs := atomic.NewInteger(0)
	remainingJobs := len(q.jobs)
	wg := &sync.WaitGroup{}
	for remainingJobs > 0 {
		if runningJobs.Get() < threads {
			wg.Add(1)
			runningJobs.IncrementAndGet()
			// Dequeue the head job and its payload, shifting both slices.
			job := q.jobs[0]
			payload := q.payloads[0]
			q.jobs = append(q.jobs[:0], q.jobs[1:]...)
			q.payloads = append(q.payloads[:0], q.payloads[1:]...)
			remainingJobs = len(q.jobs)
			go func() {
				job(payload)
				runningJobs.DecrementAndGet()
				wg.Done()
			}()
			// Last job has been dispatched: block until every in-flight
			// job finishes before leaving the loop.
			if remainingJobs == 0 {
				wg.Wait()
			}
		} else {
			// All worker slots are busy; poll again shortly.
			time.Sleep(1 * time.Millisecond)
		}
	}
	q.Done = true
}
|
package main
import (
"github.com/tanema/amore"
"github.com/tanema/gocraftmini/game"
)
// main constructs the game world and hands its update/draw callbacks to the
// amore engine's main loop.
// NOTE(review): the NewWorld(149, 20, 300, false) arguments are undocumented
// here — confirm their meaning (dimensions/seed?) against the game package.
func main() {
	world := game.NewWorld(149, 20, 300, false)
	amore.Start(world.Update, world.Draw)
}
|
package scache
import (
"errors"
"strconv"
"sync/atomic"
"testing"
"github.com/stretchr/testify/require"
)
// TestCache runs each cache scenario against every supported cache kind,
// stopping at the first failing scenario.
//
// Fix: the subtest closures previously discarded their own *testing.T and
// captured the outer t, so require's FailNow (t.Fatalf) and failure
// attribution targeted the parent test from inside a subtest goroutine.
// Each closure now uses the *testing.T that t.Run hands it.
func TestCache(t *testing.T) {
	for _, testInfo := range []struct {
		Name string
		Func func(Kind) func(*testing.T)
	}{
		{
			Name: "GetSet",
			Func: func(kind Kind) func(*testing.T) {
				return func(t *testing.T) {
					testCacheSetAndGet(t, kind)
				}
			},
		},
		{
			Name: "LoadFunc",
			Func: func(kind Kind) func(*testing.T) {
				return func(t *testing.T) {
					testCacheLoadFunc(t, kind)
				}
			},
		},
	} {
		for _, kind := range []Kind{KindLRU} {
			if !t.Run(testInfo.Name, testInfo.Func(kind)) {
				return
			}
		}
	}
}
// testCacheLoadFunc verifies loader-backed gets: key "1" loads successfully
// while any other key surfaces the loader's error. The lookups run three
// times so later iterations exercise the cached (already-loaded) path.
func testCacheLoadFunc(t *testing.T, kind Kind) {
	conf := &Config{
		Shards:  2,
		MaxSize: 4,
		Kind:    kind,
	}
	// Loader only knows key "1"; everything else fails.
	loadFunc := func(key interface{}) (val interface{}, err error) {
		if key == "1" {
			val, err = "1", nil
		} else {
			err = errors.New("failed to upload")
		}
		return
	}
	cache, err := FromConfig(conf).LoaderFunc(loadFunc).Build()
	require.NoError(t, err)
	for i := 0; i < 3; i++ {
		{
			val, err := cache.Get(strconv.Itoa(1))
			require.NoError(t, err)
			require.Equal(t, "1", val)
		}
		{
			val, err := cache.Get(strconv.Itoa(2))
			require.EqualError(t, err, "failed to upload")
			require.Nil(t, val)
		}
	}
}
// testCacheSetAndGet verifies basic Set/Get round-trips and that entries are
// distributed across all shards.
func testCacheSetAndGet(t *testing.T, kind Kind) {
	conf := Config{
		Shards:  2,
		MaxSize: 4,
		Kind:    kind,
	}
	cache, err := FromConfig(&conf).Build()
	require.NoError(t, err)
	keys := []string{"a", "b", "c", "d"}
	for i, k := range keys {
		cache.Set(k, i)
		val, err := cache.Get(k)
		require.NoError(t, err)
		require.Equal(t, i, val)
	}
	// Every shard should have received at least one of the four entries.
	for i, b := range cache.shards {
		require.Greater(t, b.Count(), int64(0), i)
	}
}
// BenchmarkBaseSCache measures mixed Get/Del throughput on an LRU cache whose
// key space is deliberately larger than its capacity (forcing evictions),
// then sanity-checks that at least half of the operations were hits.
func BenchmarkBaseSCache(b *testing.B) {
	// Extra keys beyond the cache size guarantee eviction traffic.
	countOverflowKeys := 101
	size := b.N
	if size > countOverflowKeys {
		size /= 3
		size -= countOverflowKeys
	}
	keysCount := size + countOverflowKeys
	keys := make([]interface{}, keysCount)
	for i := 0; i < keysCount; i++ {
		keys[i] = "------" + strconv.Itoa(i) // "----" - check hash function for shard id
	}
	// Loader echoes the key, so a miss is repaired by a successful load.
	loadFunc := func(key interface{}) (value interface{}, err error) {
		value = key
		return
	}
	cache, err := New(100, int64(size)).ItemsToPrune(20).LRU().LoaderFunc(loadFunc).Build()
	require.NoError(b, err)
	defer cache.Close()
	var (
		hits  int64
		pHits = &hits
	)
	b.ResetTimer()
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		// Every third operation deletes; the others get (and count hits).
		op := i % 3
		key := keys[i%len(keys)]
		switch op {
		case 1:
			cache.Del(key)
		default:
			_, err = cache.Get(key)
			if err == nil {
				atomic.AddInt64(pHits, 1)
			}
		}
	}
	b.StopTimer()
	if hits < int64(b.N/2) {
		b.Error("hits", hits, "b.N", b.N)
	}
}
|
// findK returns the k-th smallest element (0-indexed) of nums restricted to
// [start, end], using quickselect with a Lomuto partition around nums[end].
// The slice is partially reordered in place.
func findK(nums []int, start int, end int, k int) int {
	pivot := nums[end]
	boundary := start
	for i := start; i < end; i++ {
		if nums[i] < pivot {
			nums[boundary], nums[i] = nums[i], nums[boundary]
			boundary++
		}
	}
	// Place the pivot at its final sorted position.
	nums[boundary], nums[end] = nums[end], nums[boundary]
	switch {
	case boundary == k:
		return pivot
	case boundary < k:
		return findK(nums, boundary+1, end, k)
	default:
		return findK(nums, start, boundary-1, k)
	}
}
// abs returns the absolute difference |v1 - v2|.
func abs(v1, v2 int) int {
	if v1 < v2 {
		return v2 - v1
	}
	return v1 - v2
}
// minMoves2 returns the minimum number of ±1 moves needed to make every
// element equal: the sum of absolute distances to the median, which findK
// selects in expected linear time (reordering nums in place).
func minMoves2(nums []int) int {
	if len(nums) == 0 {
		return 0
	}
	median := findK(nums, 0, len(nums)-1, len(nums)/2)
	total := 0
	for _, v := range nums {
		total += abs(median, v)
	}
	return total
}
|
package recursion
import (
"fmt"
"testing"
"github.com/sko00o/leetcode-adventure/nary-tree/treenode"
)
// Test_maxDepth runs the same table of example N-ary trees against both
// maxDepth implementations.
func Test_maxDepth(t *testing.T) {
	type args struct {
		root *Node
	}
	tests := []struct {
		name string
		args args
		want int
	}{
		{
			name: "Example 1",
			args: args{root: treenode.ExampleTree1},
			want: 3,
		},
		{
			name: "Example 2",
			args: args{root: treenode.ExampleTree2},
			want: 5,
		},
	}
	// One subtest per implementation variant, each running the full table.
	for idx, f := range []func(*Node) int{
		maxDepth,
		maxDepth1,
	} {
		t.Run(fmt.Sprintf("func#%d", idx), func(t *testing.T) {
			for _, tt := range tests {
				t.Run(tt.name, func(t *testing.T) {
					if got := f(tt.args.root); got != tt.want {
						t.Errorf("maxDepth() = %v, want %v", got, tt.want)
					}
				})
			}
		})
	}
}
|
// This file was generated for SObject PlatformCachePartitionType, API Version v43.0 at 2018-07-30 03:47:25.203153075 -0400 EDT m=+11.546192758
package sobjects
import (
"fmt"
"strings"
)
// PlatformCachePartitionType is the generated Go mapping of the Salesforce
// PlatformCachePartitionType SObject (API v43.0). Zero-valued fields are
// omitted when serialized (force:",omitempty").
type PlatformCachePartitionType struct {
	BaseSObject
	AllocatedCapacity          int    `force:",omitempty"`
	AllocatedPurchasedCapacity int    `force:",omitempty"`
	AllocatedTrialCapacity     int    `force:",omitempty"`
	CacheType                  string `force:",omitempty"`
	CreatedById                string `force:",omitempty"`
	CreatedDate                string `force:",omitempty"`
	Id                         string `force:",omitempty"`
	IsDeleted                  bool   `force:",omitempty"`
	LastModifiedById           string `force:",omitempty"`
	LastModifiedDate           string `force:",omitempty"`
	PlatformCachePartitionId   string `force:",omitempty"`
	SystemModstamp             string `force:",omitempty"`
}
// ApiName returns the Salesforce API name of this SObject type.
func (t *PlatformCachePartitionType) ApiName() string {
	return "PlatformCachePartitionType"
}
// String renders the record as a multi-line, human-readable summary:
// a header line followed by one tab-indented line per field.
func (t *PlatformCachePartitionType) String() string {
	var b strings.Builder
	fmt.Fprintf(&b, "PlatformCachePartitionType #%s - %s\n", t.Id, t.Name)
	fmt.Fprintf(&b, "\tAllocatedCapacity: %v\n", t.AllocatedCapacity)
	fmt.Fprintf(&b, "\tAllocatedPurchasedCapacity: %v\n", t.AllocatedPurchasedCapacity)
	fmt.Fprintf(&b, "\tAllocatedTrialCapacity: %v\n", t.AllocatedTrialCapacity)
	fmt.Fprintf(&b, "\tCacheType: %v\n", t.CacheType)
	fmt.Fprintf(&b, "\tCreatedById: %v\n", t.CreatedById)
	fmt.Fprintf(&b, "\tCreatedDate: %v\n", t.CreatedDate)
	fmt.Fprintf(&b, "\tId: %v\n", t.Id)
	fmt.Fprintf(&b, "\tIsDeleted: %v\n", t.IsDeleted)
	fmt.Fprintf(&b, "\tLastModifiedById: %v\n", t.LastModifiedById)
	fmt.Fprintf(&b, "\tLastModifiedDate: %v\n", t.LastModifiedDate)
	fmt.Fprintf(&b, "\tPlatformCachePartitionId: %v\n", t.PlatformCachePartitionId)
	fmt.Fprintf(&b, "\tSystemModstamp: %v\n", t.SystemModstamp)
	return b.String()
}
// PlatformCachePartitionTypeQueryResponse is the SOQL query envelope whose
// Records hold PlatformCachePartitionType rows.
type PlatformCachePartitionTypeQueryResponse struct {
	BaseQuery
	Records []PlatformCachePartitionType `json:"Records" force:"records"`
}
|
package main
import (
"github.com/gin-gonic/gin"
"strings"
)
// paxinxicunshujuku ("crawl info and store to database") scrapes schedule
// data for every student ID in 2019210001..2019215203 and persists each
// student plus their (deduplicated) classes.
// NOTE(review): name[10:] panics if the scraped name is shorter than 10
// bytes — confirm nameandweek always returns a sufficiently long string.
func paxinxicunshujuku() {
	for xuehao := 2019210001; xuehao <= 2019215203; xuehao++ {
		var students Student
		body := paqu(xuehao)            // fetch the raw page for this student ID
		name, week := nameandweek(body) // extract name and current week number
		var date string
		var classes []Class
		// Only parse class info when a non-empty name was found.
		if name[10:] != "" {
			date, classes = classxinxi(body)
		}
		students = Student{
			Stunum:   xuehao,
			Username: name[10:],
			Nowweek:  week,
			Date:     date,
		}
		insertstudent(students)
		// Insert each class only when it is not already stored.
		for _, class := range classes {
			t := queryclass1(class.Dacourse_num)
			if t.Dacourse_num == "" {
				insertclass(class)
			}
		}
	}
}
// chaxunshuju ("query data") handles a lookup request: given a student ID
// from the POST form, it responds with the student's current week and the
// regrouped class list as JSON.
func chaxunshuju(c *gin.Context) {
	xuehao := c.PostForm("xuehao")
	student := querystudent(xuehao)
	var newclass []NewClass
	arr := strings.Split(student.Date, " ") // all course numbers stored for this student
	for _, v := range arr {                 // fetch each course; classes meeting multiple times a week get regrouped
		if v != "" {
			classes := queryclass1(v)
			newclass = fenlitongjieke(classes, newclass)
		}
	}
	c.JSON(200, gin.H{"version": "2020.2.15", "stuNum": xuehao, "nowWeek": student.Nowweek, "success": "true", "status": 200, "data": newclass})
}
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package kubernetesupgrade
import (
"context"
"fmt"
"math/rand"
"strings"
"time"
"github.com/Azure/aks-engine/pkg/api"
"github.com/Azure/aks-engine/pkg/api/common"
"github.com/Azure/aks-engine/pkg/armhelpers"
"github.com/Azure/aks-engine/pkg/armhelpers/utils"
"github.com/Azure/aks-engine/pkg/i18n"
"github.com/Azure/aks-engine/pkg/kubernetes"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
util "k8s.io/client-go/util/retry"
)
// IsVMSSToBeUpgradedCb is a callback that reports whether the named VMSS
// should be included in the upgrade, given the cluster's data model.
type IsVMSSToBeUpgradedCb func(vmss string, cs *api.ContainerService) bool
// ClusterTopology contains resources of the cluster the upgrade operation
// is targeting
type ClusterTopology struct {
	DataModel      *api.ContainerService
	SubscriptionID string
	Location       string
	ResourceGroup  string
	// NameSuffix identifies VMs that belong to this cluster.
	NameSuffix string
	// AgentPoolsToUpgrade maps pool name -> whether it is selected for upgrade.
	AgentPoolsToUpgrade map[string]bool
	AgentPools          map[string]*AgentPoolTopology
	// AgentPoolScaleSetsToUpgrade lists VMSS-backed pools needing upgrade.
	AgentPoolScaleSetsToUpgrade []AgentPoolScaleSet
	// MasterVMs / UpgradedMasterVMs partition control plane VMs by whether
	// they still need the upgrade.
	MasterVMs         *[]compute.VirtualMachine
	UpgradedMasterVMs *[]compute.VirtualMachine
	// IsVMSSToBeUpgraded optionally filters which scale sets are upgraded.
	IsVMSSToBeUpgraded IsVMSSToBeUpgradedCb
}
// AgentPoolScaleSet contains necessary data required to upgrade a VMSS
type AgentPoolScaleSet struct {
	Name     string
	Sku      compute.Sku
	Location string
	// IsWindows is true when the scale set's OS profile has a Windows configuration.
	IsWindows bool
	// VMsToUpgrade lists the individual instances whose version differs from the goal.
	VMsToUpgrade []AgentPoolScaleSetVM
}
// AgentPoolScaleSetVM represents a VM in a VMSS
type AgentPoolScaleSetVM struct {
	// Name is the OS computer name (also the Kubernetes node name).
	Name string
	// InstanceID is the VMSS instance identifier.
	InstanceID string
}
// AgentPoolTopology contains agent VMs in a single pool
type AgentPoolTopology struct {
	Identifier *string
	Name       *string
	// AgentVMs / UpgradedAgentVMs partition the pool's VMs by whether they
	// still need the upgrade.
	AgentVMs         *[]compute.VirtualMachine
	UpgradedAgentVMs *[]compute.VirtualMachine
}
// UpgradeCluster upgrades a cluster with Orchestrator version X.X to version Y.Y.
// Right now upgrades are supported for Kubernetes cluster only.
type UpgradeCluster struct {
	Translator *i18n.Translator
	Logger     *logrus.Entry
	ClusterTopology
	Client armhelpers.AKSEngineClient
	// StepTimeout / CordonDrainTimeout bound individual upgrade steps.
	StepTimeout        *time.Duration
	CordonDrainTimeout *time.Duration
	// UpgradeWorkFlow, when set, overrides the default Upgrader (test seam).
	UpgradeWorkFlow UpgradeWorkFlow
	// Force upgrades nodes even when their version cannot be determined or
	// has no declared upgrade path.
	Force bool
	// ControlPlaneOnly restricts the upgrade to master VMs.
	ControlPlaneOnly bool
	CurrentVersion   string
}
// MasterPoolName is the reserved pool name used for control plane (master) VMs.
const MasterPoolName = "master"
// UpgradeCluster runs the workflow to upgrade a Kubernetes cluster.
// It inventories the cluster's nodes, verifies previously upgraded control
// plane nodes are healthy, optionally pauses the cluster autoscaler for the
// duration of the upgrade, then runs the upgrade workflow.
//
// Fix: the autoscaler-resume branch previously re-checked `err == nil`
// inside the else of `if err != nil` — a redundant, always-true condition
// that has been removed.
func (uc *UpgradeCluster) UpgradeCluster(az armhelpers.AKSEngineClient, kubeConfig string, aksEngineVersion string) error {
	uc.MasterVMs = &[]compute.VirtualMachine{}
	uc.UpgradedMasterVMs = &[]compute.VirtualMachine{}
	uc.AgentPools = make(map[string]*AgentPoolTopology)
	var kubeClient kubernetes.Client
	if az != nil {
		timeout := time.Duration(60) * time.Minute
		k, err := az.GetKubernetesClient("", kubeConfig, interval, timeout)
		if err != nil {
			// Non-fatal: without a client, version detection falls back to VM tags.
			uc.Logger.Warnf("Failed to get a Kubernetes client: %v", err)
		}
		kubeClient = k
	}
	if err := uc.setNodesToUpgrade(kubeClient, uc.ResourceGroup); err != nil {
		return uc.Translator.Errorf("Error while querying ARM for resources: %+v", err)
	}
	if kubeClient != nil {
		ctx, cancel := context.WithTimeout(context.Background(), 150*time.Second)
		defer cancel()
		notReadyStream := uc.upgradedNotReadyStream(kubeClient, wait.Backoff{Steps: 15, Duration: 10 * time.Second})
		if err := uc.checkControlPlaneNodesStatus(ctx, notReadyStream); err != nil {
			uc.Logger.Error("Aborting the upgrade process to avoid potential control plane downtime")
			return errors.Wrap(err, "checking status of upgraded control plane nodes")
		}
	}
	kc := uc.DataModel.Properties.OrchestratorProfile.KubernetesConfig
	if kc != nil && kc.IsClusterAutoscalerEnabled() && !uc.ControlPlaneOnly {
		// pause the cluster-autoscaler before running upgrade and resume it afterward
		uc.Logger.Info("Pausing cluster autoscaler, replica count: 0")
		count, err := uc.SetClusterAutoscalerReplicaCount(kubeClient, 0)
		if err != nil {
			uc.Logger.Errorf("Failed to pause cluster-autoscaler: %v", err)
			if !uc.Force {
				return err
			}
		} else {
			// err is nil here, so always schedule the resume for when the
			// upgrade finishes.
			defer func() {
				uc.Logger.Infof("Resuming cluster autoscaler, replica count: %d", count)
				if _, err = uc.SetClusterAutoscalerReplicaCount(kubeClient, count); err != nil {
					uc.Logger.Errorf("Failed to resume cluster-autoscaler: %v", err)
				}
			}()
		}
	}
	upgradeVersion := uc.DataModel.Properties.OrchestratorProfile.OrchestratorVersion
	what := "control plane and all nodes"
	if uc.ControlPlaneOnly {
		what = "control plane nodes"
	}
	uc.Logger.Infof("Upgrading %s to Kubernetes version %s", what, upgradeVersion)
	if err := uc.getUpgradeWorkflow(kubeConfig, aksEngineVersion).RunUpgrade(); err != nil {
		return err
	}
	what = "Cluster"
	if uc.ControlPlaneOnly {
		what = "Control plane"
	}
	uc.Logger.Infof("%s upgraded successfully to Kubernetes version %s", what, upgradeVersion)
	return nil
}
// SetClusterAutoscalerReplicaCount changes the replica count of a cluster-autoscaler deployment.
// It returns the previous replica count so callers can later restore it,
// retrying the get/update cycle up to 10 times on failure.
func (uc *UpgradeCluster) SetClusterAutoscalerReplicaCount(kubeClient kubernetes.Client, replicaCount int32) (int32, error) {
	if kubeClient == nil {
		return 0, errors.New("no kubernetes client")
	}
	var count int32
	var err error
	const namespace, name, retries = "kube-system", "cluster-autoscaler", 10
	for attempt := 0; attempt < retries; attempt++ {
		deployment, getErr := kubeClient.GetDeployment(namespace, name)
		err = getErr
		if getErr == nil {
			// Capture the old count, then apply the new one.
			count = *deployment.Spec.Replicas
			deployment.Spec.Replicas = &replicaCount
			if _, err = kubeClient.UpdateDeployment(namespace, deployment); err == nil {
				break
			}
		}
		// Random 0-4 second jitter between attempts.
		sleepTime := time.Duration(rand.Intn(5))
		uc.Logger.Warnf("Failed to update cluster-autoscaler deployment: %v", err)
		uc.Logger.Infof("Retry updating cluster-autoscaler after %d seconds", sleepTime)
		time.Sleep(sleepTime * time.Second)
	}
	if err != nil {
		return 0, err
	}
	return count, nil
}
// getUpgradeWorkflow returns the injected upgrade workflow when one was
// provided, otherwise it builds and configures the default Upgrader.
func (uc *UpgradeCluster) getUpgradeWorkflow(kubeConfig string, aksEngineVersion string) UpgradeWorkFlow {
	if wf := uc.UpgradeWorkFlow; wf != nil {
		return wf
	}
	upgrader := &Upgrader{}
	upgrader.Init(uc.Translator, uc.Logger, uc.ClusterTopology, uc.Client, kubeConfig, uc.StepTimeout, uc.CordonDrainTimeout, aksEngineVersion, uc.ControlPlaneOnly)
	upgrader.CurrentVersion = uc.CurrentVersion
	return upgrader
}
// setNodesToUpgrade inventories the cluster's VMs — both VMSS-backed agent
// pools and availability-set VMs — and partitions them into needs-upgrade
// and already-upgraded sets based on each node's detected Kubernetes version.
func (uc *UpgradeCluster) setNodesToUpgrade(kubeClient kubernetes.Client, resourceGroup string) error {
	goalVersion := uc.DataModel.Properties.OrchestratorProfile.OrchestratorVersion
	ctx, cancel := context.WithTimeout(context.Background(), armhelpers.DefaultARMOperationTimeout)
	defer cancel()
	if !uc.ControlPlaneOnly {
		// Walk every VMSS page in the resource group (agent pools may be VMSS-backed).
		for vmScaleSetPage, err := uc.Client.ListVirtualMachineScaleSets(ctx, resourceGroup); vmScaleSetPage.NotDone(); err = vmScaleSetPage.NextWithContext(ctx) {
			if err != nil {
				return err
			}
			for _, vmScaleSet := range vmScaleSetPage.Values() {
				// Honor the optional caller-supplied scale set filter.
				if uc.IsVMSSToBeUpgraded != nil && !uc.IsVMSSToBeUpgraded(*vmScaleSet.Name, uc.DataModel) {
					continue
				}
				for vmScaleSetVMsPage, err := uc.Client.ListVirtualMachineScaleSetVMs(ctx, resourceGroup, *vmScaleSet.Name); vmScaleSetVMsPage.NotDone(); err = vmScaleSetVMsPage.NextWithContext(ctx) {
					if err != nil {
						return err
					}
					// set agent pool node count to match VMSS capacity
					for _, pool := range uc.ClusterTopology.DataModel.Properties.AgentPoolProfiles {
						if poolName, _, _ := utils.VmssNameParts(*vmScaleSet.Name); poolName == pool.Name {
							pool.Count = int(*vmScaleSet.Sku.Capacity)
							break
						}
					}
					scaleSetToUpgrade := AgentPoolScaleSet{
						Name:     *vmScaleSet.Name,
						Sku:      *vmScaleSet.Sku,
						Location: *vmScaleSet.Location,
					}
					if vmScaleSet.VirtualMachineProfile != nil &&
						vmScaleSet.VirtualMachineProfile.OsProfile != nil &&
						vmScaleSet.VirtualMachineProfile.OsProfile.WindowsConfiguration != nil {
						scaleSetToUpgrade.IsWindows = true
						uc.Logger.Infof("Set isWindows flag for vmss %s.", *vmScaleSet.Name)
					}
					for _, vm := range vmScaleSetVMsPage.Values() {
						// ComputerName (not the VM resource name) is the K8s node name;
						// tags are only trusted when the latest VMSS model is applied.
						currentVersion := uc.getNodeVersion(kubeClient, strings.ToLower(*vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName), vm.Tags, *vm.VirtualMachineScaleSetVMProperties.LatestModelApplied)
						if uc.Force {
							if currentVersion == "" {
								currentVersion = "Unknown"
							}
						}
						if currentVersion == "" {
							uc.Logger.Infof("Skipping VM: %s for upgrade as the orchestrator version could not be determined.", *vm.Name)
							continue
						}
						if uc.Force || currentVersion != goalVersion {
							uc.Logger.Infof(
								"VM %s in VMSS %s has a current version of %s and a desired version of %s. Upgrading this node.",
								*vm.Name,
								*vmScaleSet.Name,
								currentVersion,
								goalVersion,
							)
							scaleSetToUpgrade.VMsToUpgrade = append(
								scaleSetToUpgrade.VMsToUpgrade,
								AgentPoolScaleSetVM{
									Name:       *vm.VirtualMachineScaleSetVMProperties.OsProfile.ComputerName,
									InstanceID: *vm.InstanceID,
								},
							)
						}
					}
					uc.AgentPoolScaleSetsToUpgrade = append(uc.AgentPoolScaleSetsToUpgrade, scaleSetToUpgrade)
				}
			}
		}
	}
	// Availability-set VMs (control plane and non-VMSS agents).
	for vmListPage, err := uc.Client.ListVirtualMachines(ctx, resourceGroup); vmListPage.NotDone(); err = vmListPage.Next() {
		if err != nil {
			return err
		}
		for _, vm := range vmListPage.Values() {
			// Windows VMs contain a substring of the name suffix
			if !strings.Contains(*(vm.Name), uc.NameSuffix) && !strings.Contains(*(vm.Name), uc.NameSuffix[:4]+"k8s") {
				uc.Logger.Infof("Skipping VM: %s for upgrade as it does not belong to cluster with expected name suffix: %s",
					*vm.Name, uc.NameSuffix)
				continue
			}
			currentVersion := uc.getNodeVersion(kubeClient, strings.ToLower(*vm.Name), vm.Tags, true)
			if uc.Force {
				if currentVersion == "" {
					currentVersion = "Unknown"
				}
				uc.addVMToUpgradeSets(vm, currentVersion)
			} else {
				if currentVersion == "" {
					uc.Logger.Infof("Skipping VM: %s for upgrade as the orchestrator version could not be determined.", *vm.Name)
					continue
				}
				// If the current version is different than the desired version then we add the VM to the list of VMs to upgrade.
				if currentVersion != goalVersion {
					if err := uc.upgradable(currentVersion); err != nil {
						return err
					}
					uc.addVMToUpgradeSets(vm, currentVersion)
				} else if currentVersion == goalVersion {
					uc.addVMToFinishedSets(vm, currentVersion)
				}
			}
		}
	}
	return nil
}
// upgradable returns nil when a node at currentVersion has a declared
// upgrade path to the data model's target orchestrator version, and an
// error otherwise.
func (uc *UpgradeCluster) upgradable(currentVersion string) error {
	profile := &api.OrchestratorProfile{
		OrchestratorType:    api.Kubernetes,
		OrchestratorVersion: currentVersion,
	}
	target := uc.DataModel.Properties.OrchestratorProfile.OrchestratorVersion
	versionProfile, err := api.GetOrchestratorVersionProfile(profile, uc.DataModel.Properties.HasWindows(), uc.DataModel.Properties.IsAzureStackCloud())
	if err != nil {
		return err
	}
	for _, upgrade := range versionProfile.Upgrades {
		if upgrade.OrchestratorVersion == target {
			return nil
		}
	}
	return errors.Errorf("%s cannot be upgraded to %s", currentVersion, target)
}
// getNodeVersion returns a node's current Kubernetes version via Kubernetes API or VM tag.
// For VMSS nodes, make sure OsProfile.ComputerName instead of VM name is used as the name here
// because the former is used as the K8s node name.
// Also, if the latest VMSS model is applied, then we can get the version info from the tags.
// Otherwise, we have to get version via K8s API. This is because VMSS does not support tags
// for individual instances and old/new instances have the same tags.
// getNodeVersion extracts the node's Kubernetes version, preferring the
// "orchestrator" VM tag (format "<type>:<version>") when getVersionFromTags
// is set, and falling back to the Kubernetes API's kubelet version.
// Returns "" when neither source yields a version.
func (uc *UpgradeCluster) getNodeVersion(client kubernetes.Client, name string, tags map[string]*string, getVersionFromTags bool) string {
	if getVersionFromTags {
		// Reading a nil map is safe, so no explicit tags != nil guard is needed.
		if tag, ok := tags["orchestrator"]; ok && tag != nil {
			if parts := strings.Split(*tag, ":"); len(parts) == 2 {
				return parts[1]
			}
		}
		uc.Logger.Warnf("Expected tag \"orchestrator\" not found for VM: %s. Using Kubernetes API to retrieve Kubernetes version.", name)
	}
	if client != nil {
		node, err := client.GetNode(name)
		if err != nil {
			uc.Logger.Warnf("Failed to get node %s: %v", name, err)
			return ""
		}
		return strings.TrimPrefix(node.Status.NodeInfo.KubeletVersion, "v")
	}
	return ""
}
// addVMToAgentPool assigns a VM to its agent pool topology, either into the
// to-upgrade set (isUpgradableVM) or the already-upgraded set. VMs whose
// pool cannot be determined, whose pool is not selected for upgrade, or
// whose name does not match this cluster's suffix are skipped without error.
//
// Fix: removed a second `vmPoolName == ""` check that was unreachable — the
// empty case already returned a few lines earlier.
func (uc *UpgradeCluster) addVMToAgentPool(vm compute.VirtualMachine, isUpgradableVM bool) error {
	var poolIdentifier string
	var poolPrefix string
	var err error
	var vmPoolName string
	if vm.Tags != nil && vm.Tags["poolName"] != nil {
		vmPoolName = *vm.Tags["poolName"]
	} else {
		uc.Logger.Infof("poolName tag not found for VM: %s.", *vm.Name)
		// If there's only one agent pool, assume this VM is a member.
		agentPools := []string{}
		for k := range uc.AgentPoolsToUpgrade {
			if !strings.HasPrefix(k, "master") {
				agentPools = append(agentPools, k)
			}
		}
		if len(agentPools) == 1 {
			vmPoolName = agentPools[0]
		}
	}
	if vmPoolName == "" {
		uc.Logger.Warnf("Couldn't determine agent pool membership for VM: %s.", *vm.Name)
		return nil
	}
	uc.Logger.Infof("Evaluating VM: %s in pool: %s...", *vm.Name, vmPoolName)
	if !uc.AgentPoolsToUpgrade[vmPoolName] {
		uc.Logger.Infof("Skipping upgrade of VM: %s in pool: %s.", *vm.Name, vmPoolName)
		return nil
	}
	if vm.StorageProfile.OsDisk.OsType == compute.Windows {
		poolPrefix, _, _, _, err = utils.WindowsVMNameParts(*vm.Name)
		if err != nil {
			uc.Logger.Errorf(err.Error())
			return err
		}
		if !strings.Contains(uc.NameSuffix, poolPrefix) {
			uc.Logger.Infof("Skipping VM: %s for upgrade as it does not belong to cluster with expected name suffix: %s",
				*vm.Name, uc.NameSuffix)
			return nil
		}
		// The k8s Windows VM Naming Format was previously "^([a-fA-F0-9]{5})([0-9a-zA-Z]{3})([a-zA-Z0-9]{4,6})$" (i.e.: 50621k8s9000)
		// The k8s Windows VM Naming Format is now "^([a-fA-F0-9]{4})([0-9a-zA-Z]{3})([0-9]{3,8})$" (i.e.: 1708k8s020)
		// The pool identifier is made of the first 11 or 9 characters
		if string((*vm.Name)[8]) == "9" {
			poolIdentifier = (*vm.Name)[:11]
		} else {
			poolIdentifier = (*vm.Name)[:9]
		}
	} else { // vm.StorageProfile.OsDisk.OsType == compute.Linux
		poolIdentifier, poolPrefix, _, err = utils.K8sLinuxVMNameParts(*vm.Name)
		if err != nil {
			uc.Logger.Errorf(err.Error())
			return err
		}
		if !strings.EqualFold(uc.NameSuffix, poolPrefix) {
			uc.Logger.Infof("Skipping VM: %s for upgrade as it does not belong to cluster with expected name suffix: %s",
				*vm.Name, uc.NameSuffix)
			return nil
		}
	}
	if uc.AgentPools[poolIdentifier] == nil {
		uc.AgentPools[poolIdentifier] =
			&AgentPoolTopology{&poolIdentifier, &vmPoolName, &[]compute.VirtualMachine{}, &[]compute.VirtualMachine{}}
	}
	orchestrator := "unknown"
	if vm.Tags != nil && vm.Tags["orchestrator"] != nil {
		orchestrator = *vm.Tags["orchestrator"]
	}
	//TODO(sterbrec): extract this from add to agentPool
	// separate the upgrade/skip decision from the agentpool composition
	if isUpgradableVM {
		uc.Logger.Infof("Adding Agent VM: %s, orchestrator: %s to pool: %s (AgentVMs)",
			*vm.Name, orchestrator, poolIdentifier)
		*uc.AgentPools[poolIdentifier].AgentVMs = append(*uc.AgentPools[poolIdentifier].AgentVMs, vm)
	} else {
		uc.Logger.Infof("Adding Agent VM: %s, orchestrator: %s to pool: %s (UpgradedAgentVMs)",
			*vm.Name, orchestrator, poolIdentifier)
		*uc.AgentPools[poolIdentifier].UpgradedAgentVMs = append(*uc.AgentPools[poolIdentifier].UpgradedAgentVMs, vm)
	}
	return nil
}
// addVMToUpgradeSets routes a VM that needs upgrading: control plane VMs
// (legacy master prefix) go to MasterVMs; everything else is handed to its
// agent pool's to-upgrade set.
func (uc *UpgradeCluster) addVMToUpgradeSets(vm compute.VirtualMachine, currentVersion string) {
	masterPrefix := fmt.Sprintf("%s-", common.LegacyControlPlaneVMPrefix)
	if !strings.Contains(*(vm.Name), masterPrefix) {
		if err := uc.addVMToAgentPool(vm, true); err != nil {
			uc.Logger.Errorf("Failed to add VM %s to agent pool: %s", *vm.Name, err)
		}
		return
	}
	uc.Logger.Infof("Master VM name: %s, orchestrator: %s (MasterVMs)", *vm.Name, currentVersion)
	*uc.MasterVMs = append(*uc.MasterVMs, vm)
}
// addVMToFinishedSets routes a VM that is already at the goal version:
// control plane VMs (legacy master prefix) go to UpgradedMasterVMs; all
// others land in their agent pool's already-upgraded set.
func (uc *UpgradeCluster) addVMToFinishedSets(vm compute.VirtualMachine, currentVersion string) {
	masterPrefix := fmt.Sprintf("%s-", common.LegacyControlPlaneVMPrefix)
	if !strings.Contains(*(vm.Name), masterPrefix) {
		if err := uc.addVMToAgentPool(vm, false); err != nil {
			uc.Logger.Errorf("Failed to add VM %s to agent pool: %s", *vm.Name, err)
		}
		return
	}
	uc.Logger.Infof("Master VM name: %s, orchestrator: %s (UpgradedMasterVMs)", *vm.Name, currentVersion)
	*uc.UpgradedMasterVMs = append(*uc.UpgradedMasterVMs, vm)
}
// checkControlPlaneNodesStatus checks whether it is safe to proceed with the upgrade process
// by looking at the status of previously upgraded control plane nodes.
//
// It returns an error if more than 1 of the already-upgraded control plane nodes are in the NotReady state.
// To recreate the node, users have to manually update the "orchestrator" tag on the VM.
// checkControlPlaneNodesStatus consumes the NotReady observations until the
// stream closes or ctx expires, then errors if the last observation showed
// more than one upgraded control plane node NotReady.
func (uc *UpgradeCluster) checkControlPlaneNodesStatus(ctx context.Context, upgradedNotReadyStream <-chan []string) error {
	if len(*uc.UpgradedMasterVMs) == 0 {
		return nil
	}
	uc.Logger.Infoln("Checking status of upgraded control plane nodes")
	notReadyCount := 0
	for streaming := true; streaming; {
		select {
		case notReady, ok := <-upgradedNotReadyStream:
			if !ok {
				streaming = false
				break
			}
			// Keep only the most recent observation.
			notReadyCount = len(notReady)
		case <-ctx.Done():
			streaming = false
		}
	}
	// Tolerate a single NotReady node; two or more aborts the upgrade.
	if notReadyCount > 1 {
		uc.Logger.Error("At least 2 of the previously upgraded control plane nodes did not reach the NodeReady status")
		return errors.New("too many upgraded nodes are not ready")
	}
	return nil
}
// upgradedNotReadyStream polls (with the given backoff) for the set of
// already-upgraded control plane nodes that are NotReady, emitting each
// observation on the returned channel. The channel is closed when a poll
// observes zero NotReady nodes or the backoff budget is exhausted.
func (uc *UpgradeCluster) upgradedNotReadyStream(client kubernetes.Client, backoff wait.Backoff) <-chan []string {
	// Retry on every error so polling continues for the full backoff budget.
	alwaysRetry := func(_ error) bool {
		return true
	}
	upgraded := []string{}
	for _, vm := range *uc.UpgradedMasterVMs {
		upgraded = append(upgraded, *vm.Name)
	}
	stream := make(chan []string)
	go func() {
		defer close(stream)
		util.OnError(backoff, alwaysRetry, func() error { //nolint:errcheck
			upgradedNotReady, err := uc.getUpgradedNotReady(client, upgraded)
			if err != nil {
				return err
			}
			// NOTE(review): this send blocks if the consumer stops reading
			// (e.g. its context expired) — the goroutine may then linger
			// until the backoff would have elapsed; confirm acceptable.
			stream <- upgradedNotReady
			// Returning an error forces another backoff round, giving
			// NotReady nodes extra time to converge.
			if len(upgradedNotReady) > 0 {
				return errors.New("retry to give NotReady nodes some extra time")
			}
			return nil
		})
	}()
	return stream
}
// getUpgradedNotReady returns the subset of the given upgraded node names
// whose Kubernetes node object exists but is not in the Ready condition.
func (uc *UpgradeCluster) getUpgradedNotReady(client kubernetes.Client, upgraded []string) ([]string, error) {
	masters, err := client.ListNodesByOptions(metav1.ListOptions{LabelSelector: "node-role.kubernetes.io/master"})
	if err != nil {
		return nil, err
	}
	// Index readiness by node name for O(1) lookups below.
	readyByName := make(map[string]bool)
	for _, node := range masters.Items {
		readyByName[node.Name] = kubernetes.IsNodeReady(&node)
	}
	notReady := []string{}
	for _, name := range upgraded {
		if ready, found := readyByName[name]; found && !ready {
			notReady = append(notReady, name)
		}
	}
	return notReady, nil
}
|
package main
import (
"time"
)
// BinaryWheel is an LED effect that lights a block of `size` LEDs and jumps
// it forward by `size` positions once per stepDuration.
type BinaryWheel struct {
	Effect
	// startLed is the first LED of the currently lit block.
	startLed int
	// size is the number of LEDs lit at once (also the jump distance).
	size int
	// stepDuration is how long the block stays in one position.
	stepDuration time.Duration
	// stepStart marks when the block last moved.
	stepStart time.Time
}
// NewBinaryWheel builds a wheel effect with the given block size that steps
// forward once per duration, painting through the supplied color generator.
func NewBinaryWheel(disp Display, cg ColorGenerator, size int, duration time.Duration) *BinaryWheel {
	wheel := &BinaryWheel{
		Effect:       NewEffect(disp, 0.5, 0.0),
		size:         size,
		stepDuration: duration,
		stepStart:    time.Now(),
	}
	wheel.Painter = cg
	return wheel
}
// Update renders one frame: every LED in the effect's range is dimmed, then
// the current block of `size` LEDs is lit, and finally the painted result is
// handed to the display.
//
// Fix: the step-advance check used to run inside the per-LED loop, so a
// frame in which the step fired drew LEDs processed before the step at the
// old position and the rest at the new one. The check now runs once, before
// painting, so every LED in a frame sees the same startLed.
func (e *BinaryWheel) Update() {
	// Advance the wheel at most once per frame, before any LED is drawn.
	if time.Since(e.stepStart) > e.stepDuration {
		e.startLed = (e.startLed + e.size) % e.LengthPar
		e.stepStart = time.Now()
	}
	for i := (0 + e.OffsetPar); i < (e.OffsetPar + e.LengthPar); i++ {
		e.Leds[i].setV(0)
		if i >= e.startLed && i < e.startLed+e.size {
			e.Leds[i].setV(0.8)
		}
	}
	// every update function of an effect ends with this snippet
	e.Painter.Update()
	e.Leds = e.Painter.Colorize(e.Leds)
	e.myDisplay.AddEffect(e.Effect)
}
|
/*
Copyright 2020 Humio https://humio.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package clusters
import (
"context"
"fmt"
"os"
"reflect"
"strings"
humiov1alpha1 "github.com/humio/humio-operator/api/v1alpha1"
"github.com/humio/humio-operator/controllers"
"github.com/humio/humio-operator/controllers/suite"
"github.com/humio/humio-operator/pkg/helpers"
"github.com/humio/humio-operator/pkg/kubernetes"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
)
// Humio container image tags used by the cluster tests below. Each old/new
// pair drives one upgrade scenario; the pair's name states which
// update-strategy behavior the version jump is expected to exercise.
const (
	// Oldest still-supported image; starting point for the plain
	// image-update tests.
	oldSupportedHumioVersion = "humio/humio-core:1.30.7"
	// Image below the operator's minimum supported version; the
	// "Unsupported Version" test expects the cluster to enter ConfigError.
	oldUnsupportedHumioVersion = "humio/humio-core:1.18.4"
	// Patch-level bump (1.36.0 -> 1.36.1): expected to roll pods one at a time.
	upgradePatchBestEffortOldVersion = "humio/humio-core:1.36.0"
	upgradePatchBestEffortNewVersion = "humio/humio-core:1.36.1"
	// Upgrade to a preview release (1.37.0): expected to restart all pods
	// simultaneously.
	upgradeRollingBestEffortPreviewOldVersion = "humio/humio-core:1.36.1"
	upgradeRollingBestEffortPreviewNewVersion = "humio/humio-core:1.37.0"
	// Single-minor bump to a stable release: expected to roll pods one at a time.
	upgradeRollingBestEffortStableOldVersion = "humio/humio-core:1.35.0"
	upgradeRollingBestEffortStableNewVersion = "humio/humio-core:1.36.1"
	// Multi-minor version jump (1.34 -> 1.36) under best-effort strategy.
	upgradeRollingBestEffortVersionJumpOldVersion = "humio/humio-core:1.34.2"
	upgradeRollingBestEffortVersionJumpNewVersion = "humio/humio-core:1.36.1"
	// Presumably consumed by an image-source-configmap test later in the
	// file (not visible here) — verify against that test's expectations.
	imageSourceConfigmapOldVersion = "humio/humio-core:1.36.1"
	imageSourceConfigmapNewVersion = "humio/humio-core:1.37.0"
)
var _ = Describe("HumioCluster Controller", func() {
BeforeEach(func() {
// failed test runs that don't clean up leave resources behind.
humioClientForTestSuite.ClearHumioClientConnections()
})
AfterEach(func() {
// Add any teardown steps that needs to be executed after each test
humioClientForTestSuite.ClearHumioClientConnections()
})
// Add Tests for OpenAPI validation (or additional CRD features) specified in
// your API definition.
// Avoid adding tests for vanilla CRUD operations because they would
// test Kubernetes API server, which isn't the goal here.
Context("Humio Cluster Simple", func() {
It("Should bootstrap cluster correctly", func() {
key := types.NamespacedName{
Name: "humiocluster-simple",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.NodeCount = helpers.IntPtr(2)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
})
})
Context("Humio Cluster With Multiple Node Pools", func() {
It("Should bootstrap multi node cluster correctly", func() {
key := types.NamespacedName{
Name: "humiocluster-multi-node-pool",
Namespace: testProcessNamespace,
}
toCreate := constructBasicMultiNodePoolHumioCluster(key, true, 1)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
createAndBootstrapMultiNodePoolCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
})
})
Context("Humio Cluster Without Init Container", func() {
It("Should bootstrap cluster correctly", func() {
key := types.NamespacedName{
Name: "humiocluster-no-init-container",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.DisableInitContainer = true
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
})
})
Context("Humio Cluster Multi Organizations", func() {
It("Should bootstrap cluster correctly", func() {
key := types.NamespacedName{
Name: "humiocluster-multi-org",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.EnvironmentVariables = append(toCreate.Spec.EnvironmentVariables, corev1.EnvVar{
Name: "ENABLE_ORGANIZATIONS",
Value: "true",
})
toCreate.Spec.EnvironmentVariables = append(toCreate.Spec.EnvironmentVariables, corev1.EnvVar{
Name: "ORGANIZATION_MODE",
Value: "multi",
})
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
})
})
Context("Humio Cluster Unsupported Version", func() {
It("Creating cluster with unsupported version", func() {
key := types.NamespacedName{
Name: "humiocluster-err-unsupp-vers",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.Image = oldUnsupportedHumioVersion
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateConfigError, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
var updatedHumioCluster humiov1alpha1.HumioCluster
suite.UsingClusterBy(key.Name, "should indicate cluster configuration error")
Eventually(func() string {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateConfigError))
suite.UsingClusterBy(key.Name, "should describe cluster configuration error")
Eventually(func() string {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return updatedHumioCluster.Status.Message
}, testTimeout, suite.TestInterval).Should(Equal(fmt.Sprintf("Humio version must be at least %s: unsupported Humio version: %s", controllers.HumioVersionMinimumSupported, strings.Split(oldUnsupportedHumioVersion, ":")[1])))
})
})
Context("Humio Cluster Update Image", func() {
It("Update should correctly replace pods to use new image", func() {
key := types.NamespacedName{
Name: "humiocluster-update-image",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.Image = oldSupportedHumioVersion
toCreate.Spec.NodeCount = helpers.IntPtr(2)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
revisionKey, _ := controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetHumioClusterNodePoolRevisionAnnotation()
var updatedHumioCluster humiov1alpha1.HumioCluster
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(toCreate.Spec.Image))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "1"))
}
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "1"))
suite.UsingClusterBy(key.Name, "Updating the cluster image successfully")
updatedImage := controllers.Image
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.Image = updatedImage
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateUpgrading))
ensurePodsSimultaneousRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))
suite.UsingClusterBy(key.Name, "Confirming pod revision is the same for all pods and the cluster itself")
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "2"))
updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
Expect(updatedClusterPods).To(HaveLen(*toCreate.Spec.NodeCount))
for _, pod := range updatedClusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(updatedImage))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
})
})
Context("Humio Cluster Update Failed Pods", func() {
It("Update should correctly replace pods that are in a failed state", func() {
key := types.NamespacedName{
Name: "humiocluster-update-failed",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
originalAffinity := toCreate.Spec.Affinity
updatedHumioCluster := humiov1alpha1.HumioCluster{}
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
return nil
}, testTimeout, suite.TestInterval).Should(Succeed())
updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
Expect(updatedClusterPods).To(HaveLen(*toCreate.Spec.NodeCount))
for _, pod := range updatedClusterPods {
Expect(pod.Status.Phase).To(BeIdenticalTo(corev1.PodRunning))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "1"))
}
suite.UsingClusterBy(key.Name, "Updating the cluster resources successfully")
Eventually(func() error {
updatedHumioCluster := humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.HumioNodeSpec.Affinity = corev1.Affinity{
NodeAffinity: &corev1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
NodeSelectorTerms: []corev1.NodeSelectorTerm{
{
MatchExpressions: []corev1.NodeSelectorRequirement{
{
Key: "some-none-existant-label",
Operator: corev1.NodeSelectorOpIn,
Values: []string{"does-not-exist"},
},
},
},
},
},
},
}
return k8sClient.Update(ctx, &updatedHumioCluster)
}).Should(Succeed())
Eventually(func() string {
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRestarting))
ensurePodsGoPending(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2, 1)
Eventually(func() int {
var pendingPodsCount int
updatedClusterPods, _ = kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
for _, pod := range updatedClusterPods {
if pod.Status.Phase == corev1.PodPending {
for _, condition := range pod.Status.Conditions {
if condition.Type == corev1.PodScheduled {
if condition.Status == corev1.ConditionFalse && condition.Reason == controllers.PodConditionReasonUnschedulable {
pendingPodsCount++
}
}
}
}
}
return pendingPodsCount
}, testTimeout, suite.TestInterval).Should(Equal(1))
Eventually(func() string {
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))
suite.UsingClusterBy(key.Name, "Updating the cluster resources successfully")
Eventually(func() error {
updatedHumioCluster := humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.HumioNodeSpec.Affinity = originalAffinity
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
ensurePodsRollingRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 3)
Eventually(func() string {
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))
})
})
Context("Humio Cluster Update Image Rolling Restart", func() {
It("Update should correctly replace pods to use new image in a rolling fashion", func() {
key := types.NamespacedName{
Name: "humiocluster-update-image-rolling",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.Image = oldSupportedHumioVersion
toCreate.Spec.NodeCount = helpers.IntPtr(2)
toCreate.Spec.UpdateStrategy = &humiov1alpha1.HumioUpdateStrategy{
Type: humiov1alpha1.HumioClusterUpdateStrategyRollingUpdate,
}
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
revisionKey, _ := controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetHumioClusterNodePoolRevisionAnnotation()
var updatedHumioCluster humiov1alpha1.HumioCluster
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(toCreate.Spec.Image))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "1"))
}
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "1"))
suite.UsingClusterBy(key.Name, "Updating the cluster image successfully")
updatedImage := controllers.Image
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.Image = updatedImage
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateUpgrading))
ensurePodsRollingRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))
suite.UsingClusterBy(key.Name, "Confirming pod revision is the same for all pods and the cluster itself")
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "2"))
updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
Expect(updatedClusterPods).To(HaveLen(*toCreate.Spec.NodeCount))
for _, pod := range updatedClusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(updatedImage))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
})
})
Context("Humio Cluster Update Image Update Strategy OnDelete", func() {
It("Update should not replace pods on image update when update strategy OnDelete is used", func() {
key := types.NamespacedName{
Name: "humiocluster-update-image-on-delete",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.Image = oldSupportedHumioVersion
toCreate.Spec.NodeCount = helpers.IntPtr(2)
toCreate.Spec.UpdateStrategy = &humiov1alpha1.HumioUpdateStrategy{
Type: humiov1alpha1.HumioClusterUpdateStrategyOnDelete,
}
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
revisionKey, _ := controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetHumioClusterNodePoolRevisionAnnotation()
var updatedHumioCluster humiov1alpha1.HumioCluster
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(toCreate.Spec.Image))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "1"))
}
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "1"))
suite.UsingClusterBy(key.Name, "Updating the cluster image successfully")
updatedImage := controllers.Image
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.Image = updatedImage
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateUpgrading))
suite.UsingClusterBy(key.Name, "Confirming pods have not been recreated")
updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
Expect(updatedClusterPods).To(HaveLen(*toCreate.Spec.NodeCount))
for _, pod := range updatedClusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(toCreate.Spec.Image))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "1"))
}
suite.UsingClusterBy(key.Name, "Simulating manual deletion of pods")
for _, pod := range updatedClusterPods {
Expect(k8sClient.Delete(ctx, &pod)).To(Succeed())
}
Eventually(func() []corev1.Pod {
var clusterPods []corev1.Pod
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
_ = suite.MarkPodsAsRunning(ctx, k8sClient, clusterPods, key.Name)
return clusterPods
}, testTimeout, suite.TestInterval).Should(HaveLen(*toCreate.Spec.NodeCount))
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))
suite.UsingClusterBy(key.Name, "Confirming pod revision is the same for all pods and the cluster itself")
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "2"))
updatedClusterPods, _ = kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
Expect(updatedClusterPods).To(HaveLen(*toCreate.Spec.NodeCount))
for _, pod := range updatedClusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(updatedImage))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
})
})
Context("Humio Cluster Update Image Rolling Best Effort Patch", func() {
It("Update should correctly replace pods to use new image in a rolling fashion for patch updates", func() {
key := types.NamespacedName{
Name: "humiocluster-update-image-rolling-patch",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.Image = upgradePatchBestEffortOldVersion
toCreate.Spec.NodeCount = helpers.IntPtr(2)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
revisionKey, _ := controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetHumioClusterNodePoolRevisionAnnotation()
var updatedHumioCluster humiov1alpha1.HumioCluster
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(toCreate.Spec.Image))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "1"))
}
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "1"))
suite.UsingClusterBy(key.Name, "Updating the cluster image successfully")
updatedImage := upgradePatchBestEffortNewVersion
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.Image = updatedImage
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateUpgrading))
suite.UsingClusterBy(key.Name, "Pods upgrade in a rolling fashion because the new version is a patch release")
ensurePodsRollingRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))
suite.UsingClusterBy(key.Name, "Confirming pod revision is the same for all pods and the cluster itself")
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "2"))
updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
Expect(updatedClusterPods).To(HaveLen(*toCreate.Spec.NodeCount))
for _, pod := range updatedClusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(updatedImage))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
})
})
Context("Humio Cluster Update Image Rolling Best Effort Preview", func() {
It("Update should correctly replace pods to use new image in a rolling fashion for preview updates", func() {
key := types.NamespacedName{
Name: "humiocluster-update-image-rolling-preview",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.Image = upgradeRollingBestEffortPreviewOldVersion
toCreate.Spec.NodeCount = helpers.IntPtr(2)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
revisionKey, _ := controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetHumioClusterNodePoolRevisionAnnotation()
var updatedHumioCluster humiov1alpha1.HumioCluster
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(toCreate.Spec.Image))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "1"))
}
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "1"))
suite.UsingClusterBy(key.Name, "Updating the cluster image successfully")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.Image = upgradeRollingBestEffortPreviewNewVersion
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateUpgrading))
suite.UsingClusterBy(key.Name, "Pods upgrade at the same time because the new version is preview")
ensurePodsSimultaneousRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))
suite.UsingClusterBy(key.Name, "Confirming pod revision is the same for all pods and the cluster itself")
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "2"))
updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
Expect(updatedClusterPods).To(HaveLen(*toCreate.Spec.NodeCount))
for _, pod := range updatedClusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(upgradeRollingBestEffortPreviewNewVersion))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
})
})
Context("Humio Cluster Update Image Rolling Best Effort Stable", func() {
It("Update should correctly replace pods to use new image in a rolling fashion for stable updates", func() {
key := types.NamespacedName{
Name: "humiocluster-update-image-rolling-stable",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.Image = upgradeRollingBestEffortStableOldVersion
toCreate.Spec.NodeCount = helpers.IntPtr(2)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
revisionKey, _ := controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetHumioClusterNodePoolRevisionAnnotation()
var updatedHumioCluster humiov1alpha1.HumioCluster
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(toCreate.Spec.Image))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "1"))
}
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "1"))
suite.UsingClusterBy(key.Name, "Updating the cluster image successfully")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.Image = upgradeRollingBestEffortStableNewVersion
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateUpgrading))
suite.UsingClusterBy(key.Name, "Pods upgrade in a rolling fashion because the new version is stable and"+
"only one minor revision greater than the previous version")
ensurePodsRollingRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))
suite.UsingClusterBy(key.Name, "Confirming pod revision is the same for all pods and the cluster itself")
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "2"))
updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
Expect(updatedClusterPods).To(HaveLen(*toCreate.Spec.NodeCount))
for _, pod := range updatedClusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(upgradeRollingBestEffortStableNewVersion))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
})
})
Context("Humio Cluster Update Image Rolling Best Effort Version Jump", func() {
It("Update should correctly replace pods to use new image in a rolling fashion for version jump updates", func() {
key := types.NamespacedName{
Name: "humiocluster-update-image-rolling-vj",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.Image = upgradeRollingBestEffortVersionJumpOldVersion
toCreate.Spec.NodeCount = helpers.IntPtr(2)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
revisionKey, _ := controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetHumioClusterNodePoolRevisionAnnotation()
var updatedHumioCluster humiov1alpha1.HumioCluster
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(toCreate.Spec.Image))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "1"))
}
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "1"))
suite.UsingClusterBy(key.Name, "Updating the cluster image successfully")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.Image = upgradeRollingBestEffortVersionJumpNewVersion
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateUpgrading))
suite.UsingClusterBy(key.Name, "Pods upgrade at the same time because the new version is more than one"+
"minor revision greater than the previous version")
ensurePodsSimultaneousRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))
suite.UsingClusterBy(key.Name, "Confirming pod revision is the same for all pods and the cluster itself")
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "2"))
updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
Expect(updatedClusterPods).To(HaveLen(*toCreate.Spec.NodeCount))
for _, pod := range updatedClusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(upgradeRollingBestEffortVersionJumpNewVersion))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
})
})
Context("Humio Cluster Update EXTERNAL_URL", func() {
It("Update should correctly replace pods to use the new EXTERNAL_URL in a non-rolling fashion", func() {
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
key := types.NamespacedName{
Name: "humiocluster-update-ext-url",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.TLS = &humiov1alpha1.HumioClusterTLSSpec{Enabled: helpers.BoolPtr(false)}
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
revisionKey, _ := controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetHumioClusterNodePoolRevisionAnnotation()
var updatedHumioCluster humiov1alpha1.HumioCluster
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Env).To(ContainElement(corev1.EnvVar{
Name: "EXTERNAL_URL",
Value: "http://$(POD_NAME).humiocluster-update-ext-url-headless.$(POD_NAMESPACE):$(HUMIO_PORT)",
}))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "1"))
}
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "1"))
suite.UsingClusterBy(key.Name, "Waiting for pods to be Running")
Eventually(func() int {
var runningPods int
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
for _, pod := range clusterPods {
if pod.Status.Phase == corev1.PodRunning {
runningPods++
}
}
return runningPods
}, testTimeout, suite.TestInterval).Should(Equal(*toCreate.Spec.NodeCount))
suite.UsingClusterBy(key.Name, "Updating the cluster TLS successfully")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.TLS.Enabled = helpers.BoolPtr(true)
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
ensurePodsSimultaneousRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))
suite.UsingClusterBy(key.Name, "Confirming pod revision is the same for all pods and the cluster itself")
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "2"))
updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
Expect(updatedClusterPods).To(HaveLen(*toCreate.Spec.NodeCount))
for _, pod := range updatedClusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Env).To(ContainElement(corev1.EnvVar{
Name: "EXTERNAL_URL",
Value: "https://$(POD_NAME).humiocluster-update-ext-url-headless.$(POD_NAMESPACE):$(HUMIO_PORT)",
}))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}
}
})
})
Context("Humio Cluster Update Image Multi Node Pool", func() {
It("Update should correctly replace pods to use new image in multiple node pools", func() {
key := types.NamespacedName{
Name: "humiocluster-update-image-np",
Namespace: testProcessNamespace,
}
originalImage := oldSupportedHumioVersion
toCreate := constructBasicMultiNodePoolHumioCluster(key, true, 1)
toCreate.Spec.Image = originalImage
toCreate.Spec.NodeCount = helpers.IntPtr(1)
toCreate.Spec.NodePools[0].NodeCount = helpers.IntPtr(1)
toCreate.Spec.NodePools[0].Image = originalImage
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
createAndBootstrapMultiNodePoolCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
mainNodePoolManager := controllers.NewHumioNodeManagerFromHumioCluster(toCreate)
revisionKey, _ := mainNodePoolManager.GetHumioClusterNodePoolRevisionAnnotation()
var updatedHumioCluster humiov1alpha1.HumioCluster
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, mainNodePoolManager.GetPodLabels())
for _, pod := range clusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(toCreate.Spec.Image))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "1"))
}
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "1"))
suite.UsingClusterBy(key.Name, "Updating the cluster image on the main node pool successfully")
updatedImage := controllers.Image
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.Image = updatedImage
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateUpgrading))
suite.UsingClusterBy(key.Name, "Confirming only one node pool is in the correct state")
Eventually(func() int {
var poolsInCorrectState int
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
for _, poolStatus := range updatedHumioCluster.Status.NodePoolStatus {
if poolStatus.State == humiov1alpha1.HumioClusterStateUpgrading {
poolsInCorrectState++
}
}
return poolsInCorrectState
}, testTimeout, suite.TestInterval).Should(Equal(1))
ensurePodsSimultaneousRestart(ctx, mainNodePoolManager, 2)
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))
suite.UsingClusterBy(key.Name, "Confirming pod revision is the same for main pods and the cluster itself")
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "2"))
updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, mainNodePoolManager.GetPodLabels())
Expect(updatedClusterPods).To(HaveLen(*toCreate.Spec.NodeCount))
for _, pod := range updatedClusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(updatedImage))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}
suite.UsingClusterBy(key.Name, "Confirming pod revision did not change for the other node pool")
additionalNodePoolManager := controllers.NewHumioNodeManagerFromHumioNodePool(&updatedHumioCluster, &updatedHumioCluster.Spec.NodePools[0])
nonUpdatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, additionalNodePoolManager.GetPodLabels())
Expect(nonUpdatedClusterPods).To(HaveLen(*toCreate.Spec.NodePools[0].NodeCount))
Expect(updatedHumioCluster.Spec.NodePools[0].Image).To(Equal(originalImage))
for _, pod := range nonUpdatedClusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(originalImage))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "1"))
}
suite.UsingClusterBy(key.Name, "Updating the cluster image on the additional node pool successfully")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.NodePools[0].Image = updatedImage
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateUpgrading))
suite.UsingClusterBy(key.Name, "Confirming only one node pool is in the correct state")
Eventually(func() int {
var poolsInCorrectState int
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
for _, poolStatus := range updatedHumioCluster.Status.NodePoolStatus {
if poolStatus.State == humiov1alpha1.HumioClusterStateUpgrading {
poolsInCorrectState++
}
}
return poolsInCorrectState
}, testTimeout, suite.TestInterval).Should(Equal(1))
ensurePodsSimultaneousRestart(ctx, additionalNodePoolManager, 2)
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))
suite.UsingClusterBy(key.Name, "Confirming pod revision is the same for all pods and the cluster itself")
updatedHumioCluster = humiov1alpha1.HumioCluster{}
additionalPoolRevisionKey, _ := additionalNodePoolManager.GetHumioClusterNodePoolRevisionAnnotation()
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(additionalPoolRevisionKey, "2"))
updatedClusterPods, _ = kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, additionalNodePoolManager.GetPodLabels())
Expect(updatedClusterPods).To(HaveLen(*toCreate.Spec.NodePools[0].NodeCount))
for _, pod := range updatedClusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(updatedImage))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}
suite.UsingClusterBy(key.Name, "Confirming pod revision did not change for the main node pool")
updatedClusterPods, _ = kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, mainNodePoolManager.GetPodLabels())
Expect(updatedClusterPods).To(HaveLen(*toCreate.Spec.NodeCount))
for _, pod := range updatedClusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(updatedImage))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
})
})
Context("Humio Cluster Update Image Source", func() {
It("Update should correctly replace pods to use new image", func() {
key := types.NamespacedName{
Name: "humiocluster-update-image-source",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.Image = imageSourceConfigmapOldVersion
toCreate.Spec.NodeCount = helpers.IntPtr(2)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
suite.UsingClusterBy(key.Name, "Adding missing imageSource to pod spec")
var updatedHumioCluster humiov1alpha1.HumioCluster
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
updatedHumioCluster.Spec.ImageSource = &humiov1alpha1.HumioImageSource{
ConfigMapRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: "image-source-missing",
},
Key: "tag",
},
}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming the HumioCluster goes into ConfigError state since the configmap does not exist")
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateConfigError))
suite.UsingClusterBy(key.Name, "Confirming the HumioCluster describes the reason the cluster is in ConfigError state")
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.Message
}, testTimeout, suite.TestInterval).Should(Equal("failed to set imageFromSource: ConfigMap \"image-source-missing\" not found"))
suite.UsingClusterBy(key.Name, "Creating the imageSource configmap")
updatedImage := imageSourceConfigmapNewVersion
envVarSourceConfigMap := corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "image-source",
Namespace: key.Namespace,
},
Data: map[string]string{"tag": updatedImage},
}
Expect(k8sClient.Create(ctx, &envVarSourceConfigMap)).To(Succeed())
suite.UsingClusterBy(key.Name, "Updating imageSource of pod spec")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.ImageSource = &humiov1alpha1.HumioImageSource{
ConfigMapRef: &corev1.ConfigMapKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: "image-source",
},
Key: "tag",
},
}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
ensurePodsSimultaneousRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))
suite.UsingClusterBy(key.Name, "Confirming pod revision is the same for all pods and the cluster itself")
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
revisionKey, _ := controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetHumioClusterNodePoolRevisionAnnotation()
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "2"))
updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
Expect(updatedClusterPods).To(HaveLen(*toCreate.Spec.NodeCount))
for _, pod := range updatedClusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(updatedImage))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
}
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
})
})
Context("Humio Cluster Update Using Wrong Image", func() {
It("Update should correctly replace pods after using wrong image", func() {
key := types.NamespacedName{
Name: "humiocluster-update-wrong-image",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.NodeCount = helpers.IntPtr(2)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
var updatedHumioCluster humiov1alpha1.HumioCluster
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(toCreate.Spec.Image))
Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "1"))
}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
revisionKey, _ := controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetHumioClusterNodePoolRevisionAnnotation()
Expect(updatedHumioCluster.Annotations).To(HaveKeyWithValue(revisionKey, "1"))
suite.UsingClusterBy(key.Name, "Updating the cluster image unsuccessfully")
updatedImage := "humio/humio-operator:1.30.7-missing-image"
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.Image = updatedImage
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateUpgrading))
suite.UsingClusterBy(key.Name, "Waiting until pods are started with the bad image")
Eventually(func() int {
var badPodCount int
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
for _, pod := range clusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
if pod.Spec.Containers[humioIndex].Image == updatedImage && pod.Annotations[controllers.PodRevisionAnnotation] == "2" {
badPodCount++
}
}
return badPodCount
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(*toCreate.Spec.NodeCount))
suite.UsingClusterBy(key.Name, "Simulating mock pods to be scheduled")
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
_ = suite.MarkPodsAsRunning(ctx, k8sClient, clusterPods, key.Name)
suite.UsingClusterBy(key.Name, "Waiting for humio cluster state to be Running")
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))
suite.UsingClusterBy(key.Name, "Updating the cluster image successfully")
updatedImage = controllers.Image
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.Image = updatedImage
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateUpgrading))
ensurePodsSimultaneousRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 3)
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))
suite.UsingClusterBy(key.Name, "Confirming pod revision is the same for all pods and the cluster itself")
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
Expect(updatedHumioCluster.Annotations[revisionKey]).To(Equal("3"))
updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
Expect(updatedClusterPods).To(HaveLen(*toCreate.Spec.NodeCount))
for _, pod := range updatedClusterPods {
humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIndex].Image).To(BeIdenticalTo(updatedImage))
Expect(pod.Annotations[controllers.PodRevisionAnnotation]).To(Equal("3"))
}
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
}
})
})
Context("Humio Cluster Update Helper Image", func() {
	// Verifies that clearing Spec.HelperImage uses the operator default image for both
	// the init container and the auth sidecar, and that setting a custom helper image
	// triggers a rolling replacement of the pods with the new image.
	It("Update should correctly replace pods to use new image", func() {
		key := types.NamespacedName{
			Name:      "humiocluster-update-helper-image",
			Namespace: testProcessNamespace,
		}
		toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
		toCreate.Spec.HelperImage = "" // empty means "use the operator default"
		toCreate.Spec.NodeCount = helpers.IntPtr(2)

		suite.UsingClusterBy(key.Name, "Creating a cluster with default helper image")
		ctx := context.Background()
		suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
		defer suite.CleanupCluster(ctx, k8sClient, toCreate)

		suite.UsingClusterBy(key.Name, "Validating pod uses default helper image as init container")
		Eventually(func() string {
			clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
			_ = suite.MarkPodsAsRunning(ctx, k8sClient, clusterPods, key.Name)

			// Only the first pod is inspected; all pods are built from the same spec.
			for _, pod := range clusterPods {
				initIdx, _ := kubernetes.GetInitContainerIndexByName(pod, controllers.InitContainerName)
				return pod.Spec.InitContainers[initIdx].Image
			}
			return ""
		}, testTimeout, suite.TestInterval).Should(Equal(controllers.HelperImage))

		// Capture the pod list before the update so pod names can be compared afterwards.
		clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())

		suite.UsingClusterBy(key.Name, "Validating pod uses default helper image as auth sidecar container")
		Eventually(func() string {
			clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
			_ = suite.MarkPodsAsRunning(ctx, k8sClient, clusterPods, key.Name)

			for _, pod := range clusterPods {
				authIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.AuthContainerName)
				// Fix: authIdx indexes Spec.Containers (the sidecar), not Spec.InitContainers.
				return pod.Spec.Containers[authIdx].Image
			}
			return ""
		}, testTimeout, suite.TestInterval).Should(Equal(controllers.HelperImage))

		suite.UsingClusterBy(key.Name, "Overriding helper image")
		var updatedHumioCluster humiov1alpha1.HumioCluster
		customHelperImage := "humio/humio-operator-helper:master"
		Eventually(func() error {
			err := k8sClient.Get(ctx, key, &updatedHumioCluster)
			if err != nil {
				return err
			}
			updatedHumioCluster.Spec.HelperImage = customHelperImage
			return k8sClient.Update(ctx, &updatedHumioCluster)
		}, testTimeout, suite.TestInterval).Should(Succeed())

		suite.UsingClusterBy(key.Name, "Restarting the cluster in a rolling fashion")
		ensurePodsRollingRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)

		suite.UsingClusterBy(key.Name, "Validating pod is recreated using the explicitly defined helper image as init container")
		Eventually(func() string {
			clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
			for _, pod := range clusterPods {
				initIdx, _ := kubernetes.GetInitContainerIndexByName(pod, controllers.InitContainerName)
				return pod.Spec.InitContainers[initIdx].Image
			}
			return ""
		}, testTimeout, suite.TestInterval).Should(Equal(customHelperImage))

		suite.UsingClusterBy(key.Name, "Validating pod is recreated using the explicitly defined helper image as auth sidecar container")
		Eventually(func() string {
			clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
			for _, pod := range clusterPods {
				authIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.AuthContainerName)
				// Fix: same as above — the auth sidecar lives in Spec.Containers.
				return pod.Spec.Containers[authIdx].Image
			}
			return ""
		}, testTimeout, suite.TestInterval).Should(Equal(customHelperImage))

		updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())

		// A rolling restart replaces pod content in-place on a real cluster, so names persist.
		if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
			suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
			Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
		}
	})
})
Context("Humio Cluster Update Environment Variable", func() {
	// Verifies that changing an environment variable on the HumioCluster spec moves the
	// cluster through Restarting and back to Running, with pods recreated carrying the
	// updated variable.
	It("Should correctly replace pods to use new environment variable", func() {
		key := types.NamespacedName{
			Name:      "humiocluster-update-envvar",
			Namespace: testProcessNamespace,
		}
		toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
		toCreate.Spec.NodeCount = helpers.IntPtr(2)
		toCreate.Spec.EnvironmentVariables = []corev1.EnvVar{
			{
				Name:  "test",
				Value: "",
			},
			{
				Name:  "ZOOKEEPER_URL",
				Value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless.default:2181",
			},
			{
				Name:  "KAFKA_SERVERS",
				Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092",
			},
			{
				Name:  "HUMIO_KAFKA_TOPIC_PREFIX",
				Value: key.Name,
			},
			{
				Name:  "AUTHENTICATION_METHOD",
				Value: "single-user",
			},
			{
				Name:  "SINGLE_USER_PASSWORD",
				Value: "password",
			},
			{
				Name:  "ENABLE_IOC_SERVICE",
				Value: "false",
			},
		}

		// Newer Humio versions use the launcher script and split JVM configuration across
		// several variables; older versions take a single HUMIO_JVM_ARGS.
		humioVersion, _ := controllers.HumioVersionFromString(controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetImage())
		if ok, _ := humioVersion.AtLeast(controllers.HumioVersionWithLauncherScript); ok {
			toCreate.Spec.EnvironmentVariables = append(toCreate.Spec.EnvironmentVariables, corev1.EnvVar{
				Name:  "HUMIO_GC_OPTS",
				Value: "-XX:+UseParallelGC -XX:+ScavengeBeforeFullGC -XX:+DisableExplicitGC",
			})
			toCreate.Spec.EnvironmentVariables = append(toCreate.Spec.EnvironmentVariables, corev1.EnvVar{
				Name:  "HUMIO_JVM_LOG_OPTS",
				Value: "-Xlog:gc+jni=debug:stdout -Xlog:gc*:stdout:time,tags",
			})
			toCreate.Spec.EnvironmentVariables = append(toCreate.Spec.EnvironmentVariables, corev1.EnvVar{
				Name:  "HUMIO_OPTS",
				Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false",
			})
		} else {
			toCreate.Spec.EnvironmentVariables = append(toCreate.Spec.EnvironmentVariables, corev1.EnvVar{
				Name:  "HUMIO_JVM_ARGS",
				Value: "-Xss2m -Xms256m -Xmx2g -server -XX:+UseParallelGC -XX:+ScavengeBeforeFullGC -XX:+DisableExplicitGC -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false",
			})
		}

		suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
		ctx := context.Background()
		suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
		defer suite.CleanupCluster(ctx, k8sClient, toCreate)

		var updatedHumioCluster humiov1alpha1.HumioCluster
		clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
		for _, pod := range clusterPods {
			humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
			Expect(pod.Spec.Containers[humioIndex].Env).Should(ContainElement(toCreate.Spec.EnvironmentVariables[0]))
		}

		suite.UsingClusterBy(key.Name, "Updating the environment variable successfully")
		updatedEnvironmentVariables := []corev1.EnvVar{
			{
				Name:  "test",
				Value: "update",
			},
			{
				Name:  "ZOOKEEPER_URL",
				Value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless.default:2181",
			},
			{
				Name:  "KAFKA_SERVERS",
				Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092",
			},
			{
				Name:  "HUMIO_KAFKA_TOPIC_PREFIX",
				Value: key.Name,
			},
			{
				Name:  "AUTHENTICATION_METHOD",
				Value: "single-user",
			},
			{
				Name:  "SINGLE_USER_PASSWORD",
				Value: "password",
			},
			{
				Name:  "ENABLE_IOC_SERVICE",
				Value: "false",
			},
		}
		// Fix: the version-dependent JVM variables must be appended to
		// updatedEnvironmentVariables (the spec we are about to apply), not to
		// toCreate.Spec.EnvironmentVariables — the cluster is already created, so
		// appending to toCreate here was dead code and the updated spec silently
		// dropped the JVM options.
		humioVersion, _ = controllers.HumioVersionFromString(controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetImage())
		if ok, _ := humioVersion.AtLeast(controllers.HumioVersionWithLauncherScript); ok {
			updatedEnvironmentVariables = append(updatedEnvironmentVariables, corev1.EnvVar{
				Name:  "HUMIO_GC_OPTS",
				Value: "-XX:+UseParallelGC -XX:+ScavengeBeforeFullGC -XX:+DisableExplicitGC",
			})
			updatedEnvironmentVariables = append(updatedEnvironmentVariables, corev1.EnvVar{
				Name:  "HUMIO_JVM_LOG_OPTS",
				Value: "-Xlog:gc+jni=debug:stdout -Xlog:gc*:stdout:time,tags",
			})
			updatedEnvironmentVariables = append(updatedEnvironmentVariables, corev1.EnvVar{
				Name:  "HUMIO_OPTS",
				Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false",
			})
		} else {
			updatedEnvironmentVariables = append(updatedEnvironmentVariables, corev1.EnvVar{
				Name:  "HUMIO_JVM_ARGS",
				Value: "-Xss2m -Xms256m -Xmx2g -server -XX:+UseParallelGC -XX:+ScavengeBeforeFullGC -XX:+DisableExplicitGC -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false",
			})
		}
		// Retry the update to ride out optimistic-concurrency conflicts.
		Eventually(func() error {
			updatedHumioCluster = humiov1alpha1.HumioCluster{}
			err := k8sClient.Get(ctx, key, &updatedHumioCluster)
			if err != nil {
				return err
			}
			updatedHumioCluster.Spec.EnvironmentVariables = updatedEnvironmentVariables
			return k8sClient.Update(ctx, &updatedHumioCluster)
		}, testTimeout, suite.TestInterval).Should(Succeed())

		Eventually(func() string {
			updatedHumioCluster = humiov1alpha1.HumioCluster{}
			Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
			return updatedHumioCluster.Status.State
		}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRestarting))

		suite.UsingClusterBy(key.Name, "Restarting the cluster in a rolling fashion")
		ensurePodsRollingRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)

		Eventually(func() string {
			updatedHumioCluster = humiov1alpha1.HumioCluster{}
			Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
			return updatedHumioCluster.Status.State
		}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))

		Eventually(func() bool {
			clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
			Expect(len(clusterPods)).To(BeIdenticalTo(*toCreate.Spec.NodeCount))

			for _, pod := range clusterPods {
				humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
				Expect(pod.Spec.Containers[humioIndex].Env).Should(ContainElement(updatedEnvironmentVariables[0]))
			}
			return true
		}, testTimeout, suite.TestInterval).Should(BeTrue())

		updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
		if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
			suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
			Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
		}
	})
})
Context("Humio Cluster Update Environment Variable Multi Node Pool", func() {
	// Verifies that an environment-variable change on one node pool only restarts that
	// pool: first the main pool is updated (additional pool must keep pod revision 1),
	// then the additional pool is updated (main pool must keep pod revision 2).
	It("Should correctly replace pods to use new environment variable for multi node pool clusters", func() {
		key := types.NamespacedName{
			Name:      "humiocluster-update-envvar-np",
			Namespace: testProcessNamespace,
		}
		toCreate := constructBasicMultiNodePoolHumioCluster(key, true, 1)
		toCreate.Spec.NodeCount = helpers.IntPtr(1)
		toCreate.Spec.NodePools[0].NodeCount = helpers.IntPtr(1)
		toCreate.Spec.EnvironmentVariables = []corev1.EnvVar{
			{
				Name:  "test",
				Value: "",
			},
			{
				Name:  "HUMIO_OPTS",
				Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false",
			},
			{
				Name:  "ZOOKEEPER_URL",
				Value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless.default:2181",
			},
			{
				Name:  "KAFKA_SERVERS",
				Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092",
			},
			{
				Name:  "HUMIO_KAFKA_TOPIC_PREFIX",
				Value: key.Name,
			},
			{
				Name:  "AUTHENTICATION_METHOD",
				Value: "single-user",
			},
			{
				Name:  "SINGLE_USER_PASSWORD",
				Value: "password",
			},
			{
				Name:  "ENABLE_IOC_SERVICE",
				Value: "false",
			},
		}
		toCreate.Spec.NodePools[0].EnvironmentVariables = []corev1.EnvVar{
			{
				Name:  "test",
				Value: "",
			},
			{
				Name:  "HUMIO_OPTS",
				Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false",
			},
			{
				Name:  "ZOOKEEPER_URL",
				Value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless.default:2181",
			},
			{
				Name:  "KAFKA_SERVERS",
				Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092",
			},
			{
				Name:  "HUMIO_KAFKA_TOPIC_PREFIX",
				Value: key.Name,
			},
			{
				Name:  "AUTHENTICATION_METHOD",
				Value: "single-user",
			},
			{
				Name:  "SINGLE_USER_PASSWORD",
				Value: "password",
			},
			{
				Name:  "ENABLE_IOC_SERVICE",
				Value: "false",
			},
		}

		suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
		ctx := context.Background()
		createAndBootstrapMultiNodePoolCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning)
		defer suite.CleanupCluster(ctx, k8sClient, toCreate)

		mainNodePoolManager := controllers.NewHumioNodeManagerFromHumioCluster(toCreate)

		var updatedHumioCluster humiov1alpha1.HumioCluster
		clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, mainNodePoolManager.GetPodLabels())
		for _, pod := range clusterPods {
			humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
			Expect(pod.Spec.Containers[humioIndex].Env).Should(ContainElement(toCreate.Spec.EnvironmentVariables[0]))
		}

		suite.UsingClusterBy(key.Name, "Updating the environment variable on main node pool successfully")
		updatedEnvironmentVariables := []corev1.EnvVar{
			{
				Name:  "test",
				Value: "update",
			},
			{
				Name:  "HUMIO_OPTS",
				Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false",
			},
			{
				Name:  "ZOOKEEPER_URL",
				Value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless.default:2181",
			},
			{
				Name:  "KAFKA_SERVERS",
				Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092",
			},
			{
				Name:  "HUMIO_KAFKA_TOPIC_PREFIX",
				Value: key.Name,
			},
			{
				Name:  "AUTHENTICATION_METHOD",
				Value: "single-user",
			},
			{
				Name:  "SINGLE_USER_PASSWORD",
				Value: "password",
			},
			{
				Name:  "ENABLE_IOC_SERVICE",
				Value: "false",
			},
		}
		// Retry the update to ride out optimistic-concurrency conflicts.
		Eventually(func() error {
			updatedHumioCluster = humiov1alpha1.HumioCluster{}
			err := k8sClient.Get(ctx, key, &updatedHumioCluster)
			if err != nil {
				return err
			}
			updatedHumioCluster.Spec.EnvironmentVariables = updatedEnvironmentVariables
			return k8sClient.Update(ctx, &updatedHumioCluster)
		}, testTimeout, suite.TestInterval).Should(Succeed())

		Eventually(func() string {
			updatedHumioCluster = humiov1alpha1.HumioCluster{}
			Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
			return updatedHumioCluster.Status.State
		}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRestarting))

		suite.UsingClusterBy(key.Name, "Confirming only one node pool is in the correct state")
		Eventually(func() int {
			var poolsInCorrectState int
			updatedHumioCluster = humiov1alpha1.HumioCluster{}
			Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
			for _, poolStatus := range updatedHumioCluster.Status.NodePoolStatus {
				if poolStatus.State == humiov1alpha1.HumioClusterStateRestarting {
					poolsInCorrectState++
				}
			}
			return poolsInCorrectState
		}, testTimeout, suite.TestInterval).Should(Equal(1))

		suite.UsingClusterBy(key.Name, "Restarting the cluster in a rolling fashion")
		ensurePodsRollingRestart(ctx, mainNodePoolManager, 2)

		Eventually(func() string {
			updatedHumioCluster = humiov1alpha1.HumioCluster{}
			Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
			return updatedHumioCluster.Status.State
		}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))

		Eventually(func() bool {
			clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, mainNodePoolManager.GetPodLabels())
			Expect(len(clusterPods)).To(BeIdenticalTo(*toCreate.Spec.NodeCount))

			for _, pod := range clusterPods {
				humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
				Expect(pod.Spec.Containers[humioIndex].Env).Should(ContainElement(updatedEnvironmentVariables[0]))
			}
			return true
		}, testTimeout, suite.TestInterval).Should(BeTrue())

		updatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, mainNodePoolManager.GetPodLabels())
		if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
			suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
			Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
		}

		suite.UsingClusterBy(key.Name, "Confirming pod revision did not change for the other node pool")
		additionalNodePoolManager := controllers.NewHumioNodeManagerFromHumioNodePool(&updatedHumioCluster, &updatedHumioCluster.Spec.NodePools[0])

		nonUpdatedClusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, additionalNodePoolManager.GetPodLabels())
		Expect(nonUpdatedClusterPods).To(HaveLen(*toCreate.Spec.NodePools[0].NodeCount))
		Expect(updatedHumioCluster.Spec.NodePools[0].EnvironmentVariables).To(Equal(toCreate.Spec.NodePools[0].EnvironmentVariables))
		for _, pod := range nonUpdatedClusterPods {
			Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "1"))
		}
		clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, additionalNodePoolManager.GetPodLabels())

		suite.UsingClusterBy(key.Name, "Updating the environment variable on additional node pool successfully")
		updatedEnvironmentVariables = []corev1.EnvVar{
			{
				Name:  "test",
				Value: "update",
			},
			{
				Name:  "HUMIO_OPTS",
				Value: "-Dakka.log-config-on-start=on -Dlog4j2.formatMsgNoLookups=true -Dzookeeper.client.secure=false",
			},
			{
				Name:  "ZOOKEEPER_URL",
				Value: "humio-cp-zookeeper-0.humio-cp-zookeeper-headless.default:2181",
			},
			{
				Name:  "KAFKA_SERVERS",
				Value: "humio-cp-kafka-0.humio-cp-kafka-headless.default:9092",
			},
			{
				Name:  "HUMIO_KAFKA_TOPIC_PREFIX",
				Value: key.Name,
			},
			{
				Name:  "AUTHENTICATION_METHOD",
				Value: "single-user",
			},
			{
				Name:  "SINGLE_USER_PASSWORD",
				Value: "password",
			},
			{
				Name:  "ENABLE_IOC_SERVICE",
				Value: "false",
			},
		}
		Eventually(func() error {
			updatedHumioCluster = humiov1alpha1.HumioCluster{}
			err := k8sClient.Get(ctx, key, &updatedHumioCluster)
			if err != nil {
				return err
			}
			updatedHumioCluster.Spec.NodePools[0].EnvironmentVariables = updatedEnvironmentVariables
			return k8sClient.Update(ctx, &updatedHumioCluster)
		}, testTimeout, suite.TestInterval).Should(Succeed())

		Eventually(func() string {
			updatedHumioCluster = humiov1alpha1.HumioCluster{}
			Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
			return updatedHumioCluster.Status.State
		}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRestarting))

		suite.UsingClusterBy(key.Name, "Confirming only one node pool is in the correct state")
		Eventually(func() int {
			var poolsInCorrectState int
			updatedHumioCluster = humiov1alpha1.HumioCluster{}
			Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
			for _, poolStatus := range updatedHumioCluster.Status.NodePoolStatus {
				if poolStatus.State == humiov1alpha1.HumioClusterStateRestarting {
					poolsInCorrectState++
				}
			}
			return poolsInCorrectState
		}, testTimeout, suite.TestInterval).Should(Equal(1))

		suite.UsingClusterBy(key.Name, "Restarting the cluster in a rolling fashion")
		ensurePodsRollingRestart(ctx, additionalNodePoolManager, 2)

		Eventually(func() string {
			updatedHumioCluster = humiov1alpha1.HumioCluster{}
			Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
			return updatedHumioCluster.Status.State
		}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))

		Eventually(func() bool {
			clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, additionalNodePoolManager.GetPodLabels())
			// Fix: these are the additional node pool's pods, so compare against the
			// node pool's NodeCount, not the main pool's Spec.NodeCount (both happen to
			// be 1 here, which is why the wrong comparison previously went unnoticed).
			Expect(len(clusterPods)).To(BeIdenticalTo(*toCreate.Spec.NodePools[0].NodeCount))

			for _, pod := range clusterPods {
				humioIndex, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
				Expect(pod.Spec.Containers[humioIndex].Env).Should(ContainElement(updatedEnvironmentVariables[0]))
			}
			return true
		}, testTimeout, suite.TestInterval).Should(BeTrue())

		updatedClusterPods, _ = kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, additionalNodePoolManager.GetPodLabels())
		if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
			suite.UsingClusterBy(key.Name, "Ensuring pod names are not changed")
			Expect(podNames(clusterPods)).To(Equal(podNames(updatedClusterPods)))
		}

		suite.UsingClusterBy(key.Name, "Confirming pod revision did not change for the other main pool")
		nonUpdatedClusterPods, _ = kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, mainNodePoolManager.GetPodLabels())
		Expect(nonUpdatedClusterPods).To(HaveLen(*toCreate.Spec.NodeCount))
		// The main pool was restarted once earlier in this spec, so it sits at revision 2.
		for _, pod := range nonUpdatedClusterPods {
			Expect(pod.Annotations).To(HaveKeyWithValue(controllers.PodRevisionAnnotation, "2"))
		}
	})
})
Context("Humio Cluster Ingress", func() {
	// Exercises the full ingress lifecycle: creation of the four expected ingress
	// objects, adding/removing annotations, changing hostnames, and finally disabling
	// ingress entirely (which must delete all ingress objects).
	It("Should correctly update ingresses to use new annotations variable", func() {
		key := types.NamespacedName{
			Name:      "humiocluster-ingress",
			Namespace: testProcessNamespace,
		}
		toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
		toCreate.Spec.Hostname = "humio.example.com"
		toCreate.Spec.ESHostname = "humio-es.humio.com"
		toCreate.Spec.Ingress = humiov1alpha1.HumioClusterIngressSpec{
			Enabled:    true,
			Controller: "nginx",
		}

		suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
		ctx := context.Background()
		suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
		defer suite.CleanupCluster(ctx, k8sClient, toCreate)

		suite.UsingClusterBy(key.Name, "Waiting for ingresses to be created")
		// The operator creates one ingress per endpoint type: general UI/API, streaming
		// query, ingest, and ES-compatible ingest — four objects in total.
		desiredIngresses := []*networkingv1.Ingress{
			controllers.ConstructGeneralIngress(toCreate, toCreate.Spec.Hostname),
			controllers.ConstructStreamingQueryIngress(toCreate, toCreate.Spec.Hostname),
			controllers.ConstructIngestIngress(toCreate, toCreate.Spec.Hostname),
			controllers.ConstructESIngestIngress(toCreate, toCreate.Spec.ESHostname),
		}

		var foundIngressList []networkingv1.Ingress
		Eventually(func() []networkingv1.Ingress {
			foundIngressList, _ = kubernetes.ListIngresses(ctx, k8sClient, key.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
			return foundIngressList
		}, testTimeout, suite.TestInterval).Should(HaveLen(4))

		// Kubernetes 1.18 introduced a new field, PathType. For older versions PathType is returned as nil,
		// so we explicitly set the value before comparing ingress objects.
		// When minimum supported Kubernetes version is 1.18, we can drop this.
		pathTypeImplementationSpecific := networkingv1.PathTypeImplementationSpecific
		for ingressIdx, ingress := range foundIngressList {
			for ruleIdx, rule := range ingress.Spec.Rules {
				for pathIdx := range rule.HTTP.Paths {
					if foundIngressList[ingressIdx].Spec.Rules[ruleIdx].HTTP.Paths[pathIdx].PathType == nil {
						foundIngressList[ingressIdx].Spec.Rules[ruleIdx].HTTP.Paths[pathIdx].PathType = &pathTypeImplementationSpecific
					}
				}
			}
		}

		Expect(foundIngressList).Should(HaveLen(4))
		// Match found ingresses to desired ones by name and compare annotations and spec.
		for _, desiredIngress := range desiredIngresses {
			for _, foundIngress := range foundIngressList {
				if desiredIngress.Name == foundIngress.Name {
					Expect(foundIngress.Annotations).To(BeEquivalentTo(desiredIngress.Annotations))
					Expect(foundIngress.Spec).To(BeEquivalentTo(desiredIngress.Spec))
				}
			}
		}

		suite.UsingClusterBy(key.Name, "Adding an additional ingress annotation successfully")
		var existingHumioCluster humiov1alpha1.HumioCluster
		// Retry the update to ride out optimistic-concurrency conflicts.
		Eventually(func() error {
			Expect(k8sClient.Get(ctx, key, &existingHumioCluster)).Should(Succeed())
			existingHumioCluster.Spec.Ingress.Annotations = map[string]string{"humio.com/new-important-annotation": "true"}
			return k8sClient.Update(ctx, &existingHumioCluster)
		}, testTimeout, suite.TestInterval).Should(Succeed())
		// Wait until every ingress object carries the new annotation.
		Eventually(func() bool {
			ingresses, _ := kubernetes.ListIngresses(ctx, k8sClient, key.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
			for _, ingress := range ingresses {
				if _, ok := ingress.Annotations["humio.com/new-important-annotation"]; !ok {
					return false
				}
			}
			return true
		}, testTimeout, suite.TestInterval).Should(BeTrue())
		Eventually(func() ([]networkingv1.Ingress, error) {
			return kubernetes.ListIngresses(ctx, k8sClient, key.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
		}, testTimeout, suite.TestInterval).Should(HaveLen(4))

		suite.UsingClusterBy(key.Name, "Changing ingress hostnames successfully")
		Eventually(func() error {
			Expect(k8sClient.Get(ctx, key, &existingHumioCluster)).Should(Succeed())
			existingHumioCluster.Spec.Hostname = "humio2.example.com"
			existingHumioCluster.Spec.ESHostname = "humio2-es.example.com"
			return k8sClient.Update(ctx, &existingHumioCluster)
		}, testTimeout, suite.TestInterval).Should(Succeed())

		// Recompute the desired ingresses from the updated cluster spec (new hostnames
		// plus the annotation added above).
		desiredIngresses = []*networkingv1.Ingress{
			controllers.ConstructGeneralIngress(&existingHumioCluster, existingHumioCluster.Spec.Hostname),
			controllers.ConstructStreamingQueryIngress(&existingHumioCluster, existingHumioCluster.Spec.Hostname),
			controllers.ConstructIngestIngress(&existingHumioCluster, existingHumioCluster.Spec.Hostname),
			controllers.ConstructESIngestIngress(&existingHumioCluster, existingHumioCluster.Spec.ESHostname),
		}
		// Wait until every ingress rule points at one of the two new hostnames.
		Eventually(func() bool {
			ingresses, _ := kubernetes.ListIngresses(ctx, k8sClient, key.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
			for _, ingress := range ingresses {
				for _, rule := range ingress.Spec.Rules {
					if rule.Host != "humio2.example.com" && rule.Host != "humio2-es.example.com" {
						return false
					}
				}
			}
			return true
		}, testTimeout, suite.TestInterval).Should(BeTrue())

		foundIngressList, _ = kubernetes.ListIngresses(ctx, k8sClient, key.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))

		// Kubernetes 1.18 introduced a new field, PathType. For older versions PathType is returned as nil,
		// so we explicitly set the value before comparing ingress objects.
		// When minimum supported Kubernetes version is 1.18, we can drop this.
		for ingressIdx, ingress := range foundIngressList {
			for ruleIdx, rule := range ingress.Spec.Rules {
				for pathIdx := range rule.HTTP.Paths {
					if foundIngressList[ingressIdx].Spec.Rules[ruleIdx].HTTP.Paths[pathIdx].PathType == nil {
						foundIngressList[ingressIdx].Spec.Rules[ruleIdx].HTTP.Paths[pathIdx].PathType = &pathTypeImplementationSpecific
					}
				}
			}
		}

		for _, desiredIngress := range desiredIngresses {
			for _, foundIngress := range foundIngressList {
				if desiredIngress.Name == foundIngress.Name {
					Expect(foundIngress.Annotations).To(BeEquivalentTo(desiredIngress.Annotations))
					Expect(foundIngress.Spec).To(BeEquivalentTo(desiredIngress.Spec))
				}
			}
		}

		suite.UsingClusterBy(key.Name, "Removing an ingress annotation successfully")
		Eventually(func() error {
			Expect(k8sClient.Get(ctx, key, &existingHumioCluster)).Should(Succeed())
			delete(existingHumioCluster.Spec.Ingress.Annotations, "humio.com/new-important-annotation")
			return k8sClient.Update(ctx, &existingHumioCluster)
		}, testTimeout, suite.TestInterval).Should(Succeed())

		// Poll until no ingress carries the annotation anymore: the closure returns true
		// while any ingress still has it, and the matcher waits for false.
		Eventually(func() bool {
			ingresses, _ := kubernetes.ListIngresses(ctx, k8sClient, key.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
			for _, ingress := range ingresses {
				if _, ok := ingress.Annotations["humio.com/new-important-annotation"]; ok {
					return true
				}
			}
			return false
		}, testTimeout, suite.TestInterval).Should(BeFalse())

		foundIngressList, _ = kubernetes.ListIngresses(ctx, k8sClient, key.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
		for _, foundIngress := range foundIngressList {
			Expect(foundIngress.Annotations).ShouldNot(HaveKey("humio.com/new-important-annotation"))
		}

		suite.UsingClusterBy(key.Name, "Disabling ingress successfully")
		Eventually(func() error {
			Expect(k8sClient.Get(ctx, key, &existingHumioCluster)).Should(Succeed())
			existingHumioCluster.Spec.Ingress.Enabled = false
			return k8sClient.Update(ctx, &existingHumioCluster)
		}, testTimeout, suite.TestInterval).Should(Succeed())

		// With ingress disabled, all ingress objects must be garbage-collected.
		Eventually(func() ([]networkingv1.Ingress, error) {
			return kubernetes.ListIngresses(ctx, k8sClient, key.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
		}, testTimeout, suite.TestInterval).Should(HaveLen(0))
	})
})
Context("Humio Cluster Pod Annotations", func() {
	// Verifies that annotations from Spec.PodAnnotations are applied to every pod,
	// alongside the operator-managed "productName" annotation.
	It("Should be correctly annotated", func() {
		key := types.NamespacedName{
			Name:      "humiocluster-pods",
			Namespace: testProcessNamespace,
		}
		toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
		toCreate.Spec.PodAnnotations = map[string]string{"humio.com/new-important-annotation": "true"}

		suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
		ctx := context.Background()
		suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
		defer suite.CleanupCluster(ctx, k8sClient, toCreate)

		podLabels := controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels()
		Eventually(func() bool {
			pods, _ := kubernetes.ListPods(ctx, k8sClient, toCreate.Namespace, podLabels)
			Expect(len(pods)).To(BeIdenticalTo(*toCreate.Spec.NodeCount))

			// Both the user-supplied and the operator-managed annotation must be present
			// on every pod.
			for idx := range pods {
				annotations := pods[idx].Annotations
				Expect(annotations["humio.com/new-important-annotation"]).Should(Equal("true"))
				Expect(annotations["productName"]).Should(Equal("humio"))
			}
			return true
		}, testTimeout, suite.TestInterval).Should(BeTrue())
	})
})
Context("Humio Cluster Pod Labels", func() {
	// Verifies that labels from Spec.PodLabels are applied to every pod, alongside the
	// operator-managed "app.kubernetes.io/managed-by" label.
	// Fix: the spec description previously said "annotated" (copy-paste from the pod
	// annotations Context) even though this test verifies labels.
	It("Should be correctly labeled", func() {
		key := types.NamespacedName{
			Name:      "humiocluster-labels",
			Namespace: testProcessNamespace,
		}
		toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
		toCreate.Spec.PodLabels = map[string]string{"humio.com/new-important-label": "true"}

		suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
		ctx := context.Background()
		suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
		defer suite.CleanupCluster(ctx, k8sClient, toCreate)

		Eventually(func() bool {
			clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, toCreate.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
			Expect(len(clusterPods)).To(BeIdenticalTo(*toCreate.Spec.NodeCount))

			for _, pod := range clusterPods {
				Expect(pod.Labels["humio.com/new-important-label"]).Should(Equal("true"))
				Expect(pod.Labels["app.kubernetes.io/managed-by"]).Should(Equal("humio-operator"))
			}
			return true
		}, testTimeout, suite.TestInterval).Should(BeTrue())
	})
})
Context("Humio Cluster Custom Service", func() {
It("Should correctly use default service", func() {
key := types.NamespacedName{
Name: "humiocluster-custom-svc",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
svc, _ := kubernetes.GetService(ctx, k8sClient, key.Name, key.Namespace)
Expect(svc.Spec.Type).To(BeIdenticalTo(corev1.ServiceTypeClusterIP))
for _, port := range svc.Spec.Ports {
if port.Name == "http" {
Expect(port.Port).Should(Equal(int32(8080)))
}
if port.Name == "es" {
Expect(port.Port).Should(Equal(int32(9200)))
}
}
var updatedHumioCluster humiov1alpha1.HumioCluster
suite.UsingClusterBy(key.Name, "Updating service type")
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.HumioServiceType = corev1.ServiceTypeLoadBalancer
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
// Wait for the new HumioCluster to finish any existing reconcile loop by waiting for the
// status.observedGeneration to equal at least that of the current resource version. This will avoid race
// conditions where the HumioCluster is updated and service is deleted midway through reconciliation.
suite.IncrementGenerationAndWaitForReconcileToSync(ctx, key, k8sClient, testTimeout)
Expect(k8sClient.Delete(ctx, controllers.ConstructService(controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster)))).To(Succeed())
suite.UsingClusterBy(key.Name, "Confirming we can see the updated HumioCluster object")
Eventually(func() corev1.ServiceType {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Spec.HumioServiceType
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(corev1.ServiceTypeLoadBalancer))
Eventually(func() types.UID {
newSvc, _ := kubernetes.GetService(ctx, k8sClient, key.Name, key.Namespace)
suite.UsingClusterBy(key.Name, fmt.Sprintf("Waiting for Service to get recreated. ServiceBeforeDeletion.Metadata=%#+v, CurrentServiceFromAPI.Metadata=%#+v", svc.ObjectMeta, newSvc.ObjectMeta))
return newSvc.UID
}, testTimeout, suite.TestInterval).ShouldNot(BeEquivalentTo(svc.UID))
Eventually(func() corev1.ServiceType {
svc, _ = kubernetes.GetService(ctx, k8sClient, key.Name, key.Namespace)
return svc.Spec.Type
}, testTimeout, suite.TestInterval).Should(Equal(corev1.ServiceTypeLoadBalancer))
suite.UsingClusterBy(key.Name, "Updating Humio port")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.HumioServicePort = 443
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
// TODO: Right now the service is not updated properly, so we delete it ourselves to make the operator recreate the service
// Wait for the new HumioCluster to finish any existing reconcile loop by waiting for the
// status.observedGeneration to equal at least that of the current resource version. This will avoid race
// conditions where the HumioCluster is updated and service is deleted mid-way through a reconcile.
suite.IncrementGenerationAndWaitForReconcileToSync(ctx, key, k8sClient, testTimeout)
Expect(k8sClient.Delete(ctx, controllers.ConstructService(controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster)))).To(Succeed())
suite.UsingClusterBy(key.Name, "Confirming service gets recreated with correct Humio port")
Eventually(func() types.UID {
newSvc, _ := kubernetes.GetService(ctx, k8sClient, key.Name, key.Namespace)
suite.UsingClusterBy(key.Name, fmt.Sprintf("Waiting for Service to get recreated. ServiceBeforeDeletion.Metadata=%#+v, CurrentServiceFromAPI.Metadata=%#+v", svc.ObjectMeta, newSvc.ObjectMeta))
return newSvc.UID
}, testTimeout, suite.TestInterval).ShouldNot(BeEquivalentTo(svc.UID))
Eventually(func() int32 {
svc, _ = kubernetes.GetService(ctx, k8sClient, key.Name, key.Namespace)
for _, port := range svc.Spec.Ports {
if port.Name == "http" {
return port.Port
}
}
return -1
}, testTimeout, suite.TestInterval).Should(Equal(int32(443)))
suite.UsingClusterBy(key.Name, "Updating ES port")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
updatedHumioCluster.Spec.HumioESServicePort = 9201
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
// TODO: Right now the service is not updated properly, so we delete it ourselves to make the operator recreate the service
// Wait for the new HumioCluster to finish any existing reconcile loop by waiting for the
// status.observedGeneration to equal at least that of the current resource version. This will avoid race
// conditions where the HumioCluster is updated and service is deleted mid-way through a reconcile.
suite.IncrementGenerationAndWaitForReconcileToSync(ctx, key, k8sClient, testTimeout)
Expect(k8sClient.Delete(ctx, controllers.ConstructService(controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster)))).To(Succeed())
suite.UsingClusterBy(key.Name, "Confirming service gets recreated with correct ES port")
Eventually(func() types.UID {
newSvc, _ := kubernetes.GetService(ctx, k8sClient, key.Name, key.Namespace)
suite.UsingClusterBy(key.Name, fmt.Sprintf("Waiting for Service to get recreated. ServiceBeforeDeletion.Metadata=%#+v, CurrentServiceFromAPI.Metadata=%#+v", svc.ObjectMeta, newSvc.ObjectMeta))
return newSvc.UID
}, testTimeout, suite.TestInterval).ShouldNot(BeEquivalentTo(svc.UID))
Eventually(func() int32 {
svc, _ = kubernetes.GetService(ctx, k8sClient, key.Name, key.Namespace)
for _, port := range svc.Spec.Ports {
if port.Name == "es" {
return port.Port
}
}
return -1
}, testTimeout, suite.TestInterval).Should(Equal(int32(9201)))
svc, _ = kubernetes.GetService(ctx, k8sClient, key.Name, key.Namespace)
Expect(svc.Annotations).To(BeNil())
suite.UsingClusterBy(key.Name, "Updating service annotations")
updatedAnnotationKey := "new-annotation"
updatedAnnotationValue := "new-value"
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.HumioServiceAnnotations = map[string]string{updatedAnnotationKey: updatedAnnotationValue}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming we can see the updated service annotations")
Eventually(func() map[string]string {
service := controllers.ConstructService(controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster))
Expect(k8sClient.Get(ctx, key, service)).To(Succeed())
return service.Annotations
}, testTimeout, suite.TestInterval).Should(HaveKeyWithValue(updatedAnnotationKey, updatedAnnotationValue))
suite.UsingClusterBy(key.Name, "Updating service labels")
updatedLabelsKey := "new-label"
updatedLabelsValue := "new-value"
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.HumioServiceLabels = map[string]string{updatedLabelsKey: updatedLabelsValue}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming we can see the updated service labels")
Eventually(func() map[string]string {
service := controllers.ConstructService(controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster))
Expect(k8sClient.Get(ctx, key, service)).To(Succeed())
return service.Labels
}, testTimeout, suite.TestInterval).Should(HaveKeyWithValue(updatedLabelsKey, updatedLabelsValue))
// The selector is not controlled through the spec, but with the addition of node pools, the operator adds
// a new selector. This test confirms the operator will be able to migrate to different selectors on the
// service.
suite.UsingClusterBy(key.Name, "Updating service selector for migration to node pools")
service := controllers.ConstructService(controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster))
Expect(k8sClient.Get(ctx, key, service)).To(Succeed())
delete(service.Spec.Selector, "humio.com/node-pool")
Expect(k8sClient.Update(ctx, service)).To(Succeed())
suite.IncrementGenerationAndWaitForReconcileToSync(ctx, key, k8sClient, testTimeout)
Eventually(func() map[string]string {
service := controllers.ConstructService(controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster))
Expect(k8sClient.Get(ctx, key, service)).To(Succeed())
return service.Spec.Selector
}, testTimeout, suite.TestInterval).Should(HaveKeyWithValue("humio.com/node-pool", key.Name))
suite.UsingClusterBy(key.Name, "Confirming headless service has the correct HTTP and ES ports")
headlessSvc, _ := kubernetes.GetService(ctx, k8sClient, fmt.Sprintf("%s-headless", key.Name), key.Namespace)
Expect(headlessSvc.Spec.Type).To(BeIdenticalTo(corev1.ServiceTypeClusterIP))
for _, port := range headlessSvc.Spec.Ports {
if port.Name == "http" {
Expect(port.Port).Should(Equal(int32(8080)))
}
if port.Name == "es" {
Expect(port.Port).Should(Equal(int32(9200)))
}
}
headlessSvc, _ = kubernetes.GetService(ctx, k8sClient, key.Name, key.Namespace)
Expect(svc.Annotations).To(BeNil())
suite.UsingClusterBy(key.Name, "Updating headless service annotations")
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.HumioHeadlessServiceAnnotations = map[string]string{updatedAnnotationKey: updatedAnnotationValue}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming we can see the updated service annotations")
Eventually(func() map[string]string {
Expect(k8sClient.Get(ctx, key, headlessSvc)).Should(Succeed())
return headlessSvc.Annotations
}, testTimeout, suite.TestInterval).Should(HaveKeyWithValue(updatedAnnotationKey, updatedAnnotationValue))
suite.UsingClusterBy(key.Name, "Updating headless service labels")
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.HumioHeadlessServiceLabels = map[string]string{updatedLabelsKey: updatedLabelsValue}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming we can see the updated service labels")
Eventually(func() map[string]string {
Expect(k8sClient.Get(ctx, key, headlessSvc)).Should(Succeed())
return headlessSvc.Labels
}, testTimeout, suite.TestInterval).Should(HaveKeyWithValue(updatedLabelsKey, updatedLabelsValue))
})
})
// Verifies that the operator renders the Humio container arguments as
// expected, and that enabling ephemeral disks together with a zone-based
// node UUID prefix injects the zone-aware ZOOKEEPER_PREFIX_FOR_NODE_UUID
// export into the container arguments plus the ZOOKEEPER_URL_FOR_NODE_UUID
// environment variable.
Context("Humio Cluster Container Arguments", func() {
It("Should correctly configure container arguments and ephemeral disks env var", func() {
key := types.NamespacedName{
Name: "humiocluster-container-args",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully without ephemeral disks")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
// Without ephemeral disks the pods must use the default container
// arguments and must not carry the ZOOKEEPER_URL_FOR_NODE_UUID env var.
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIdx].Args).To(Equal([]string{"-c", "export CORES=$(getconf _NPROCESSORS_ONLN) && export HUMIO_OPTS=\"$HUMIO_OPTS -XX:ActiveProcessorCount=$(getconf _NPROCESSORS_ONLN)\" && export ZONE=$(cat /shared/availability-zone) && exec bash /app/humio/run.sh"}))
Expect(pod.Spec.Containers[humioIdx].Env).ToNot(ContainElement(corev1.EnvVar{
Name: "ZOOKEEPER_URL_FOR_NODE_UUID",
Value: "$(ZOOKEEPER_URL)",
}))
}
suite.UsingClusterBy(key.Name, "Updating node uuid prefix which includes ephemeral disks and zone")
var updatedHumioCluster humiov1alpha1.HumioCluster
Eventually(func() error {
// Start from a fresh object on every retry so a conflicting Update does
// not leave stale fields behind; this matches the pattern used by the
// other update-retry loops in this suite.
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
// Build a fresh slice rather than appending to toCreate's slice directly:
// append could otherwise write into toCreate's backing array when it has
// spare capacity, mutating the shared fixture between retries.
envVars := make([]corev1.EnvVar, 0, len(toCreate.Spec.EnvironmentVariables)+1)
envVars = append(envVars, toCreate.Spec.EnvironmentVariables...)
envVars = append(envVars, corev1.EnvVar{Name: "USING_EPHEMERAL_DISKS", Value: "true"})
updatedHumioCluster.Spec.EnvironmentVariables = envVars
updatedHumioCluster.Spec.NodeUUIDPrefix = "humio_{{.Zone}}_"
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
// After reconciliation the container arguments must additionally export
// ZOOKEEPER_PREFIX_FOR_NODE_UUID derived from the availability zone.
Eventually(func() []string {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
if len(clusterPods) > 0 {
humioIdx, _ := kubernetes.GetContainerIndexByName(clusterPods[0], controllers.HumioContainerName)
return clusterPods[0].Spec.Containers[humioIdx].Args
}
return []string{}
}, testTimeout, suite.TestInterval).Should(BeEquivalentTo([]string{"-c", "export CORES=$(getconf _NPROCESSORS_ONLN) && export HUMIO_OPTS=\"$HUMIO_OPTS -XX:ActiveProcessorCount=$(getconf _NPROCESSORS_ONLN)\" && export ZONE=$(cat /shared/availability-zone) && export ZOOKEEPER_PREFIX_FOR_NODE_UUID=/humio_$(cat /shared/availability-zone)_ && exec bash /app/humio/run.sh"}))
// The ZooKeeper UUID URL env var must now also be present on the pods.
clusterPods, err := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
Expect(err).ToNot(HaveOccurred())
humioIdx, _ := kubernetes.GetContainerIndexByName(clusterPods[0], controllers.HumioContainerName)
Expect(clusterPods[0].Spec.Containers[humioIdx].Env).To(ContainElement(corev1.EnvVar{
Name: "ZOOKEEPER_URL_FOR_NODE_UUID",
Value: "$(ZOOKEEPER_URL)",
}))
})
})
// Verifies container arguments when ephemeral disks are enabled but the node
// UUID prefix does not reference the availability zone: the generated
// arguments must export the static ZooKeeper prefix "/humio_" instead of a
// zone-derived one.
Context("Humio Cluster Container Arguments Without Zone", func() {
It("Should correctly configure container arguments", func() {
key := types.NamespacedName{
Name: "humiocluster-container-without-zone-args",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
// Before the update, all pods should run with the default arguments.
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIdx].Args).To(Equal([]string{"-c", "export CORES=$(getconf _NPROCESSORS_ONLN) && export HUMIO_OPTS=\"$HUMIO_OPTS -XX:ActiveProcessorCount=$(getconf _NPROCESSORS_ONLN)\" && export ZONE=$(cat /shared/availability-zone) && exec bash /app/humio/run.sh"}))
}
suite.UsingClusterBy(key.Name, "Updating node uuid prefix which includes ephemeral disks but not zone")
var updatedHumioCluster humiov1alpha1.HumioCluster
// Retry the spec update until it succeeds, re-fetching a fresh copy of the
// resource on every attempt to avoid update conflicts.
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.EnvironmentVariables = append(toCreate.Spec.EnvironmentVariables, corev1.EnvVar{Name: "USING_EPHEMERAL_DISKS", Value: "true"})
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
// Once reconciled, the pod arguments must include the static
// ZOOKEEPER_PREFIX_FOR_NODE_UUID=/humio_ export (no zone interpolation).
Eventually(func() []string {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
if len(clusterPods) > 0 {
humioIdx, _ := kubernetes.GetContainerIndexByName(clusterPods[0], controllers.HumioContainerName)
return clusterPods[0].Spec.Containers[humioIdx].Args
}
return []string{}
}, testTimeout, suite.TestInterval).Should(BeEquivalentTo([]string{"-c", "export CORES=$(getconf _NPROCESSORS_ONLN) && export HUMIO_OPTS=\"$HUMIO_OPTS -XX:ActiveProcessorCount=$(getconf _NPROCESSORS_ONLN)\" && export ZONE=$(cat /shared/availability-zone) && export ZOOKEEPER_PREFIX_FOR_NODE_UUID=/humio_ && exec bash /app/humio/run.sh"}))
})
})
// Verifies that annotations set via spec.humioServiceAccountAnnotations are
// applied to the Humio service account, and that clearing the field removes
// them again.
Context("Humio Cluster Service Account Annotations", func() {
It("Should correctly handle service account annotations", func() {
key := types.NamespacedName{
Name: "humiocluster-sa-annotations",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
// Wait until the operator has created the Humio service account, then
// confirm it starts out with no annotations.
humioServiceAccountName := fmt.Sprintf("%s-%s", key.Name, controllers.HumioServiceAccountNameSuffix)
Eventually(func() error {
_, err := kubernetes.GetServiceAccount(ctx, k8sClient, humioServiceAccountName, key.Namespace)
return err
}, testTimeout, suite.TestInterval).Should(Succeed())
serviceAccount, _ := kubernetes.GetServiceAccount(ctx, k8sClient, humioServiceAccountName, key.Namespace)
Expect(serviceAccount.Annotations).Should(BeNil())
suite.UsingClusterBy(key.Name, "Adding an annotation successfully")
var updatedHumioCluster humiov1alpha1.HumioCluster
// Retry the spec update until it succeeds to tolerate update conflicts.
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.HumioServiceAccountAnnotations = map[string]string{"some-annotation": "true"}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
// Poll until the annotation shows up on the service account.
Eventually(func() bool {
serviceAccount, _ = kubernetes.GetServiceAccount(ctx, k8sClient, humioServiceAccountName, key.Namespace)
_, ok := serviceAccount.Annotations["some-annotation"]
return ok
}, testTimeout, suite.TestInterval).Should(BeTrue())
Expect(serviceAccount.Annotations["some-annotation"]).Should(Equal("true"))
suite.UsingClusterBy(key.Name, "Removing all annotations successfully")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.HumioServiceAccountAnnotations = nil
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
// Clearing the field in the spec must eventually clear the annotations
// on the service account as well.
Eventually(func() map[string]string {
serviceAccount, _ = kubernetes.GetServiceAccount(ctx, k8sClient, humioServiceAccountName, key.Namespace)
return serviceAccount.Annotations
}, testTimeout, suite.TestInterval).Should(BeNil())
})
})
// Verifies that spec.podSecurityContext is propagated to the pods: the
// default security context is applied initially, an empty context replaces
// it, and a non-empty context triggers a rolling restart with the new value.
Context("Humio Cluster Pod Security Context", func() {
It("Should correctly handle pod security context", func() {
key := types.NamespacedName{
Name: "humiocluster-podsecuritycontext",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
// Initially every pod must carry the node manager's default pod security context.
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
Expect(pod.Spec.SecurityContext).To(Equal(controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodSecurityContext()))
}
suite.UsingClusterBy(key.Name, "Updating Pod Security Context to be empty")
var updatedHumioCluster humiov1alpha1.HumioCluster
// Retry the spec update until it succeeds to tolerate update conflicts.
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.PodSecurityContext = &corev1.PodSecurityContext{}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
// Poll until every pod has been replaced with the empty security context.
Eventually(func() bool {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
if !reflect.DeepEqual(pod.Spec.SecurityContext, &corev1.PodSecurityContext{}) {
return false
}
}
return true
}, testTimeout, suite.TestInterval).Should(BeTrue())
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
Expect(pod.Spec.SecurityContext).To(Equal(&corev1.PodSecurityContext{}))
}
suite.UsingClusterBy(key.Name, "Updating Pod Security Context to be non-empty")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.PodSecurityContext = &corev1.PodSecurityContext{RunAsNonRoot: helpers.BoolPtr(true)}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Restarting the cluster in a rolling fashion")
// The second argument is the expected pod revision after the restart.
ensurePodsRollingRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
// Sample the first pod's security context until the new value is visible.
Eventually(func() corev1.PodSecurityContext {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
return *pod.Spec.SecurityContext
}
return corev1.PodSecurityContext{}
}, testTimeout, suite.TestInterval).Should(BeEquivalentTo(corev1.PodSecurityContext{RunAsNonRoot: helpers.BoolPtr(true)}))
// Finally confirm every pod carries the non-empty security context.
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
Expect(pod.Spec.SecurityContext).To(Equal(&corev1.PodSecurityContext{RunAsNonRoot: helpers.BoolPtr(true)}))
}
})
})
// Verifies that spec.containerSecurityContext is propagated to the Humio
// container: the default context is applied initially, an empty context
// replaces it, and a non-empty context (adding NET_ADMIN) triggers a rolling
// restart with the new value.
Context("Humio Cluster Container Security Context", func() {
It("Should correctly handle container security context", func() {
key := types.NamespacedName{
Name: "humiocluster-containersecuritycontext",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
// Initially the Humio container must carry the node manager's default
// container security context.
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIdx].SecurityContext).To(Equal(controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetContainerSecurityContext()))
}
suite.UsingClusterBy(key.Name, "Updating Container Security Context to be empty")
var updatedHumioCluster humiov1alpha1.HumioCluster
// Retry the spec update until it succeeds to tolerate update conflicts.
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.ContainerSecurityContext = &corev1.SecurityContext{}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
// Poll until every pod's Humio container has the empty security context.
Eventually(func() bool {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
if !reflect.DeepEqual(pod.Spec.Containers[humioIdx].SecurityContext, &corev1.SecurityContext{}) {
return false
}
}
return true
}, testTimeout, suite.TestInterval).Should(BeTrue())
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIdx].SecurityContext).To(Equal(&corev1.SecurityContext{}))
}
suite.UsingClusterBy(key.Name, "Updating Container Security Context to be non-empty")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.ContainerSecurityContext = &corev1.SecurityContext{
Capabilities: &corev1.Capabilities{
Add: []corev1.Capability{
"NET_ADMIN",
},
},
}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Restarting the cluster in a rolling fashion")
// The second argument is the expected pod revision after the restart.
ensurePodsRollingRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
// Sample the first pod's container security context until the new value
// (NET_ADMIN capability added) is visible.
Eventually(func() corev1.SecurityContext {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
return *pod.Spec.Containers[humioIdx].SecurityContext
}
return corev1.SecurityContext{}
}, testTimeout, suite.TestInterval).Should(Equal(corev1.SecurityContext{
Capabilities: &corev1.Capabilities{
Add: []corev1.Capability{
"NET_ADMIN",
},
},
}))
// Finally confirm every pod's Humio container carries the new context.
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIdx].SecurityContext).To(Equal(&corev1.SecurityContext{
Capabilities: &corev1.Capabilities{
Add: []corev1.Capability{
"NET_ADMIN",
},
},
}))
}
})
})
// Verifies that the readiness, liveness, and startup probes of the Humio
// container follow the spec: defaults are applied initially, setting the
// probes to empty structs removes them from the pods entirely, and setting
// custom probes triggers a rolling restart with the new probe definitions.
Context("Humio Cluster Container Probes", func() {
It("Should correctly handle container probes", func() {
key := types.NamespacedName{
Name: "humiocluster-probes",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
// Initially all three probes must equal the node manager's defaults.
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIdx].ReadinessProbe).To(Equal(controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetContainerReadinessProbe()))
Expect(pod.Spec.Containers[humioIdx].LivenessProbe).To(Equal(controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetContainerLivenessProbe()))
Expect(pod.Spec.Containers[humioIdx].StartupProbe).To(Equal(controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetContainerStartupProbe()))
}
suite.UsingClusterBy(key.Name, "Updating Container probes to be empty")
var updatedHumioCluster humiov1alpha1.HumioCluster
// Retry the spec update until it succeeds to tolerate update conflicts.
// Empty probe structs instruct the operator to omit the probes entirely.
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.ContainerReadinessProbe = &corev1.Probe{}
updatedHumioCluster.Spec.ContainerLivenessProbe = &corev1.Probe{}
updatedHumioCluster.Spec.ContainerStartupProbe = &corev1.Probe{}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming pods have the updated revision")
ensurePodsRollingRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
suite.UsingClusterBy(key.Name, "Confirming pods do not have a readiness probe set")
// The "no-pods-found" fallback below returns a non-nil sentinel probe so
// the BeNil() assertion keeps polling instead of passing vacuously while
// no pods exist yet.
Eventually(func() *corev1.Probe {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
return pod.Spec.Containers[humioIdx].ReadinessProbe
}
return &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
Exec: &corev1.ExecAction{Command: []string{"no-pods-found"}},
},
}
}, testTimeout, suite.TestInterval).Should(BeNil())
suite.UsingClusterBy(key.Name, "Confirming pods do not have a liveness probe set")
Eventually(func() *corev1.Probe {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
return pod.Spec.Containers[humioIdx].LivenessProbe
}
return &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
Exec: &corev1.ExecAction{Command: []string{"no-pods-found"}},
},
}
}, testTimeout, suite.TestInterval).Should(BeNil())
suite.UsingClusterBy(key.Name, "Confirming pods do not have a startup probe set")
Eventually(func() *corev1.Probe {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
return pod.Spec.Containers[humioIdx].StartupProbe
}
return &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
Exec: &corev1.ExecAction{Command: []string{"no-pods-found"}},
},
}
}, testTimeout, suite.TestInterval).Should(BeNil())
suite.UsingClusterBy(key.Name, "Updating Container probes to be non-empty")
// Set custom HTTP probes against /api/v1/config on the Humio port.
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.ContainerReadinessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/api/v1/config",
Port: intstr.IntOrString{IntVal: controllers.HumioPort},
Scheme: getProbeScheme(&updatedHumioCluster),
},
},
InitialDelaySeconds: 60,
PeriodSeconds: 10,
TimeoutSeconds: 4,
SuccessThreshold: 2,
FailureThreshold: 20,
}
updatedHumioCluster.Spec.ContainerLivenessProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/api/v1/config",
Port: intstr.IntOrString{IntVal: controllers.HumioPort},
Scheme: getProbeScheme(&updatedHumioCluster),
},
},
InitialDelaySeconds: 60,
PeriodSeconds: 10,
TimeoutSeconds: 4,
SuccessThreshold: 1,
FailureThreshold: 20,
}
updatedHumioCluster.Spec.ContainerStartupProbe = &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/api/v1/config",
Port: intstr.IntOrString{IntVal: controllers.HumioPort},
Scheme: getProbeScheme(&updatedHumioCluster),
},
},
PeriodSeconds: 10,
TimeoutSeconds: 4,
SuccessThreshold: 1,
FailureThreshold: 30,
}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Restarting the cluster in a rolling fashion")
ensurePodsRollingRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
// Poll each probe on the first pod until it matches the custom definition.
Eventually(func() *corev1.Probe {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
return pod.Spec.Containers[humioIdx].ReadinessProbe
}
return &corev1.Probe{}
}, testTimeout, suite.TestInterval).Should(Equal(&corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/api/v1/config",
Port: intstr.IntOrString{IntVal: controllers.HumioPort},
Scheme: getProbeScheme(&updatedHumioCluster),
},
},
InitialDelaySeconds: 60,
PeriodSeconds: 10,
TimeoutSeconds: 4,
SuccessThreshold: 2,
FailureThreshold: 20,
}))
Eventually(func() *corev1.Probe {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
return pod.Spec.Containers[humioIdx].LivenessProbe
}
return &corev1.Probe{}
}, testTimeout, suite.TestInterval).Should(Equal(&corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/api/v1/config",
Port: intstr.IntOrString{IntVal: controllers.HumioPort},
Scheme: getProbeScheme(&updatedHumioCluster),
},
},
InitialDelaySeconds: 60,
PeriodSeconds: 10,
TimeoutSeconds: 4,
SuccessThreshold: 1,
FailureThreshold: 20,
}))
Eventually(func() *corev1.Probe {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
return pod.Spec.Containers[humioIdx].StartupProbe
}
return &corev1.Probe{}
}, testTimeout, suite.TestInterval).Should(Equal(&corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/api/v1/config",
Port: intstr.IntOrString{IntVal: controllers.HumioPort},
Scheme: getProbeScheme(&updatedHumioCluster),
},
},
PeriodSeconds: 10,
TimeoutSeconds: 4,
SuccessThreshold: 1,
FailureThreshold: 30,
}))
// Finally confirm every pod carries all three custom probes.
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIdx].ReadinessProbe).To(Equal(&corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/api/v1/config",
Port: intstr.IntOrString{IntVal: controllers.HumioPort},
Scheme: getProbeScheme(&updatedHumioCluster),
},
},
InitialDelaySeconds: 60,
PeriodSeconds: 10,
TimeoutSeconds: 4,
SuccessThreshold: 2,
FailureThreshold: 20,
}))
Expect(pod.Spec.Containers[humioIdx].LivenessProbe).To(Equal(&corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/api/v1/config",
Port: intstr.IntOrString{IntVal: controllers.HumioPort},
Scheme: getProbeScheme(&updatedHumioCluster),
},
},
InitialDelaySeconds: 60,
PeriodSeconds: 10,
TimeoutSeconds: 4,
SuccessThreshold: 1,
FailureThreshold: 20,
}))
Expect(pod.Spec.Containers[humioIdx].StartupProbe).To(Equal(&corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/api/v1/config",
Port: intstr.IntOrString{IntVal: controllers.HumioPort},
Scheme: getProbeScheme(&updatedHumioCluster),
},
},
PeriodSeconds: 10,
TimeoutSeconds: 4,
SuccessThreshold: 1,
FailureThreshold: 30,
}))
}
})
})
Context("Humio Cluster Ekstra Kafka Configs", func() {
It("Should correctly handle extra kafka configs", func() {
key := types.NamespacedName{
Name: "humiocluster-extrakafkaconfigs",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully with extra kafka configs")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIdx].Env).To(ContainElement(corev1.EnvVar{
Name: "EXTRA_KAFKA_CONFIGS_FILE",
Value: fmt.Sprintf("/var/lib/humio/extra-kafka-configs-configmap/%s", controllers.ExtraKafkaPropertiesFilename),
}))
}
suite.UsingClusterBy(key.Name, "Confirming pods have additional volume mounts for extra kafka configs")
Eventually(func() []corev1.VolumeMount {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
return pod.Spec.Containers[humioIdx].VolumeMounts
}
return []corev1.VolumeMount{}
}, testTimeout, suite.TestInterval).Should(ContainElement(corev1.VolumeMount{
Name: "extra-kafka-configs",
ReadOnly: true,
MountPath: "/var/lib/humio/extra-kafka-configs-configmap",
}))
suite.UsingClusterBy(key.Name, "Confirming pods have additional volumes for extra kafka configs")
mode := int32(420)
Eventually(func() []corev1.Volume {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
return pod.Spec.Volumes
}
return []corev1.Volume{}
}, testTimeout, suite.TestInterval).Should(ContainElement(corev1.Volume{
Name: "extra-kafka-configs",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetExtraKafkaConfigsConfigMapName(),
},
DefaultMode: &mode,
},
},
}))
suite.UsingClusterBy(key.Name, "Confirming config map contains desired extra kafka configs")
configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetExtraKafkaConfigsConfigMapName(), key.Namespace)
Expect(configMap.Data[controllers.ExtraKafkaPropertiesFilename]).To(Equal(toCreate.Spec.ExtraKafkaConfigs))
suite.UsingClusterBy(key.Name, "Removing extra kafka configs")
var updatedHumioCluster humiov1alpha1.HumioCluster
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.ExtraKafkaConfigs = ""
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming pods do not have environment variable enabling extra kafka configs")
Eventually(func() []corev1.EnvVar {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
return pod.Spec.Containers[humioIdx].Env
}
return []corev1.EnvVar{}
}, testTimeout, suite.TestInterval).ShouldNot(ContainElement(corev1.EnvVar{
Name: "EXTRA_KAFKA_CONFIGS_FILE",
Value: fmt.Sprintf("/var/lib/humio/extra-kafka-configs-configmap/%s", controllers.ExtraKafkaPropertiesFilename),
}))
suite.UsingClusterBy(key.Name, "Confirming pods do not have additional volume mounts for extra kafka configs")
Eventually(func() []corev1.VolumeMount {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
return pod.Spec.Containers[humioIdx].VolumeMounts
}
return []corev1.VolumeMount{}
}, testTimeout, suite.TestInterval).ShouldNot(ContainElement(corev1.VolumeMount{
Name: "extra-kafka-configs",
ReadOnly: true,
MountPath: "/var/lib/humio/extra-kafka-configs-configmap",
}))
suite.UsingClusterBy(key.Name, "Confirming pods do not have additional volumes for extra kafka configs")
Eventually(func() []corev1.Volume {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
return pod.Spec.Volumes
}
return []corev1.Volume{}
}, testTimeout, suite.TestInterval).ShouldNot(ContainElement(corev1.Volume{
Name: "extra-kafka-configs",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetExtraKafkaConfigsConfigMapName(),
},
DefaultMode: &mode,
},
},
}))
})
})
Context("Humio Cluster View Group Permissions", func() {
It("Should correctly handle view group permissions", func() {
key := types.NamespacedName{
Name: "humiocluster-vgp",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.ViewGroupPermissions = `
{
"views": {
"REPO1": {
"GROUP1": {
"queryPrefix": "QUERY1",
"canEditDashboards": true
},
"GROUP2": {
"queryPrefix": "QUERY2",
"canEditDashboards": false
}
},
"REPO2": {
"GROUP2": {
"queryPrefix": "QUERY3"
},
"GROUP3": {
"queryPrefix": "QUERY4"
}
}
}
}
`
suite.UsingClusterBy(key.Name, "Creating the cluster successfully with view group permissions")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
suite.UsingClusterBy(key.Name, "Confirming config map was created")
Eventually(func() error {
_, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.ViewGroupPermissionsConfigMapName(toCreate), toCreate.Namespace)
return err
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming pods have the expected environment variable, volume and volume mounts")
mode := int32(420)
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIdx].Env).To(ContainElement(corev1.EnvVar{
Name: "READ_GROUP_PERMISSIONS_FROM_FILE",
Value: "true",
}))
Expect(pod.Spec.Containers[humioIdx].VolumeMounts).To(ContainElement(corev1.VolumeMount{
Name: "view-group-permissions",
ReadOnly: true,
MountPath: fmt.Sprintf("%s/%s", controllers.HumioDataPath, controllers.ViewGroupPermissionsFilename),
SubPath: controllers.ViewGroupPermissionsFilename,
}))
Expect(pod.Spec.Volumes).To(ContainElement(corev1.Volume{
Name: "view-group-permissions",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: controllers.ViewGroupPermissionsConfigMapName(toCreate),
},
DefaultMode: &mode,
},
},
}))
}
suite.UsingClusterBy(key.Name, "Confirming config map contains desired view group permissions")
configMap, _ := kubernetes.GetConfigMap(ctx, k8sClient, controllers.ViewGroupPermissionsConfigMapName(toCreate), key.Namespace)
Expect(configMap.Data[controllers.ViewGroupPermissionsFilename]).To(Equal(toCreate.Spec.ViewGroupPermissions))
suite.UsingClusterBy(key.Name, "Removing view group permissions")
var updatedHumioCluster humiov1alpha1.HumioCluster
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.ViewGroupPermissions = ""
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming pods do not have environment variable enabling view group permissions")
Eventually(func() []corev1.EnvVar {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
return pod.Spec.Containers[humioIdx].Env
}
return []corev1.EnvVar{}
}, testTimeout, suite.TestInterval).ShouldNot(ContainElement(corev1.EnvVar{
Name: "READ_GROUP_PERMISSIONS_FROM_FILE",
Value: "true",
}))
suite.UsingClusterBy(key.Name, "Confirming pods do not have additional volume mounts for view group permissions")
Eventually(func() []corev1.VolumeMount {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
return pod.Spec.Containers[humioIdx].VolumeMounts
}
return []corev1.VolumeMount{}
}, testTimeout, suite.TestInterval).ShouldNot(ContainElement(corev1.VolumeMount{
Name: "view-group-permissions",
ReadOnly: true,
MountPath: fmt.Sprintf("%s/%s", controllers.HumioDataPath, controllers.ViewGroupPermissionsFilename),
SubPath: controllers.ViewGroupPermissionsFilename,
}))
suite.UsingClusterBy(key.Name, "Confirming pods do not have additional volumes for view group permissions")
Eventually(func() []corev1.Volume {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
return pod.Spec.Volumes
}
return []corev1.Volume{}
}, testTimeout, suite.TestInterval).ShouldNot(ContainElement(corev1.Volume{
Name: "view-group-permissions",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: controllers.ViewGroupPermissionsConfigMapName(toCreate),
},
DefaultMode: &mode,
},
},
}))
suite.UsingClusterBy(key.Name, "Confirming config map was cleaned up")
Eventually(func() bool {
_, err := kubernetes.GetConfigMap(ctx, k8sClient, controllers.ViewGroupPermissionsConfigMapName(toCreate), toCreate.Namespace)
return k8serrors.IsNotFound(err)
}, testTimeout, suite.TestInterval).Should(BeTrue())
})
})
Context("Humio Cluster Persistent Volumes", func() {
It("Should correctly handle persistent volumes", func() {
key := types.NamespacedName{
Name: "humiocluster-pvc",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.NodeCount = helpers.IntPtr(2)
toCreate.Spec.DataVolumePersistentVolumeClaimSpecTemplate = corev1.PersistentVolumeClaimSpec{}
toCreate.Spec.DataVolumeSource = corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
}
suite.UsingClusterBy(key.Name, "Bootstrapping the cluster successfully without persistent volumes")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
Expect(kubernetes.ListPersistentVolumeClaims(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())).To(HaveLen(0))
suite.UsingClusterBy(key.Name, "Updating cluster to use persistent volumes")
var updatedHumioCluster humiov1alpha1.HumioCluster
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.DataVolumeSource = corev1.VolumeSource{}
updatedHumioCluster.Spec.DataVolumePersistentVolumeClaimSpecTemplate = corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: resource.MustParse("10Gi"),
},
},
}
return k8sClient.Update(ctx, &updatedHumioCluster)
}).Should(Succeed())
Eventually(func() ([]corev1.PersistentVolumeClaim, error) {
return kubernetes.ListPersistentVolumeClaims(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
}, testTimeout, suite.TestInterval).Should(HaveLen(*toCreate.Spec.NodeCount))
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRestarting))
suite.UsingClusterBy(key.Name, "Restarting the cluster in a rolling fashion")
ensurePodsRollingRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateRunning))
suite.UsingClusterBy(key.Name, "Confirming pods are using PVC's and no PVC is left unused")
pvcList, _ := kubernetes.ListPersistentVolumeClaims(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
foundPodList, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range foundPodList {
_, err := controllers.FindPvcForPod(pvcList, pod)
Expect(err).ShouldNot(HaveOccurred())
}
_, err := controllers.FindNextAvailablePvc(pvcList, foundPodList)
Expect(err).Should(HaveOccurred())
})
})
Context("Humio Cluster Extra Volumes", func() {
It("Should correctly handle extra volumes", func() {
key := types.NamespacedName{
Name: "humiocluster-extra-volumes",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
initialExpectedVolumesCount := 6
initialExpectedVolumeMountsCount := 4
humioVersion, _ := controllers.HumioVersionFromString(toCreate.Spec.Image)
if ok, _ := humioVersion.AtLeast(controllers.HumioVersionWithNewTmpDir); !ok {
initialExpectedVolumesCount += 1
initialExpectedVolumeMountsCount += 1
}
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
// if we run on a real cluster we have TLS enabled (using 2 volumes),
// and k8s will automatically inject a service account token adding one more
initialExpectedVolumesCount += 3
initialExpectedVolumeMountsCount += 2
}
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
Expect(pod.Spec.Volumes).To(HaveLen(initialExpectedVolumesCount))
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIdx].VolumeMounts).To(HaveLen(initialExpectedVolumeMountsCount))
}
suite.UsingClusterBy(key.Name, "Adding additional volumes")
var updatedHumioCluster humiov1alpha1.HumioCluster
mode := int32(420)
extraVolume := corev1.Volume{
Name: "gcp-storage-account-json-file",
VolumeSource: corev1.VolumeSource{
Secret: &corev1.SecretVolumeSource{
SecretName: "gcp-storage-account-json-file",
DefaultMode: &mode,
},
},
}
extraVolumeMount := corev1.VolumeMount{
Name: "gcp-storage-account-json-file",
MountPath: "/var/lib/humio/gcp-storage-account-json-file",
ReadOnly: true,
}
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.ExtraVolumes = []corev1.Volume{extraVolume}
updatedHumioCluster.Spec.ExtraHumioVolumeMounts = []corev1.VolumeMount{extraVolumeMount}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
Eventually(func() []corev1.Volume {
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
return pod.Spec.Volumes
}
return []corev1.Volume{}
}, testTimeout, suite.TestInterval).Should(HaveLen(initialExpectedVolumesCount + 1))
Eventually(func() []corev1.VolumeMount {
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
return pod.Spec.Containers[humioIdx].VolumeMounts
}
return []corev1.VolumeMount{}
}, testTimeout, suite.TestInterval).Should(HaveLen(initialExpectedVolumeMountsCount + 1))
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
Expect(pod.Spec.Volumes).Should(ContainElement(extraVolume))
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(pod.Spec.Containers[humioIdx].VolumeMounts).Should(ContainElement(extraVolumeMount))
}
})
})
Context("Humio Cluster Custom Path", func() {
It("Should correctly handle custom paths with ingress disabled", func() {
key := types.NamespacedName{
Name: "humiocluster-custom-path-ing-disabled",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
protocol := "http"
if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" {
protocol = "https"
}
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
suite.UsingClusterBy(key.Name, "Confirming PUBLIC_URL is set to default value and PROXY_PREFIX_URL is not set")
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(controllers.EnvVarValue(pod.Spec.Containers[humioIdx].Env, "PUBLIC_URL")).Should(Equal(fmt.Sprintf("%s://$(THIS_POD_IP):$(HUMIO_PORT)", protocol)))
Expect(controllers.EnvVarHasKey(pod.Spec.Containers[humioIdx].Env, "PROXY_PREFIX_URL")).To(BeFalse())
}
suite.UsingClusterBy(key.Name, "Updating humio cluster path")
var updatedHumioCluster humiov1alpha1.HumioCluster
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.Path = "/logs"
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming PROXY_PREFIX_URL have been configured on all pods")
Eventually(func() bool {
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
if !controllers.EnvVarHasKey(pod.Spec.Containers[humioIdx].Env, "PROXY_PREFIX_URL") {
return false
}
}
return true
}, testTimeout, suite.TestInterval).Should(BeTrue())
suite.UsingClusterBy(key.Name, "Confirming PUBLIC_URL and PROXY_PREFIX_URL have been correctly configured")
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(controllers.EnvVarValue(pod.Spec.Containers[humioIdx].Env, "PUBLIC_URL")).Should(Equal(fmt.Sprintf("%s://$(THIS_POD_IP):$(HUMIO_PORT)/logs", protocol)))
Expect(controllers.EnvVarHasValue(pod.Spec.Containers[humioIdx].Env, "PROXY_PREFIX_URL", "/logs")).To(BeTrue())
}
suite.UsingClusterBy(key.Name, "Restarting the cluster in a rolling fashion")
ensurePodsRollingRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
suite.UsingClusterBy(key.Name, "Confirming cluster returns to Running state")
Eventually(func() string {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateRunning))
})
It("Should correctly handle custom paths with ingress enabled", func() {
key := types.NamespacedName{
Name: "humiocluster-custom-path-ing-enabled",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.Hostname = "test-cluster.humio.com"
toCreate.Spec.ESHostname = "test-cluster-es.humio.com"
toCreate.Spec.Ingress = humiov1alpha1.HumioClusterIngressSpec{
Enabled: true,
Controller: "nginx",
}
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
suite.UsingClusterBy(key.Name, "Confirming PUBLIC_URL is set to default value and PROXY_PREFIX_URL is not set")
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(controllers.EnvVarValue(pod.Spec.Containers[humioIdx].Env, "PUBLIC_URL")).Should(Equal("https://test-cluster.humio.com"))
Expect(controllers.EnvVarHasKey(pod.Spec.Containers[humioIdx].Env, "PROXY_PREFIX_URL")).To(BeFalse())
}
suite.UsingClusterBy(key.Name, "Updating humio cluster path")
var updatedHumioCluster humiov1alpha1.HumioCluster
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.Path = "/logs"
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming PROXY_PREFIX_URL have been configured on all pods")
Eventually(func() bool {
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
if !controllers.EnvVarHasKey(pod.Spec.Containers[humioIdx].Env, "PROXY_PREFIX_URL") {
return false
}
}
return true
}, testTimeout, suite.TestInterval).Should(BeTrue())
suite.UsingClusterBy(key.Name, "Confirming PUBLIC_URL and PROXY_PREFIX_URL have been correctly configured")
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
Expect(controllers.EnvVarValue(pod.Spec.Containers[humioIdx].Env, "PUBLIC_URL")).Should(Equal("https://test-cluster.humio.com/logs"))
Expect(controllers.EnvVarHasValue(pod.Spec.Containers[humioIdx].Env, "PROXY_PREFIX_URL", "/logs")).To(BeTrue())
}
suite.UsingClusterBy(key.Name, "Restarting the cluster in a rolling fashion")
ensurePodsRollingRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
suite.UsingClusterBy(key.Name, "Confirming cluster returns to Running state")
Eventually(func() string {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, updatedHumioCluster.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster).GetPodLabels())
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateRunning))
})
})
Context("Humio Cluster Config Errors", func() {
It("Creating cluster with conflicting volume mount name", func() {
key := types.NamespacedName{
Name: "humiocluster-err-volmnt-name",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.HumioNodeSpec.ExtraHumioVolumeMounts = []corev1.VolumeMount{
{
Name: "humio-data",
},
}
ctx := context.Background()
Expect(k8sClient.Create(ctx, toCreate)).Should(Succeed())
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
var updatedHumioCluster humiov1alpha1.HumioCluster
suite.UsingClusterBy(key.Name, "should indicate cluster configuration error")
Eventually(func() string {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateConfigError))
suite.UsingClusterBy(key.Name, "should describe cluster configuration error")
Eventually(func() string {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return updatedHumioCluster.Status.Message
}, testTimeout, suite.TestInterval).Should(Equal("failed to validate pod spec: extraHumioVolumeMount conflicts with existing name: humio-data"))
})
It("Creating cluster with conflicting volume mount mount path", func() {
key := types.NamespacedName{
Name: "humiocluster-err-mount-path",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.HumioNodeSpec.ExtraHumioVolumeMounts = []corev1.VolumeMount{
{
Name: "something-unique",
MountPath: controllers.HumioDataPath,
},
}
ctx := context.Background()
Expect(k8sClient.Create(ctx, toCreate)).Should(Succeed())
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
var updatedHumioCluster humiov1alpha1.HumioCluster
suite.UsingClusterBy(key.Name, "should indicate cluster configuration error")
Eventually(func() string {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateConfigError))
suite.UsingClusterBy(key.Name, "should describe cluster configuration error")
Eventually(func() string {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return updatedHumioCluster.Status.Message
}, testTimeout, suite.TestInterval).Should(Equal("failed to validate pod spec: extraHumioVolumeMount conflicts with existing mount path: /data/humio-data"))
})
It("Creating cluster with conflicting volume name", func() {
key := types.NamespacedName{
Name: "humiocluster-err-vol-name",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.HumioNodeSpec.ExtraVolumes = []corev1.Volume{
{
Name: "humio-data",
},
}
ctx := context.Background()
Expect(k8sClient.Create(ctx, toCreate)).Should(Succeed())
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
var updatedHumioCluster humiov1alpha1.HumioCluster
suite.UsingClusterBy(key.Name, "should indicate cluster configuration error")
Eventually(func() string {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateConfigError))
suite.UsingClusterBy(key.Name, "should describe cluster configuration error")
Eventually(func() string {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return updatedHumioCluster.Status.Message
}, testTimeout, suite.TestInterval).Should(Equal("failed to validate pod spec: extraVolume conflicts with existing name: humio-data"))
})
It("Creating cluster with higher replication factor than nodes", func() {
key := types.NamespacedName{
Name: "humiocluster-err-repl-factor",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.TargetReplicationFactor = 2
toCreate.Spec.HumioNodeSpec.NodeCount = helpers.IntPtr(1)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
Expect(k8sClient.Create(ctx, toCreate)).Should(Succeed())
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
var updatedHumioCluster humiov1alpha1.HumioCluster
suite.UsingClusterBy(key.Name, "should indicate cluster configuration error")
Eventually(func() string {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateConfigError))
suite.UsingClusterBy(key.Name, "should describe cluster configuration error")
Eventually(func() string {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return updatedHumioCluster.Status.Message
}, testTimeout, suite.TestInterval).Should(Equal("node count must be equal to or greater than the target replication factor: nodeCount is too low"))
})
It("Creating cluster with conflicting storage configuration", func() {
key := types.NamespacedName{
Name: "humiocluster-err-conflict-storage-conf",
Namespace: testProcessNamespace,
}
toCreate := &humiov1alpha1.HumioCluster{
ObjectMeta: metav1.ObjectMeta{
Name: key.Name,
Namespace: key.Namespace,
},
Spec: humiov1alpha1.HumioClusterSpec{
HumioNodeSpec: humiov1alpha1.HumioNodeSpec{
DataVolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
DataVolumePersistentVolumeClaimSpecTemplate: corev1.PersistentVolumeClaimSpec{
AccessModes: []corev1.PersistentVolumeAccessMode{
corev1.ReadWriteOnce,
},
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceStorage: *resource.NewQuantity(10*1024*1024*1024, resource.BinarySI),
},
},
},
},
},
}
ctx := context.Background()
Expect(k8sClient.Create(ctx, toCreate)).Should(Succeed())
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
var updatedHumioCluster humiov1alpha1.HumioCluster
suite.UsingClusterBy(key.Name, "should indicate cluster configuration error")
Eventually(func() string {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateConfigError))
suite.UsingClusterBy(key.Name, "should describe cluster configuration error")
Eventually(func() string {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return updatedHumioCluster.Status.Message
}, testTimeout, suite.TestInterval).Should(Equal("conflicting storage configuration provided: exactly one of dataVolumeSource and dataVolumePersistentVolumeClaimSpecTemplate must be set"))
})
It("Creating cluster with conflicting storage configuration", func() {
key := types.NamespacedName{
Name: "humiocluster-err-no-storage-conf",
Namespace: testProcessNamespace,
}
toCreate := &humiov1alpha1.HumioCluster{
ObjectMeta: metav1.ObjectMeta{
Name: key.Name,
Namespace: key.Namespace,
},
Spec: humiov1alpha1.HumioClusterSpec{},
}
ctx := context.Background()
Expect(k8sClient.Create(ctx, toCreate)).Should(Succeed())
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
var updatedHumioCluster humiov1alpha1.HumioCluster
suite.UsingClusterBy(key.Name, "should indicate cluster configuration error")
Eventually(func() string {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateConfigError))
suite.UsingClusterBy(key.Name, "should describe cluster configuration error")
Eventually(func() string {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return updatedHumioCluster.Status.Message
}, testTimeout, suite.TestInterval).Should(Equal("no storage configuration provided: exactly one of dataVolumeSource and dataVolumePersistentVolumeClaimSpecTemplate must be set"))
})
})
Context("Humio Cluster Without TLS for Ingress", func() {
It("Creating cluster without TLS for ingress", func() {
key := types.NamespacedName{
Name: "humiocluster-without-tls-ingress",
Namespace: testProcessNamespace,
}
tlsDisabled := false
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.Ingress.Enabled = true
toCreate.Spec.Ingress.Controller = "nginx"
toCreate.Spec.Ingress.TLS = &tlsDisabled
toCreate.Spec.Hostname = "example.humio.com"
toCreate.Spec.ESHostname = "es-example.humio.com"
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
suite.UsingClusterBy(key.Name, "Confirming ingress objects do not have TLS configured")
var ingresses []networkingv1.Ingress
Eventually(func() ([]networkingv1.Ingress, error) {
return kubernetes.ListIngresses(ctx, k8sClient, toCreate.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
}, testTimeout, suite.TestInterval).Should(HaveLen(4))
ingresses, _ = kubernetes.ListIngresses(ctx, k8sClient, toCreate.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
for _, ingress := range ingresses {
Expect(ingress.Spec.TLS).To(BeNil())
}
})
})
Context("Humio Cluster Ingress", func() {
It("Should correctly handle ingress when toggling both ESHostname and Hostname on/off", func() {
key := types.NamespacedName{
Name: "humiocluster-ingress-hostname",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.Hostname = ""
toCreate.Spec.ESHostname = ""
toCreate.Spec.Ingress = humiov1alpha1.HumioClusterIngressSpec{
Enabled: true,
Controller: "nginx",
}
suite.UsingClusterBy(key.Name, "Creating the cluster successfully without any Hostnames defined")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
suite.UsingClusterBy(key.Name, "Confirming we did not create any ingresses")
var foundIngressList []networkingv1.Ingress
Eventually(func() []networkingv1.Ingress {
foundIngressList, _ = kubernetes.ListIngresses(ctx, k8sClient, key.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
return foundIngressList
}, testTimeout, suite.TestInterval).Should(HaveLen(0))
suite.UsingClusterBy(key.Name, "Setting the Hostname")
var updatedHumioCluster humiov1alpha1.HumioCluster
hostname := "test-cluster.humio.com"
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.Hostname = hostname
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming we only created ingresses with expected hostname")
foundIngressList = []networkingv1.Ingress{}
Eventually(func() []networkingv1.Ingress {
foundIngressList, _ = kubernetes.ListIngresses(ctx, k8sClient, key.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
return foundIngressList
}, testTimeout, suite.TestInterval).Should(HaveLen(3))
foundIngressList, _ = kubernetes.ListIngresses(ctx, k8sClient, toCreate.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
for _, ingress := range foundIngressList {
for _, rule := range ingress.Spec.Rules {
Expect(rule.Host).To(Equal(updatedHumioCluster.Spec.Hostname))
}
}
suite.UsingClusterBy(key.Name, "Setting the ESHostname")
updatedHumioCluster = humiov1alpha1.HumioCluster{}
esHostname := "test-cluster-es.humio.com"
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.ESHostname = esHostname
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming ingresses for ES Hostname gets created")
Eventually(func() []networkingv1.Ingress {
foundIngressList, _ = kubernetes.ListIngresses(ctx, k8sClient, key.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
return foundIngressList
}, testTimeout, suite.TestInterval).Should(HaveLen(4))
var ingressHostnames []string
for _, ingress := range foundIngressList {
for _, rule := range ingress.Spec.Rules {
ingressHostnames = append(ingressHostnames, rule.Host)
}
}
Expect(ingressHostnames).To(ContainElement(esHostname))
suite.UsingClusterBy(key.Name, "Removing the ESHostname")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.ESHostname = ""
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming ingresses for ES Hostname gets removed")
Eventually(func() []networkingv1.Ingress {
foundIngressList, _ = kubernetes.ListIngresses(ctx, k8sClient, key.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
return foundIngressList
}, testTimeout, suite.TestInterval).Should(HaveLen(3))
ingressHostnames = []string{}
for _, ingress := range foundIngressList {
for _, rule := range ingress.Spec.Rules {
ingressHostnames = append(ingressHostnames, rule.Host)
}
}
Expect(ingressHostnames).ToNot(ContainElement(esHostname))
suite.UsingClusterBy(key.Name, "Creating the hostname secret")
secretKeyRef := &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: "hostname",
},
Key: "humio-hostname",
}
updatedHostname := "test-cluster-hostname-ref.humio.com"
hostnameSecret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretKeyRef.Name,
Namespace: key.Namespace,
},
StringData: map[string]string{secretKeyRef.Key: updatedHostname},
Type: corev1.SecretTypeOpaque,
}
Expect(k8sClient.Create(ctx, &hostnameSecret)).To(Succeed())
suite.UsingClusterBy(key.Name, "Setting the HostnameSource")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.Hostname = ""
updatedHumioCluster.Spec.HostnameSource.SecretKeyRef = secretKeyRef
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming we only created ingresses with expected hostname")
foundIngressList = []networkingv1.Ingress{}
Eventually(func() []networkingv1.Ingress {
foundIngressList, _ = kubernetes.ListIngresses(ctx, k8sClient, key.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
return foundIngressList
}, testTimeout, suite.TestInterval).Should(HaveLen(3))
Eventually(func() string {
ingressHosts := make(map[string]interface{})
foundIngressList, _ = kubernetes.ListIngresses(ctx, k8sClient, toCreate.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
for _, ingress := range foundIngressList {
for _, rule := range ingress.Spec.Rules {
ingressHosts[rule.Host] = nil
}
}
if len(ingressHosts) == 1 {
for k := range ingressHosts {
return k
}
}
return fmt.Sprintf("%#v", ingressHosts)
}, testTimeout, suite.TestInterval).Should(Equal(updatedHostname))
suite.UsingClusterBy(key.Name, "Removing the HostnameSource")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.HostnameSource.SecretKeyRef = nil
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Deleting the hostname secret")
Expect(k8sClient.Delete(ctx, &hostnameSecret)).To(Succeed())
suite.UsingClusterBy(key.Name, "Creating the es hostname secret")
secretKeyRef = &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: "es-hostname",
},
Key: "humio-es-hostname",
}
updatedESHostname := "test-cluster-es-hostname-ref.humio.com"
esHostnameSecret := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretKeyRef.Name,
Namespace: key.Namespace,
},
StringData: map[string]string{secretKeyRef.Key: updatedESHostname},
Type: corev1.SecretTypeOpaque,
}
Expect(k8sClient.Create(ctx, &esHostnameSecret)).To(Succeed())
suite.UsingClusterBy(key.Name, "Setting the ESHostnameSource")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.ESHostname = ""
updatedHumioCluster.Spec.ESHostnameSource.SecretKeyRef = secretKeyRef
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming we only created ingresses with expected es hostname")
foundIngressList = []networkingv1.Ingress{}
Eventually(func() []networkingv1.Ingress {
foundIngressList, _ = kubernetes.ListIngresses(ctx, k8sClient, key.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
return foundIngressList
}, testTimeout, suite.TestInterval).Should(HaveLen(1))
Eventually(func() string {
ingressHosts := make(map[string]interface{})
foundIngressList, _ = kubernetes.ListIngresses(ctx, k8sClient, toCreate.Namespace, kubernetes.MatchingLabelsForHumio(toCreate.Name))
for _, ingress := range foundIngressList {
for _, rule := range ingress.Spec.Rules {
ingressHosts[rule.Host] = nil
}
}
if len(ingressHosts) == 1 {
for k := range ingressHosts {
return k
}
}
return fmt.Sprintf("%#v", ingressHosts)
}, testTimeout, suite.TestInterval).Should(Equal(updatedESHostname))
suite.UsingClusterBy(key.Name, "Removing the ESHostnameSource")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
updatedHumioCluster.Spec.ESHostnameSource.SecretKeyRef = nil
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Deleting the es hostname secret")
Expect(k8sClient.Delete(ctx, &esHostnameSecret)).To(Succeed())
})
})
Context("Humio Cluster with non-existent custom service accounts", func() {
It("Should correctly handle non-existent humio service account by marking cluster as ConfigError", func() {
key := types.NamespacedName{
Name: "humiocluster-err-humio-service-account",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.HumioServiceAccountName = "non-existent-humio-service-account"
suite.UsingClusterBy(key.Name, "Creating cluster with non-existent service accounts")
ctx := context.Background()
Expect(k8sClient.Create(ctx, toCreate)).Should(Succeed())
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
Eventually(func() string {
var cluster humiov1alpha1.HumioCluster
err := k8sClient.Get(ctx, key, &cluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return cluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateConfigError))
})
It("Should correctly handle non-existent init service account by marking cluster as ConfigError", func() {
key := types.NamespacedName{
Name: "humiocluster-err-init-service-account",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.HumioServiceAccountName = "non-existent-init-service-account"
suite.UsingClusterBy(key.Name, "Creating cluster with non-existent service accounts")
ctx := context.Background()
Expect(k8sClient.Create(ctx, toCreate)).Should(Succeed())
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
Eventually(func() string {
var cluster humiov1alpha1.HumioCluster
err := k8sClient.Get(ctx, key, &cluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return cluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateConfigError))
})
It("Should correctly handle non-existent auth service account by marking cluster as ConfigError", func() {
key := types.NamespacedName{
Name: "humiocluster-err-auth-service-account",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.HumioServiceAccountName = "non-existent-auth-service-account"
suite.UsingClusterBy(key.Name, "Creating cluster with non-existent service accounts")
ctx := context.Background()
Expect(k8sClient.Create(ctx, toCreate)).Should(Succeed())
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
Eventually(func() string {
var cluster humiov1alpha1.HumioCluster
err := k8sClient.Get(ctx, key, &cluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return cluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo(humiov1alpha1.HumioClusterStateConfigError))
})
})
Context("Humio Cluster With Custom Service Accounts", func() {
It("Creating cluster with custom service accounts", func() {
key := types.NamespacedName{
Name: "humiocluster-custom-service-accounts",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.InitServiceAccountName = "init-custom-service-account"
toCreate.Spec.AuthServiceAccountName = "auth-custom-service-account"
toCreate.Spec.HumioServiceAccountName = "humio-custom-service-account"
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
suite.UsingClusterBy(key.Name, "Confirming init container is using the correct service account")
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetInitContainerIndexByName(pod, controllers.InitContainerName)
var serviceAccountSecretVolumeName string
for _, volumeMount := range pod.Spec.InitContainers[humioIdx].VolumeMounts {
if volumeMount.MountPath == "/var/run/secrets/kubernetes.io/serviceaccount" {
serviceAccountSecretVolumeName = volumeMount.Name
}
}
Expect(serviceAccountSecretVolumeName).To(Not(BeEmpty()))
for _, volume := range pod.Spec.Volumes {
if volume.Name == serviceAccountSecretVolumeName {
secret, err := kubernetes.GetSecret(ctx, k8sClient, volume.Secret.SecretName, key.Namespace)
Expect(err).ShouldNot(HaveOccurred())
Expect(secret.ObjectMeta.Annotations["kubernetes.io/service-account.name"]).To(Equal(toCreate.Spec.InitServiceAccountName))
}
}
}
suite.UsingClusterBy(key.Name, "Confirming auth container is using the correct service account")
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.AuthContainerName)
var serviceAccountSecretVolumeName string
for _, volumeMount := range pod.Spec.Containers[humioIdx].VolumeMounts {
if volumeMount.MountPath == "/var/run/secrets/kubernetes.io/serviceaccount" {
serviceAccountSecretVolumeName = volumeMount.Name
}
}
Expect(serviceAccountSecretVolumeName).To(Not(BeEmpty()))
for _, volume := range pod.Spec.Volumes {
if volume.Name == serviceAccountSecretVolumeName {
secret, err := kubernetes.GetSecret(ctx, k8sClient, volume.Secret.SecretName, key.Namespace)
Expect(err).ShouldNot(HaveOccurred())
Expect(secret.ObjectMeta.Annotations["kubernetes.io/service-account.name"]).To(Equal(toCreate.Spec.AuthServiceAccountName))
}
}
}
suite.UsingClusterBy(key.Name, "Confirming humio pod is using the correct service account")
for _, pod := range clusterPods {
Expect(pod.Spec.ServiceAccountName).To(Equal(toCreate.Spec.HumioServiceAccountName))
}
})
It("Creating cluster with custom service accounts sharing the same name", func() {
key := types.NamespacedName{
Name: "humiocluster-custom-sa-same-name",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.InitServiceAccountName = "custom-service-account"
toCreate.Spec.AuthServiceAccountName = "custom-service-account"
toCreate.Spec.HumioServiceAccountName = "custom-service-account"
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
suite.UsingClusterBy(key.Name, "Confirming init container is using the correct service account")
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetInitContainerIndexByName(pod, controllers.InitContainerName)
var serviceAccountSecretVolumeName string
for _, volumeMount := range pod.Spec.InitContainers[humioIdx].VolumeMounts {
if volumeMount.MountPath == "/var/run/secrets/kubernetes.io/serviceaccount" {
serviceAccountSecretVolumeName = volumeMount.Name
}
}
Expect(serviceAccountSecretVolumeName).To(Not(BeEmpty()))
for _, volume := range pod.Spec.Volumes {
if volume.Name == serviceAccountSecretVolumeName {
secret, err := kubernetes.GetSecret(ctx, k8sClient, volume.Secret.SecretName, key.Namespace)
Expect(err).ShouldNot(HaveOccurred())
Expect(secret.ObjectMeta.Annotations["kubernetes.io/service-account.name"]).To(Equal(toCreate.Spec.InitServiceAccountName))
}
}
}
suite.UsingClusterBy(key.Name, "Confirming auth container is using the correct service account")
for _, pod := range clusterPods {
humioIdx, _ := kubernetes.GetContainerIndexByName(pod, controllers.AuthContainerName)
var serviceAccountSecretVolumeName string
for _, volumeMount := range pod.Spec.Containers[humioIdx].VolumeMounts {
if volumeMount.MountPath == "/var/run/secrets/kubernetes.io/serviceaccount" {
serviceAccountSecretVolumeName = volumeMount.Name
}
}
Expect(serviceAccountSecretVolumeName).To(Not(BeEmpty()))
for _, volume := range pod.Spec.Volumes {
if volume.Name == serviceAccountSecretVolumeName {
secret, err := kubernetes.GetSecret(ctx, k8sClient, volume.Secret.SecretName, key.Namespace)
Expect(err).ShouldNot(HaveOccurred())
Expect(secret.ObjectMeta.Annotations["kubernetes.io/service-account.name"]).To(Equal(toCreate.Spec.AuthServiceAccountName))
}
}
}
suite.UsingClusterBy(key.Name, "Confirming humio pod is using the correct service account")
for _, pod := range clusterPods {
Expect(pod.Spec.ServiceAccountName).To(Equal(toCreate.Spec.HumioServiceAccountName))
}
})
})
Context("Humio Cluster With Service Annotations", func() {
It("Creating cluster with custom service annotations", func() {
key := types.NamespacedName{
Name: "humiocluster-custom-svc-annotations",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.HumioServiceAnnotations = map[string]string{
"service.beta.kubernetes.io/aws-load-balancer-type": "nlb",
"service.beta.kubernetes.io/aws-load-balancer-cross-zone-load-balancing-enabled": "false",
"service.beta.kubernetes.io/aws-load-balancer-ssl-cert": "arn:aws:acm:region:account:certificate/123456789012-1234-1234-1234-12345678",
"service.beta.kubernetes.io/aws-load-balancer-backend-protocol": "ssl",
"service.beta.kubernetes.io/aws-load-balancer-ssl-ports": "443",
"service.beta.kubernetes.io/aws-load-balancer-internal": "0.0.0.0/0",
}
toCreate.Spec.HumioServiceAnnotations = map[string]string{
"custom": "annotation",
}
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
suite.UsingClusterBy(key.Name, "Confirming service was created using the correct annotations")
svc, err := kubernetes.GetService(ctx, k8sClient, toCreate.Name, toCreate.Namespace)
Expect(err).ToNot(HaveOccurred())
for k, v := range toCreate.Spec.HumioServiceAnnotations {
Expect(svc.Annotations).To(HaveKeyWithValue(k, v))
}
suite.UsingClusterBy(key.Name, "Confirming the headless service was created using the correct annotations")
headlessSvc, err := kubernetes.GetService(ctx, k8sClient, toCreate.Name, toCreate.Namespace)
Expect(err).ToNot(HaveOccurred())
for k, v := range toCreate.Spec.HumioHeadlessServiceAnnotations {
Expect(headlessSvc.Annotations).To(HaveKeyWithValue(k, v))
}
})
})
Context("Humio Cluster With Custom Tolerations", func() {
It("Creating cluster with custom tolerations", func() {
key := types.NamespacedName{
Name: "humiocluster-custom-tolerations",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.Tolerations = []corev1.Toleration{
{
Key: "key",
Operator: "Equal",
Value: "value",
Effect: "NoSchedule",
},
}
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
suite.UsingClusterBy(key.Name, "Confirming the humio pods use the requested tolerations")
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
Expect(pod.Spec.Tolerations).To(ContainElement(toCreate.Spec.Tolerations[0]))
}
})
})
Context("Humio Cluster With Service Labels", func() {
It("Creating cluster with custom service labels", func() {
key := types.NamespacedName{
Name: "humiocluster-custom-svc-labels",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.HumioServiceLabels = map[string]string{
"mirror.linkerd.io/exported": "true",
}
toCreate.Spec.HumioHeadlessServiceLabels = map[string]string{
"custom": "label",
}
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
suite.UsingClusterBy(key.Name, "Confirming service was created using the correct annotations")
svc, err := kubernetes.GetService(ctx, k8sClient, toCreate.Name, toCreate.Namespace)
Expect(err).ToNot(HaveOccurred())
for k, v := range toCreate.Spec.HumioServiceLabels {
Expect(svc.Labels).To(HaveKeyWithValue(k, v))
}
suite.UsingClusterBy(key.Name, "Confirming the headless service was created using the correct labels")
headlessSvc, err := kubernetes.GetService(ctx, k8sClient, fmt.Sprintf("%s-headless", toCreate.Name), toCreate.Namespace)
Expect(err).ToNot(HaveOccurred())
for k, v := range toCreate.Spec.HumioHeadlessServiceLabels {
Expect(headlessSvc.Labels).To(HaveKeyWithValue(k, v))
}
})
})
Context("Humio Cluster with shared process namespace and sidecars", func() {
It("Creating cluster without shared process namespace and sidecar", func() {
key := types.NamespacedName{
Name: "humiocluster-custom-sidecars",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.SidecarContainers = nil
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
suite.UsingClusterBy(key.Name, "Confirming the humio pods are not using shared process namespace nor additional sidecars")
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
if pod.Spec.ShareProcessNamespace != nil {
Expect(*pod.Spec.ShareProcessNamespace).To(BeFalse())
}
Expect(pod.Spec.Containers).Should(HaveLen(2))
}
suite.UsingClusterBy(key.Name, "Enabling shared process namespace and sidecars")
var updatedHumioCluster humiov1alpha1.HumioCluster
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.ShareProcessNamespace = helpers.BoolPtr(true)
updatedHumioCluster.Spec.SidecarContainers = []corev1.Container{
{
Name: "jmap",
Image: controllers.Image,
Command: []string{"/bin/sh"},
Args: []string{"-c", "HUMIO_PID=$(ps -e | grep java | awk '{print $1'}); while :; do sleep 30 ; jmap -histo:live $HUMIO_PID | head -n203 ; done"},
VolumeMounts: []corev1.VolumeMount{
{
Name: "tmp",
MountPath: controllers.TmpPath,
ReadOnly: false,
},
},
SecurityContext: &corev1.SecurityContext{
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{
"ALL",
},
},
Privileged: helpers.BoolPtr(false),
RunAsUser: helpers.Int64Ptr(65534),
RunAsNonRoot: helpers.BoolPtr(true),
ReadOnlyRootFilesystem: helpers.BoolPtr(true),
AllowPrivilegeEscalation: helpers.BoolPtr(false),
},
},
}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Confirming the humio pods use shared process namespace")
Eventually(func() bool {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
if pod.Spec.ShareProcessNamespace != nil {
return *pod.Spec.ShareProcessNamespace
}
}
return false
}, testTimeout, suite.TestInterval).Should(BeTrue())
suite.UsingClusterBy(key.Name, "Confirming pods contain the new sidecar")
Eventually(func() string {
clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
for _, container := range pod.Spec.Containers {
if container.Name == controllers.HumioContainerName {
continue
}
if container.Name == controllers.AuthContainerName {
continue
}
return container.Name
}
}
return ""
}, testTimeout, suite.TestInterval).Should(Equal("jmap"))
})
})
Context("Humio Cluster pod termination grace period", func() {
It("Should validate default configuration", func() {
key := types.NamespacedName{
Name: "humiocluster-grace-default",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
toCreate.Spec.TerminationGracePeriodSeconds = nil
suite.UsingClusterBy(key.Name, "Creating Humio cluster without a termination grace period set")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
suite.UsingClusterBy(key.Name, "Validating pod is created with the default grace period")
Eventually(func() int64 {
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
_ = suite.MarkPodsAsRunning(ctx, k8sClient, clusterPods, key.Name)
for _, pod := range clusterPods {
if pod.Spec.TerminationGracePeriodSeconds != nil {
return *pod.Spec.TerminationGracePeriodSeconds
}
}
return 0
}, testTimeout, suite.TestInterval).Should(BeEquivalentTo(300))
suite.UsingClusterBy(key.Name, "Overriding termination grace period")
var updatedHumioCluster humiov1alpha1.HumioCluster
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.TerminationGracePeriodSeconds = helpers.Int64Ptr(120)
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Validating pod is recreated using the explicitly defined grace period")
Eventually(func() int64 {
clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
for _, pod := range clusterPods {
if pod.Spec.TerminationGracePeriodSeconds != nil {
return *pod.Spec.TerminationGracePeriodSeconds
}
}
return 0
}, testTimeout, suite.TestInterval).Should(BeEquivalentTo(120))
})
})
Context("Humio Cluster install license", func() {
It("Should fail when no license is present", func() {
key := types.NamespacedName{
Name: "humiocluster-no-license",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, false)
toCreate.Spec.License = humiov1alpha1.HumioClusterLicenseSpec{}
ctx := context.Background()
Expect(k8sClient.Create(ctx, toCreate)).Should(Succeed())
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
Eventually(func() string {
var cluster humiov1alpha1.HumioCluster
err := k8sClient.Get(ctx, key, &cluster)
if err != nil && !k8serrors.IsNotFound(err) {
Expect(err).Should(Succeed())
}
return cluster.Status.State
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo("ConfigError"))
// TODO: set a valid license
// TODO: confirm cluster enters running
})
It("Should successfully install a license", func() {
key := types.NamespacedName{
Name: "humiocluster-license",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully with a license secret")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
secretName := fmt.Sprintf("%s-license", key.Name)
secretKey := "license"
var updatedHumioCluster humiov1alpha1.HumioCluster
suite.UsingClusterBy(key.Name, "Updating the HumioCluster to add broken reference to license")
Eventually(func() error {
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.License.SecretKeyRef = &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: fmt.Sprintf("%s-wrong", secretName),
},
Key: secretKey,
}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Should indicate cluster configuration error due to missing license secret")
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateConfigError))
suite.UsingClusterBy(key.Name, "Updating the HumioCluster to add a valid license")
Eventually(func() error {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
err := k8sClient.Get(ctx, key, &updatedHumioCluster)
if err != nil {
return err
}
updatedHumioCluster.Spec.License.SecretKeyRef = &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: secretName,
},
Key: secretKey,
}
return k8sClient.Update(ctx, &updatedHumioCluster)
}, testTimeout, suite.TestInterval).Should(Succeed())
suite.UsingClusterBy(key.Name, "Should indicate cluster is no longer in a configuration error state")
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateRunning))
suite.UsingClusterBy(key.Name, "Ensuring the license is updated")
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.LicenseStatus.Type
}, testTimeout, suite.TestInterval).Should(BeIdenticalTo("onprem"))
suite.UsingClusterBy(key.Name, "Updating the license secret to remove the key")
var licenseSecret corev1.Secret
Eventually(func() error {
return k8sClient.Get(ctx, types.NamespacedName{
Namespace: key.Namespace,
Name: secretName,
}, &licenseSecret)
}, testTimeout, suite.TestInterval).Should(Succeed())
Expect(k8sClient.Delete(ctx, &licenseSecret)).To(Succeed())
licenseSecretMissingKey := corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: key.Namespace,
},
StringData: map[string]string{},
Type: corev1.SecretTypeOpaque,
}
Expect(k8sClient.Create(ctx, &licenseSecretMissingKey)).To(Succeed())
suite.UsingClusterBy(key.Name, "Should indicate cluster configuration error due to missing license secret key")
Eventually(func() string {
updatedHumioCluster = humiov1alpha1.HumioCluster{}
Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
return updatedHumioCluster.Status.State
}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateConfigError))
})
})
	// Verifies that the controller resets a manually-injected ConfigError
	// status back to Running once it observes a healthy cluster.
	Context("Humio Cluster state adjustment", func() {
		It("Should successfully set proper state", func() {
			key := types.NamespacedName{
				Name:      "humiocluster-state",
				Namespace: testProcessNamespace,
			}
			toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
			suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
			ctx := context.Background()
			suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
			defer suite.CleanupCluster(ctx, k8sClient, toCreate)
			suite.UsingClusterBy(key.Name, "Ensuring the state is Running")
			var updatedHumioCluster humiov1alpha1.HumioCluster
			Eventually(func() string {
				Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
				return updatedHumioCluster.Status.State
			}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateRunning))
			// Flip the status subresource to ConfigError behind the
			// controller's back; retried because concurrent reconciles can
			// cause update conflicts.
			suite.UsingClusterBy(key.Name, "Updating the HumioCluster to ConfigError state")
			Eventually(func() error {
				updatedHumioCluster = humiov1alpha1.HumioCluster{}
				err := k8sClient.Get(ctx, key, &updatedHumioCluster)
				if err != nil {
					return err
				}
				updatedHumioCluster.Status.State = humiov1alpha1.HumioClusterStateConfigError
				return k8sClient.Status().Update(ctx, &updatedHumioCluster)
			}, testTimeout, suite.TestInterval).Should(Succeed())
			// The reconciler should notice nothing is actually wrong and
			// restore the Running state on its own.
			suite.UsingClusterBy(key.Name, "Should indicate healthy cluster resets state to Running")
			Eventually(func() string {
				updatedHumioCluster = humiov1alpha1.HumioCluster{}
				Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
				return updatedHumioCluster.Status.State
			}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateRunning))
		})
	})
	// Exercises spec.environmentVariablesSource backed by a ConfigMap:
	// a missing configmap must drive the cluster into ConfigError, and an
	// existing one must be rolled out to every pod via EnvFrom.
	Context("Humio Cluster with envSource configmap", func() {
		It("Creating cluster with envSource configmap", func() {
			key := types.NamespacedName{
				Name:      "humiocluster-env-source-configmap",
				Namespace: testProcessNamespace,
			}
			toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
			suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
			ctx := context.Background()
			suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
			defer suite.CleanupCluster(ctx, k8sClient, toCreate)
			suite.UsingClusterBy(key.Name, "Confirming the humio pods are not using env var source")
			clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
			humioIdx, err := kubernetes.GetContainerIndexByName(clusterPods[0], controllers.HumioContainerName)
			Expect(err).ToNot(HaveOccurred())
			Expect(clusterPods[0].Spec.Containers[humioIdx].EnvFrom).To(BeNil())
			// Point the spec at a configmap that does not exist yet; the
			// controller is expected to flag this rather than deploy pods.
			suite.UsingClusterBy(key.Name, "Adding missing envVarSource to pod spec")
			var updatedHumioCluster humiov1alpha1.HumioCluster
			Eventually(func() error {
				err := k8sClient.Get(ctx, key, &updatedHumioCluster)
				if err != nil {
					return err
				}
				updatedHumioCluster.Spec.EnvironmentVariablesSource = []corev1.EnvFromSource{
					{
						ConfigMapRef: &corev1.ConfigMapEnvSource{
							LocalObjectReference: corev1.LocalObjectReference{
								Name: "env-var-source-missing",
							},
						},
					},
				}
				return k8sClient.Update(ctx, &updatedHumioCluster)
			}, testTimeout, suite.TestInterval).Should(Succeed())
			suite.UsingClusterBy(key.Name, "Confirming the HumioCluster goes into ConfigError state since the configmap does not exist")
			Eventually(func() string {
				updatedHumioCluster = humiov1alpha1.HumioCluster{}
				Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
				return updatedHumioCluster.Status.State
			}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateConfigError))
			suite.UsingClusterBy(key.Name, "Creating the envVarSource configmap")
			envVarSourceConfigMap := corev1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "env-var-source",
					Namespace: key.Namespace,
				},
				Data: map[string]string{"SOME_ENV_VAR": "SOME_ENV_VALUE"},
			}
			Expect(k8sClient.Create(ctx, &envVarSourceConfigMap)).To(Succeed())
			suite.WaitForReconcileToSync(ctx, key, k8sClient, nil, testTimeout)
			// Switch the spec to the configmap that now exists; this should
			// trigger a rolling restart that injects the env vars.
			suite.UsingClusterBy(key.Name, "Updating envVarSource of pod spec")
			Eventually(func() error {
				updatedHumioCluster = humiov1alpha1.HumioCluster{}
				err := k8sClient.Get(ctx, key, &updatedHumioCluster)
				if err != nil {
					return err
				}
				updatedHumioCluster.Spec.EnvironmentVariablesSource = []corev1.EnvFromSource{
					{
						ConfigMapRef: &corev1.ConfigMapEnvSource{
							LocalObjectReference: corev1.LocalObjectReference{
								Name: "env-var-source",
							},
						},
					},
				}
				return k8sClient.Update(ctx, &updatedHumioCluster)
			}, testTimeout, suite.TestInterval).Should(Succeed())
			suite.UsingClusterBy(key.Name, "Restarting the cluster in a rolling fashion")
			ensurePodsRollingRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
			// Count pods whose humio container picked up the ConfigMapRef;
			// must eventually equal the configured node count.
			suite.UsingClusterBy(key.Name, "Confirming pods contain the new env vars")
			Eventually(func() int {
				clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
				var podsContainingEnvFrom int
				for _, pod := range clusterPods {
					humioIdx, err := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
					Expect(err).ToNot(HaveOccurred())
					if pod.Spec.Containers[humioIdx].EnvFrom != nil {
						if len(pod.Spec.Containers[humioIdx].EnvFrom) > 0 {
							if pod.Spec.Containers[humioIdx].EnvFrom[0].ConfigMapRef != nil {
								podsContainingEnvFrom++
							}
						}
					}
				}
				return podsContainingEnvFrom
			}, testTimeout, suite.TestInterval).Should(Equal(*toCreate.Spec.NodeCount))
		})
	})
	// Mirror of the configmap test above, but with the env var source backed
	// by a Secret (SecretRef instead of ConfigMapRef).
	Context("Humio Cluster with envSource secret", func() {
		It("Creating cluster with envSource secret", func() {
			key := types.NamespacedName{
				Name:      "humiocluster-env-source-secret",
				Namespace: testProcessNamespace,
			}
			toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
			suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
			ctx := context.Background()
			suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
			defer suite.CleanupCluster(ctx, k8sClient, toCreate)
			suite.UsingClusterBy(key.Name, "Confirming the humio pods are not using env var source")
			clusterPods, _ := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
			humioIdx, err := kubernetes.GetContainerIndexByName(clusterPods[0], controllers.HumioContainerName)
			Expect(err).ToNot(HaveOccurred())
			Expect(clusterPods[0].Spec.Containers[humioIdx].EnvFrom).To(BeNil())
			// Reference a secret that does not exist yet; this must surface
			// as a configuration error instead of a deploy.
			suite.UsingClusterBy(key.Name, "Adding missing envVarSource to pod spec")
			var updatedHumioCluster humiov1alpha1.HumioCluster
			Eventually(func() error {
				err := k8sClient.Get(ctx, key, &updatedHumioCluster)
				if err != nil {
					return err
				}
				updatedHumioCluster.Spec.EnvironmentVariablesSource = []corev1.EnvFromSource{
					{
						SecretRef: &corev1.SecretEnvSource{
							LocalObjectReference: corev1.LocalObjectReference{
								Name: "env-var-source-missing",
							},
						},
					},
				}
				return k8sClient.Update(ctx, &updatedHumioCluster)
			}, testTimeout, suite.TestInterval).Should(Succeed())
			suite.UsingClusterBy(key.Name, "Confirming the HumioCluster goes into ConfigError state since the secret does not exist")
			Eventually(func() string {
				updatedHumioCluster = humiov1alpha1.HumioCluster{}
				Expect(k8sClient.Get(ctx, key, &updatedHumioCluster)).Should(Succeed())
				return updatedHumioCluster.Status.State
			}, testTimeout, suite.TestInterval).Should(Equal(humiov1alpha1.HumioClusterStateConfigError))
			suite.UsingClusterBy(key.Name, "Creating the envVarSource secret")
			envVarSourceSecret := corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "env-var-source",
					Namespace: key.Namespace,
				},
				StringData: map[string]string{"SOME_ENV_VAR": "SOME_ENV_VALUE"},
			}
			Expect(k8sClient.Create(ctx, &envVarSourceSecret)).To(Succeed())
			suite.WaitForReconcileToSync(ctx, key, k8sClient, nil, testTimeout)
			// Point the spec at the now-existing secret and expect a rolling
			// restart to propagate it.
			suite.UsingClusterBy(key.Name, "Updating envVarSource of pod spec")
			Eventually(func() error {
				updatedHumioCluster = humiov1alpha1.HumioCluster{}
				err := k8sClient.Get(ctx, key, &updatedHumioCluster)
				if err != nil {
					return err
				}
				updatedHumioCluster.Spec.EnvironmentVariablesSource = []corev1.EnvFromSource{
					{
						SecretRef: &corev1.SecretEnvSource{
							LocalObjectReference: corev1.LocalObjectReference{
								Name: "env-var-source",
							},
						},
					},
				}
				return k8sClient.Update(ctx, &updatedHumioCluster)
			}, testTimeout, suite.TestInterval).Should(Succeed())
			suite.UsingClusterBy(key.Name, "Restarting the cluster in a rolling fashion")
			ensurePodsRollingRestart(ctx, controllers.NewHumioNodeManagerFromHumioCluster(&updatedHumioCluster), 2)
			// All node-count pods must end up with a SecretRef EnvFrom entry.
			suite.UsingClusterBy(key.Name, "Confirming pods contain the new env vars")
			Eventually(func() int {
				clusterPods, _ = kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
				var podsContainingEnvFrom int
				for _, pod := range clusterPods {
					humioIdx, err := kubernetes.GetContainerIndexByName(pod, controllers.HumioContainerName)
					Expect(err).ToNot(HaveOccurred())
					if pod.Spec.Containers[humioIdx].EnvFrom != nil {
						if len(pod.Spec.Containers[humioIdx].EnvFrom) > 0 {
							if pod.Spec.Containers[humioIdx].EnvFrom[0].SecretRef != nil {
								podsContainingEnvFrom++
							}
						}
					}
				}
				return podsContainingEnvFrom
			}, testTimeout, suite.TestInterval).Should(Equal(*toCreate.Spec.NodeCount))
		})
	})
Context("Humio Cluster with resources without node pool name label", func() {
It("Creating cluster with all node pool labels set", func() {
key := types.NamespacedName{
Name: "humiocluster-nodepool-labels",
Namespace: testProcessNamespace,
}
toCreate := suite.ConstructBasicSingleNodeHumioCluster(key, true)
suite.UsingClusterBy(key.Name, "Creating the cluster successfully")
ctx := context.Background()
suite.CreateAndBootstrapCluster(ctx, k8sClient, humioClientForTestSuite, toCreate, true, humiov1alpha1.HumioClusterStateRunning, testTimeout)
defer suite.CleanupCluster(ctx, k8sClient, toCreate)
suite.UsingClusterBy(key.Name, "Removing the node pool label from the pod")
clusterPods, err := kubernetes.ListPods(ctx, k8sClient, key.Namespace, controllers.NewHumioNodeManagerFromHumioCluster(toCreate).GetPodLabels())
Expect(err).Should(BeNil())
Expect(clusterPods).To(HaveLen(1))
labelsWithoutNodePoolName := map[string]string{}
for k, v := range clusterPods[0].GetLabels() {
if k == kubernetes.NodePoolLabelName {
continue
}
labelsWithoutNodePoolName[k] = v
}
clusterPods[0].SetLabels(labelsWithoutNodePoolName)
Expect(k8sClient.Update(ctx, &clusterPods[0])).Should(Succeed())
suite.UsingClusterBy(key.Name, "Validating the node pool name label gets added to the pod again")
Eventually(func() map[string]string {
var updatedPod corev1.Pod
err := k8sClient.Get(ctx, types.NamespacedName{
Name: clusterPods[0].Name,
Namespace: key.Namespace,
}, &updatedPod)
if updatedPod.ResourceVersion == clusterPods[0].ResourceVersion {
return map[string]string{
"same-resource-version": updatedPod.ResourceVersion,
}
}
if err != nil {
return map[string]string{
"got-err": err.Error(),
}
}
return updatedPod.GetLabels()
}, testTimeout, suite.TestInterval).Should(HaveKeyWithValue(kubernetes.NodePoolLabelName, key.Name))
})
})
})
|
//-------------
package main
import (
"fmt"
"os"
"strings"
"time"
"github.com/k0kubun/pp"
"github.com/pharrisee/poloniex-api"
)
type (
	// Strategy wraps a Poloniex client together with the knobs that control
	// how the triangular-arbitrage loop behaves.
	Strategy struct {
		p           *poloniex.Poloniex
		isDebug     bool
		idxOrders   int // index of the order-book level (depth) to trade against
		isRealTrade bool // when true, winning simulations place real orders
	}
	// ThreePair names the three markets forming one triangular route.
	// NOTE(review): "thoBuy" is presumably a typo for "twoBuy" — kept as-is.
	ThreePair struct {
		oneSell   string
		thoBuy    string
		threeSell string
	}
)
// main connects to Poloniex, subscribes to a few websocket feeds, launches
// the triangular-arbitrage loop for two routes in background goroutines, and
// then blocks on stdin so the process stays alive.
func main() {
	// API_POLONIEX_KEY/API_POLONIEX_SECRET are presumably defined in a
	// sibling file — TODO confirm.
	p := poloniex.NewWithCredentials(API_POLONIEX_KEY, API_POLONIEX_SECRET)
	p.Subscribe("ticker")
	p.Subscribe("USDT_BTC")
	p.Subscribe("BTC_NXT")
	// Debug-print every ticker and USDT_BTC trade event.
	p.On("ticker", func(m poloniex.WSTicker) {
		pp.Println(m)
	}).On("USDT_BTC-trade", func(m poloniex.WSOrderbook) {
		pp.Println(m)
	})
	loopTradeGo := LoopTrade(p)
	//loopTradeGo := OneTrade(p)()
	// NOTE(review): both goroutines run the SAME closure returned by
	// LoopTrade and therefore share its captured state — see LoopTrade.
	go loopTradeGo(&ThreePair{"BTC_NXT", "USDT_NXT", "USDT_BTC"})
	// go loopTradeGo(&ThreePair{"BTC_ETC", "USDT_ETC", "USDT_BTC"})
	// go loopTradeGo(&ThreePair{"BTC_ETH", "USDT_ETH", "USDT_BTC"})
	// go loopTradeGo(&ThreePair{"BTC_BCH", "USDT_BCH", "USDT_BTC"})
	go loopTradeGo(&ThreePair{"BTC_XRP", "USDT_XRP", "USDT_BTC"})
	// go loopTradeGo(&ThreePair{"BTC_LTC", "USDT_LTC", "USDT_BTC"})
	//go loopTradeGo(&ThreePair{"BTC_STR", "USDT_STR", "USDT_BTC"})
	// Block until the user types something, keeping the goroutines running.
	var input string
	fmt.Scan(&input)
}
// NewStrategy returns a Strategy bound to p with the default settings:
// debug printing on, trading against the top order-book level, and real
// order placement enabled.
func NewStrategy(p *poloniex.Poloniex) *Strategy {
	s := &Strategy{p: p}
	s.isDebug = true
	s.idxOrders = 0
	s.isRealTrade = true
	return s
}
// GetCountOpenOrders returns how many currency pairs currently have at
// least one open order, printing each such pair. On a lookup error it
// prints the error and reports zero.
func (s *Strategy) GetCountOpenOrders() int {
	openOrders, err := s.p.OpenOrdersAll()
	if err != nil {
		fmt.Println(err)
		return 0
	}
	count := 0
	for id, val := range openOrders {
		if len(val) > 0 {
			count++
			fmt.Println(id, val, len(val))
		}
	}
	return count
}
// LoopTrade warms up a Strategy, prints the USD value of the starting BTC
// budget, and returns a closure that endlessly evaluates one triangular
// route (sell leg 1 -> buy leg 2 -> sell leg 3) every millisecond, placing
// real orders when the simulated round trip gains more than 0.00003 BTC.
//
// NOTE(review): main launches the SAME returned closure from several
// goroutines; they all share the captured `start` (mutated below) and `s`,
// which is a data race — confirm whether per-route state was intended.
func LoopTrade(p *poloniex.Poloniex) func(tp *ThreePair) {
	s := NewStrategy(p)
	var start float64 = 0.003 // BTC budget carried between iterations
	s.isDebug = false
	// Probe the order book once; abort hard if it is unavailable.
	startSumUsd, _, err0 := s.tradeBuy("USDT_BTC", start)
	if err0 != nil {
		os.Exit(111)
	}
	fmt.Printf("start %f\n", startSumUsd)
	return func(threePairs *ThreePair) {
		for _ = range time.Tick(1 * time.Millisecond) {
			//
			// Simulate the three legs; each returns the resulting amount and
			// a deferred closure that would place the real order.
			nxt, fnSell1, err1 := s.tradeSell(threePairs.oneSell, start)
			usdt, fnBuy, err2 := s.tradeBuy(threePairs.thoBuy, nxt)
			btc, fnSell2, err3 := s.tradeSell(threePairs.threeSell, usdt)
			//fmt.Printf("result %f\n", s.tradeBuy("USDT_BTC", btc-start))
			if btc-start > 0.00003 {
				//s.p.Sell() threAmount := &ThreeAmounts{}
				fmt.Println("+++", threePairs.oneSell, btc-start)
				// Only fire real orders when all three quotes were valid and
				// nothing is still open from a previous round.
				if s.isRealTrade && err1 == nil && err2 == nil && err3 == nil {
					if s.GetCountOpenOrders() == 0 {
						err := s.RealTrade(fnSell1, fnBuy, fnSell2)
						if err != nil {
							fmt.Println("err trade:", err)
						}
					} else {
						fmt.Println("open orders is active")
					}
				}
			}
			// Carry the simulated result into the next iteration.
			start = btc
			//}
		}
	}
}
// RealTrade fires the three prepared order closures in sequence — first
// sell, then buy, then second sell — stopping at the first failure and
// printing the three order results on success.
func (s *Strategy) RealTrade(sell1 func() (poloniex.Sell, error), buy1 func() (poloniex.Buy, error), sell2 func() (poloniex.Sell, error)) error {
	firstOrder, err := sell1()
	if err != nil {
		return err
	}
	secondOrder, err := buy1()
	if err != nil {
		return err
	}
	thirdOrder, err := sell2()
	if err != nil {
		return err
	}
	fmt.Println(firstOrder, secondOrder, thirdOrder)
	return nil
}
// tradeBuy simulates buying with `size` of the quote currency of `pair` at
// the current best-ask level (s.idxOrders), net of the 0.25% fee. It
// returns the resulting amount, a closure that places the real buy order,
// and any order-book error.
func (s *Strategy) tradeBuy(pair string, size float64) (float64, func() (poloniex.Buy, error), error) {
	exchangeBuyCost, _, err := s.Orders(pair) // best ask level
	if err != nil {
		return 0, nil, err
	}
	buyCurrency := size * exchangeBuyCost.Rate
	buyCurrencyFee := buyCurrency - getFee(buyCurrency)
	sellCurrency := size
	if s.isDebug {
		// BUG FIX: exchangeBuyCost is a struct; printing it with %f produced
		// garbage ("%!f(...)"); print the rate itself instead.
		fmt.Printf("%s price %f\t", pair, exchangeBuyCost.Rate)
		fmt.Printf("%s buy %f\t", strings.Split(pair, "_")[0], buyCurrencyFee)
		fmt.Printf("%s sell %f\t\n", strings.Split(pair, "_")[1], sellCurrency)
	}
	return buyCurrencyFee, func() (poloniex.Buy, error) {
		fmt.Println("try buy", pair)
		return s.p.Buy(pair, size, exchangeBuyCost.Rate)
	}, nil
}
// tradeSell simulates selling `size` of the base currency of `pair` into
// the quote currency at the current best-bid level (s.idxOrders), net of
// the 0.25% fee. It returns the resulting amount, a closure that places the
// real sell order, and any order-book error.
func (s *Strategy) tradeSell(pair string, size float64) (float64, func() (poloniex.Sell, error), error) {
	_, btcnxtSell, err := s.Orders(pair) // best bid level
	if err != nil {
		return 0, nil, err
	}
	buyCurrency := size / btcnxtSell.Rate
	buyCurrencyFee := buyCurrency - getFee(buyCurrency)
	// NOTE(review): always zero — presumably meant to show the amount sold;
	// kept as-is to preserve the original debug output.
	sellCurrency := size - size
	if s.isDebug {
		// BUG FIX: btcnxtSell is a struct; printing it with %f produced
		// garbage ("%!f(...)"); print the rate itself instead.
		fmt.Printf("%s price %f\t", pair, btcnxtSell.Rate)
		fmt.Printf("%s sell %f\t", strings.Split(pair, "_")[0], sellCurrency)
		fmt.Printf("%s buy %f\t\n", strings.Split(pair, "_")[1], buyCurrencyFee)
	}
	return buyCurrencyFee, func() (poloniex.Sell, error) {
		fmt.Println("try sell", pair)
		return s.p.Sell(pair, size, btcnxtSell.Rate)
	}, nil
}
// Orders fetches the order book for pair and returns the ask and bid at
// depth s.idxOrders. A fetch failure or a book too thin for that depth is
// returned as an error with zero-valued orders.
func (s *Strategy) Orders(pair string) (poloniex.Order, poloniex.Order, error) {
	ob, err := s.p.OrderBook(pair)
	if err != nil {
		fmt.Println("GetOrders err", err)
		return poloniex.Order{}, poloniex.Order{}, err
	}
	// BUG FIX: indexing an empty/thin book previously panicked.
	if s.idxOrders >= len(ob.Asks) || s.idxOrders >= len(ob.Bids) {
		return poloniex.Order{}, poloniex.Order{}, fmt.Errorf("order book for %s has fewer than %d levels", pair, s.idxOrders+1)
	}
	return ob.Asks[s.idxOrders], ob.Bids[s.idxOrders], nil
}
// getFee returns Poloniex's 0.25% taker fee on the given amount.
func getFee(cost float64) float64 {
	const feePercent = 0.25
	return feePercent * cost / 100
}
//func OneTrade(p *poloniex.Poloniex) func() {
// s := NewStrategy(p)
// var start float64 = 0.003
// s.isDebug = false
// fmt.Printf("start %f\n", s.tradeBuy("USDT_BTC", start))
// return func() {
// s.isDebug = true
// nxt := s.tradeSell("BTC_NXT", start)
// usdt := s.tradeBuy("USDT_NXT", nxt)
// btc := s.tradeSell("USDT_BTC", usdt)
// s.isDebug = false
// fmt.Printf("result %f\n", s.tradeBuy("USDT_BTC", btc-start))
// os.Exit(0)
// }
//}
|
package main
import (
"go-openapi/restapi/operations/health"
"os"
"go-openapi/models"
logadaptor "go-openapi/pkg/log"
"go-openapi/pkg/storage"
"go-openapi/restapi"
"go-openapi/restapi/operations"
"go-openapi/restapi/operations/user"
"github.com/go-openapi/loads"
"github.com/go-openapi/runtime/middleware"
"github.com/jessevdk/go-flags"
"github.com/opentracing/opentracing-go"
log "github.com/sirupsen/logrus"
"github.com/uber/jaeger-client-go"
jaegercfg "github.com/uber/jaeger-client-go/config"
"github.com/uber/jaeger-lib/metrics"
)
// main wires together the fake in-memory database, structured JSON logging,
// Jaeger tracing, and the generated go-swagger server, registers the health
// and user endpoint handlers, and serves on 0.0.0.0:8080.
func main() {
	// -----------------------------------------------------------------------------------------------------------------
	// Fake database
	db, err := storage.LoadLocalDB()
	if err != nil {
		log.Fatal(err)
	}
	// -----------------------------------------------------------------------------------------------------------------
	// Logging: JSON-formatted logrus logger with an "app" field on every entry.
	cLogger := log.New()
	cLogger.SetFormatter(&log.JSONFormatter{})
	cLogger.SetOutput(os.Stdout)
	cLogger.SetLevel(log.InfoLevel)
	cLoggerEntry := cLogger.WithFields(log.Fields{
		"app": "go-openapi",
	})
	// -----------------------------------------------------------------------------------------------------------------
	// Jaeger
	// Configure Jaeger: const sampler with param 1 samples every trace.
	cfg := jaegercfg.Configuration{
		ServiceName: "go-openapi",
		Sampler: &jaegercfg.SamplerConfig{
			Type:  jaeger.SamplerTypeConst,
			Param: 1,
		},
		Reporter: &jaegercfg.ReporterConfig{
			LogSpans: true,
		},
	}
	jMetricsFactory := metrics.NullFactory
	tracer, closer, err := cfg.NewTracer(
		jaegercfg.Logger(logadaptor.LogrusAdapter{Logger: cLogger}),
		jaegercfg.Metrics(jMetricsFactory))
	if err != nil {
		// BUG FIX: this used to log with Error and fall through, leaving
		// tracer and closer nil — SetGlobalTracer would install a nil tracer
		// and the deferred closer.Close() would panic at shutdown. The
		// handlers below use the tracer unconditionally, so treat as fatal.
		cLoggerEntry.Fatalln(err)
	}
	opentracing.SetGlobalTracer(tracer)
	defer closer.Close()
	// Load the embedded swagger spec and build the generated API + server.
	swaggerSpec, err := loads.Embedded(restapi.SwaggerJSON, restapi.FlatSwaggerJSON)
	if err != nil {
		cLoggerEntry.Fatalln(err)
	}
	api := operations.NewGoOpenapiAPI(swaggerSpec)
	server := restapi.NewServer(api)
	defer server.Shutdown()
	// Register go-swagger's command-line flag groups and parse them.
	parser := flags.NewParser(server, flags.Default)
	server.ConfigureFlags()
	for _, optsGroup := range api.CommandLineOptionsGroups {
		_, err := parser.AddGroup(optsGroup.ShortDescription, optsGroup.LongDescription, optsGroup.Options)
		if err != nil {
			cLoggerEntry.Fatalln(err)
		}
	}
	if _, err := parser.Parse(); err != nil {
		code := 1
		if fe, ok := err.(*flags.Error); ok {
			if fe.Type == flags.ErrHelp {
				code = 0
			}
		}
		os.Exit(code)
	}
	// Stub API code examples ------------------------------------------------------------------------------------------
	api.HealthGetHealthzHandler = health.GetHealthzHandlerFunc(func(params health.GetHealthzParams) middleware.Responder {
		// Default health check
		return health.NewGetHealthzDefault(200)
	})
	// Create a user unless one with the same username already exists.
	api.UserCreateUserHandler = user.CreateUserHandlerFunc(func(params user.CreateUserParams) middleware.Responder {
		cLoggerEntry.WithFields(log.Fields{
			"username":    params.Body.Username,
			"headers":     params.HTTPRequest.Header,
			"method":      params.HTTPRequest.Method,
			"host":        params.HTTPRequest.Host,
			"requestPath": params.HTTPRequest.RequestURI,
		}).Info("CreateUserHandlerFunc")
		t := opentracing.GlobalTracer()
		span := t.StartSpan("UserCreateUserHandler")
		defer span.Finish()
		tnxGetSpan := tracer.StartSpan(
			"DatabaseGet",
			opentracing.ChildOf(span.Context()),
		)
		defer tnxGetSpan.Finish()
		getTxn := db.Txn(false)
		defer getTxn.Abort()
		raw, err := getTxn.First("user", "username", params.Body.Username)
		// NOTE(review): a lookup error is also reported as Conflict here —
		// confirm that is the intended behavior.
		if err != nil || raw != nil {
			cLoggerEntry.WithFields(log.Fields{
				"username":    params.Body.Username,
				"headers":     params.HTTPRequest.Header,
				"method":      params.HTTPRequest.Method,
				"host":        params.HTTPRequest.Host,
				"requestPath": params.HTTPRequest.RequestURI,
			}).Warn(err)
			return user.NewCreateUserConflict()
		}
		tnxCreateSpan := tracer.StartSpan(
			"DatabaseCreate",
			opentracing.ChildOf(tnxGetSpan.Context()),
		)
		defer tnxCreateSpan.Finish()
		txn := db.Txn(true)
		defer txn.Abort()
		// NOTE(review): an insert failure is only warned about and 201 is
		// still returned — confirm that is intended.
		if err := txn.Insert("user", params.Body); err != nil {
			cLoggerEntry.WithFields(log.Fields{
				"username":    params.Body.Username,
				"headers":     params.HTTPRequest.Header,
				"method":      params.HTTPRequest.Method,
				"host":        params.HTTPRequest.Host,
				"requestPath": params.HTTPRequest.RequestURI,
			}).Warn(err)
		}
		txn.Commit()
		return user.NewCreateUserDefault(201)
	})
	// Delete a user by username, 404 when absent or on delete failure.
	api.UserDeleteUserHandler = user.DeleteUserHandlerFunc(func(params user.DeleteUserParams) middleware.Responder {
		cLoggerEntry.WithFields(log.Fields{
			"username":    params.Username,
			"headers":     params.HTTPRequest.Header,
			"method":      params.HTTPRequest.Method,
			"host":        params.HTTPRequest.Host,
			"requestPath": params.HTTPRequest.RequestURI,
		}).Infof("UserDeleteUserHandler")
		t := opentracing.GlobalTracer()
		span := t.StartSpan("UserDeleteUserHandler")
		defer span.Finish()
		tnxGetSpan := tracer.StartSpan(
			"DatabaseGet",
			opentracing.ChildOf(span.Context()),
		)
		defer tnxGetSpan.Finish()
		getTxn := db.Txn(false)
		defer getTxn.Abort()
		raw, err := getTxn.First("user", "username", params.Username)
		if err != nil || raw == nil {
			cLoggerEntry.WithFields(log.Fields{
				"username":    params.Username,
				"headers":     params.HTTPRequest.Header,
				"method":      params.HTTPRequest.Method,
				"host":        params.HTTPRequest.Host,
				"requestPath": params.HTTPRequest.RequestURI,
			}).Warn(err)
			return user.NewDeleteUserNotFound()
		}
		tnxDelSpan := tracer.StartSpan(
			"DatabaseDelete",
			opentracing.ChildOf(tnxGetSpan.Context()),
		)
		defer tnxDelSpan.Finish()
		writeTxn := db.Txn(true)
		defer writeTxn.Abort()
		if err := writeTxn.Delete("user", raw); err != nil {
			cLoggerEntry.WithFields(log.Fields{
				"username":    params.Username,
				"headers":     params.HTTPRequest.Header,
				"method":      params.HTTPRequest.Method,
				"host":        params.HTTPRequest.Host,
				"requestPath": params.HTTPRequest.RequestURI,
			}).Warn(err)
			return user.NewDeleteUserNotFound()
		}
		writeTxn.Commit()
		return user.NewDeleteUserOK()
	})
	// Fetch a user by username, 404 when absent.
	api.UserGetUserByNameHandler = user.GetUserByNameHandlerFunc(func(params user.GetUserByNameParams) middleware.Responder {
		cLoggerEntry.WithFields(log.Fields{
			"username":    params.Username,
			"headers":     params.HTTPRequest.Header,
			"method":      params.HTTPRequest.Method,
			"host":        params.HTTPRequest.Host,
			"requestPath": params.HTTPRequest.RequestURI,
		}).Infof("UserGetUserByNameHandler")
		t := opentracing.GlobalTracer()
		span := t.StartSpan("UserGetUserByNameHandler")
		defer span.Finish()
		tnxSpan := tracer.StartSpan(
			"DatabaseGet",
			opentracing.ChildOf(span.Context()),
		)
		defer tnxSpan.Finish()
		txn := db.Txn(false)
		defer txn.Abort()
		raw, err := txn.First("user", "username", params.Username)
		if err != nil {
			cLoggerEntry.WithFields(log.Fields{
				"username":    params.Username,
				"headers":     params.HTTPRequest.Header,
				"method":      params.HTTPRequest.Method,
				"host":        params.HTTPRequest.Host,
				"requestPath": params.HTTPRequest.RequestURI,
			}).Warn(err)
		}
		if raw == nil {
			return user.NewGetUserByNameNotFound()
		}
		u := raw.(*models.User)
		return user.NewGetUserByNameOK().WithPayload(u)
	})
	// -----------------------------------------------------------------------------------------------------------------
	server.ConfigureAPI()
	server.Port = 8080
	server.Host = "0.0.0.0"
	if err := server.Serve(); err != nil {
		cLoggerEntry.Fatalln(err)
	}
	// -----------------------------------------------------------------------------------------------------------------
}
|
package bca
import (
"encoding/base64"
"fmt"
"github.com/imroc/req"
)
// Defaults and layout strings used when building BCA API requests.
const (
	DefaultTimezone     string = "Asia/Jakarta"
	DefaultCurrencyCode string = "IDR"
	// Go reference-time layouts for the timestamp formats used by the API.
	TimestampISO8601         string = "2006-01-02T15:04:05.000-07:00"
	TimestampTransactionDate string = "2006-01-02"
	// Cache keys for the access token and transaction id.
	accessTokenCacheKey   string = "bca.accessToken"
	transactionIdCacheKey string = "bca.transid"
)
var (
	// Debug toggles debug behavior for the package — presumably verbose
	// request/response logging; confirm against the request helpers.
	Debug bool
)

// Package-level BCA API credentials and identifiers; callers are expected
// to set these before issuing requests.
var (
	ApiKey       string
	ApiSecret    string
	ClientId     string
	ClientSecret string
	CorporateId  string
	ChannelId    string
	CredentialId string
)
// OAuth is the token response returned by BCA's /api/oauth/token endpoint.
type OAuth struct {
	AccessToken string `json:"access_token"`
	TokenType   string `json:"token_type"`
	ExpiresIn   int    `json:"expires_in"` // token lifetime, presumably seconds — confirm with API docs
	Scope       string `json:"scope"`
}
// OAuthToken requests a client-credentials access token from BCA's
// /api/oauth/token endpooint is authenticated with ClientId/ClientSecret via
// HTTP Basic auth; the decoded token (or the request error) is returned.
func OAuthToken() (OAuth, error) {
	var token OAuth
	request := createRequest("POST", "/api/oauth/token")
	credentials := base64.StdEncoding.EncodeToString([]byte(ClientId + ":" + ClientSecret))
	request.Header.Set("Authorization", fmt.Sprintf("Basic %s", credentials))
	request.Body = req.Param{"grant_type": "client_credentials"}
	err := request.Request(&token)
	return token, err
}
|
package main
import "fmt"
// 200. 岛屿数量
// 给你一个由 '1'(陆地)和 '0'(水)组成的的二维网格,请你计算网格中岛屿的数量。
// 岛屿总是被水包围,并且每座岛屿只能由水平方向或竖直方向上相邻的陆地连接形成。
// 此外,你可以假设该网格的四条边均被水包围。
// https://leetcode-cn.com/problems/number-of-islands/
// main runs the union-find island counter twice on the same sample grid,
// printing the result of each run (the sample contains a single island).
func main() {
	sample := func() [][]byte {
		return [][]byte{
			{'1', '1', '1', '1', '0'},
			{'1', '1', '0', '1', '0'},
			{'1', '1', '0', '0', '0'},
			{'0', '0', '0', '0', '0'},
		}
	}
	fmt.Println(numIslands3(sample()))
	fmt.Println(numIslands3(sample()))
}
// 法一:dfs
// Approach 1: DFS.
// numIslands counts the connected components of '1' cells (4-directional
// adjacency) by flood-filling each newly seen land cell.
func numIslands(grid [][]byte) (count int) {
	rows := len(grid)
	if rows == 0 || len(grid[0]) == 0 {
		return 0
	}
	cols := len(grid[0])
	seen := make([][]bool, rows)
	for r := range seen {
		seen[r] = make([]bool, cols)
	}
	for r := 0; r < rows; r++ {
		for c := 0; c < cols; c++ {
			if grid[r][c] == '1' && !seen[r][c] {
				// First cell of a fresh island: mark its whole component.
				count++
				markIsland(grid, r, c, seen)
			}
		}
	}
	return count
}

// markIsland recursively marks every land cell reachable from (x, y) as
// visited; out-of-bounds, already-visited, and water cells stop the recursion.
func markIsland(grid [][]byte, x, y int, visited [][]bool) {
	outOfBounds := x < 0 || y < 0 || x >= len(grid) || y >= len(grid[0])
	if outOfBounds || visited[x][y] || grid[x][y] == '0' {
		return
	}
	visited[x][y] = true
	for _, d := range [4][2]int{{1, 0}, {-1, 0}, {0, 1}, {0, -1}} {
		markIsland(grid, x+d[0], y+d[1], visited)
	}
}
// around lists the four orthogonal neighbor offsets (up, left, right, down).
var around = [4][2]int{{-1, 0}, {0, -1}, {0, 1}, {1, 0}}

// Approach 2: BFS.
// numIslands2 counts islands by breadth-first flooding from each newly
// discovered land cell, using a visited matrix and an explicit queue.
func numIslands2(grid [][]byte) (count int) {
	if len(grid) == 0 {
		return 0
	}
	rows, cols := len(grid), len(grid[0])
	seen := make([][]bool, rows)
	for r := range seen {
		seen[r] = make([]bool, cols)
	}
	for r := 0; r < rows; r++ {
		for c := 0; c < cols; c++ {
			if grid[r][c] != '1' || seen[r][c] {
				continue
			}
			// New island: flood it breadth-first.
			count++
			seen[r][c] = true
			frontier := [][2]int{{r, c}}
			for len(frontier) > 0 {
				cell := frontier[0]
				frontier = frontier[1:]
				for _, d := range around {
					nr, nc := cell[0]+d[0], cell[1]+d[1]
					if nr >= 0 && nr < rows && nc >= 0 && nc < cols && grid[nr][nc] == '1' && !seen[nr][nc] {
						seen[nr][nc] = true
						frontier = append(frontier, [2]int{nr, nc})
					}
				}
			}
		}
	}
	return count
}
// 法三:并查集
// 「并查集」主要用于解决「动态连通性」问题,重点关注的是连接问题,不关注路径问题。
// 对于本题,就是将水域和周边水域连接,岛屿和周边岛屿连接
// 岛屿的数量就是岛屿联通集合的数目
// numIslands3 counts islands with a union-find (disjoint-set) structure:
// every water cell is merged into one shared "water" set, adjacent land
// cells are merged together, and the answer is the number of remaining
// sets minus the single water set.
func numIslands3(grid [][]byte) (count int) {
	if len(grid) == 0 || len(grid[0]) == 0 {
		return 0
	}
	m, n := len(grid), len(grid[0])
	water := m * n // virtual extra node that absorbs every '0' cell
	uf := NewUnionFind(water + 1)
	idx := func(r, c int) int { return r*n + c }
	dirs := [4][2]int{{-1, 0}, {0, -1}, {0, 1}, {1, 0}}
	for r := 0; r < m; r++ {
		for c := 0; c < n; c++ {
			if grid[r][c] == '0' {
				uf.union(water, idx(r, c))
				continue
			}
			// Land: merge with each orthogonal land neighbor.
			for _, d := range dirs {
				nr, nc := r+d[0], c+d[1]
				if nr >= 0 && nr < m && nc >= 0 && nc < n && grid[nr][nc] == '1' {
					uf.union(idx(r, c), idx(nr, nc))
				}
			}
		}
	}
	return uf.getCount() - 1
}

// unionFind is a minimal disjoint-set structure with path compression that
// tracks how many disjoint sets remain.
type unionFind struct {
	parent []int
	count  int
}

// NewUnionFind returns a union-find over elements 0..n-1, each initially in
// its own singleton set.
func NewUnionFind(n int) *unionFind {
	parent := make([]int, n)
	for i := range parent {
		parent[i] = i
	}
	return &unionFind{
		parent: parent,
		count:  n,
	}
}

// find returns the representative of p's set, compressing the path on the way.
func (u unionFind) find(p int) int {
	root := p
	for u.parent[root] != root {
		root = u.parent[root]
	}
	for u.parent[p] != p {
		u.parent[p], p = root, u.parent[p]
	}
	return root
}

// union merges the sets containing x and y, decrementing the set count when
// they were previously disjoint.
func (u *unionFind) union(x, y int) {
	rx, ry := u.find(x), u.find(y)
	if rx == ry {
		return
	}
	u.parent[rx] = ry
	u.count--
}

// getCount reports how many disjoint sets remain.
func (u unionFind) getCount() int {
	return u.count
}
|
// access_test.go (access-check)
package main
import (
"testing"
)
// The sample config shipped with the repo must be reported as existing.
func TestExistFile(t *testing.T) {
	sample := `../config.yml-sample`
	if !Exists(sample) {
		t.Error("Failure TestExistFile")
	}
}
// A path that does not exist must be reported as missing.
func TestDoesntExistFile(t *testing.T) {
	missing := `cheese.txt`
	if Exists(missing) {
		t.Error("Failure DoesntExistFile")
	}
}
// Exists must not report true for the `pkg` path — presumably because it is
// a directory (or absent relative to the test), per the original expectation.
func TestFolderInvalid(t *testing.T) {
	folder := `pkg`
	if Exists(folder) {
		t.Error("Failure FolderInvalid")
	}
}
// Loading the sample config must populate every email-settings field.
func TestConfigGetNull(t *testing.T) {
	var data ConfigTypes
	GetData(&data, `../config.yml-sample`)
	for _, field := range []string{
		data.EmailSettings.Email,
		data.EmailSettings.Password,
		data.EmailSettings.Name,
		data.EmailSettings.ToEmail,
	} {
		if field == "" {
			t.Error("Failure")
		}
	}
}
// The sample config must load successfully and carry the expected values.
func TestConfigGetAll(t *testing.T) {
	var data ConfigTypes
	if !GetData(&data, `../config.yml-sample`) {
		t.Error("Failure")
	}
	checks := [][2]string{
		{data.EmailSettings.Email, "myemail"},
		{data.EmailSettings.Password, "password"},
		{data.EmailSettings.Name, "rubber-duck-999"},
		{data.EmailSettings.ToEmail, "cheese"},
	}
	for _, pair := range checks {
		if pair[0] != pair[1] {
			t.Error("Failure")
		}
	}
}
// A misnamed config file must fail to load and leave every field empty.
func TestConfigFail(t *testing.T) {
	var data ConfigTypes
	if GetData(&data, `config.ymlsample`) {
		t.Error("Failure")
	}
	for _, field := range []string{
		data.EmailSettings.Email,
		data.EmailSettings.Password,
		data.EmailSettings.Name,
		data.EmailSettings.ToEmail,
	} {
		if field != "" {
			t.Error("Failure")
		}
	}
}
// A nonexistent/invalid config path must fail and leave every field empty.
func TestIncorrectConfig(t *testing.T) {
	var data ConfigTypes
	if GetData(&data, `config.yml-fail`) {
		t.Error("Failure")
	}
	for _, field := range []string{
		data.EmailSettings.Email,
		data.EmailSettings.Password,
		data.EmailSettings.Name,
		data.EmailSettings.ToEmail,
	} {
		if field != "" {
			t.Error("Failure")
		}
	}
}
|
/*
* @lc app=leetcode.cn id=884 lang=golang
*
* [884] 两句话中的不常见单词
*/
package main
// @lc code=start
// uncommonFromSentences returns every word that appears exactly once across
// both space-separated sentences combined (LeetCode 884). Order of the
// result is unspecified (map iteration).
//
// BUG FIX: the previous scanner started at index 1, so a sentence that was a
// single one-character word (e.g. "a") was never counted; it also counted
// empty "words" between consecutive spaces. Both are handled correctly here.
func uncommonFromSentences(s1 string, s2 string) []string {
	wordCount := make(map[string]int)
	// countWords tallies each maximal run of non-space bytes in s.
	countWords := func(s string) {
		start := -1 // start index of the current word; -1 while between words
		for i := 0; i <= len(s); i++ {
			if i == len(s) || s[i] == ' ' {
				if start >= 0 {
					wordCount[s[start:i]]++
					start = -1
				}
			} else if start < 0 {
				start = i
			}
		}
	}
	countWords(s1)
	countWords(s2)
	ret := []string{}
	for word, count := range wordCount {
		if count == 1 {
			ret = append(ret, word)
		}
	}
	return ret
}
// func main() {
// fmt.Println(uncommonFromSentences("this apple is sweet", "this apple is sour"),
// uncommonFromSentences("apple apple", "banana"))
// }
// @lc code=end
|
package goble
import (
"github.com/ge-lighting/goble/xpc"
"log"
)
const (
	// ALL is the catch-all event name: a handler registered under it
	// receives any event that has no handler of its own (see Emitter).
	ALL = "__allEvents__"
)
// Event generated by blued, with associated data
type Event struct {
	Name               string // event name, used to look up a handler in the Emitter
	State              string
	DeviceUUID         xpc.UUID
	ServiceUuid        string
	CharacteristicUuid string
	Peripheral         Peripheral
	Data               []byte // raw payload, if any
	Mtu                int
	IsNotification     bool
}
// The event handler function.
// Return true to terminate
type EventHandlerFunc func(Event) bool
// subscriberFunc pairs an event name with its handler; sent on
// Emitter.subscribe to (un)register handlers. A nil fn removes the handler.
type subscriberFunc struct {
	eventName string
	fn        EventHandlerFunc
}
// Emitter is an object to emit and handle Event(s).
// All fields are owned by the dispatch goroutine started by Init; callers
// interact only through Emit, On and Close.
type Emitter struct {
	handlers  map[string]EventHandlerFunc // touched only by the Init goroutine
	event     chan Event                  // incoming events (Emit)
	subscribe chan subscriberFunc         // handler (un)registration (On)
	verbose   bool
	clean     chan bool // shutdown signal (Close)
}
// MakeEmitter returns a ready-to-use Emitter with its handler map and all
// channels allocated. Call Init afterwards to start dispatching.
func MakeEmitter() Emitter {
	e := Emitter{
		handlers:  make(map[string]EventHandlerFunc),
		event:     make(chan Event),
		subscribe: make(chan subscriberFunc),
		clean:     make(chan bool),
	}
	return e
}
// Init initializes the emitter and starts the goroutine that executes the
// event handlers. The goroutine serializes all access to the handlers map by
// servicing three channels:
//   - event:     run the handler registered for ev.Name, falling back to the
//                ALL handler; otherwise optionally log the unhandled event
//   - subscribe: register a handler, or remove it when fn is nil
//   - clean:     on true, close every channel and terminate the goroutine
func (e *Emitter) Init() {
	// event handler
	go func() {
		for {
			select {
			case ev := <-e.event:
				if fn, ok := e.handlers[ev.Name]; ok {
					fn(ev)
				} else if fn, ok := e.handlers[ALL]; ok {
					fn(ev)
				} else if e.verbose {
					// idiom fix: flattened the nested "else { if ... }"
					log.Println("unhandled Emit", ev)
				}
			case cb := <-e.subscribe:
				if cb.fn == nil {
					delete(e.handlers, cb.eventName)
				} else {
					e.handlers[cb.eventName] = cb.fn
				}
			case cleaning := <-e.clean:
				// idiom fix: "cleaning == true" -> "cleaning"
				if cleaning {
					close(e.event)
					close(e.subscribe)
					close(e.clean)
					return
				}
			}
		}
		//close(e.event) // TOFIX: this causes new "emits" to panic.
	}()
}
// SetVerbose enables or disables logging of unhandled events.
// Call before Init: the dispatch goroutine reads e.verbose without locking.
func (e *Emitter) SetVerbose(v bool) {
	e.verbose = v
}
// Emit sends the event on the 'event' channel.
// Blocks until the dispatch goroutine receives it; panics if called after
// Close (the channel is closed then — see the TOFIX note in Init).
func (e *Emitter) Emit(ev Event) {
	e.event <- ev
}
// On(event, cb) registers an handler for the specified event.
// Passing a nil fn removes the handler for that event.
func (e *Emitter) On(event string, fn EventHandlerFunc) {
	e.subscribe <- subscriberFunc{eventName: event, fn: fn}
}
// Close stops the dispatch goroutine and closes the emitter's channels.
// Must be called at most once; Emit/On after Close will panic.
func (e *Emitter) Close() {
	e.clean <- true
}
|
package cloudflare
import (
"context"
"encoding/json"
"fmt"
"net/http"
"time"
)
// DLPPayloadLogSettings holds the DLP payload logging configuration.
type DLPPayloadLogSettings struct {
	PublicKey string `json:"public_key,omitempty"`

	// Only present in responses
	UpdatedAt *time.Time `json:"updated_at,omitempty"`
}

// GetDLPPayloadLogSettingsParams carries parameters for
// GetDLPPayloadLogSettings (currently none).
type GetDLPPayloadLogSettingsParams struct{}

// DLPPayloadLogSettingsResponse is the API envelope around DLPPayloadLogSettings.
type DLPPayloadLogSettingsResponse struct {
	Response
	Result DLPPayloadLogSettings `json:"result"`
}
// GetDLPPayloadLogSettings gets the current DLP payload logging settings.
//
// API reference: https://api.cloudflare.com/#dlp-payload-log-settings-get-settings
func (api *API) GetDLPPayloadLogSettings(ctx context.Context, rc *ResourceContainer, params GetDLPPayloadLogSettingsParams) (DLPPayloadLogSettings, error) {
	if rc.Identifier == "" {
		return DLPPayloadLogSettings{}, ErrMissingResourceIdentifier
	}

	uri := buildURI(fmt.Sprintf("/%s/%s/dlp/payload_log", rc.Level, rc.Identifier), nil)

	body, err := api.makeRequestContext(ctx, http.MethodGet, uri, nil)
	if err != nil {
		return DLPPayloadLogSettings{}, err
	}

	var reply DLPPayloadLogSettingsResponse
	if err := json.Unmarshal(body, &reply); err != nil {
		return DLPPayloadLogSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
	}

	return reply.Result, nil
}
// UpdateDLPPayloadLogSettings sets the current DLP payload logging settings to new values.
//
// API reference: https://api.cloudflare.com/#dlp-payload-log-settings-update-settings
func (api *API) UpdateDLPPayloadLogSettings(ctx context.Context, rc *ResourceContainer, settings DLPPayloadLogSettings) (DLPPayloadLogSettings, error) {
	if rc.Identifier == "" {
		return DLPPayloadLogSettings{}, ErrMissingResourceIdentifier
	}

	uri := buildURI(fmt.Sprintf("/%s/%s/dlp/payload_log", rc.Level, rc.Identifier), nil)

	body, err := api.makeRequestContext(ctx, http.MethodPut, uri, settings)
	if err != nil {
		return DLPPayloadLogSettings{}, err
	}

	var reply DLPPayloadLogSettingsResponse
	if err := json.Unmarshal(body, &reply); err != nil {
		return DLPPayloadLogSettings{}, fmt.Errorf("%s: %w", errUnmarshalError, err)
	}

	return reply.Result, nil
}
|
package main
import (
"bufio"
"bytes"
"fmt"
//"io"
"os"
"regexp"
"strings"
)
const dbg_parse_line = false
// fmt_line renders a slice of strings as a bracketed, comma-separated list of
// quoted tokens, e.g. ["a", "b"]. Used for debug output.
func fmt_line(data []string) string {
	buf := bytes.NewBufferString("[")
	for i, tok := range data {
		if i > 0 {
			fmt.Fprintf(buf, ", %q", tok)
		} else {
			fmt.Fprintf(buf, "%q", tok)
		}
	}
	buf.WriteString("]")
	return buf.String()
}
// dropCR drops a single terminal '\r' (carriage return) from data, if present.
func dropCR(data []byte) []byte {
	if n := len(data); n > 0 && data[n-1] == '\r' {
		return data[:n-1]
	}
	return data
}
// scan_line is a bufio.SplitFunc that currently just delegates to
// bufio.ScanLines. Backslash line-continuations are stitched back together by
// Parser.run, not here.
func scan_line(data []byte, atEOF bool) (advance int, token []byte, err error) {
	return bufio.ScanLines(data, atEOF)
}
// Parser reads one requirements file line by line and drives the dispatch
// table in `table` for each parsed statement.
type Parser struct {
	req       *ReqFile             // accumulated parse result
	isPrivate bool
	table     map[string]ParseFunc // first-token -> handler dispatch
	f         *os.File             // nil after Close
	scanner   *bufio.Scanner
	ctx       []string             // context stack; starts with tok_BEG_PUBLIC
	tokens    []string             // tokens of the line currently being handled
}
// Close releases the underlying file. It is idempotent: calls after the first
// return nil.
func (p *Parser) Close() error {
	if p.f == nil {
		return nil
	}
	f := p.f
	p.f = nil
	p.scanner = nil
	return f.Close()
}
// NewParser opens fname and returns a Parser primed with the global dispatch
// table and an initial "public" context. The caller owns the parser and must
// call Close.
func NewParser(fname string) (*Parser, error) {
	f, err := os.Open(fname)
	if err != nil {
		return nil, err
	}
	// Note: the original also checked bufio.NewScanner for nil, but that
	// constructor never returns nil, so the check was dead code (and the
	// separate `var err error` declaration was redundant).
	p := &Parser{
		table:   g_dispatch,
		f:       f,
		scanner: bufio.NewScanner(bufio.NewReader(f)),
		req:     &ReqFile{Filename: fname},
		tokens:  nil,
		ctx:     []string{tok_BEG_PUBLIC},
	}
	return p, nil
}
// run scans the file line by line, strips comments and blank lines, stitches
// backslash line-continuations into a single logical line (bline), tokenizes
// it with parse_line, and dispatches on the first token via p.table.
// Returns the first error encountered (unknown token, parse failure, or a
// handler error).
func (p *Parser) run() error {
	var err error
	bline := []byte{} // logical line being assembled across continuations
	// my_printf is a no-op unless dbg_parse_line tracing is enabled.
	my_printf := func(format string, args ...interface{}) (int, error) {
		return 0, nil
	}
	if dbg_parse_line {
		my_printf = func(format string, args ...interface{}) (int, error) {
			return fmt.Printf(format, args...)
		}
	}
	for p.scanner.Scan() {
		data := p.scanner.Bytes()
		data = bytes.TrimSpace(data)
		my_printf("-data: %v\n", string(data))
		data = bytes.Trim(data, " \t\r\n")
		my_printf("+data: %v\n", string(data))
		if len(data) == 0 {
			continue
		}
		// '#' starts a comment line; skip it entirely.
		if data[0] == '#' {
			continue
		}
		idx := len(data) - 1
		if data[idx] == '\\' {
			// Trailing backslash: append (minus the backslash) and keep
			// accumulating into bline for the next physical line.
			my_printf("!data: %v (line-continuation)\n", string(data))
			bline = append(bline, ' ')
			bline = append(bline, data[:idx]...)
			continue
		} else {
			bline = append(bline, ' ')
			bline = append(bline, data...)
		}
		var tokens []string
		tokens, err = parse_line(bline)
		if err != nil {
			return err
		}
		p.tokens = tokens
		// Dispatch on the first token of the logical line.
		fct, ok := p.table[p.tokens[0]]
		if !ok {
			return fmt.Errorf("cmt2yml: unknown token [%v]", tokens[0])
		}
		err = fct(p)
		if err != nil {
			return err
		}
		bline = nil // start a fresh logical line
	}
	return err
}
// parse_file parses a single requirements file into its ReqFile
// representation, logging progress to stdout.
func parse_file(fname string) (*ReqFile, error) {
	fmt.Printf("req=%q\n", fname)
	parser, err := NewParser(fname)
	if err != nil {
		return nil, err
	}
	defer parser.Close()
	if err := parser.run(); err != nil {
		fmt.Printf("req=%q [ERR]\n", fname)
		return nil, err
	}
	fmt.Printf("req=%q [done]\n", fname)
	return parser.req, nil
}
// parse_line splits a logical line into fields, re-joining whitespace-split
// words that belong to the same single- or double-quoted string (quotes are
// stripped, escaped \" sequences are unescaped). The quote tracking below is
// a heuristic state machine driven per-token; statement order is significant,
// so this function is documented rather than restructured.
func parse_line(data []byte) ([]string, error) {
	var err error
	line := []string{}
	// First pass: whitespace-split the raw bytes into tokens.
	worder := bufio.NewScanner(bytes.NewBuffer(data))
	worder.Split(bufio.ScanWords)
	tokens := []string{}
	for worder.Scan() {
		tok := worder.Text()
		if tok != "" {
			tokens = append(tokens, worder.Text())
		}
	}
	// my_printf is a no-op unless dbg_parse_line tracing is enabled.
	my_printf := func(format string, args ...interface{}) (int, error) {
		return 0, nil
	}
	if dbg_parse_line {
		my_printf = func(format string, args ...interface{}) (int, error) {
			return fmt.Printf(format, args...)
		}
	}
	// Tokens that OPEN a quoted region: a bare quote or key="... / key='...
	// NOTE(review): compiled per call — could be package-level, kept as-is here.
	dq_re := regexp.MustCompile(`^(|\w*=)".*`)
	sq_re := regexp.MustCompile(`^(|\w*=)'.*`)
	my_printf("===============\n")
	my_printf("@data: [%v]\n", string(data))
	my_printf("tokens: %v\n", fmt_line(tokens))
	in_dquote := false
	in_squote := false
	for i := 0; i < len(tokens); i++ {
		tok := strings.Trim(tokens[i], " \t")
		my_printf("tok[%d]=%q (q=%v)\n", i, tok, in_squote || in_dquote)
		if in_squote || in_dquote {
			// Inside a quoted string: glue this token onto the previous field.
			if len(line) > 0 {
				ttok := tok
				if strings.HasPrefix(ttok, `"`) || strings.HasPrefix(ttok, "'") {
					ttok = ttok[1:]
				}
				if strings.HasSuffix(ttok, `"`) || strings.HasSuffix(ttok, "'") {
					// Keep escaped \" — only strip a real closing quote.
					if !strings.HasSuffix(ttok, `\"`) {
						ttok = ttok[:len(ttok)-1]
					}
				}
				ttok = strings.Trim(ttok, " \t")
				if len(ttok) > 0 {
					line_val := line[len(line)-1]
					line_sep := ""
					if len(line_val) > 0 {
						line_sep = " "
					}
					ttok = strings.Replace(ttok, `\"`, `"`, -1)
					line[len(line)-1] += line_sep + ttok
				}
			} else {
				// A quoted region can only continue an existing field.
				panic("logic error")
			}
		} else {
			// Outside quotes: strip surrounding quotes, unescape, emit a field.
			ttok := tok
			if strings.HasPrefix(ttok, `"`) || strings.HasPrefix(ttok, "'") {
				ttok = ttok[1:]
			}
			if strings.HasSuffix(ttok, `"`) || strings.HasSuffix(ttok, "'") {
				if !strings.HasSuffix(ttok, `\"`) {
					ttok = ttok[:len(ttok)-1]
				}
			}
			ttok = strings.Replace(ttok, `\"`, `"`, -1)
			line = append(line, strings.Trim(ttok, " \t"))
		}
		// Quote-state transitions, evaluated on the ORIGINAL token text.
		// A lone quote character toggles its state.
		if len(tok) == 1 && strings.HasPrefix(tok, "\"") {
			in_dquote = !in_dquote
			continue
		}
		if len(tok) == 1 && strings.HasPrefix(tok, "'") {
			in_squote = !in_squote
			continue
		}
		// Closing quote at the end of a token ends the quoted region.
		if in_dquote && strings.HasSuffix(tok, "\"") && !strings.HasSuffix(tok, `\""`) {
			in_dquote = !in_dquote
			my_printf("<-- dquote: %v -> %v\n", !in_dquote, in_dquote)
		}
		if in_squote && strings.HasSuffix(tok, "'") {
			in_squote = !in_squote
			my_printf("<-- squote: %v -> %v\n", !in_squote, in_squote)
		}
		// An opening quote with no matching close in the same token starts one.
		if dq_re.MatchString(tok) {
			if !strings.HasSuffix(tok, "\"") || strings.Count(tok, `"`) == 1 {
				in_dquote = !in_dquote
				my_printf("--> dquote: %v -> %v\n", !in_dquote, in_dquote)
			}
		}
		if sq_re.MatchString(tok) {
			if !strings.HasSuffix(tok, "'") || strings.Count(tok, `'`) == 1 {
				in_squote = !in_squote
				my_printf("--> squote: %v -> %v\n", !in_squote, in_squote)
			}
		}
	}
	return line, err
}
// EOF
|
package main
import (
"encoding/json"
"log"
api "github.com/micro/go-api/proto"
"github.com/micro/go-micro/errors"
"context"
)
type Foo struct{}
// Foo.Bar is a method which will be served by http request /example/foo/bar
// Because Foo is not the same as the service name it is mapped beyond /example/
func (f *Foo) Bar(ctx context.Context, req *api.Request, rsp *api.Response) error {
log.Print("Received Foo.Bar request")
// check method
if req.Method != "POST" {
return errors.BadRequest("go.micro.api.example", "require post")
}
// let's make sure we get json
ct, ok := req.Header["Content-Type"]
if !ok || len(ct.Values) == 0 {
return errors.BadRequest("go.micro.api.example", "need content-type")
}
if ct.Values[0] != "application/json" {
return errors.BadRequest("go.micro.api.example", "expect application/json")
}
// parse body
var body map[string]interface{}
json.Unmarshal([]byte(req.Body), &body)
// do something with parsed body
return nil
} |
package cmd
import (
"fmt"
"regexp"
"strings"
"github.com/pkg/errors"
"github.com/spf13/cobra"
analytics "gopkg.in/segmentio/analytics-go.v3"
)
var (
	// envCmd represents the env command
	envCmd = &cobra.Command{
		Use:   "env <service>",
		Short: "Print the secrets from the parameter store in a format to export as environment variables",
		Args:  cobra.ExactArgs(1),
		RunE:  env,
	}
	// pattern matches any character that is NOT shell-safe; see shellescape.
	pattern *regexp.Regexp
)
// init registers the env subcommand and compiles the shell-safety pattern
// once at startup.
func init() {
	RootCmd.AddCommand(envCmd)
	pattern = regexp.MustCompile(`[^\w@%+=:,./-]`)
}
// env implements the "env <service>" command: it lists every secret stored
// for the service and prints `export NAME=value` lines (values shell-escaped)
// suitable for eval-ing in a shell. Optionally records an analytics event.
func env(cmd *cobra.Command, args []string) error {
	service := strings.ToLower(args[0])
	if err := validateService(service); err != nil {
		return errors.Wrap(err, "Failed to validate service")
	}

	secretStore, err := getSecretStore()
	if err != nil {
		return errors.Wrap(err, "Failed to get secret store")
	}
	secrets, err := secretStore.List(service, true)
	if err != nil {
		return errors.Wrap(err, "Failed to list store contents")
	}

	if analyticsEnabled && analyticsClient != nil {
		// Best effort: the Enqueue error is intentionally ignored so that
		// analytics problems never break the command itself.
		analyticsClient.Enqueue(analytics.Track{
			UserId: username,
			Event:  "Ran Command",
			Properties: analytics.NewProperties().
				Set("command", "env").
				Set("chamber-version", chamberVersion).
				Set("service", service).
				Set("backend", backend),
		})
	}

	for _, secret := range secrets {
		fmt.Printf("export %s=%s\n",
			strings.ToUpper(key(secret.Meta.Key)),
			shellescape(*secret.Value))
	}
	return nil
}
// shellescape returns a shell-escaped version of the string s. The returned value
// is a string that can safely be used as one token in a shell command line.
func shellescape(s string) string {
	if s == "" {
		return "''"
	}
	if !pattern.MatchString(s) {
		// Only shell-safe characters: no quoting needed.
		return s
	}
	// Wrap in single quotes; each embedded single quote becomes '"'"'.
	return "'" + strings.Replace(s, "'", `'"'"'`, -1) + "'"
}
|
/*
-------------------------------------------------
Author : Zhang Fan
date: 2020/5/17
Description :
-------------------------------------------------
*/
package robot
import (
"encoding/json"
)
// TextMsg is the payload of a plain-text message.
type TextMsg struct {
	Content string `json:"content"`
}

// LinkMsg is the payload of a link message.
type LinkMsg struct {
	Title      string `json:"title"`
	Text       string `json:"text"`
	MessageURL string `json:"messageUrl"`
	PicURL     string `json:"picUrl,omitempty"`
}

// Markdown is the payload of a markdown message.
type Markdown struct {
	Title string `json:"title"`
	Text  string `json:"text"`
}

// Button is a clickable button inside an ActionCard.
type Button struct {
	Title     string `json:"title"`
	ActionURL string `json:"actionURL"`
}

// ActionCard is the payload of an action-card message: either a single
// whole-card jump (SingleTitle/SingleURL) or independent Buttons.
type ActionCard struct {
	Title          string   `json:"title"`
	Text           string   `json:"text"`
	SingleTitle    string   `json:"singleTitle,omitempty"`
	SingleURL      string   `json:"singleURL,omitempty"`
	Buttons        []Button `json:"btns,omitempty"`
	BtnOrientation string   `json:"btnOrientation,omitempty"` // "0" = buttons stacked vertically, "1" = buttons laid out horizontally
}

// FeedLinkMsg is one entry of a FeedCard.
type FeedLinkMsg struct {
	Title      string `json:"title"`
	MessageURL string `json:"messageURL"`
	PicURL     string `json:"picURL"`
}

// FeedCard is the payload of a feed-card message.
type FeedCard struct {
	Links []FeedLinkMsg `json:"links"`
}

// At describes who gets @-mentioned by the message.
type At struct {
	AtMobiles []string `json:"atMobiles,omitempty"`
	IsAtAll   bool     `json:"isAtAll,omitempty"`
}

// Msg is the top-level robot message; exactly one payload field (matching
// MsgType) is expected to be non-nil.
type Msg struct {
	MsgType    string      `json:"msgtype"`
	Text       *TextMsg    `json:"text,omitempty"`
	Link       *LinkMsg    `json:"link,omitempty"`
	Markdown   *Markdown   `json:"markdown,omitempty"`
	ActionCard *ActionCard `json:"actionCard,omitempty"`
	FeedCard   *FeedCard   `json:"feedCard,omitempty"`
	At         *At         `json:"at,omitempty"`
}
// NewTextMsg builds a plain-text message.
func NewTextMsg(text string) *Msg {
	m := &Msg{MsgType: "text"}
	m.Text = &TextMsg{Content: text}
	return m
}
// NewLinkMsg builds a link message. The optional picurl (only the first value
// is used) sets the preview picture.
func NewLinkMsg(title, text, msgurl string, picurl ...string) *Msg {
	link := &LinkMsg{
		Title:      title,
		Text:       text,
		MessageURL: msgurl,
	}
	if len(picurl) > 0 {
		link.PicURL = picurl[0]
	}
	return &Msg{MsgType: "link", Link: link}
}
// NewMarkdownMsg builds a markdown message.
func NewMarkdownMsg(title, text string) *Msg {
	md := &Markdown{Title: title, Text: text}
	return &Msg{MsgType: "markdown", Markdown: md}
}
// NewActionCard builds a whole-card-jump ActionCard message: tapping the card
// opens single_url.
func NewActionCard(title, text, single_title, single_url string) *Msg {
	card := &ActionCard{
		Title:       title,
		Text:        text,
		SingleTitle: single_title,
		SingleURL:   single_url,
	}
	return &Msg{MsgType: "actionCard", ActionCard: card}
}
// NewCustomCard builds an ActionCard message with independently clickable
// buttons.
func NewCustomCard(title, text string, btns ...Button) *Msg {
	card := &ActionCard{
		Title:   title,
		Text:    text,
		Buttons: append([]Button{}, btns...),
	}
	return &Msg{MsgType: "actionCard", ActionCard: card}
}
// AddButton appends buttons to an independent-jump ActionCard; it is a no-op
// for non-ActionCard messages. Returns m for chaining.
func (m *Msg) AddButton(btns ...Button) *Msg {
	if card := m.ActionCard; card != nil {
		card.Buttons = append(card.Buttons, btns...)
	}
	return m
}
// VerticalButton lays the ActionCard buttons out vertically; no-op unless the
// message is an ActionCard. Returns m for chaining.
func (m *Msg) VerticalButton() *Msg {
	if card := m.ActionCard; card != nil {
		card.BtnOrientation = "0"
	}
	return m
}
// HorizontalButton lays the ActionCard buttons out horizontally; no-op unless
// the message is an ActionCard. Returns m for chaining.
func (m *Msg) HorizontalButton() *Msg {
	if card := m.ActionCard; card != nil {
		card.BtnOrientation = "1"
	}
	return m
}
// NewFeedCard builds a FeedCard message from the given links.
func NewFeedCard(links ...FeedLinkMsg) *Msg {
	feed := &FeedCard{Links: append([]FeedLinkMsg{}, links...)}
	return &Msg{MsgType: "feedCard", FeedCard: feed}
}
// AddLinks appends links to a FeedCard message; no-op otherwise. Returns m
// for chaining.
func (m *Msg) AddLinks(links ...FeedLinkMsg) *Msg {
	if feed := m.FeedCard; feed != nil {
		feed.Links = append(feed.Links, links...)
	}
	return m
}
// AtMobiles @-mentions the given phone numbers. Only supported for text and
// link messages (the official docs claim markdown support, but in practice it
// does not work there). Returns m for chaining.
func (m *Msg) AtMobiles(mobiles ...string) *Msg {
	at := &At{}
	at.AtMobiles = append(([]string)(nil), mobiles...)
	m.At = at
	return m
}
// AtAll @-mentions everyone. Supported for text, link and markdown messages.
// Returns m for chaining.
func (m *Msg) AtAll() *Msg {
	m.At = &At{IsAtAll: true}
	return m
}
// Body marshals the message into a JSON request body. The marshal error is
// deliberately discarded: Msg is built from strings, bools and slices only,
// so marshaling cannot fail in practice (nil is returned if it somehow does).
func (m *Msg) Body() []byte {
	body, _ := json.Marshal(m)
	return body
}
|
package deezer
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"github.com/go-resty/resty/v2"
"golang.org/x/oauth2"
)
const (
	// AuthURL is the URL to Deezer Accounts Service's OAuth2 endpoint.
	AuthURL = "https://connect.deezer.com/oauth/auth.php"
	// TokenURL is the URL to the Deezer Accounts Service's OAuth2
	// token endpoint.
	TokenURL = "https://connect.deezer.com/oauth/access_token.php"
)
// Scopes let you specify exactly which types of data your application wants to access.
// The set of scopes you pass in your authentication request determines what access
// permissions the user is asked to grant.
const (
	// ScopeBasic Access users basic information
	// Incl. name, firstname, profile picture only.
	ScopeBasic = "basic_access"
	// ScopeEmail Get the user's email
	ScopeEmail = "email"
	// ScopeOfflineAccess Access user data any time
	// Application may access user data at any time
	ScopeOfflineAccess = "offline_access"
	// ScopeManageLibrary Manage users' library
	// Add/rename a playlist. Add/order songs in the playlist.
	ScopeManageLibrary = "manage_library"
)
// Authenticator provides convenience functions for implementing the OAuth2 flow.
// You should always use `NewAuthenticator` to make them.
//
// Example:
//
//     a := deezer.NewAuthenticator(redirectURL, deezer.ScopeUserLibaryRead, deezer.ScopeUserFollowRead)
//     // direct user to Deezer to log in
//     http.Redirect(w, r, a.AuthURL("state-string"), http.StatusFound)
//
//     // then, in redirect handler:
//     token, err := a.Token(state, r)
//     client := a.NewClient(token)
//
type Authenticator struct {
	config  *oauth2.Config
	context context.Context // carries the oauth2.HTTPClient used for exchanges
}
// NewAuthenticator creates an authenticator which is used to implement the
// OAuth2 authorization flow. The redirectURL must exactly match one of the
// URLs specified in your Deezer developer account.
//
// By default, NewAuthenticator pulls your client ID and secret key from the
// DEEZER_ID and DEEZER_SECRET environment variables. If you'd like to provide
// them from some other source, you can call `SetAuthInfo(id, key)` on the
// returned authenticator.
// (The original comment said SPOTIFY_ID/SPOTIFY_SECRET, but the code reads
// DEEZER_ID/DEEZER_SECRET.)
func NewAuthenticator(redirectURL string, scopes ...string) Authenticator {
	cfg := &oauth2.Config{
		ClientID:     os.Getenv("DEEZER_ID"),
		ClientSecret: os.Getenv("DEEZER_SECRET"),
		RedirectURL:  redirectURL,
		Scopes:       scopes,
		Endpoint: oauth2.Endpoint{
			AuthURL:  AuthURL,
			TokenURL: TokenURL,
		},
	}
	// Stash a dedicated HTTP client in the context so oauth2 uses it for
	// all token exchanges.
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, &http.Client{})
	return Authenticator{
		config:  cfg,
		context: ctx,
	}
}
// SetAuthInfo overwrites the client ID and secret key used by the authenticator.
// You can use this if you don't want to store this information in environment variables.
func (a *Authenticator) SetAuthInfo(clientID, secretKey string) {
	a.config.ClientID = clientID
	a.config.ClientSecret = secretKey
}
// AuthURL returns a URL to the Deezer Accounts Service's OAuth2 endpoint.
//
// State is a token to protect the user from CSRF attacks. You should pass the
// same state to `Token`, where it will be validated. For more info, refer to
// http://tools.ietf.org/html/rfc6749#section-10.12.
func (a Authenticator) AuthURL(state string) string {
	return a.config.AuthCodeURL(state)
}
// AuthURLWithDialog returns the same URL as AuthURL, but sets show_dialog to true,
// forcing the approval dialog to be shown even if the user already authorized.
func (a Authenticator) AuthURLWithDialog(state string) string {
	return a.config.AuthCodeURL(state, oauth2.SetAuthURLParam("show_dialog", "true"))
}
// Token pulls an authorization code from an HTTP request and attempts to exchange
// it for an access token. The standard use case is to call Token from the handler
// that handles requests to your application's redirect URL.
//
// Fixes over the original: the leftover debug fmt.Printf that printed the
// authorization code (a secret) to stdout is gone, and the exchange now uses
// the authenticator's own context (with its configured HTTP client) instead
// of the deprecated oauth2.NoContext — consistent with Exchange.
func (a Authenticator) Token(state string, r *http.Request) (*oauth2.Token, error) {
	values := r.URL.Query()
	if e := values.Get("error"); e != "" {
		return nil, fmt.Errorf("deezer: auth failed - %s", e)
	}
	code := values.Get("code")
	if code == "" {
		return nil, errors.New("deezer: didn't get access code")
	}
	if actualState := values.Get("state"); actualState != state {
		return nil, errors.New("deezer: redirect state parameter doesn't match")
	}
	return a.config.Exchange(a.context, code)
}
// Exchange is like Token, except it allows you to manually specify the access
// code instead of pulling it out of an HTTP request.
func (a Authenticator) Exchange(code string) (*oauth2.Token, error) {
	return a.config.Exchange(a.context, code)
}
// NewClient creates a Client that will use the specified access token for its API requests.
// The returned client wraps an auto-refreshing oauth2 HTTP client in resty.
func (a Authenticator) NewClient(token *oauth2.Token) Client {
	client := a.config.Client(a.context, token)
	restyClient := resty.NewWithClient(client)
	restyClient.SetAuthToken(token.AccessToken)
	return Client{
		http:    restyClient,
		token:   token.AccessToken,
		baseURL: baseAddress,
	}
}
// Token gets the client's current token, as held (and possibly refreshed) by
// the underlying oauth2 transport.
func (c *Client) Token() (*oauth2.Token, error) {
	transport, ok := c.http.GetClient().Transport.(*oauth2.Transport)
	if !ok {
		return nil, errors.New("deezer: oauth2 transport type not correct")
	}
	return transport.Source.Token()
}
|
package test
import (
"github.com/agiledragon/trans-dsl"
"github.com/agiledragon/trans-dsl/test/context"
"github.com/agiledragon/trans-dsl/test/context/action"
. "github.com/smartystreets/goconvey/convey"
"testing"
"time"
)
var eventId = "assign cmd"
// newWaitTrans builds a transaction whose second fragment waits (up to the
// configured timeout) for eventId before continuing with the remaining stubs.
func newWaitTrans() *transdsl.Transaction {
	trans := &transdsl.Transaction{
		Fragments: []transdsl.Fragment{
			new(action.StubTransferMoney),
			&transdsl.Wait{
				EventId:  eventId,
				Timeout:  100, // NOTE(review): unit depends on transdsl.Wait — presumably milliseconds; confirm
				Fragment: new(action.StubAssignCmd),
			},
			new(action.StubAttachSomething),
			new(action.StubActivateSomething),
		},
	}
	return trans
}
// TransObj bundles a transaction with its per-run info so handleEvent can
// deliver an event to the right transaction.
type TransObj struct {
	trans     *transdsl.Transaction
	transInfo *transdsl.TransInfo
}

// Shared lookup state used by handleEvent to find the transaction under test:
// business key -> transaction id -> TransObj.
var transIds map[string]string
var transObjs map[string]TransObj
var key string
// handleEvent looks up the transaction registered under the global key,
// stores the event body in its app info, then (after a short delay so the
// transaction has reached its Wait fragment) delivers the event to it.
func handleEvent(eventId string, eventBody []byte) {
	transId := transIds[key]
	transObj := transObjs[transId]
	trans := transObj.trans
	transInfo := transObj.transInfo
	stubInfo := transInfo.AppInfo.(*context.StubInfo)
	stubInfo.EventBody = eventBody
	// Give trans.Start time to block on the Wait fragment before signalling.
	<-time.After(50 * time.Millisecond)
	trans.HandleEvent(eventId, transInfo)
}
// TestWaitTrans covers the Wait fragment: the "succ" case delivers the event
// asynchronously so the transaction completes, the "timeout" case delivers
// nothing so Start fails with ErrTimeout.
func TestWaitTrans(t *testing.T) {
	trans := newWaitTrans()
	// Register the transaction in the globals that handleEvent reads.
	transIds = make(map[string]string)
	key = "business id"
	transId := "123456"
	transIds[key] = transId
	transObjs = make(map[string]TransObj)
	transInfo := &transdsl.TransInfo{
		Ch: make(chan struct{}),
		AppInfo: &context.StubInfo{
			TransId: "",
			X:       "info",
			Y:       1,
		},
	}
	transObjs[transId] = TransObj{trans: trans, transInfo: transInfo}

	Convey("TestWaitTrans", t, func() {
		Convey("wait succ", func() {
			// Deliver the awaited event from another goroutine while Start blocks.
			go handleEvent(eventId, nil)
			err := trans.Start(transInfo)
			So(err, ShouldEqual, nil)
			So(transInfo.AppInfo.(*context.StubInfo).Y, ShouldEqual, 8)
		})

		Convey("wait timeout", func() {
			// Fresh transInfo that nobody signals: Start must time out.
			transInfo := &transdsl.TransInfo{
				Ch: make(chan struct{}),
				AppInfo: &context.StubInfo{
					X: "info",
					Y: 1,
				},
			}
			err := trans.Start(transInfo)
			So(err.Error(), ShouldEqual, transdsl.ErrTimeout.Error())
		})
	})
}
|
package commonTestUtils
import (
"context"
"github.com/go-logr/logr"
hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sync"
)
// MockEvent is the recorded form of an emitted event (type, reason, message).
type MockEvent struct {
	EventType string
	Reason    string
	Msg       string
}
// EventEmitterMock records emitted events for later assertions in tests.
// lock is a pointer so value copies of the mock still share the same mutex.
type EventEmitterMock struct {
	storedEvents []MockEvent
	lock         *sync.Mutex
}
// NewEventEmitterMock constructs an empty, ready-to-use event emitter mock.
func NewEventEmitterMock() *EventEmitterMock {
	eem := &EventEmitterMock{lock: &sync.Mutex{}}
	eem.storedEvents = make([]MockEvent, 0)
	return eem
}
// Reset discards all recorded events.
func (eem *EventEmitterMock) Reset() {
	eem.lock.Lock()
	defer eem.lock.Unlock()
	eem.storedEvents = make([]MockEvent, 0)
}
// Init satisfies the event emitter interface; intentionally a no-op.
func (EventEmitterMock) Init(_ context.Context, _ manager.Manager, _ hcoutil.ClusterInfo, _ logr.Logger) {
	/* not implemented; mock only */
}
// EmitEvent records the event details (type, reason, message) for later
// inspection via CheckEvents; the runtime.Object argument is ignored.
func (eem *EventEmitterMock) EmitEvent(_ runtime.Object, eventType, reason, msg string) {
	eem.lock.Lock()
	defer eem.lock.Unlock()
	eem.storedEvents = append(eem.storedEvents, MockEvent{
		EventType: eventType,
		Reason:    reason,
		Msg:       msg,
	})
}
// UpdateClient satisfies the event emitter interface; intentionally a no-op.
func (EventEmitterMock) UpdateClient(_ context.Context, _ client.Reader, _ logr.Logger) {
	/* not implemented; mock only */
}
// CheckEvents reports whether every expected event was recorded (order and
// extra events are ignored). The value receiver is safe here only because
// lock is a *sync.Mutex shared by all copies.
func (eem EventEmitterMock) CheckEvents(expectedEvents []MockEvent) bool {
	eem.lock.Lock()
	defer eem.lock.Unlock()
	for _, expectedEvent := range expectedEvents {
		if !eventInArray(eem.storedEvents, expectedEvent) {
			return false
		}
	}
	return true
}
// eventInArray reports whether event occurs in eventList.
func eventInArray(eventList []MockEvent, event MockEvent) bool {
	for _, stored := range eventList {
		if stored == event {
			return true
		}
	}
	return false
}
|
package main
import (
"fmt"
"time"
)
// main demonstrates the time package: reading the current time, timestamps,
// duration arithmetic, formatting with the reference time, and parsing.
func main() {
	now := time.Now() // get the current time
	fmt.Printf("current time:%v\n", now)
	year := now.Year()
	month := now.Month()
	day := now.Day()
	minute := now.Minute()
	hour := now.Hour()
	second := now.Second()
	// NOTE(review): "%0v" on day is almost certainly meant to be "%02d" as in
	// the next line — it prints without zero padding.
	fmt.Printf("%v-%v-%0v %v:%v:%v\n", year, month, day, hour, minute, second)
	fmt.Printf("%d-%02d-%02d %02d:%02d:%02d\n", year, month, day, hour, minute, second)
	fmt.Println(now.Unix())     // Unix timestamp (seconds)
	fmt.Println(now.UnixNano()) // Unix timestamp (nanoseconds)
	timetmp := time.Unix(1610523284, 0) // convert a seconds timestamp back into a time.Time
	fmt.Println(timetmp)
	yestd := now.Add(-24 * time.Hour) // 24 hours ago
	fmt.Println(yestd)
	dd := now.Sub(yestd) // now - yestd
	fmt.Println(dd)      // 24h0m0s
	// tmpt := time.Tick(2 * time.Second) // fires every two seconds
	// for i := range tmpt {
	// 	fmt.Println(i)
	// }
	// 24-hour clock formatting (reference time 2006-01-02 15:04:05)
	fmt.Println(now.Format("2006-01-02 15:04:05"))
	fmt.Println(now.Format("2006-01-02 15:04:05 Jan Mon"))
	// 12-hour clock formatting
	fmt.Println(now.Format("2006-01-02 03:04:05.000 PM"))
	// parse a string into a time.Time using the matching layout
	timeobj, err := time.Parse("2006-01-02", "2021-01-13")
	if err != nil {
		fmt.Printf("err:%v\n", err)
		return
	}
	fmt.Println(timeobj)
	fmt.Println(timeobj.Unix())
}
|
package report
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
)
// TestNewToken checks the packing of (id, key) into an upload token and that
// out-of-range values (id >= 2^55, key >= 2^9) panic.
func TestNewToken(t *testing.T) {
	type testCase struct {
		id    uint64
		key   uint16
		token uint64
	}
	cases := []testCase{
		{0, 0, 0},
		{1, 1, 0x201},
		// max id (2^55-1) and max key (2^9-1) pack to all-ones
		{0x7FFFFFFFFFFFFF, 0x1FF, 0xFFFFFFFFFFFFFFFF},
	}
	for _, c := range cases {
		assert.Equal(t, UploadToken{token: c.token}, newUploadToken(c.id, c.key))
	}

	panicCases := []testCase{
		// id is 2^55
		{0x80000000000000, 0, 0},
		// key is 2^9
		{0, 0x200, 0},
	}
	for _, c := range panicCases {
		assert.PanicsWithValue(t, "newToken: invalid key or id", func() { newUploadToken(c.id, c.key) })
	}
}
// TestTokenFormatParse checks round-tripping of tokens through both the
// string form and JSON for the first 2^16 token values.
func TestTokenFormatParse(t *testing.T) {
	// For each of the first 2^16 token values, ensure that parsing is the
	// inverse of formatting.
	for i := uint64(0); i < 1<<16; i++ {
		// Test that formatting and parsing are inverses.
		t0 := UploadToken{token: i}
		s := t0.String()
		t1, err := parseUploadToken(s)
		assert.Nil(t, err)
		assert.Equal(t, t0, t1)

		// Test that JSON marshaling and unmarshaling are inverses.
		bytes, err := json.Marshal(t0)
		assert.Nil(t, err)
		t1 = UploadToken{}
		err = json.Unmarshal(bytes, &t1)
		assert.Nil(t, err)
		assert.Equal(t, t0, t1)
	}
}
// tokenTestCase pairs a token with its canonical string form; the cases in
// tokenTestCases are valid in both directions (format and parse).
type tokenTestCase struct {
	token  UploadToken
	format string
}

var tokenTestCases = []tokenTestCase{
	{UploadToken{token: 0}, "09"},
	{UploadToken{token: 1}, "19"},
	{UploadToken{token: 8}, "109"},
	{UploadToken{token: 64}, "100-9"},
	{UploadToken{token: 512}, "100-09"},
	{UploadToken{token: 32768}, "100-000-9"},
	{UploadToken{token: 1<<64 - 1}, "177-777-777-777-777-777-777-79"},
}
// TestTokenFormat checks the canonical string form of each shared test case.
func TestTokenFormat(t *testing.T) {
	for _, c := range tokenTestCases {
		assert.Equal(t, c.format, c.token.String())
	}
}
// TestTokenParse checks parsing of canonical and non-canonical (dashes in odd
// places, no dashes) token strings, plus rejection of malformed input.
func TestTokenParse(t *testing.T) {
	// Unlike tokenTestCases, these test cases are only valid in the parsing
	// direction.
	cases := []tokenTestCase{
		{UploadToken{token: 0}, "--0--9--"},
		{UploadToken{token: 1<<64 - 1}, "17777777777777777777779"},
	}
	for _, c := range append(cases, tokenTestCases...) {
		tok, err := parseUploadToken(c.format)
		assert.Nil(t, err)
		assert.Equal(t, c.token, tok)
	}

	type errorTestCase struct {
		format string
		err    error
	}
	errCases := []errorTestCase{
		{"9", tokenParseError},
		{"", tokenParseError},
	}
	for _, c := range errCases {
		tok, err := parseUploadToken(c.format)
		assert.Equal(t, tok, UploadToken{token: 0})
		assert.Equal(t, err, c.err)
	}
}
|
package acme
import (
"encoding/json"
)
// Gopher is an acme API resource.
type Gopher struct {
	ID          json.Number `json:"gopher_id"`
	Name        string      `json:"name"`
	Description string      `json:"description"`
}

// Thing is an acme API resource owned by a Gopher (via GopherID).
type Thing struct {
	ID          json.Number `json:"thing_id"`
	GopherID    json.Number `json:"gopher_id"`
	Name        string      `json:"name"`
	Description string      `json:"description"`
}
|
package controllers
// initializeRoutes registers all /api/v1 endpoints on the server's router:
// book CRUD, genre/author listings, search endpoints, and DB seeding.
func (s *Server) initializeRoutes() {
	v1 := s.Router.Group("/api/v1")
	{
		// Books routes
		v1.POST("/books", s.PostBook)
		v1.GET("/books", s.GetBooks)
		v1.GET("/books/:id", s.GetBookById)
		v1.PUT("/books/:id", s.UpdateBook)
		v1.DELETE("/books/:id", s.DeleteBook)

		// Genres routes
		v1.GET("/genres", s.GetGenres)

		// Authors routes
		v1.GET("/authors", s.GetAuthors)

		// Search routes
		v1.GET("/search/publisher/:publisher", s.GetBooksByPublisher)
		v1.GET("/search/years/:years", s.GetBooksByYear)
		v1.GET("/search/publisher/:publisher/years/:years", s.GetBooksByPublisherAndYear)
		v1.GET("/search/keyword/:keyword", s.GetBooksByKeyword)
		v1.GET("/search/title/:title", s.GetBooksByTitle)
		v1.GET("/search/author/:author", s.GetBooksByAuthor)

		// Seed DB
		v1.POST("/seed", s.Seed)
	}
}
|
package controller
import (
"encoding/json"
"errors"
"github.com/bearname/videohost/internal/common/infrarstructure/transport"
"github.com/gorilla/mux"
log "github.com/sirupsen/logrus"
"io"
"net/http"
)
var (
	// ErrBadRequest signals a malformed client request.
	ErrBadRequest = errors.New("bad request")
	// ErrRouteNotFound signals that no route matched the request.
	ErrRouteNotFound = errors.New("route not found")
)

// BaseController provides shared request/response helpers for HTTP controllers.
type BaseController struct {
}
// ParseMuxVariable extracts the given mux route variables from the request,
// in the order of keys. It fails naming the first missing key.
func (c *BaseController) ParseMuxVariable(request *http.Request, keys []string) ([]string, error) {
	vars := mux.Vars(request)
	var result []string
	for _, key := range keys {
		value, ok := vars[key]
		if !ok {
			return nil, errors.New(key + " not present")
		}
		result = append(result, value)
	}
	return result, nil
}
// AllowCorsRequest sets permissive CORS headers on the response.
// NOTE(review): the *http.ResponseWriter pointer is unnecessary (the interface
// is already a reference), but the signature is kept for caller compatibility.
func (c *BaseController) AllowCorsRequest(writer *http.ResponseWriter) {
	(*writer).Header().Set("Access-Control-Allow-Origin", "*")
	(*writer).Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
	(*writer).Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization")
}
// WriteJsonResponse marshals data to JSON and writes it with an
// application/json content type; marshal or write failures answer with a 500.
func (c *BaseController) WriteJsonResponse(writer http.ResponseWriter, data interface{}) {
	writer.Header().Set("Content-Type", "application/json")
	payload, err := json.Marshal(data)
	if err != nil {
		http.Error(writer, err.Error(), http.StatusInternalServerError)
		return
	}
	if _, err := writer.Write(payload); err != nil {
		http.Error(writer, err.Error(), http.StatusInternalServerError)
	}
}
// WriteResponse writes statusCode plus a JSON transport.Response body carrying
// the success flag and message.
func (c *BaseController) WriteResponse(w http.ResponseWriter, statusCode int, success bool, message string) {
	w.WriteHeader(statusCode)
	response := transport.Response{
		Success: success,
		Message: message,
	}
	c.WriteJsonResponse(w, response)
}
// WriteError logs err and replies with the given transport error's status and
// JSON body. The Encode error is deliberately discarded: headers are already
// sent, so nothing useful can be done with it.
func (c *BaseController) WriteError(w http.ResponseWriter, err error, responseError TransportError) {
	log.Error(err.Error())
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.WriteHeader(responseError.Status)
	_ = json.NewEncoder(w).Encode(responseError.Response)
}
// WriteResponseData marshals data to JSON and writes it as an HTTP 200
// response with an application/json content type. Marshal failures produce a
// 500; write failures are only logged since the headers are already sent.
func (c *BaseController) WriteResponseData(w http.ResponseWriter, data interface{}) {
	payload, err := json.Marshal(data) // renamed from "bytes", which shadowed the stdlib package name
	if err != nil {
		log.Error(err.Error())
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json; charset=UTF-8")
	w.WriteHeader(http.StatusOK)
	if _, err = io.WriteString(w, string(payload)); err != nil {
		log.WithField("err", err).Error("write response error")
	}
}
|
package memcached
import (
"github.com/Qihoo360/poseidon/service/meta/store"
"github.com/bradfitz/gomemcache/memcache"
"github.com/golang/glog"
)
// Memcached is a store.Store implementation backed by a memcached server.
type Memcached struct {
	conn   *memcache.Client
	config store.Config // Addr is the memcached server address
}
// NewMemcachedStore builds a memcached-backed store from the given config.
func NewMemcachedStore(c store.Config) (store.Store, error) {
	rc := &Memcached{config: c}
	if err := rc.connectInit(); err != nil {
		return nil, err
	}
	return rc, nil
}
// Get fetches a single key. A cache miss yields an empty result with no
// error; any other memcache failure is reported via result.Err.
func (rc *Memcached) Get(key string) (result store.GetResult) {
	item, err := rc.conn.Get(key)
	switch {
	case err == nil:
		result.Value = string(item.Value)
	case err != memcache.ErrCacheMiss:
		result.Err = err
	}
	return result
}
// MultiGet fetches several keys at once. On success the returned map holds
// one entry per key found by the server. If GetMulti itself fails, every
// requested key maps to that error (a cache miss maps to an empty result).
func (rc *Memcached) MultiGet(keys []string) map[string] /*the-key*/ store.GetResult {
	rv := make(map[string] /*the-key*/ store.GetResult)
	mv, err := rc.conn.GetMulti(keys)
	if err != nil {
		glog.Errorf("Memcached.MultiGet ERROR : %v", err.Error())
		for _, key := range keys {
			var r store.GetResult
			if err != memcache.ErrCacheMiss {
				r.Err = err
			}
			rv[key] = r
		}
		return rv
	}
	for _, item := range mv {
		var r store.GetResult
		r.Value = string(item.Value)
		glog.Infof("Memcached.MultiGet key=[%v] value=[%v]", item.Key, r.Value)
		rv[item.Key] = r
	}
	return rv
}
// Set stores value under key (no expiry is specified).
func (rc *Memcached) Set(key, value string) error {
	item := memcache.Item{Key: key, Value: []byte(value)}
	return rc.conn.Set(&item)
}
// Delete removes key from memcached, propagating the client error
// (including memcache.ErrCacheMiss for absent keys) unchanged.
func (rc *Memcached) Delete(key string) error {
	return rc.conn.Delete(key)
}
// connectInit creates the memcache client for the configured address.
// memcache.New only builds the client (connections are established
// lazily), so the returned error is always nil.
func (rc *Memcached) connectInit() error {
	rc.conn = memcache.New(rc.config.Addr)
	return nil
}
// init registers this backend under the MEMCACHED key so it can be
// constructed through the generic store factory.
func init() {
	store.Register(store.MEMCACHED, NewMemcachedStore)
}
|
// Copyright (c) 2018 Benjamin Borbe All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package version_test
import (
"context"
"github.com/bborbe/kafka-dockerhub-version-collector/avro"
"github.com/bborbe/kafka-dockerhub-version-collector/mocks"
"github.com/bborbe/kafka-dockerhub-version-collector/version"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Ginkgo suite for version.Syncer: it verifies that every version the
// Fetcher emits is forwarded to the Sender.
var _ = Describe("Version Sender", func() {
	var syncer version.Syncer
	var sender *mocks.Sender
	var fetcher *mocks.Fetcher
	// sendCounter counts how many versions the fake sender consumed.
	var sendCounter int
	BeforeEach(func() {
		sendCounter = 0
		sender = &mocks.Sender{}
		// The stub drains the channel until it is closed (or the context
		// is cancelled), counting every received version.
		sender.SendStub = func(ctx context.Context, availables <-chan avro.ApplicationVersionAvailable) error {
			for {
				select {
				case <-ctx.Done():
					return nil
				case _, ok := <-availables:
					if !ok {
						return nil
					}
					sendCounter++
				}
			}
		}
		fetcher = &mocks.Fetcher{}
		syncer = version.NewSyncer(
			fetcher,
			sender,
		)
	})
	It("returns without error", func() {
		err := syncer.Sync(context.Background())
		Expect(err).NotTo(HaveOccurred())
	})
	// Description fixed: it previously read "alls send".
	It("calls send", func() {
		fetcher.FetchStub = func(ctx context.Context, availables chan<- avro.ApplicationVersionAvailable) error {
			select {
			case <-ctx.Done():
				return nil
			case availables <- *avro.NewApplicationVersionAvailable():
				return nil
			}
		}
		err := syncer.Sync(context.Background())
		Expect(err).NotTo(HaveOccurred())
		Expect(sendCounter).To(Equal(1))
	})
	It("sends all fetched versions", func() {
		counter := 10
		fetcher.FetchStub = func(ctx context.Context, availables chan<- avro.ApplicationVersionAvailable) error {
			for i := 0; i < counter; i++ {
				select {
				case <-ctx.Done():
					return nil
				case availables <- *avro.NewApplicationVersionAvailable():
				}
			}
			return nil
		}
		err := syncer.Sync(context.Background())
		Expect(err).NotTo(HaveOccurred())
		Expect(sendCounter).To(Equal(counter))
	})
})
|
package main
import (
"fmt"
"github.com/radovskyb/watcher"
"os"
"os/exec"
"path/filepath"
"syscall"
"time"
)
var (
	// cmd holds the currently running `go run` child process (if any)
	// so a later file change can terminate it before restarting.
	cmd *exec.Cmd
)
// run terminates the previously started process group (if any) and
// then starts `go <args...>` in a fresh process group, wiring the
// child's stdio to this process. The returned error comes from Start.
func run(args []string) error {
	// Guard cmd.Process as well: if the previous Start failed, cmd is
	// non-nil but cmd.Process is nil, and dereferencing it would panic.
	if cmd != nil && cmd.Process != nil {
		// Kill the whole process group so children spawned by `go run`
		// (the compiled binary itself) are terminated too.
		if pgid, err := syscall.Getpgid(cmd.Process.Pid); err == nil {
			// Best-effort: the group may already be gone.
			_ = syscall.Kill(-pgid, syscall.SIGTERM)
		}
		// Reap the old child so it does not linger as a zombie.
		_ = cmd.Wait()
	}
	cmd = exec.Command("go", args...)
	// Run the child in its own process group so it can be signalled
	// as a unit above.
	cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.Stdin = os.Stdin
	return cmd.Start()
}
// main watches the current directory tree for .go file changes and
// (re)runs `go run <args>` whenever one occurs.
func main() {
	args := os.Args[1:]
	if len(args) < 1 {
		fmt.Printf("[GOW] wrong number of args: usage: gow <file> <args>\n")
		os.Exit(1)
	}
	runArgs := append([]string{"run"}, args...)
	// Initial start of the watched program.
	err := run(runArgs)
	if err != nil {
		fmt.Printf("[GOW] err starting: %v\n", err)
	}
	w := watcher.New()
	// Only react to operations that can change source content or layout.
	w.FilterOps(watcher.Write, watcher.Create, watcher.Move, watcher.Remove, watcher.Rename)
	// Event loop: restart the program whenever a .go file changes.
	go func() {
		for {
			select {
			case event := <-w.Event:
				if filepath.Ext(event.Path) == ".go" {
					fmt.Printf("[GOW] %s changed, restarting...\n", filepath.Base(event.Path))
					err := run(runArgs)
					if err != nil {
						fmt.Printf("[GOW] err restarting: %v\n", err)
					}
				}
			case err := <-w.Error:
				fmt.Printf("[GOW] err watching: %v\n", err)
			case <-w.Closed:
				return
			}
		}
	}()
	// The watch root must be registered before the polling loop starts.
	if err := w.AddRecursive("."); err != nil {
		fmt.Printf("[GOW] err watching: %v\n", err)
		os.Exit(1)
	}
	// Poll for changes every 100ms; Start blocks until the watcher closes.
	if err := w.Start(time.Millisecond * 100); err != nil {
		fmt.Printf("[GOW] err watching: %v\n", err)
		os.Exit(1)
	}
}
|
package env
import (
"fmt"
"os"
)
// Lookup returns the value of the environment variable named key.
// It panics when the variable is not present in the environment at all
// (an empty value is returned as-is).
func Lookup(key string) string {
	if value, ok := os.LookupEnv(key); ok {
		return value
	}
	panic(fmt.Sprintf("Environment variable \"%s\" not set", key))
}
|
// Copyright 2022 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package addindextest
import (
"testing"
)
// initCompCtx creates the shared suite context and applies the
// compatibility-test parameters used by the add-index tests below.
func initCompCtx(t *testing.T) *suiteContext {
	ctx := initTest(t)
	initCompCtxParams(ctx)
	return ctx
}
// TestMultiSchemaChangeCreateNonUniqueIndex adds non-unique indexes in
// multi-schema-change mode, one DDL per group of column IDs.
func TestMultiSchemaChangeCreateNonUniqueIndex(t *testing.T) {
	// Each inner slice is one batch of column IDs indexed together.
	var colIDs = [][]int{
		{1, 4, 7},
		{2, 5, 8},
		{3, 6, 9},
	}
	ctx := initCompCtx(t)
	ctx.CompCtx.isMultiSchemaChange = true
	testOneColFrame(ctx, colIDs, addIndexNonUnique)
}
// TestMultiSchemaChangeCreateUniqueIndex adds unique indexes in
// multi-schema-change mode, one DDL per group of column IDs.
func TestMultiSchemaChangeCreateUniqueIndex(t *testing.T) {
	// Each inner slice is one batch of column IDs indexed together.
	var colIDs = [][]int{
		{1, 6, 8},
		{2, 19},
		{11},
	}
	ctx := initCompCtx(t)
	ctx.CompCtx.isMultiSchemaChange = true
	testOneColFrame(ctx, colIDs, addIndexUnique)
}
// TestMultiSchemaChangeCreatePrimaryKey adds a primary key (on the
// frame's column 0) in multi-schema-change mode.
func TestMultiSchemaChangeCreatePrimaryKey(t *testing.T) {
	ctx := initCompCtx(t)
	ctx.CompCtx.isMultiSchemaChange = true
	testOneIndexFrame(ctx, 0, addIndexPK)
}
// TestMultiSchemaChangeCreateGenColIndex adds an index on a generated
// column (frame column 29) in multi-schema-change mode.
func TestMultiSchemaChangeCreateGenColIndex(t *testing.T) {
	ctx := initCompCtx(t)
	ctx.CompCtx.isMultiSchemaChange = true
	testOneIndexFrame(ctx, 29, addIndexGenCol)
}
// TestMultiSchemaChangeMultiColsIndex adds composite (two-column)
// indexes in multi-schema-change mode, pairing coliIDs[k] with
// coljIDs[k].
func TestMultiSchemaChangeMultiColsIndex(t *testing.T) {
	// First columns of each composite index.
	var coliIDs = [][]int{
		{1},
		{2},
		{3},
	}
	// Second columns of each composite index.
	var coljIDs = [][]int{
		{16},
		{14},
		{18},
	}
	ctx := initCompCtx(t)
	ctx.CompCtx.isMultiSchemaChange = true
	testTwoColsFrame(ctx, coliIDs, coljIDs, addIndexMultiCols)
}
|
package error
import "net/http"
// Err is an application-level error carrying a business error code
// alongside a human-readable message.
type Err struct {
	Code int    // application-specific error code (e.g. 1001)
	Msg  string // message returned by Error()
}

// Error implements the error interface; only the message is returned,
// the code is translated separately via HttpStatusCode.
func (e *Err) Error() string {
	return e.Msg
}
// statusCode maps application error codes to HTTP status codes.
var statusCode = map[int]int{
	1001: http.StatusBadGateway,
}

// HttpStatusCode translates an application error code into the HTTP
// status code to respond with; codes without a mapping fall back to
// 200 OK.
func HttpStatusCode(code int) int {
	if v, ok := statusCode[code]; ok {
		return v
	}
	return http.StatusOK
}
|
/* Copyright (c) 2016 Jason Ish
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package pcap
import (
"bytes"
"time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/google/gopacket/pcapgo"
)
// CreatePcap builds a complete single-packet PCAP file in memory.
//
// The returned buffer contains a PCAP file header (snaplen 0xffff,
// the given link type) followed by one packet record stamped with
// timestamp.
func CreatePcap(timestamp time.Time, packet []byte, linktype layers.LinkType) ([]byte, error) {
	var buf bytes.Buffer
	writer := pcapgo.NewWriter(&buf)
	if err := writer.WriteFileHeader(0xffff, linktype); err != nil {
		return nil, err
	}
	info := gopacket.CaptureInfo{
		Timestamp:     timestamp,
		CaptureLength: len(packet),
		Length:        len(packet),
	}
	if err := writer.WritePacket(info, packet); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
|
// Copyright © 2020 Banzai Cloud
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package istioingress
import (
"fmt"
istioOperatorApi "github.com/banzaicloud/istio-operator/pkg/apis/istio/v1beta1"
"github.com/go-logr/logr"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/banzaicloud/kafka-operator/api/v1beta1"
"github.com/banzaicloud/kafka-operator/pkg/resources/templates"
"github.com/banzaicloud/kafka-operator/pkg/util"
istioingressutils "github.com/banzaicloud/kafka-operator/pkg/util/istioingress"
)
// meshgateway builds the istio-operator MeshGateway resource that
// exposes the Kafka cluster's external listener through an Istio
// ingress gateway backed by a LoadBalancer service.
func (r *Reconciler) meshgateway(log logr.Logger, externalListenerConfig v1beta1.ExternalListenerConfig) runtime.Object {
	mgateway := &istioOperatorApi.MeshGateway{
		ObjectMeta: templates.ObjectMeta(fmt.Sprintf(istioingressutils.MeshGatewayNameTemplate, r.KafkaCluster.Name), labelsForIstioIngress(r.KafkaCluster.Name, externalListenerConfig.Name), r.KafkaCluster),
		Spec: istioOperatorApi.MeshGatewaySpec{
			MeshGatewayConfiguration: istioOperatorApi.MeshGatewayConfiguration{
				Labels:             labelsForIstioIngress(r.KafkaCluster.Name, externalListenerConfig.Name),
				ServiceAnnotations: externalListenerConfig.GetServiceAnnotations(),
				// Replicas are pinned: min == max == configured count, so
				// the HPA cannot actually scale the gateway.
				BaseK8sResourceConfigurationWithHPAWithoutImage: istioOperatorApi.BaseK8sResourceConfigurationWithHPAWithoutImage{
					ReplicaCount: util.Int32Pointer(r.KafkaCluster.Spec.IstioIngressConfig.GetReplicas()),
					MinReplicas:  util.Int32Pointer(r.KafkaCluster.Spec.IstioIngressConfig.GetReplicas()),
					MaxReplicas:  util.Int32Pointer(r.KafkaCluster.Spec.IstioIngressConfig.GetReplicas()),
					BaseK8sResourceConfiguration: istioOperatorApi.BaseK8sResourceConfiguration{
						Resources:      r.KafkaCluster.Spec.IstioIngressConfig.GetResources(),
						NodeSelector:   r.KafkaCluster.Spec.IstioIngressConfig.NodeSelector,
						Tolerations:    r.KafkaCluster.Spec.IstioIngressConfig.Tolerations,
						PodAnnotations: r.KafkaCluster.Spec.IstioIngressConfig.Annotations,
					},
				},
				ServiceType: corev1.ServiceTypeLoadBalancer,
			},
			// One port per broker (see generateExternalPorts).
			Ports: generateExternalPorts(r.KafkaCluster.Spec, externalListenerConfig),
			Type:  istioOperatorApi.GatewayTypeIngress,
		},
	}
	// Without a headless service, also expose an "all brokers" port that
	// forwards to the first internal listener's container port.
	if !r.KafkaCluster.Spec.HeadlessServiceEnabled && len(r.KafkaCluster.Spec.ListenersConfig.ExternalListeners) > 0 {
		mgateway.Spec.Ports = append(mgateway.Spec.Ports, corev1.ServicePort{
			Name:       allBrokers,
			TargetPort: intstr.FromInt(int(r.KafkaCluster.Spec.ListenersConfig.InternalListeners[0].ContainerPort)),
			Port:       r.KafkaCluster.Spec.ListenersConfig.InternalListeners[0].ContainerPort,
		})
	}
	return mgateway
}
// generateExternalPorts returns one ServicePort per broker for the
// given external listener: port = ExternalStartingPort + broker ID,
// targeting the identically numbered container port.
func generateExternalPorts(clusterSpec v1beta1.KafkaClusterSpec, externalListenerConfig v1beta1.ExternalListenerConfig) []corev1.ServicePort {
	ports := make([]corev1.ServicePort, 0, len(clusterSpec.Brokers))
	for _, broker := range clusterSpec.Brokers {
		externalPort := externalListenerConfig.ExternalStartingPort + broker.Id
		ports = append(ports, corev1.ServicePort{
			Name:       fmt.Sprintf("broker-%d", broker.Id),
			TargetPort: intstr.FromInt(int(externalPort)),
			Port:       externalPort,
		})
	}
	return ports
}
|
package idg
import (
"encoding/json"
"time"
"github.com/itsmontoya/mum"
)
// newID32 will return a new ID with the provided index and timestamp
// Note: If timestamp is set to -1, the current Unix timestamp will
// be utilized
// newID32 will return a new ID with the provided index and timestamp
// Note: If timestamp is set to -1, the current Unix timestamp will
// be utilized
func newID32(idx uint32, ts int64) (id ID32) {
	// Helper for binary encoding
	var bw mum.BinaryWriter
	// Check if timestamp is set (or needs to be set)
	if ts == -1 {
		// Timestamp is set to -1, set timestamp to current Unix timestamp (in seconds)
		// Note: Seconds was decided to be utilized instead of nanoseconds
		// To aid in an easier integration with Javascript for front-end clients
		// utilizing idg. Technically, we could utilize milliseconds and maintain
		// Javascript compatibility. That being said, seconds feels like a much
		// more universal Unix time reference interval.
		ts = time.Now().Unix()
	}
	// Copy index bytes to the first 4 bytes
	// (comment fixed: the original said 8, but each half is 4 bytes)
	copy(id[:4], bw.Uint32(uint32(idx)))
	// Copy unix timestamp bytes (truncated to uint32) to the last 4 bytes
	copy(id[4:], bw.Uint32(uint32(ts)))
	return
}
// ID32 represents a 32-bit id: an 8-byte value whose first 4 bytes
// hold a uint32 index and whose last 4 bytes hold a uint32 Unix
// timestamp in seconds (see newID32).
type ID32 [8]byte
// parse base64-decodes in directly into the ID's backing array.
func (id *ID32) parse(in []byte) (err error) {
	if len(in) != strLen {
		// Encoded value has to be exactly strLen bytes or it's not valid
		// NOTE(review): the original comment said "16 bytes"; strLen is
		// declared elsewhere in the package — confirm its value.
		err = ErrInvalidLength
		return
	}
	// Decode inbound bytes as base64
	// Write the bytes directly to our array
	_, err = b64.Decode(id[:], in)
	return
}
// Index will return the index of an ID
func (id *ID32) Index() (idx uint32, err error) {
	// Helper for binary decoding
	var br mum.BinaryReader
	// Check if ID is nil
	if id == nil {
		// ID is nil, return early
		err = ErrEmptyID
		return
	}
	// Grab the index from the first 4 bytes
	// (comment fixed: the original said 8, but the index half is 4 bytes)
	return br.Uint32(id[:4])
}
// Time will return the time.Time of an ID
func (id *ID32) Time() (t time.Time, err error) {
	var (
		// Helper for binary decoding
		br mum.BinaryReader
		// Timestamp
		ts uint32
	)
	// Check if ID is nil
	if id == nil {
		// ID is nil, return early
		err = ErrEmptyID
		return
	}
	// Grab the Unix timestamp from the last 4 bytes
	// (comment fixed: the original said 8, but the timestamp half is 4 bytes)
	if ts, err = br.Uint32(id[4:]); err != nil {
		return
	}
	// Convert the Unix timestamp (seconds, zero nanoseconds) to time.Time
	// (comment fixed: the original said nanoseconds, but time.Unix takes seconds here)
	t = time.Unix(int64(ts), 0)
	return
}
// Bytes will return the byteslice representation
// Note: This function is unsafe and can change the underlying array —
// the returned slice aliases the ID's backing storage.
// Please.. read only!
func (id *ID32) Bytes() (out []byte) {
	if id == nil {
		// Nil receiver: return a nil slice
		return
	}
	out = id[:]
	return
}
// String will return the base64 string representation of the ID, or
// the empty string for a nil receiver.
// Note: This is referenced as a non-pointer so it can be called directly
// from a struct utilizing the non-pointer value of ID
func (id *ID32) String() (out string) {
	if id != nil {
		out = b64.EncodeToString(id[:])
	}
	return
}
// IsEmpty will return if an ID is empty
// (a nil receiver or an all-zero backing array).
func (id *ID32) IsEmpty() (empty bool) {
	return id == nil || *id == emptyID32
}
// MarshalJSON is a JSON encoding helper func. A nil receiver encodes
// as JSON null so the output is always valid JSON.
func (id *ID32) MarshalJSON() (out []byte, err error) {
	// Check if ID is nil
	if id == nil {
		// Returning empty bytes (as the original did) would be invalid
		// JSON and make encoding/json fail; emit an explicit null.
		return []byte("null"), nil
	}
	return json.Marshal(id.String())
}
// UnmarshalJSON is a JSON decoding helper func
func (id *ID32) UnmarshalJSON(in []byte) (err error) {
	var str string
	// Unmarshal inbound value as a string; this also validates the JSON
	// and handles escape sequences, unlike the previous manual
	// quote-stripping of the raw bytes (which ignored the decoded value
	// entirely).
	if err = json.Unmarshal(in, &str); err != nil {
		return
	}
	// Return result of parsing the decoded (base64) string
	return id.parse([]byte(str))
}
|
package main
import (
"encoding/hex"
"fmt"
uc "github.com/unicorn-engine/unicorn/bindings/go/unicorn"
"strings"
)
// asm is the hex-encoded x86-64 machine code that gets emulated below.
var asm = strings.Join([]string{
	"48c7c003000000", // mov rax, 3
	"0f05",           // syscall
	"48c7c700400000", // mov rdi, 0x4000
	"488907",         // mov [rdi], rax (comment fixed: 48 89 07 encodes rax, not rdx)
	"488b07",         // mov rax, [rdi] (comment fixed: 48 8b 07 encodes rax, not rdx)
	"4883c201",       // add rdx, 1
}, "")
// addHooks attaches tracing hooks to the Unicorn instance: basic-block
// and per-instruction tracing, memory access logging, invalid memory
// access reporting, and a SYSCALL instruction hook.
func addHooks(mu uc.Unicorn) {
	mu.HookAdd(uc.HOOK_BLOCK, func(mu uc.Unicorn, addr uint64, size uint32) {
		fmt.Printf("Block: 0x%x, 0x%x\n", addr, size)
	}, 1, 0)
	mu.HookAdd(uc.HOOK_CODE, func(mu uc.Unicorn, addr uint64, size uint32) {
		fmt.Printf("Code: 0x%x, 0x%x\n", addr, size)
	}, 1, 0)
	mu.HookAdd(uc.HOOK_MEM_READ|uc.HOOK_MEM_WRITE, func(mu uc.Unicorn, access int, addr uint64, size int, value int64) {
		if access == uc.MEM_WRITE {
			fmt.Printf("Mem write")
		} else {
			fmt.Printf("Mem read")
		}
		fmt.Printf(": @0x%x, 0x%x = 0x%x\n", addr, size, value)
	}, 1, 0)
	invalid := uc.HOOK_MEM_READ_INVALID | uc.HOOK_MEM_WRITE_INVALID | uc.HOOK_MEM_FETCH_INVALID
	// Returning false aborts the emulation on an invalid access.
	mu.HookAdd(invalid, func(mu uc.Unicorn, access int, addr uint64, size int, value int64) bool {
		// The callback receives a single access-type constant, so each
		// case must list the alternatives separately. The original used
		// `case A | B:`, which compares against the bitwise OR of the
		// two constants — a value that never arrives — so every invalid
		// access fell through to "unknown memory error".
		switch access {
		case uc.MEM_WRITE_UNMAPPED, uc.MEM_WRITE_PROT:
			fmt.Printf("invalid write")
		case uc.MEM_READ_UNMAPPED, uc.MEM_READ_PROT:
			fmt.Printf("invalid read")
		case uc.MEM_FETCH_UNMAPPED, uc.MEM_FETCH_PROT:
			fmt.Printf("invalid fetch")
		default:
			fmt.Printf("unknown memory error")
		}
		fmt.Printf(": @0x%x, 0x%x = 0x%x\n", addr, size, value)
		return false
	}, 1, 0)
	// Fires only for the SYSCALL instruction.
	mu.HookAdd(uc.HOOK_INSN, func(mu uc.Unicorn) {
		rax, _ := mu.RegRead(uc.X86_REG_RAX)
		fmt.Printf("Syscall: %d\n", rax)
	}, 1, 0, uc.X86_INS_SYSCALL)
}
// run decodes the demo machine code, sets up an x86-64 Unicorn
// instance with tracing hooks, emulates the code, and prints RDX
// before and after the run.
func run() error {
	// Decode the hand-written hex opcodes into machine code.
	code, err := hex.DecodeString(asm)
	if err != nil {
		return err
	}
	// set up unicorn instance and add hooks
	mu, err := uc.NewUnicorn(uc.ARCH_X86, uc.MODE_64)
	if err != nil {
		return err
	}
	addHooks(mu)
	// map and write code to memory
	if err := mu.MemMap(0x1000, 0x1000); err != nil {
		return err
	}
	if err := mu.MemWrite(0x1000, code); err != nil {
		return err
	}
	// map scratch space (the code reads/writes address 0x4000)
	if err := mu.MemMap(0x4000, 0x1000); err != nil {
		return err
	}
	// set example register
	if err := mu.RegWrite(uc.X86_REG_RDX, 1); err != nil {
		return err
	}
	rdx, err := mu.RegRead(uc.X86_REG_RDX)
	if err != nil {
		return err
	}
	fmt.Printf("RDX is: %d\n", rdx)
	// start emulation at the code base, stopping after the last byte
	if err := mu.Start(0x1000, 0x1000+uint64(len(code))); err != nil {
		return err
	}
	// read back example register
	rdx, err = mu.RegRead(uc.X86_REG_RDX)
	if err != nil {
		return err
	}
	fmt.Printf("RDX is now: %d\n", rdx)
	return nil
}
// main runs the emulation demo and prints any failure to stdout.
func main() {
	if err := run(); err != nil {
		fmt.Println(err)
	}
}
|
package main
import (
"fmt"
"os"
"bufio"
"regexp"
"runtime"
"strconv"
"strings"
"./unlib"
"./thread"
)
// Board identifies a 2ch board: its display name and its board id
// ("ita"), which is also the directory name under the dat root.
type Board struct {
	Name string
	Ita  string
}
// MiniThread is a lightweight summary of a thread used for ranking:
// board name/id, thread id ("sure") and its point score.
type MiniThread struct {
	Name  string
	Ita   string
	Sure  string
	Point int
}
// NewMiniThread copies the ranking-relevant fields of a full
// thread.Thread into a MiniThread value.
func NewMiniThread(t *thread.Thread) (this MiniThread) {
	this.Name = t.Name
	this.Ita = t.Ita
	this.Sure = t.Sure
	this.Point = t.Point
	return
}
// Filesystem locations used by the scraper.
const g_base_path string = "/2ch/dat"                 // root of downloaded dat files
const g_output_path string = "/2ch/dat/2chpoint.tsv"  // ranking output (TSV)
const g_board_list_path string = "/2ch/getboard.data" // board whitelist (referenced only by the commented-out boardList variant)
const g_ita_data_path string = "/2ch/dat/ita.data"    // server/board catalog parsed by serverList
const g_thread_list string = "subject.txt"            // per-board thread index file name
// NOTE(review): neither of these byte constants appears to be used in
// the visible portion of this file — confirm before removing.
var LF_BYTE []byte = []byte{'\n'}
var HTML_DELI_BYTE []byte = []byte{'<', '>'}
// main ranks 2ch threads by point score and writes the top five to a
// TSV file.
// NOTE(review): this file is written in a pre-Go1 dialect (os.Open
// with flag/perm arguments, strings.Split with a limit argument,
// relative imports) and will not build with a modern toolchain as-is.
func main() {
	cpu := 0
	// Optional first argument: degree of parallelism for board fetching.
	if 1 < len(os.Args) {
		if i, err := strconv.Atoi(os.Args[1]); (err == nil) && (i > 0) {
			cpu = i
			runtime.GOMAXPROCS(cpu)
		}
	} else {
		cpu = 1
	}
	sl := serverList()
	bl := boardList(sl)
	tl := threadList(bl, cpu)
	// Sort descending by points (see cmp).
	pl, qsort_err := unlib.Qsort(tl, cmp)
	if qsort_err != nil {
		panic("qsort")
	}
	fp, open_err := os.Open(g_output_path, os.O_WRONLY | os.O_CREAT, 0777)
	if open_err != nil { panic("g_output_path") }
	defer fp.Close()
	bfp := bufio.NewWriter(fp)
	// Emit only the top five threads; the thread id is written without
	// its file extension (everything before the first dot).
	for _, p := range pl[0:5] {
		it := p.(MiniThread)
		dot := strings.Index(it.Sure, ".")
		if dot > 0 {
			bfp.WriteString(fmt.Sprintf("%d\t%s\t%s\t%s\n", it.Point, it.Name, it.Ita, it.Sure[0:dot]))
		}
	}
	bfp.Flush()
}
/*
func boardList(sl map[string]Board) ([]Board) {
data, open_err := unlib.FileGetContents(g_board_list_path)
if open_err != nil { panic("g_board_list_path") }
bl := strings.Split(string(data), "\n", -1)
list := make([]Board, 0, len(bl))
for _, it := range bl {
if board, ok := sl[it]; ok {
list = append(list, board)
}
}
return list
}
*/
// boardList flattens the server catalog map into a slice of boards
// (iteration order, and hence slice order, is unspecified).
func boardList(sl map[string]Board) ([]Board) {
	boards := make([]Board, 0, len(sl))
	for _, b := range sl {
		boards = append(boards, b)
	}
	return boards
}
// serverList parses the ita data file (lines shaped like
// "server/ita<>name") into a map keyed by the board id ("ita").
func serverList() (map[string]Board) {
	var line Board
	list := make(map[string]Board, 1000)
	data, open_err := unlib.FileGetContents(g_ita_data_path)
	if open_err != nil { panic("g_ita_data_path") }
	reg, reg_err := regexp.Compile("(.+)/(.+)<>(.+)")
	if reg_err != nil { panic("reg err") }
	// NOTE(review): the three-argument Split is the pre-Go1 API
	// (limit -1 means "no limit").
	sl := strings.Split(string(data), "\n", -1)
	for _, it := range sl {
		// match[1]=server, match[2]=ita, match[3]=name
		if match := reg.FindStringSubmatch(it); len(match) > 2 {
			line.Name = match[3]
			line.Ita = match[2]
			list[line.Ita] = line
		}
	}
	return list
}
// threadList collects MiniThread entries for every board, fetching up
// to cpu boards concurrently, and returns them as []interface{} for
// the generic sort.
func threadList(bl []Board, cpu int) ([]interface{}) {
	tlist := make([]interface{}, 0, 400000)
	ch := make(chan MiniThread, cpu * 16)
	sync := make(chan bool, cpu)
	// done signals that the collector goroutine has stopped appending,
	// so tlist can be returned without racing against it.
	done := make(chan bool)
	go func(){
		for {
			// A zero-value MiniThread (empty Sure) arrives once ch is
			// closed and ends the collection loop.
			if data := <- ch; data.Sure != "" {
				tlist = append(tlist, data)
			} else {
				break
			}
		}
		done <- true
	}()
	for _, it := range bl {
		sync <- true
		runtime.GC()
		// Pass the board as an argument: capturing the loop variable
		// directly would make every goroutine observe the same Board
		// (pre-Go1.22 loop-variable semantics).
		go func(b Board){
			threadThread(b, ch)
			<- sync
		}(it)
	}
	// Wait for all workers by filling the semaphore to capacity.
	for cpu > 0 {
		sync <- true
		runtime.GC()
		cpu--
	}
	close(ch)
	// Wait until the collector has drained ch before handing tlist back.
	<-done
	close(sync)
	return tlist
}
// threadThread reads a board's subject.txt, loads each listed thread,
// and sends a MiniThread summary on ch for every thread whose data
// could be fetched. The thread's local data is removed afterwards.
func threadThread(it Board, ch chan MiniThread) {
	base_path := g_base_path + "/" + it.Ita
	b_path := base_path + "/" + g_thread_list
	data, open_err := unlib.FileGetContents(b_path)
	// Missing or unreadable subject.txt: skip this board silently.
	if open_err != nil { return }
	list := strings.Split(string(data), "\n", -1)
	for _, line := range list {
		// subject.txt line format: "<dat-file><>title"; array[0] is the
		// thread's dat file name.
		array := strings.Split(line, "<>", -1)
		if len(array) > 1 {
			t := thread.NewThread(g_base_path, it.Ita, array[0])
			if ok, _ := t.GetData(); ok {
				ch <- NewMiniThread(t)
			}
			t.Remove()
		}
	}
}
// cmp orders MiniThreads by descending Point for the generic qsort:
// a negative result means a sorts before b.
func cmp(a, b interface{}) int {
	left := a.(MiniThread)
	right := b.(MiniThread)
	// Higher-scoring threads come first.
	return right.Point - left.Point
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.