text stringlengths 11 4.05M |
|---|
package main
import "fmt"
// main starts fibn on its own goroutine and prints every value received
// until the done channel reports that the producer has finished.
func main() {
	values := make(chan int)
	done := make(chan bool)
	go fibn(values, done, 20)
	for {
		select {
		case v := <-values:
			fmt.Print(v, " ")
		case <-done:
			return
		}
	}
}
// fibn sends the first n-1 Fibonacci numbers (1, 1, 2, 3, ...) on ch and
// then signals completion on quit.
func fibn(ch chan<- int, quit chan<- bool, n int) {
	x, y := 1, 1
	// BUG FIX: the loop bound was hard-coded to 20, silently ignoring the
	// n parameter; now callers actually control how many values are sent.
	for i := 1; i < n; i++ {
		ch <- x
		x, y = y, x+y
	}
	quit <- true
}
package main
import "github.com/ugoturner/podgo/cmd"
// main is the entry point; it delegates all work to cmd.Build, which
// assembles and runs the podgo application.
func main() {
	cmd.Build()
}
|
package models
import (
"fmt"
"time"
"github.com/jinzhu/gorm"
)
// ===== BEGIN of all query sets
// ===== BEGIN of query set TrelloQuerySet
// TrelloQuerySet is a queryset type for Trello (autogenerated).
type TrelloQuerySet struct {
	db *gorm.DB
}

// NewTrelloQuerySet constructs new TrelloQuerySet scoped to the Trello model.
func NewTrelloQuerySet(db *gorm.DB) TrelloQuerySet {
	return TrelloQuerySet{
		db: db.Model(&Trello{}),
	}
}

// w wraps an already-filtered *gorm.DB back into a TrelloQuerySet so the
// generated filter methods can chain.
func (qs TrelloQuerySet) w(db *gorm.DB) TrelloQuerySet {
	return NewTrelloQuerySet(db)
}
// All is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) All(ret *[]Trello) error {
return qs.db.Find(ret).Error
}
// Count is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) Count() (int, error) {
var count int
err := qs.db.Count(&count).Error
return count, err
}
// Create is an autogenerated method
// nolint: dupl
func (o *Trello) Create(db *gorm.DB) error {
return db.Create(o).Error
}
// CreatedAtEq is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) CreatedAtEq(createdAt time.Time) TrelloQuerySet {
return qs.w(qs.db.Where("created_at = ?", createdAt))
}
// CreatedAtGt is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) CreatedAtGt(createdAt time.Time) TrelloQuerySet {
return qs.w(qs.db.Where("created_at > ?", createdAt))
}
// CreatedAtGte is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) CreatedAtGte(createdAt time.Time) TrelloQuerySet {
return qs.w(qs.db.Where("created_at >= ?", createdAt))
}
// CreatedAtLt is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) CreatedAtLt(createdAt time.Time) TrelloQuerySet {
return qs.w(qs.db.Where("created_at < ?", createdAt))
}
// CreatedAtLte is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) CreatedAtLte(createdAt time.Time) TrelloQuerySet {
return qs.w(qs.db.Where("created_at <= ?", createdAt))
}
// CreatedAtNe is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) CreatedAtNe(createdAt time.Time) TrelloQuerySet {
return qs.w(qs.db.Where("created_at != ?", createdAt))
}
// Delete is an autogenerated method
// nolint: dupl
func (o *Trello) Delete(db *gorm.DB) error {
return db.Delete(o).Error
}
// Delete is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) Delete() error {
return qs.db.Delete(Trello{}).Error
}
// DescriptionEq is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) DescriptionEq(description string) TrelloQuerySet {
return qs.w(qs.db.Where("description = ?", description))
}
// DescriptionIn is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) DescriptionIn(description string, descriptionRest ...string) TrelloQuerySet {
iArgs := []interface{}{description}
for _, arg := range descriptionRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("description IN (?)", iArgs))
}
// DescriptionNe is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) DescriptionNe(description string) TrelloQuerySet {
return qs.w(qs.db.Where("description != ?", description))
}
// DescriptionNotIn is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) DescriptionNotIn(description string, descriptionRest ...string) TrelloQuerySet {
iArgs := []interface{}{description}
for _, arg := range descriptionRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("description NOT IN (?)", iArgs))
}
// GetUpdater is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) GetUpdater() TrelloUpdater {
return NewTrelloUpdater(qs.db)
}
// IDEq is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) IDEq(ID uint) TrelloQuerySet {
return qs.w(qs.db.Where("id = ?", ID))
}
// IDGt is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) IDGt(ID uint) TrelloQuerySet {
return qs.w(qs.db.Where("id > ?", ID))
}
// IDGte is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) IDGte(ID uint) TrelloQuerySet {
return qs.w(qs.db.Where("id >= ?", ID))
}
// IDIn is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) IDIn(ID uint, IDRest ...uint) TrelloQuerySet {
iArgs := []interface{}{ID}
for _, arg := range IDRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("id IN (?)", iArgs))
}
// IDLt is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) IDLt(ID uint) TrelloQuerySet {
return qs.w(qs.db.Where("id < ?", ID))
}
// IDLte is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) IDLte(ID uint) TrelloQuerySet {
return qs.w(qs.db.Where("id <= ?", ID))
}
// IDNe is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) IDNe(ID uint) TrelloQuerySet {
return qs.w(qs.db.Where("id != ?", ID))
}
// IDNotIn is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) IDNotIn(ID uint, IDRest ...uint) TrelloQuerySet {
iArgs := []interface{}{ID}
for _, arg := range IDRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("id NOT IN (?)", iArgs))
}
// KeyEq is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) KeyEq(key string) TrelloQuerySet {
return qs.w(qs.db.Where("key = ?", key))
}
// KeyIn is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) KeyIn(key string, keyRest ...string) TrelloQuerySet {
iArgs := []interface{}{key}
for _, arg := range keyRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("key IN (?)", iArgs))
}
// KeyNe is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) KeyNe(key string) TrelloQuerySet {
return qs.w(qs.db.Where("key != ?", key))
}
// KeyNotIn is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) KeyNotIn(key string, keyRest ...string) TrelloQuerySet {
iArgs := []interface{}{key}
for _, arg := range keyRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("key NOT IN (?)", iArgs))
}
// Limit is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) Limit(limit int) TrelloQuerySet {
return qs.w(qs.db.Limit(limit))
}
// NameEq is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) NameEq(name string) TrelloQuerySet {
return qs.w(qs.db.Where("name = ?", name))
}
// NameIn is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) NameIn(name string, nameRest ...string) TrelloQuerySet {
iArgs := []interface{}{name}
for _, arg := range nameRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("name IN (?)", iArgs))
}
// NameNe is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) NameNe(name string) TrelloQuerySet {
return qs.w(qs.db.Where("name != ?", name))
}
// NameNotIn is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) NameNotIn(name string, nameRest ...string) TrelloQuerySet {
iArgs := []interface{}{name}
for _, arg := range nameRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("name NOT IN (?)", iArgs))
}
// One is used to retrieve one result. It returns gorm.ErrRecordNotFound
// if nothing was fetched
func (qs TrelloQuerySet) One(ret *Trello) error {
return qs.db.First(ret).Error
}
// OrderAscByCreatedAt is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) OrderAscByCreatedAt() TrelloQuerySet {
return qs.w(qs.db.Order("created_at ASC"))
}
// OrderAscByID is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) OrderAscByID() TrelloQuerySet {
return qs.w(qs.db.Order("id ASC"))
}
// OrderAscByUpdatedAt is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) OrderAscByUpdatedAt() TrelloQuerySet {
return qs.w(qs.db.Order("updated_at ASC"))
}
// OrderDescByCreatedAt is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) OrderDescByCreatedAt() TrelloQuerySet {
return qs.w(qs.db.Order("created_at DESC"))
}
// OrderDescByID is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) OrderDescByID() TrelloQuerySet {
return qs.w(qs.db.Order("id DESC"))
}
// OrderDescByUpdatedAt is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) OrderDescByUpdatedAt() TrelloQuerySet {
return qs.w(qs.db.Order("updated_at DESC"))
}
// SetCreatedAt is an autogenerated method
// nolint: dupl
func (u TrelloUpdater) SetCreatedAt(createdAt time.Time) TrelloUpdater {
u.fields[string(TrelloDBSchema.CreatedAt)] = createdAt
return u
}
// SetDescription is an autogenerated method
// nolint: dupl
func (u TrelloUpdater) SetDescription(description string) TrelloUpdater {
u.fields[string(TrelloDBSchema.Description)] = description
return u
}
// SetID is an autogenerated method
// nolint: dupl
func (u TrelloUpdater) SetID(ID uint) TrelloUpdater {
u.fields[string(TrelloDBSchema.ID)] = ID
return u
}
// SetKey is an autogenerated method
// nolint: dupl
func (u TrelloUpdater) SetKey(key string) TrelloUpdater {
u.fields[string(TrelloDBSchema.Key)] = key
return u
}
// SetName is an autogenerated method
// nolint: dupl
func (u TrelloUpdater) SetName(name string) TrelloUpdater {
u.fields[string(TrelloDBSchema.Name)] = name
return u
}
// SetToken is an autogenerated method
// nolint: dupl
func (u TrelloUpdater) SetToken(token string) TrelloUpdater {
u.fields[string(TrelloDBSchema.Token)] = token
return u
}
// SetUpdatedAt is an autogenerated method
// nolint: dupl
func (u TrelloUpdater) SetUpdatedAt(updatedAt time.Time) TrelloUpdater {
u.fields[string(TrelloDBSchema.UpdatedAt)] = updatedAt
return u
}
// SetUserName is an autogenerated method
// nolint: dupl
func (u TrelloUpdater) SetUserName(userName string) TrelloUpdater {
u.fields[string(TrelloDBSchema.UserName)] = userName
return u
}
// TokenEq is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) TokenEq(token string) TrelloQuerySet {
return qs.w(qs.db.Where("token = ?", token))
}
// TokenIn is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) TokenIn(token string, tokenRest ...string) TrelloQuerySet {
iArgs := []interface{}{token}
for _, arg := range tokenRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("token IN (?)", iArgs))
}
// TokenNe is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) TokenNe(token string) TrelloQuerySet {
return qs.w(qs.db.Where("token != ?", token))
}
// TokenNotIn is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) TokenNotIn(token string, tokenRest ...string) TrelloQuerySet {
iArgs := []interface{}{token}
for _, arg := range tokenRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("token NOT IN (?)", iArgs))
}
// Update is an autogenerated method
// nolint: dupl
func (u TrelloUpdater) Update() error {
return u.db.Updates(u.fields).Error
}
// UpdateNum is an autogenerated method
// nolint: dupl
func (u TrelloUpdater) UpdateNum() (int64, error) {
db := u.db.Updates(u.fields)
return db.RowsAffected, db.Error
}
// UpdatedAtEq is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) UpdatedAtEq(updatedAt time.Time) TrelloQuerySet {
return qs.w(qs.db.Where("updated_at = ?", updatedAt))
}
// UpdatedAtGt is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) UpdatedAtGt(updatedAt time.Time) TrelloQuerySet {
return qs.w(qs.db.Where("updated_at > ?", updatedAt))
}
// UpdatedAtGte is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) UpdatedAtGte(updatedAt time.Time) TrelloQuerySet {
return qs.w(qs.db.Where("updated_at >= ?", updatedAt))
}
// UpdatedAtLt is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) UpdatedAtLt(updatedAt time.Time) TrelloQuerySet {
return qs.w(qs.db.Where("updated_at < ?", updatedAt))
}
// UpdatedAtLte is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) UpdatedAtLte(updatedAt time.Time) TrelloQuerySet {
return qs.w(qs.db.Where("updated_at <= ?", updatedAt))
}
// UpdatedAtNe is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) UpdatedAtNe(updatedAt time.Time) TrelloQuerySet {
return qs.w(qs.db.Where("updated_at != ?", updatedAt))
}
// UserNameEq is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) UserNameEq(userName string) TrelloQuerySet {
return qs.w(qs.db.Where("user_name = ?", userName))
}
// UserNameIn is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) UserNameIn(userName string, userNameRest ...string) TrelloQuerySet {
iArgs := []interface{}{userName}
for _, arg := range userNameRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("user_name IN (?)", iArgs))
}
// UserNameNe is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) UserNameNe(userName string) TrelloQuerySet {
return qs.w(qs.db.Where("user_name != ?", userName))
}
// UserNameNotIn is an autogenerated method
// nolint: dupl
func (qs TrelloQuerySet) UserNameNotIn(userName string, userNameRest ...string) TrelloQuerySet {
iArgs := []interface{}{userName}
for _, arg := range userNameRest {
iArgs = append(iArgs, arg)
}
return qs.w(qs.db.Where("user_name NOT IN (?)", iArgs))
}
// ===== END of query set TrelloQuerySet
// ===== BEGIN of Trello modifiers
// trelloDBSchemaField is a typed database column name for the Trello model.
type trelloDBSchemaField string

// String returns the raw column name.
func (f trelloDBSchemaField) String() string {
	return string(f)
}

// TrelloDBSchema stores db field names of Trello, one typed constant-like
// entry per column, for use with the updater/Update APIs.
var TrelloDBSchema = struct {
	ID          trelloDBSchemaField
	CreatedAt   trelloDBSchemaField
	UpdatedAt   trelloDBSchemaField
	Name        trelloDBSchemaField
	Description trelloDBSchemaField
	UserName    trelloDBSchemaField
	Token       trelloDBSchemaField
	Key         trelloDBSchemaField
}{
	ID:          trelloDBSchemaField("id"),
	CreatedAt:   trelloDBSchemaField("created_at"),
	UpdatedAt:   trelloDBSchemaField("updated_at"),
	Name:        trelloDBSchemaField("name"),
	Description: trelloDBSchemaField("description"),
	UserName:    trelloDBSchemaField("user_name"),
	Token:       trelloDBSchemaField("token"),
	Key:         trelloDBSchemaField("key"),
}
// Update updates Trello fields by primary key.
// Only the columns named in fields are written; their values are read from
// the receiver o at call time.
func (o *Trello) Update(db *gorm.DB, fields ...trelloDBSchemaField) error {
	// Map every db column name to the receiver's current field value, then
	// pick out just the requested subset below.
	dbNameToFieldName := map[string]interface{}{
		"id":          o.ID,
		"created_at":  o.CreatedAt,
		"updated_at":  o.UpdatedAt,
		"name":        o.Name,
		"description": o.Description,
		"user_name":   o.UserName,
		"token":       o.Token,
		"key":         o.Key,
	}
	u := map[string]interface{}{}
	for _, f := range fields {
		fs := f.String()
		u[fs] = dbNameToFieldName[fs]
	}
	if err := db.Model(o).Updates(u).Error; err != nil {
		// Pass gorm's not-found sentinel through unwrapped so callers can
		// compare against gorm.ErrRecordNotFound directly.
		if err == gorm.ErrRecordNotFound {
			return err
		}
		return fmt.Errorf("can't update Trello %v fields %v: %s",
			o, fields, err)
	}
	return nil
}
// TrelloUpdater is a Trello updates manager: it accumulates column values
// via the generated SetXxx methods and applies them with Update/UpdateNum.
type TrelloUpdater struct {
	fields map[string]interface{}
	db     *gorm.DB
}

// NewTrelloUpdater creates new Trello updater scoped to the Trello model.
func NewTrelloUpdater(db *gorm.DB) TrelloUpdater {
	return TrelloUpdater{
		fields: map[string]interface{}{},
		db:     db.Model(&Trello{}),
	}
}
// ===== END of Trello modifiers
// ===== END of all query sets
|
package main
import (
// Standard library packages
"log"
"net/http"
"os"
// Third party packages
"github.com/julienschmidt/httprouter"
"gopkg.in/mgo.v2"
)
// main wires up the HTTP routes for the component/vulnerability service and
// serves them on port 8076, logging to /tmp/heart.log.
func main() {
	f, err := os.OpenFile("/tmp/heart.log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
	if err != nil {
		// BUG FIX: the open error used to be discarded, leaving log output
		// attached to a nil file so every log line was silently lost.
		log.Printf("could not open log file, logging to stderr: %v", err)
	} else {
		defer f.Close()
		log.SetOutput(f)
	}
	log.Printf("Start")
	// Instantiate a new router
	r := httprouter.New()
	// Get a ComponentController instance
	cc := NewComponentController(getSession())
	// Get a component resource
	r.GET("/component/:id", cc.GetComponent)
	// Get all risky components.
	r.GET("/compute", cc.ComputeRisk)
	// Check if a computation is running.
	r.GET("/check_computation", cc.CheckComputation)
	// List all vulnerability names.
	r.GET("/vulnerabilities", cc.GetVulnerabilities)
	// Fetch the summary for a specific vulnerability.
	r.GET("/vulnerability/:name", cc.GetVulnerability)
	// Create a new component
	r.POST("/component", cc.CreateComponent)
	// Remove an existing component
	r.DELETE("/component/:id", cc.RemoveComponent)
	// Get an InstanceController instance
	ic := NewInstanceController(getSession())
	// Create a new instance
	r.POST("/instance", ic.CreateInstance)
	// BUG FIX: surface server startup/runtime failures instead of returning
	// silently (ListenAndServe always returns a non-nil error).
	if err := http.ListenAndServe("0.0.0.0:8076", r); err != nil {
		log.Fatal(err)
	}
}
// getSession dials the local MongoDB instance and returns the session,
// panicking when the connection cannot be established.
func getSession() *mgo.Session {
	session, err := mgo.Dial("mongodb://localhost")
	if err != nil {
		// Is mongo running? Without a database there is nothing to serve.
		panic(err)
	}
	return session
}
|
package server
import (
"bufio"
"config"
"io"
"log"
"net"
"sync"
"time"
)
// ConcurrentMap is a map[interface{}]interface{} guarded by an RWMutex and
// safe for concurrent use. Its zero value is ready to use.
type ConcurrentMap struct {
	lock sync.RWMutex
	data map[interface{}]interface{}
}

// Put stores value under key, lazily allocating the backing map.
func (m *ConcurrentMap) Put(key, value interface{}) {
	m.lock.Lock()
	defer m.lock.Unlock()
	if m.data == nil {
		m.data = make(map[interface{}]interface{})
	}
	m.data[key] = value
}

// Get returns the value stored under key, or nil when absent.
func (m *ConcurrentMap) Get(key interface{}) interface{} {
	m.lock.RLock()
	defer m.lock.RUnlock()
	// Indexing a nil map yields the zero value, so no nil check is needed.
	return m.data[key]
}

// Remove deletes key; removing from an empty map is a no-op.
func (m *ConcurrentMap) Remove(key interface{}) {
	m.lock.Lock()
	defer m.lock.Unlock()
	// delete on a nil map is a safe no-op.
	delete(m.data, key)
}

// Size reports the number of stored entries.
func (m *ConcurrentMap) Size() int {
	m.lock.RLock()
	defer m.lock.RUnlock()
	// len(nil map) is 0, matching the original empty-map behavior.
	return len(m.data)
}
var (
	// conns map[string]net.Conn = make(map[string]net.Conn)
	// conns tracks live client connections keyed by remote address.
	conns ConcurrentMap
	// Running gates the accept/read/write loops; cleared by Stop.
	// NOTE(review): read and written from multiple goroutines without
	// synchronization — a data race; consider sync/atomic.
	Running bool = true
	// Retry is the back-off factor; use sites multiply it by time.Second.
	Retry time.Duration = 5
)
// Listen starts the network listener (启动网络监听: start network listening).
// It spawns config.GlobalConfig.Boss accept goroutines and blocks on
// config.W until all of them have finished.
func Listen(address, network string) {
	listen, err := net.Listen(network, address)
	if err != nil {
		log.Panicln(err)
	}
	defer listen.Close()
	log.Printf("accept goroutine count:%d\r\n", config.GlobalConfig.Boss)
	for i := 0; i < config.GlobalConfig.Boss; i++ {
		config.W.Add(1)
		go accept(listen)
	}
	config.W.Wait()
	// accpet(listen)
}
// Stop requests the accept/read/write loops to exit by clearing the Running
// flag. NOTE(review): Running is read by other goroutines without
// synchronization (data race); left unchanged to preserve the package API.
func Stop() {
	// BUG FIX: corrected the "stopeing" typo in the log message.
	log.Println("server stopping!")
	Running = false
}
// accept loops while Running is set, accepting connections, registering each
// one in conns, and spawning a reader goroutine per connection.
// NOTE(review): Running is checked without synchronization — see Stop.
func accept(listen net.Listener) {
	defer config.W.Done()
	for Running {
		conn, conn_err := listen.Accept()
		if conn_err != nil {
			log.Println("accept error")
			log.Println(conn_err)
			// Back off before retrying the accept.
			time.Sleep(Retry * time.Second)
			continue
		}
		addr := conn.RemoteAddr()
		log.Println(addr.String())
		// conns[addr.String()] = conn
		conns.Put(addr.String(), conn)
		log.Printf("conns size:%d\r\n", conns.Size())
		config.W.Add(1)
		go handlerRead(conn)
		// config.W.Add(1)
		// go handlerWrite(conn)
		// log.Println(conns)
		// defer conn.Close()
		// io.WriteString(conn, "Hello World!")
	}
}
// handlerRead greets the connection with the current time, then logs each
// received line until a read error or an "exit" message ends the session.
// Cleanup (deregister + close + WaitGroup done) is guaranteed by the defer;
// the in-loop Remove/Close calls are redundant but harmless repeats.
func handlerRead(conn net.Conn) {
	addr := conn.RemoteAddr()
	defer func() {
		// delete(conns, addr.String())
		conns.Remove(addr.String())
		conn.Close()
		config.W.Done()
	}()
	io.WriteString(conn, time.Now().String()+"\r\n")
	reader := bufio.NewReader(conn)
	for Running {
		data, _, err := reader.ReadLine()
		if err != nil {
			// delete(conns, addr.String())
			conns.Remove(addr.String())
			conn.Close()
			log.Println(err)
			log.Printf("conns size:%d\r\n", conns.Size())
			break
		}
		message := string(data)
		log.Printf("%s[%s]\r\n", addr.String(), message)
		if message == "exit" {
			// delete(conns, addr.String())
			conns.Remove(addr.String())
			conn.Close()
			log.Printf("conns size:%d\r\n", conns.Size())
			break
		}
		// if len(conns) > 1 {
		// for k, v := range conns {
		// if k != addr.String() {
		// io.WriteString(*v, addr.String()+":\r\n"+message+"\r\n")
		// }
		// }
		// }
	}
}
// handlerWrite periodically writes the current time to the connection every
// Retry seconds until a write error occurs or Running is cleared.
// Currently unused: the call in accept is commented out.
func handlerWrite(conn net.Conn) {
	addr := conn.RemoteAddr()
	defer func() {
		// delete(conns, addr.String())
		conns.Remove(addr.String())
		conn.Close()
		config.W.Done()
	}()
	for Running {
		_, err := io.WriteString(conn, time.Now().String()+"\r\n")
		if err != nil {
			// delete(conns, addr.String())
			conns.Remove(addr.String())
			conn.Close()
			log.Println(err)
			log.Printf("conns size:%d\r\n", conns.Size())
			break
		}
		time.Sleep(Retry * time.Second)
	}
}
|
// ˅
package main
import (
"fmt"
"time"
)
// ˄
// BarChartObserver displays observed values as a horizontal bar chart.
// The ˅/˄ markers delimit hand-written regions for a code-generation tool
// and are kept intact.
type BarChartObserver struct {
	// ˅
	// ˄
	// ˅
	// ˄
}
// NewBarChartObserver returns a fresh, stateless BarChartObserver.
func NewBarChartObserver() *BarChartObserver {
	// ˅
	return new(BarChartObserver)
	// ˄
}
// Update implements the observer callback: it renders number.value as a row
// of '*' characters and sleeps 100ms, presumably to pace the animation.
func (self *BarChartObserver) Update(number *Number) {
	// ˅
	fmt.Print("Bar chart: ")
	for i := 0; i < number.value; i++ {
		fmt.Print("*")
	}
	fmt.Println("")
	time.Sleep(100 * time.Millisecond)
	// ˄
}
// ˅
// ˄
|
package serializetree
import "testing"
// TestNoData verifies that an empty tree marshals to a null head.
func TestNoData(t *testing.T) {
	tree := NewTree()
	actual := tree.Marshal()
	expected := "{\"head\": null}"
	if actual != expected {
		// BUG FIX: corrected the "marchalling" typo in the failure message.
		t.Errorf("Bad marshalling, expected %s, actual was %s", expected, actual)
	}
}
// TestBasicSerialization verifies that sequentially inserted values marshal
// into the expected right-leaning nested JSON structure.
func TestBasicSerialization(t *testing.T) {
	tree := NewTree()
	tree.Insert(1)
	tree.Insert(2)
	tree.Insert(3)
	actual := tree.Marshal()
	expected := `{"head": {"val": 1, "left": null, "right": {"val": 2, "left": null, "right": {"val": 3, "left": null, "right": null}}}}`
	if actual != expected {
		// BUG FIX: corrected the "marchalling" typo in the failure message.
		t.Errorf("Bad marshalling, expected %s, actual was %s", expected, actual)
	}
}
|
package cassandra
import (
"log"
"github.com/gocql/gocql"
"user-event-store/app/management"
)
var Session *gocql.Session
// init reads the "cassandra" section of the management configuration,
// connects to the cluster, and stores the shared package-level Session.
// NOTE(review): panicking in init aborts the whole binary when Cassandra is
// unreachable at startup — confirm this is the intended failure mode.
func init() {
	var err error
	configuration := management.Configuration.Config("cassandra")
	host := configuration.String("host")
	keyspace := configuration.String("keyspace")
	cluster := gocql.NewCluster(host)
	cluster.Keyspace = keyspace
	Session, err = cluster.CreateSession()
	if err != nil {
		panic(err)
	}
	log.Printf("Cassandra init done")
}
|
package rest
import (
"errors"
"fmt"
"github.com/jinmukeji/jiujiantang-services/pkg/rest"
jinmuidpb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/user/v1"
generalpb "github.com/jinmukeji/proto/v3/gen/micro/idl/ptypes/v2"
"github.com/kataras/iris/v12"
)
const (
// SimpleChinese 简体中文
SimpleChinese = "zh-Hans"
// TraditionalChinese 繁体中文
TraditionalChinese = "zh-Hant"
// English 英文
English = "en"
)
// Language 语言
type Language struct {
Language string `json:"language"`
}
// SetWebLanguage sets the web language for a user (设置语言).
// It reads user_id from the route, the language from the JSON body, maps it
// to the protobuf enum, and forwards the request over RPC.
func (h *webHandler) SetWebLanguage(ctx iris.Context) {
	userID, err := ctx.Params().GetInt("user_id")
	if err != nil {
		writeError(ctx, wrapError(ErrInvalidValue, "", err), false)
		return
	}
	req := new(jinmuidpb.SetJinmuIDWebLanguageRequest)
	req.UserId = int32(userID)
	var body Language
	errReadJSON := ctx.ReadJSON(&body)
	if errReadJSON != nil {
		// BUG FIX: previously wrapped the stale (nil) err from GetInt
		// instead of the JSON-parsing error, hiding the real cause.
		writeError(ctx, wrapError(ErrParsingRequestFailed, "", errReadJSON), false)
		return
	}
	if body.Language == "" {
		writeError(ctx, wrapError(ErrEmptyLanguage, "", errors.New("language is empty")), false)
		return
	}
	protoLanguage, errmapRestLanguageToProto := mapRestLanguageToProto(body.Language)
	if errmapRestLanguageToProto != nil {
		writeError(ctx, wrapError(ErrInvalidValue, "", errmapRestLanguageToProto), false)
		return
	}
	req.Language = protoLanguage
	_, errSetJinmuIDWebLanguage := h.rpcSvc.SetJinmuIDWebLanguage(
		newRPCContext(ctx), req,
	)
	if errSetJinmuIDWebLanguage != nil {
		writeRpcInternalError(ctx, errSetJinmuIDWebLanguage, false)
		return
	}
	rest.WriteOkJSON(ctx, nil)
}
// GetWebLanguage returns the stored web language for a user (得到语言).
// The protobuf language from the RPC response is mapped back to its REST
// string form before being written out.
func (h *webHandler) GetWebLanguage(ctx iris.Context) {
	userID, err := ctx.Params().GetInt("user_id")
	if err != nil {
		writeError(ctx, wrapError(ErrInvalidValue, "", err), false)
		return
	}
	req := new(jinmuidpb.GetJinmuIDWebLanguageRequest)
	req.UserId = int32(userID)
	resp, errGetJinmuIDWebLanguage := h.rpcSvc.GetJinmuIDWebLanguage(
		newRPCContext(ctx), req,
	)
	if errGetJinmuIDWebLanguage != nil {
		writeRpcInternalError(ctx, errGetJinmuIDWebLanguage, false)
		return
	}
	stringLanguage, ermapProtoLanguageToRest := mapProtoLanguageToRest(resp.Language)
	if ermapProtoLanguageToRest != nil {
		// Default to Simplified Chinese (默认使用简体中文).
		stringLanguage = SimpleChinese
	}
	rest.WriteOkJSON(ctx, Language{
		Language: stringLanguage,
	})
}
// mapProtoLanguageToRest converts a protobuf Language value to its REST
// string form. Unknown values fall back to SimpleChinese together with an
// error describing the unmatched value.
func mapProtoLanguageToRest(language generalpb.Language) (string, error) {
	switch language {
	case generalpb.Language_LANGUAGE_INVALID:
		return "", fmt.Errorf("invalid proto language %d", generalpb.Language_LANGUAGE_INVALID)
	case generalpb.Language_LANGUAGE_UNSET:
		return "", fmt.Errorf("invalid proto language %d", generalpb.Language_LANGUAGE_UNSET)
	case generalpb.Language_LANGUAGE_SIMPLIFIED_CHINESE:
		return SimpleChinese, nil
	case generalpb.Language_LANGUAGE_TRADITIONAL_CHINESE:
		return TraditionalChinese, nil
	case generalpb.Language_LANGUAGE_ENGLISH:
		return English, nil
	}
	// BUG FIX: report the actual unmatched value instead of always printing
	// LANGUAGE_INVALID; also fixed the "lanuage" parameter typo.
	return SimpleChinese, fmt.Errorf("invalid proto language %d", language)
}
// mapRestLanguageToProto converts a REST language string to its protobuf
// enum value; unrecognized strings yield LANGUAGE_INVALID plus an error.
func mapRestLanguageToProto(language string) (generalpb.Language, error) {
	conversions := map[string]generalpb.Language{
		SimpleChinese:      generalpb.Language_LANGUAGE_SIMPLIFIED_CHINESE,
		TraditionalChinese: generalpb.Language_LANGUAGE_TRADITIONAL_CHINESE,
		English:            generalpb.Language_LANGUAGE_ENGLISH,
	}
	if proto, ok := conversions[language]; ok {
		return proto, nil
	}
	return generalpb.Language_LANGUAGE_INVALID, fmt.Errorf("invalid string language %s", language)
}
|
/*
* Copyright (c) 2021. Alibaba Cloud. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*/
package exporter
import (
"time"
"github.com/dragonflyoss/image-service/contrib/nydus-snapshotter/pkg/metric/ttl"
"github.com/dragonflyoss/image-service/contrib/nydus-snapshotter/pkg/nydussdk/model"
"github.com/prometheus/client_golang/prometheus"
)
var (
	// imageRefLabel is the Prometheus label carrying the image reference.
	imageRefLabel = "image_ref"
	// defaultTTL expires gauges not refreshed within three minutes.
	defaultTTL = 3 * time.Minute
)

// Per-image nydusd gauges; each carries the image_ref label and expires
// after defaultTTL without updates.
var (
	// Counters
	ReadCount = ttl.NewGaugeVecWithTTL(
		prometheus.GaugeOpts{
			Name: "nydusd_read_count",
			Help: "Total number read of a nydus fs, in Byte.",
		},
		[]string{imageRefLabel},
		defaultTTL,
	)
	OpenFdCount = ttl.NewGaugeVecWithTTL(
		prometheus.GaugeOpts{
			Name: "nydusd_open_fd_count",
			Help: "Number of current open files.",
		},
		[]string{imageRefLabel},
		defaultTTL,
	)
	OpenFdMaxCount = ttl.NewGaugeVecWithTTL(
		prometheus.GaugeOpts{
			Name: "nydusd_open_fd_max_count",
			Help: "Number of max open files.",
		},
		[]string{imageRefLabel},
		defaultTTL,
	)
	LastFopTimestamp = ttl.NewGaugeVecWithTTL(
		prometheus.GaugeOpts{
			Name: "nydusd_last_fop_timestamp",
			Help: "Timestamp of last file operation.",
		},
		[]string{imageRefLabel},
		defaultTTL,
	)
)
// FsMetricHists declares the per-image histograms exported from nydusd fs
// metrics. Each entry pairs a Prometheus descriptor and bucket bounds with a
// GetCounters selector picking the matching counter slice of model.FsMetric.
var FsMetricHists = []*FsMetricHistogram{
	{
		Desc: prometheus.NewDesc(
			"nydusd_block_count_read_hist",
			"Read size histogram, in 1KB, 4KB, 16KB, 64KB, 128KB, 512K, 1024K.",
			[]string{imageRefLabel},
			prometheus.Labels{},
		),
		Buckets: []uint64{1, 4, 16, 64, 128, 512, 1024, 2048},
		GetCounters: func(m *model.FsMetric) []uint64 {
			return m.BlockCountRead
		},
	},
	{
		Desc: prometheus.NewDesc(
			"nydusd_fop_hit_hist",
			"File operations histogram",
			[]string{imageRefLabel},
			prometheus.Labels{},
		),
		Buckets: MakeFopBuckets(),
		GetCounters: func(m *model.FsMetric) []uint64 {
			return m.FopHits
		},
	},
	{
		Desc: prometheus.NewDesc(
			"nydusd_fop_errors_hist",
			"File operations' error histogram",
			[]string{imageRefLabel},
			prometheus.Labels{},
		),
		Buckets: MakeFopBuckets(),
		GetCounters: func(m *model.FsMetric) []uint64 {
			return m.FopErrors
		},
	},
	{
		Desc: prometheus.NewDesc(
			"nydusd_read_latency_hist",
			"Read latency histogram, in microseconds",
			[]string{imageRefLabel},
			prometheus.Labels{},
		),
		Buckets: []uint64{1, 20, 50, 100, 500, 1000, 2000, 4000},
		GetCounters: func(m *model.FsMetric) []uint64 {
			return m.ReadLatencyDist
		},
	},
}
|
package pkg
import (
"encoding/json"
"github.com/fabric-lab/hyperledger-fabric-manager/server/pkg/entity"
"github.com/fabric-lab/hyperledger-fabric-manager/server/pkg/store"
"github.com/fabric-lab/hyperledger-fabric-manager/server/pkg/util"
"github.com/gin-gonic/gin"
"strings"
)
// GetEntitys returns all records for each comma-separated entity type in the
// "entity" path parameter, keyed by type name.
func GetEntitys(c *gin.Context) {
	en := c.Param("entity")
	ens := strings.Split(en, ",")
	entitys := make(map[string][]interface{})
	for _, v := range ens {
		records, err := store.Bt.View(v)
		if err != nil {
			c.JSON(500, gin.H{"Error": err.Error()})
			return
		}
		entitys[v] = records
		// Re-materialize raw records through their concrete entity type so
		// Get-capable entities can enrich themselves before serialization;
		// if none implement entity.Get, the raw records are kept.
		var newRecords []interface{}
		for _, r := range records {
			e := entity.MapToEntity(r, v)
			if g, ok := e.(entity.Get); ok {
				g.GetEntity()
				newRecords = append(newRecords, g)
			}
		}
		if newRecords != nil {
			entitys[v] = newRecords
		}
	}
	c.JSON(200, entitys)
}
// GetEntity returns a single record of the given entity type by id.
func GetEntity(c *gin.Context) {
	en := c.Param("entity")
	id := c.Param("id")
	record, err := store.Bt.ViewByKey(en, id)
	if err != nil {
		c.JSON(500, gin.H{"Error": err.Error()})
		return
	}
	// Removed the redundant bare `return` that followed the final statement.
	c.JSON(200, record)
}
// GetNodeState reports whether the node entity/id is running, using the
// presence of a cache entry keyed "entity.id" as the liveness signal.
func GetNodeState(c *gin.Context) {
	en := c.Param("entity")
	id := c.Param("id")
	cache := util.Caches.Get(en + "." + id)
	if cache != nil {
		c.JSON(200, gin.H{"state": "running"})
		return
	}
	// Removed the redundant bare `return` that followed the final statement.
	c.JSON(200, gin.H{"state": "stop"})
}
// CreateEntity decodes the request body, runs the entity's Create action if
// it implements entity.Action, and persists it under entity/id.
func CreateEntity(c *gin.Context) {
	en := c.Param("entity")
	id := c.Param("id")
	var i interface{}
	// BUG FIX: a malformed body used to be silently ignored and a nil entity
	// stored. BindJSON already writes a 400 response on failure, so we only
	// need to stop processing here.
	if err := c.BindJSON(&i); err != nil {
		return
	}
	e := entity.MapToEntity(i, en)
	if a, ok := e.(entity.Action); ok {
		a.Create()
	}
	// BUG FIX: the marshal error was discarded, which could persist empty
	// bytes on failure.
	b, err := json.Marshal(e)
	if err != nil {
		c.JSON(500, gin.H{"Error": err.Error()})
		return
	}
	if err := store.Bt.AddJson(en, id, b); err != nil {
		c.JSON(500, gin.H{"Error": err.Error()})
		return
	}
	c.JSON(200, gin.H{})
}
// DelEntity deletes entity/id from the store and responds with the records
// of that entity type that remain after deletion.
func DelEntity(c *gin.Context) {
	en := c.Param("entity")
	id := c.Param("id")
	err := store.Bt.DelByKey(en, id)
	if err != nil {
		c.JSON(500, gin.H{"Error": err.Error()})
		return
	}
	records, err := store.Bt.View(en)
	if err != nil {
		c.JSON(500, gin.H{"Error": err.Error()})
		return
	}
	c.JSON(200, records)
}
// ExecCMD loads entity/id, runs the posted command map through the entity's
// CMD implementation (when present), and reports the command output plus the
// cached running/stop state.
// NOTE(review): the BindJSON error is ignored — an unparseable body yields a
// nil cmd map passed to Exec; confirm whether that is intended.
func ExecCMD(c *gin.Context) {
	en := c.Param("entity")
	id := c.Param("id")
	var cmd map[string]string
	c.BindJSON(&cmd)
	e, err := store.Bt.ViewByKey(en, id)
	if err != nil {
		c.JSON(500, gin.H{"Error": err.Error()})
		return
	}
	e = entity.MapToEntity(e, en)
	var res string
	if a, ok := e.(entity.CMD); ok {
		res = a.Exec(cmd)
	}
	// Same liveness convention as GetNodeState: cache hit means running.
	cache := util.Caches.Get(en + "." + id)
	state := "stop"
	if cache != nil {
		state = "running"
	}
	c.JSON(200, gin.H{"msg": res, "state": state})
}
// GetCert loads the organization identified by id, extracts the CA cert/key
// pair named caName, strips the raw DER byte fields from the certificate,
// and returns both as JSON.
func GetCert(c *gin.Context) {
	id := c.Param("id")
	caName := c.Param("ca")
	e, err := store.Bt.ViewByKey("organizations", id)
	if err != nil {
		c.JSON(500, gin.H{"Error": err.Error()})
		return
	}
	e = entity.MapToEntity(e, "organizations")
	o, ok := e.(*entity.Organization)
	if !ok {
		// BUG FIX: the old else-branch re-checked err (always nil at that
		// point), so a failed type assertion wrote no response at all and
		// the client hung without an answer.
		c.JSON(500, gin.H{"Error": "record is not an organization"})
		return
	}
	ca, key, err := entity.GetCA(caName, *o)
	if err != nil {
		c.JSON(500, gin.H{"Error": err.Error()})
		return
	}
	// Blank out the raw byte fields so the JSON payload stays compact.
	ca.SignCert.Raw = []byte{}
	ca.SignCert.RawTBSCertificate = []byte{}
	ca.SignCert.RawSubjectPublicKeyInfo = []byte{}
	ca.SignCert.RawSubject = []byte{}
	ca.SignCert.RawIssuer = []byte{}
	ca.SignCert.PublicKey = []byte{}
	// BUG FIX: the marshal error was previously discarded.
	cert, err := json.Marshal(ca.SignCert)
	if err != nil {
		c.JSON(500, gin.H{"Error": err.Error()})
		return
	}
	c.JSON(200, gin.H{
		"ca":  string(cert),
		"key": key,
	})
}
|
package nougat
import "net/url"
// Base sets the rawURL.
// If you intend to extend the url with Path, baseUrl should be specified
// with a trailing slash. Returns the receiver for chaining.
func (r *Nougat) Base(rawURL string) *Nougat {
	r.rawURL = rawURL
	return r
}
// Path extends the rawURL with the given path by resolving the reference
// to an absolute URL. If parsing errors occur, the rawURL is left
// unmodified. Returns the receiver for chaining.
func (r *Nougat) Path(path string) *Nougat {
	base, errBase := url.Parse(r.rawURL)
	ref, errRef := url.Parse(path)
	if errBase != nil || errRef != nil {
		// Either part failed to parse — keep rawURL untouched.
		return r
	}
	r.rawURL = base.ResolveReference(ref).String()
	return r
}
// QueryStruct appends the queryStruct to the Nougat's queryStructs.
// The value pointed to by each queryStruct will be encoded as url query
// parameters on new requests (see Request()).
// The queryStruct argument should be a pointer to a url tagged struct.
// See https://godoc.org/github.com/google/go-querystring/query for details.
// nil arguments are ignored.
func (r *Nougat) QueryStruct(queryStruct interface{}) *Nougat {
	if queryStruct != nil {
		r.queryStructs = append(r.queryStructs, queryStruct)
	}
	return r
}
|
package beater
import (
"fmt"
"os/exec"
"strconv"
"strings"
"time"
"github.com/elastic/beats/libbeat/beat"
"github.com/elastic/beats/libbeat/common"
"github.com/elastic/beats/libbeat/logp"
"github.com/elastic/beats/libbeat/publisher"
"github.com/kussj/cassandrabeat/config"
)
// Cassandrabeat collects per-table Cassandra latency metrics and publishes
// them through libbeat.
type Cassandrabeat struct {
	done   chan struct{}    // closed by Stop to end the Run loop
	config config.Config    // unpacked beat configuration
	client publisher.Client // libbeat publisher, connected in Run
	table  []string         // tables to poll each period
}
// New creates the Cassandrabeat beater from the raw libbeat configuration.
func New(b *beat.Beat, cfg *common.Config) (beat.Beater, error) {
	config := config.DefaultConfig
	if err := cfg.Unpack(&config); err != nil {
		// Error strings are lowercase per Go convention (staticcheck ST1005).
		return nil, fmt.Errorf("reading config file: %v", err)
	}
	bt := &Cassandrabeat{
		done:   make(chan struct{}),
		config: config,
	}
	return bt, nil
}
// Run is the beater main loop: every config.Period it collects latency for
// each configured table until Stop closes the done channel.
func (bt *Cassandrabeat) Run(b *beat.Beat) error {
	logp.Info("MOD - cassandrabeat is running! Hit CTRL-C to stop it.")
	bt.client = b.Publisher.Connect()
	bt.table = bt.config.Table[:]
	ticker := time.NewTicker(bt.config.Period)
	// BUG FIX: stop the ticker when Run returns so its resources are
	// released instead of ticking forever.
	defer ticker.Stop()
	for {
		select {
		case <-bt.done:
			return nil
		case <-ticker.C:
		}
		for _, table := range bt.table {
			logp.Info("Getting latency for table: %s", table)
			bt.getLatency(table)
		}
		logp.Info("Event sent")
	}
}
// Stop shuts the beat down: it closes the publisher client and signals the
// Run loop (via the done channel) to exit.
func (bt *Cassandrabeat) Stop() {
	bt.client.Close()
	close(bt.done)
}
// getLatency runs the awkscript.sh helper for the given table, parses the
// newline-separated metrics it prints, and publishes them as one event.
//
// Expected script output lines, in order:
//   0: pending flushes, 1: sstable count, 2: space used (live),
//   3: space used (total), 4: space used (snapshots, total),
//   5: number of keys, 6: read latency, 7: write latency
// Integer fields print the sentinel "NAN" when unavailable; float fields
// print "NaN".
func (bt *Cassandrabeat) getLatency(table string) {
	output, err := exec.Command("awkscript.sh", table).Output()
	if err != nil {
		fmt.Println(err)
		// Back off briefly so a broken helper script doesn't spin the loop.
		time.Sleep(5000 * time.Millisecond)
		return
	}
	latency := strings.Split(string(output), "\n")
	if len(latency) < 8 {
		fmt.Printf("Not enough values (%v) returned from nodetool script. Bailing.\n", len(latency))
		return
	}
	// BUG FIX: the original inverted the "NAN" test for fields 1-5 — it
	// parsed the literal string "NAN" (always yielding 0) and discarded
	// genuine numeric values. All integer fields now share one helper with
	// the correct sentinel handling.
	pendingFlushes := parseInt64OrZero(latency[0])
	sstableCount := parseInt64OrZero(latency[1])
	spaceUsedLive := parseInt64OrZero(latency[2])
	spaceUsedTotal := parseInt64OrZero(latency[3])
	spaceUsedSnapshotTotal := parseInt64OrZero(latency[4])
	numberOfKeys := parseInt64OrZero(latency[5])
	readLatency := parseFloat64OrZero(latency[6])
	writeLatency := parseFloat64OrZero(latency[7])
	event := common.MapStr{
		"@timestamp":                common.Time(time.Now()),
		"type":                      "stats",
		"count":                     1,
		"table_name":                table,
		"write_latency":             writeLatency,
		"read_latency":              readLatency,
		"pending_flushes":           pendingFlushes,
		"sstable_count":             sstableCount,
		"space_used_live":           spaceUsedLive,
		"space_used_total":          spaceUsedTotal,
		"space_used_snapshot_total": spaceUsedSnapshotTotal,
		"number_of_keys":            numberOfKeys,
	}
	bt.client.PublishEvent(event)
}

// parseInt64OrZero parses s as a base-10 int64, mapping the sentinel "NAN"
// (or any unparsable input) to 0.
func parseInt64OrZero(s string) int64 {
	if s == "NAN" {
		return 0
	}
	v, _ := strconv.ParseInt(s, 10, 64)
	return v
}

// parseFloat64OrZero parses s as a float64, mapping the sentinel "NaN"
// (or any unparsable input) to 0.
func parseFloat64OrZero(s string) float64 {
	if s == "NaN" {
		return 0.0
	}
	v, _ := strconv.ParseFloat(s, 64)
	return v
}
|
/*
* Package targets
* Target GEN/SMD (Sega Genesis / Megadrive)
*
* Part of XPMC.
* Contains data/functions specific to the GEN output target
*
* /Mic, 2012-2015
*/
package targets
import (
"os"
"time"
"../specs"
"../utils"
"../effects"
"../timing"
)
/* Sega Genesis / Megadrive *
****************************/
// Init configures the GEN/SMD target: GAS 68k output syntax, SN76489 PSG
// channels A..D plus YM2612 FM channels E..J, and the target's tempo,
// volume, loop, ADSR, waveform and clock limits.
func (t *TargetGen) Init() {
	t.Target.Init()
	t.Target.SetOutputSyntax(SYNTAX_GAS_68K)
	utils.DefineSymbol("GEN", 1)
	utils.DefineSymbol("SMD", 1)
	specs.SetChannelSpecs(&t.ChannelSpecs, 0, 0, specs.SpecsSN76489) // A..D
	specs.SetChannelSpecs(&t.ChannelSpecs, 0, 4, specs.SpecsYM2612)  // E..J
	t.ID = TARGET_SMD
	t.MaxTempo = 300
	t.MinVolume = 0
	t.SupportsPanning = 1
	t.MaxLoopDepth = 2
	t.SupportsPal = true
	t.AdsrLen = 5
	t.AdsrMax = 63
	t.MinWavLength = 1
	t.MaxWavLength = 2097152 // 2MB
	t.MinWavSample = 0
	t.MaxWavSample = 255
	t.MachineSpeed = 3579545 // NTSC clock; Output switches to 3546893 for 50 Hz (PAL)
}
/* Output data suitable for the SEGA Genesis (Megadrive) playback library
 */
// Output writes the compiled song data to <shortname>.asm (GAS 68k syntax).
// VGM/VGZ output is selected by outputFormat but not implemented yet.
func (t *TargetGen) Output(outputFormat int) {
	utils.DEBUG("TargetGen.Output")

	fileEnding := ".asm"
	outputVgm := false
	if outputFormat == OUTPUT_VGM {
		fileEnding = ".vgm"
		outputVgm = true
	} else if outputFormat == OUTPUT_VGZ {
		fileEnding = ".vgz"
		outputVgm = true
	}
	if outputVgm {
		// ToDo: output VGM/VGZ
		return
	}

	outFile, err := os.Create(t.CompilerItf.GetShortFileName() + fileEnding)
	if err != nil {
		utils.ERROR("Unable to open file: " + t.CompilerItf.GetShortFileName() + fileEnding)
		// BUG FIX: bail out rather than dereferencing a nil *os.File below
		// (in case utils.ERROR does not terminate the process).
		return
	}

	now := time.Now()
	outFile.WriteString("; Written by XPMC on " + now.Format(time.RFC1123) + "\n\n")

	// Convert ADSR envelopes to the format used by the YM2612
	envelopes := make([][]interface{}, len(effects.ADSRs.GetKeys()))
	for i, key := range effects.ADSRs.GetKeys() {
		envelopes[i] = packADSR(effects.ADSRs.GetData(key).MainPart, specs.CHIP_YM2612)
		effects.ADSRs.GetData(key).MainPart = make([]interface{}, len(envelopes[i]))
		copy(effects.ADSRs.GetData(key).MainPart, envelopes[i])
	}

	// Convert modulation parameters to the format used by the YM2612
	mods := make([][]interface{}, len(effects.MODs.GetKeys()))
	for i, key := range effects.MODs.GetKeys() {
		mods[i] = packMOD(effects.MODs.GetData(key).MainPart, specs.CHIP_YM2612)
		effects.MODs.GetData(key).MainPart = make([]interface{}, len(mods[i]))
		copy(effects.MODs.GetData(key).MainPart, mods[i])
	}

	/* ToDo: translate
	   for i = 1 to length(feedbackMacros[1]) do
	       feedbackMacros[ASSOC_DATA][i][LIST_MAIN] = (feedbackMacros[ASSOC_DATA][i][LIST_MAIN])*8
	       feedbackMacros[ASSOC_DATA][i][LIST_LOOP] = (feedbackMacros[ASSOC_DATA][i][LIST_LOOP])*8
	   end for*/

	/*numSongs = 0
	  for i = 1 to length(songs) do
	      if sequence(songs[i]) then
	          numSongs += 1
	      end if
	  end for*/

	// 50 Hz playback implies a PAL machine, which runs at a slightly
	// different master clock.
	if timing.UpdateFreq == 50 {
		outFile.WriteString(".equ XPMP_50_HZ, 1\n")
		t.MachineSpeed = 3546893
	} else {
		t.MachineSpeed = 3579545
	}

	tableSize := t.outputStandardEffects(outFile)
	tableSize += t.outputTable(outFile, "xpmp_FB_mac", effects.FeedbackMacros, true, 1, 0x80)
	tableSize += t.outputTable(outFile, "xpmp_ADSR", effects.ADSRs, false, 1, 0)
	tableSize += t.outputTable(outFile, "xpmp_MOD", effects.MODs, false, 1, 0)

	/*tableSize += output_m68kas_table("xpmp_VS_mac", volumeSlides, 1, 1, 0)
	  tableSize += output_m68kas_table("xpmp_FB_mac", feedbackMacros,1, 1, 0)
	  tableSize += output_m68kas_table("xpmp_ADSR", adsrs, 0, 1, 0)
	  tableSize += output_m68kas_table("xpmp_MOD", mods, 0, 1, 0)*/

	cbSize := 0
	utils.INFO("Size of effect tables: %d bytes\n", tableSize)

	patSize := t.outputPatterns(outFile)
	utils.INFO("Size of patterns table: %d bytes\n", patSize)

	songSize := t.outputChannelData(outFile)
	utils.INFO("Total size of song(s): %d bytes\n", songSize + patSize + tableSize + cbSize)

	outFile.Close()
}
|
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cipher_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/apid/apid-core/cipher"
"testing"
)
// TestEvents boots the Ginkgo test suite for the cipher package.
func TestEvents(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Cipher Suite")
}
// Specs for the AES cipher wrapper: known-answer encrypt/decrypt vectors for
// 128/192/256-bit keys, plus invalid-parameter handling.
var _ = Describe("APID Cipher", func() {
	Context("AES", func() {
		Context("AES/ECB/PKCS7Padding Encrypt/Decrypt", func() {
			type testData struct {
				key        []byte
				plaintext  []byte
				ciphertext []byte
			}
			data := []testData{
				{
					// 128-bit
					[]byte{2, 122, 212, 83, 150, 164, 180, 4, 148, 242, 65, 189, 3, 188, 76, 247},
					[]byte("aUWQKgAwmaR0p2kY"),
					// 32-byte after padding
					[]byte{218, 53, 247, 87, 119, 80, 231, 16, 125, 11, 214, 101, 246, 202, 178, 163, 202, 102,
						146, 245, 79, 215, 74, 228, 17, 83, 213, 134, 105, 203, 31, 14},
				},
				{
					// 192-bit
					[]byte{2, 122, 212, 83, 150, 164, 180, 4, 148, 242, 65, 189, 3, 188, 76, 247,
						2, 122, 212, 83, 150, 164, 180, 4},
					[]byte("a"),
					// 16-byte after padding
					[]byte{225, 2, 177, 65, 152, 88, 116, 43, 71, 215, 84, 240, 221, 175, 11, 131},
				},
				{
					// 256-bit
					[]byte{2, 122, 212, 83, 150, 164, 180, 4, 148, 242, 65, 189, 3, 188, 76, 247,
						2, 122, 212, 83, 150, 164, 180, 4, 148, 242, 65, 189, 3, 188, 76, 247},
					[]byte(""),
					// 16-byte after padding
					[]byte{88, 192, 164, 235, 153, 89, 14, 134, 224, 122, 31, 36, 238, 117, 121, 117},
				},
			}
			It("Encrypt", func() {
				for i := 0; i < len(data); i++ {
					c, err := cipher.CreateAesCipher(data[i].key)
					Expect(err).Should(Succeed())
					Expect(c.Encrypt(data[i].plaintext, cipher.ModeEcb, cipher.PaddingPKCS5)).Should(Equal(data[i].ciphertext))
					Expect(c.Encrypt(data[i].plaintext, cipher.ModeEcb, cipher.PaddingPKCS7)).Should(Equal(data[i].ciphertext))
				}
			})
			It("Decrypt", func() {
				// BUG FIX: this spec previously called Encrypt again (copy-paste),
				// so Decrypt was never exercised. It now decrypts the ciphertext
				// and expects the original plaintext back.
				for i := 0; i < len(data); i++ {
					c, err := cipher.CreateAesCipher(data[i].key)
					Expect(err).Should(Succeed())
					Expect(c.Decrypt(data[i].ciphertext, cipher.ModeEcb, cipher.PaddingPKCS5)).Should(Equal(data[i].plaintext))
					Expect(c.Decrypt(data[i].ciphertext, cipher.ModeEcb, cipher.PaddingPKCS7)).Should(Equal(data[i].plaintext))
				}
			})
		})
		It("Invalid Parameters", func() {
			_, err := cipher.CreateAesCipher(make([]byte, 15))
			Expect(err).ToNot(Succeed())
			_, err = cipher.CreateAesCipher(nil)
			Expect(err).ToNot(Succeed())
			key := make([]byte, 16)
			c, err := cipher.CreateAesCipher(key)
			Expect(err).Should(Succeed())
			_, err = c.Encrypt([]byte{1, 2, 3}, cipher.Mode("unsupported"), cipher.PaddingPKCS7)
			Expect(err).ToNot(Succeed())
			_, err = c.Encrypt([]byte{1, 2, 3}, cipher.ModeEcb, cipher.Padding("unsupported"))
			Expect(err).ToNot(Succeed())
			_, err = c.Decrypt([]byte{88, 192, 164, 235, 153, 89, 14, 134, 224, 122, 31, 36, 238, 117, 121, 117},
				cipher.Mode("unsupported"), cipher.PaddingPKCS7)
			Expect(err).ToNot(Succeed())
			_, err = c.Decrypt([]byte{88, 192, 164, 235, 153, 89, 14, 134, 224, 122, 31, 36, 238, 117, 121, 117},
				cipher.ModeEcb, cipher.Padding("unsupported"))
			Expect(err).ToNot(Succeed())
		})
	})
})
|
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers_test
import (
"context"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1beta1"
"github.com/oam-dev/kubevela/pkg/oam/util"
)
// E2E spec: an Application whose workflow writes, reads and deletes a config
// must reach the Running phase.
var _ = Describe("Test application of the specified definition version", func() {
	ctx := context.Background()

	var namespace string
	var ns corev1.Namespace

	BeforeEach(func() {
		// Each spec gets its own randomly named namespace; AlreadyExist is
		// tolerated so retries of Create are harmless.
		namespace = randomNamespaceName("config-e2e-test")
		ns = corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
		Eventually(func() error {
			return k8sClient.Create(ctx, &ns)
		}, time.Second*3, time.Microsecond*300).Should(SatisfyAny(BeNil(), &util.AlreadyExistMatcher{}))
	})

	It("Test the workflow", func() {
		var app v1beta1.Application
		Expect(yaml.Unmarshal([]byte(manageConfigApplication), &app)).Should(BeNil())
		// Override the fixture's namespace with the per-spec one.
		app.Namespace = namespace
		Expect(k8sClient.Create(context.TODO(), &app)).Should(BeNil())
		// Poll until the application reports Running (up to 30s, every 2s).
		Eventually(
			func() common.ApplicationPhase {
				var getAPP v1beta1.Application
				if err := k8sClient.Get(ctx, client.ObjectKey{Namespace: namespace, Name: app.Name}, &getAPP); err != nil {
					klog.Errorf("fail to query the app %s", err.Error())
				}
				klog.Infof("the application status is %s (%+v)", getAPP.Status.Phase, getAPP.Status.Workflow)
				return getAPP.Status.Phase
			},
			time.Second*30, time.Second*2).Should(Equal(common.ApplicationRunning))
	})

	AfterEach(func() {
		By("Clean up resources after a test")
		k8sClient.DeleteAllOf(ctx, &v1beta1.Application{}, client.InNamespace(namespace))
	})
})
// manageConfigApplication is the YAML fixture used by the workflow spec: an
// Application with no components whose workflow creates, reads and deletes a
// config named "test" (the namespace here is overridden at runtime).
var manageConfigApplication = `
kind: Application
apiVersion: core.oam.dev/v1beta1
metadata:
  name: test-config
  namespace: "config-e2e-test"
spec:
  components: []
  workflow:
    steps:
    - name: write-config
      type: create-config
      properties:
        name: test
        config:
          key1: value1
          key2: 2
          key3: true
          key4:
            key5: value5
    - name: read-config
      type: read-config
      properties:
        name: test
      outputs:
      - fromKey: config
        name: read-config
    - name: delete-config
      type: delete-config
      properties:
        name: test
`
|
package main
import (
"fmt"
"math/bits"
)
// popCountRange returns the total number of set bits across all integers in
// the inclusive range [a, b]. Values are truncated to 16 bits, matching the
// original intent of using bits.OnesCount16.
func popCountRange(a, b int) int {
	total := 0
	for i := a; i <= b; i++ {
		// BUG FIX: the original passed an int to bits.OnesCount16, which
		// requires uint16 and therefore did not compile.
		total += bits.OnesCount16(uint16(i))
	}
	return total
}

func main() {
	fmt.Println(popCountRange(2, 7))
}
|
package main
import (
"fmt"
"strconv"
"strings"
)
// subtractHour takes a 12-hour clock string such as "12:23PM" and returns the
// time one hour earlier. Rules mirror the 12-hour dial: 1 o'clock wraps to 12
// with the same meridiem; 12 o'clock becomes 11 with the meridiem flipped;
// everything else simply decrements the hour.
func subtractHour(clock string) string {
	colon := strings.Index(clock, ":")
	meridiem := clock[colon+3:] // "AM" or "PM"
	hour, _ := strconv.Atoi(clock[:colon])

	flip := false
	switch hour {
	case 1:
		hour = 12 // 1 -> 12, meridiem unchanged
	case 12:
		flip = true // 12 -> 11 crosses the AM/PM boundary
		hour--
	default:
		hour--
	}

	result := strconv.Itoa(hour)
	if !flip {
		// Keep ":MM" plus the original meridiem verbatim.
		return result + clock[colon:]
	}
	// Keep ":MM" and append the opposite meridiem.
	result += clock[colon : len(clock)-2]
	if meridiem == "PM" {
		return result + "AM"
	}
	return result + "PM"
}

// main demonstrates subtractHour, printing the parsed meridiem, the parsed
// hour, and the adjusted time (same output as the original script).
func main() {
	clock := "12:23PM"
	colon := strings.Index(clock, ":")
	fmt.Println(clock[colon+3:])
	fmt.Println(clock[:colon])
	fmt.Println(subtractHour(clock))
}
|
package main
import "runtime"
// Package-level workload counters and tuning knobs shared by the benchmark.
var (
	ninserts   int64 // successful inserts
	nupserts   int64 // successful upserts
	ndeletes   int64 // successful deletes
	xdeletes   int64 // failed/extra deletes
	numentries int64 // live entries
	totalwrites int64
	totalreads  int64

	delmod    int64 = 0 // delete-mix modulus
	updmod    int64 = 1 // update-mix modulus
	conflicts int64 = 0
	rollbacks int64 = 0

	// numcpus reflects the current GOMAXPROCS setting (query only).
	numcpus = runtime.GOMAXPROCS(-1)
)
|
package mobile
import (
"github.com/golang/protobuf/proto"
"github.com/textileio/go-textile/core"
"github.com/textileio/go-textile/pb"
)
// Feed calls core Feed: it decodes the protobuf request, forwards it to the
// running node, and re-encodes the resulting feed items.
func (m *Mobile) Feed(req []byte) ([]byte, error) {
	if !m.node.Started() {
		return nil, core.ErrStopped
	}
	feedReq := new(pb.FeedRequest)
	if err := proto.Unmarshal(req, feedReq); err != nil {
		return nil, err
	}
	items, err := m.node.Feed(feedReq)
	if err != nil {
		return nil, err
	}
	return proto.Marshal(items)
}
|
package main
import (
"fmt"
)
// main demonstrates both word-break implementations on two sample inputs.
func main() {
	dict := []string{"car", "ca", "rs"}
	fmt.Println(wordBreak("carcars", dict))
	fmt.Println(wordBreak("cars", dict))
}
// wordBreak reports whether s can be segmented into a sequence of words from
// wordDict, using bottom-up dynamic programming over prefix lengths.
func wordBreak(s string, wordDict []string) bool {
	words := make(map[string]bool, len(wordDict)) // set for O(1) membership tests
	for _, w := range wordDict {
		words[w] = true
	}
	// canSplit[i] means the prefix s[:i] is segmentable; the empty prefix is.
	canSplit := make([]bool, len(s)+1)
	canSplit[0] = true
	for end := 1; end <= len(s); end++ {
		for start := 0; start < end; start++ {
			// Segmentable prefix + dictionary word => segmentable longer prefix.
			if canSplit[start] && words[s[start:end]] {
				canSplit[end] = true
				break
			}
		}
	}
	return canSplit[len(s)]
}
// wordBreak2 solves the same segmentation problem top-down: a depth-first
// search over split points, memoizing results per suffix to avoid rework.
func wordBreak2(s string, wordDict []string) bool {
	words := make(map[string]bool, len(wordDict)) // set for O(1) membership tests
	for _, w := range wordDict {
		words[w] = true
	}
	memo := make(map[string]bool) // suffix -> segmentable?
	var split func(rest string) bool
	split = func(rest string) (ok bool) {
		// A suffix that is itself a dictionary word is done.
		if words[rest] {
			return true
		}
		if cached, hit := memo[rest]; hit {
			return cached
		}
		// Record whatever we decide about this suffix on the way out.
		defer func() { memo[rest] = ok }()
		for cut := 1; cut < len(rest); cut++ {
			// The prefix must be a word and the remainder segmentable.
			if words[rest[:cut]] && split(rest[cut:]) {
				return true
			}
		}
		return false
	}
	return split(s)
}
|
package core
import (
"crypto/sha256"
"encoding/hex"
"strconv"
"time"
)
// Block is one link of the chain; its Hash covers Index, Timestamp,
// PrevBlockHash and Data (see calculateHash).
type Block struct {
	Index         int64  // position in the chain (the genesis sentinel uses -1)
	Timestamp     int64  // Unix seconds at creation time
	PrevBlockHash string // hash of the preceding block
	Data          string // payload carried by this block
	Hash          string // hex-encoded SHA-256 of this block's fields
}
// calculateHash generates a block's hash: the hex-encoded SHA-256 digest of
// the concatenated index, timestamp, previous hash and payload.
func calculateHash(b Block) string {
	payload := strconv.Itoa(int(b.Index)) +
		strconv.Itoa(int(b.Timestamp)) +
		b.PrevBlockHash +
		b.Data
	digest := sha256.Sum256([]byte(payload))
	return hex.EncodeToString(digest[:])
}
// GenerateNewBlock builds the successor of preBlock carrying the given data:
// the index is incremented, the timestamp set to now, the previous hash
// linked, and the new block's own hash computed last.
func GenerateNewBlock(preBlock Block, data string) Block {
	next := Block{
		Index:         preBlock.Index + 1,
		Timestamp:     time.Now().Unix(),
		PrevBlockHash: preBlock.Hash,
		Data:          data,
	}
	next.Hash = calculateHash(next)
	return next
}
// GenerateGenesisBlock initializes the chain's first block (index 0) by
// generating it from a sentinel predecessor with index -1 and an empty hash.
func GenerateGenesisBlock() Block {
	sentinel := Block{Index: -1, Hash: ""}
	return GenerateNewBlock(sentinel, "起始块")
}
|
package main
import (
"fmt"
"runtime"
"sync"
"testing"
"time"
)
// TestGetGomaxprocs spawns 100 sleeping goroutines and prints the CPU count,
// GOMAXPROCS value and live goroutine count observed while they run.
func TestGetGomaxprocs(t *testing.T) {
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			time.Sleep(3 * time.Second)
		}()
	}
	fmt.Println("Total CPU ", runtime.NumCPU())
	fmt.Println("Total Thread ", runtime.GOMAXPROCS(-1))
	fmt.Println("Total Go Routine", runtime.NumGoroutine())
	wg.Wait()
}
|
package main
// main contains only commented-out variable-declaration examples (a Go
// learning scratchpad); it does nothing at runtime. Comments translated
// from Turkish.
func main() {
	/*
		fmt.Println("1 + 1 =", 1+1)
		var name string = "golang" // a name starting with a lowercase letter is not accessible from outside the package
		fmt.Println(name)
		var Version string = "1.2" // a name starting with an uppercase letter is public
		fmt.Println(Version)
	*/
	/*
		var message string
		message = "Merhaba Merve"
		//var message string = "Merhaba"
		//var message = "Merhaba Merve"
		fmt.Println(message)
	*/
	/*
		var a, b, c int = 3, 4, 5
		fmt.Println(a)
		fmt.Println(b)
		fmt.Println(c)
	*/
	// Go reports an error for declared-but-unused variables; comment them out if they won't be used.
	/*
		var k, o string = "abc" , "xyz"
		var p = 42
		u :=55 // short declaration also works without specifying a type
		v, n := "abc",true
		message := "Merhaba Go"
		k, l, m := 4, "You", false
	*/
	/*
		a := "merve" // the := form is only allowed inside functions; Go does not permit it at package scope
		c := 'M' // rune (character) literal; printing it outputs its numeric (ASCII) code
		fmt.Println(a,b,c)
	*/
}
|
/*****************************************************************
* Copyright©,2020-2022, email: 279197148@qq.com
* Version: 1.0.0
* @Author: yangtxiang
* @Date: 2020-07-30 16:58
* Description:
*****************************************************************/
package xthrift
import (
"github.com/apache/thrift/lib/go/thrift"
)
// TProtocolMapKeyTypes combines the thrift types accepted as map keys.
// NOTE(review): this ORs thrift.TType constants together as if they were
// bit flags — confirm the constants are distinct bits rather than ordinals.
const TProtocolMapKeyTypes = thrift.STRING | thrift.I08 | thrift.I16 | thrift.I32 | thrift.I64 | thrift.DOUBLE

// TProtocolDataType extends thrift.TType with xthrift-specific values.
type TProtocolDataType thrift.TType

// MESSAGE is an extra data type beyond the standard thrift types (one past
// thrift.UTF16).
var MESSAGE TProtocolDataType = thrift.UTF16 + 1
// String renders the type name: "message" for the xthrift-specific MESSAGE
// value, otherwise whatever the underlying thrift.TType reports.
func (pt TProtocolDataType) String() string {
	if pt != MESSAGE {
		return thrift.TType(pt).String()
	}
	return "message"
}
// ProtocolDataToken is a unit of protocol data that knows its own type and
// can serialize itself to / deserialize itself from a thrift transport.
type ProtocolDataToken interface {
	GetType() TProtocolDataType
	Write(out thrift.TTransport) error
	Read(in thrift.TTransport) error
}
|
package news_api
import (
"config"
"encoding/json"
"models"
"net/http"
)
// FindAll handles requests for the complete news list: it opens the DB,
// fetches all rows via the news model, and writes the result — or the
// error — as JSON. Restructured to early returns instead of nested else.
func FindAll(response http.ResponseWriter, request *http.Request) {
	db, err := config.GetDB()
	if err != nil {
		respondWithError(response, http.StatusBadRequest, err.Error())
		return
	}
	newsModel := models.NewsModel{
		Db: db,
	}
	newsnew, err2 := newsModel.FindAll()
	if err2 != nil {
		respondWithError(response, http.StatusBadRequest, err2.Error())
		return
	}
	respondWithJson(response, http.StatusOK, newsnew)
}
// func Search(response http.ResponseWriter, request *http.Request) {
// vars := mux.Vars(request)
// keyword := vars["keyword"]
// db, err := config.GetDB()
// if err != nil {
// respondWithError(response, http.StatusBadRequest, err.Error())
// } else {
// newsModel := models.NewsModel{
// Db: db,
// }
// newsnew, err2 := newsModel.Search(keyword)
// if err2 != nil {
// respondWithError(response, http.StatusBadRequest, err2.Error())
// } else {
// respondWithJson(response, http.StatusOK, newsnew)
// }
// }
// }
// respondWithError writes msg as a {"error": msg} JSON payload with the
// given HTTP status code.
func respondWithError(w http.ResponseWriter, code int, msg string) {
	payload := map[string]string{"error": msg}
	respondWithJson(w, code, payload)
}
func respondWithJson(w http.ResponseWriter, code int, payload interface{}) {
response, _ := json.Marshal(payload)
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
w.Write(response)
}
|
package hash
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"hash/crc32"
"io"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/longpath"
"github.com/pkg/errors"
)
// Password hashes the password with SHA-256 and returns the lowercase hex
// digest. The error result is always nil; it exists for interface
// compatibility with the other hash helpers.
func Password(password string) (string, error) {
	digest := sha256.Sum256([]byte(password))
	return hex.EncodeToString(digest[:]), nil
}
// File creates the hash value of a file
func File(path string) (string, error) {
file, err := os.Open(path)
if err != nil {
return "", err
}
defer file.Close()
hash := sha256.New()
_, err = io.Copy(hash, file)
if err != nil {
return "", err
}
return hex.EncodeToString(hash.Sum(nil)), nil
}
// Directory creates the hash value of a directory.
// The SHA-256 digest covers each entry's "path;size;mtimeNanos" triple, so
// it tracks metadata changes, not file contents. A plain file argument is
// hashed as a single triple. Walk errors on individual entries are ignored
// (the entry is simply skipped).
func Directory(path string) (string, error) {
	hash := sha256.New()

	// Stat dir / file
	fileInfo, err := os.Stat(path)
	if err != nil {
		return "", err
	}

	// Hash file
	if fileInfo.IsDir() == false {
		size := strconv.FormatInt(fileInfo.Size(), 10)
		mTime := strconv.FormatInt(fileInfo.ModTime().UnixNano(), 10)
		io.WriteString(hash, path+";"+size+";"+mTime)
		return fmt.Sprintf("%x", hash.Sum(nil)), nil
	}

	// Hash directory
	err = filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			// We ignore errors
			return nil
		}
		size := strconv.FormatInt(info.Size(), 10)
		mTime := strconv.FormatInt(info.ModTime().UnixNano(), 10)
		io.WriteString(hash, path+";"+size+";"+mTime)
		return nil
	})
	if err != nil {
		return "", err
	}

	return fmt.Sprintf("%x", hash.Sum(nil)), nil
}
// DirectoryExcludes calculates a hash for a directory and excludes the submitted patterns.
// Patterns use Docker's .dockerignore matching (fileutils.PatternMatcher),
// including `!` exception rules. In fast mode an entry contributes
// "path;size;mtimeSeconds"; otherwise its CRC-32 checksum is used, which
// reflects content changes at the cost of reading every file.
func DirectoryExcludes(srcPath string, excludePatterns []string, fast bool) (string, error) {
	srcPath, err := filepath.Abs(srcPath)
	if err != nil {
		return "", err
	}

	hash := sha256.New()

	// Fix the source path to work with long path names. This is a no-op
	// on platforms other than Windows.
	if runtime.GOOS == "windows" {
		srcPath = longpath.AddPrefix(srcPath)
	}

	pm, err := fileutils.NewPatternMatcher(excludePatterns)
	if err != nil {
		return "", err
	}

	// In general we log errors here but ignore them because
	// during e.g. a diff operation the container can continue
	// mutating the filesystem and we can see transient errors
	// from this
	stat, err := os.Lstat(srcPath)
	if err != nil {
		return "", err
	}
	if !stat.IsDir() {
		return "", errors.Errorf("Path %s is not a directory", srcPath)
	}

	include := "."
	seen := make(map[string]bool)

	walkRoot := filepath.Join(srcPath, include)
	err = filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error {
		if err != nil {
			return errors.Errorf("Hash: Can't stat file %s to hash: %s", srcPath, err)
		}

		relFilePath, err := filepath.Rel(srcPath, filePath)
		if err != nil {
			// Error getting relative path OR we are looking
			// at the source directory path. Skip in both situations.
			return err
		}

		skip := false

		// If "include" is an exact match for the current file
		// then even if there's an "excludePatterns" pattern that
		// matches it, don't skip it. IOW, assume an explicit 'include'
		// is asking for that file no matter what - which is true
		// for some files, like .dockerignore and Dockerfile (sometimes)
		if relFilePath != "." {
			skip, err = pm.Matches(relFilePath)
			if err != nil {
				return errors.Errorf("Error matching %s: %v", relFilePath, err)
			}
		}

		if skip {
			// If we want to skip this file and its a directory
			// then we should first check to see if there's an
			// excludes pattern (e.g. !dir/file) that starts with this
			// dir. If so then we can't skip this dir.

			// Its not a dir then so we can just return/skip.
			if !f.IsDir() {
				return nil
			}

			// No exceptions (!...) in patterns so just skip dir
			if !pm.Exclusions() {
				return filepath.SkipDir
			}

			dirSlash := relFilePath + string(filepath.Separator)

			for _, pat := range pm.Patterns() {
				if !pat.Exclusion() {
					continue
				}
				if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) {
					// found a match - so can't skip this dir
					return nil
				}
			}

			// No matching exclusion dir so just skip dir
			return filepath.SkipDir
		}

		if seen[relFilePath] {
			return nil
		}
		seen[relFilePath] = true

		if f.IsDir() {
			// Path is enough
			io.WriteString(hash, filePath)
		} else {
			if fast {
				io.WriteString(hash, filePath+";"+strconv.FormatInt(f.Size(), 10)+";"+strconv.FormatInt(f.ModTime().Unix(), 10))
			} else {
				// Check file change
				// NOTE(review): a CRC failure silently skips the file's
				// contribution to the hash — confirm this is intentional.
				checksum, err := hashFileCRC32(filePath, 0xedb88320)
				if err != nil {
					return nil
				}
				io.WriteString(hash, filePath+";"+checksum)
			}
		}

		return nil
	})
	if err != nil {
		return "", errors.Errorf("Error hashing %s: %v", srcPath, err)
	}

	return fmt.Sprintf("%x", hash.Sum(nil)), nil
}
// String hashes a given string, returning the hex-encoded SHA-256 digest.
func String(s string) string {
	digest := sha256.Sum256([]byte(s))
	return fmt.Sprintf("%x", digest[:])
}
func hashFileCRC32(filePath string, polynomial uint32) (string, error) {
//Initialize an empty return string now in case an error has to be returned
var returnCRC32String string
//Open the fhe file located at the given path and check for errors
file, err := os.Open(filePath)
if err != nil {
return returnCRC32String, err
}
//Tell the program to close the file when the function returns
defer file.Close()
//Create the table with the given polynomial
tablePolynomial := crc32.MakeTable(polynomial)
//Open a new hash interface to write the file to
hash := crc32.New(tablePolynomial)
//Copy the file in the interface
if _, err := io.Copy(hash, file); err != nil {
return returnCRC32String, err
}
//Generate the hash
hashInBytes := hash.Sum(nil)[:]
//Encode the hash to a string
returnCRC32String = hex.EncodeToString(hashInBytes)
//Return the output
return returnCRC32String, nil
}
|
/*
Copyright 2020 The SuperEdge Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"github.com/hashicorp/serf/serf"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
)
// Cache aggregates everything the storage layer offers: read access to the
// cached objects, informer event handlers that keep the cache current, and
// bookkeeping for local application endpoints.
type Cache interface {
	CacheList
	CacheEventHandler
	LocalAppInfoHandler
}

// CacheList provides read access to the cached Kubernetes objects.
type CacheList interface {
	GetNode(hostName string) *v1.Node
	GetServices() []*v1.Service
	GetEndpoints() []*v1.Endpoints
}

// CacheEventHandler exposes the informer callbacks used to keep the cached
// nodes, services and endpoints in sync with the cluster.
type CacheEventHandler interface {
	NodeEventHandler() cache.ResourceEventHandler
	ServiceEventHandler() cache.ResourceEventHandler
	EndpointsEventHandler() cache.ResourceEventHandler
}

// LocalAppInfoHandler manages the mapping from namespaced names to the serf
// cluster members serving them locally.
type LocalAppInfoHandler interface {
	SetLocalAppInfo(map[types.NamespacedName][]serf.Member)
	ClearLocalAppInfo()
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package membuf
// Defaults used by NewPool when no Option overrides them.
const (
	defaultPoolSize            = 1024    // blocks kept in the free-list channel
	defaultBlockSize           = 1 << 20 // 1M
	defaultLargeAllocThreshold = 1 << 16 // 64K
)

// Allocator is the abstract interface for allocating and freeing memory.
type Allocator interface {
	Alloc(n int) []byte
	Free([]byte)
}

// stdAllocator allocates via the Go runtime; Free is a no-op because the
// garbage collector reclaims the memory.
type stdAllocator struct{}

func (stdAllocator) Alloc(n int) []byte {
	return make([]byte, n)
}

func (stdAllocator) Free(_ []byte) {}

// Pool is like `sync.Pool`, which manages memory for all bytes buffers.
//
// NOTE: we don't used a `sync.Pool` because when will sync.Pool release is depending on the
// garbage collector which always release the memory so late. Use a fixed size chan to reuse
// can decrease the memory usage to 1/3 compare with sync.Pool.
type Pool struct {
	allocator           Allocator   // backing allocator for cache misses
	blockSize           int         // size of every block handed out
	blockCache          chan []byte // fixed-capacity free list of blocks
	largeAllocThreshold int         // Buffer allocations above this bypass the pool
}
// Option configures a pool.
type Option func(p *Pool)

// WithPoolSize configures how many blocks cached by this pool.
// Note: this replaces the block-cache channel, so it must be applied at
// construction time (NewPool), before the pool is used.
func WithPoolSize(size int) Option {
	return func(p *Pool) {
		p.blockCache = make(chan []byte, size)
	}
}

// WithBlockSize configures the size of each block.
func WithBlockSize(size int) Option {
	return func(p *Pool) {
		p.blockSize = size
	}
}

// WithAllocator specifies the allocator used by pool to allocate and free memory.
func WithAllocator(allocator Allocator) Option {
	return func(p *Pool) {
		p.allocator = allocator
	}
}

// WithLargeAllocThreshold configures the threshold for large allocation of a Buffer.
// If allocate size is larger than this threshold, bytes will be allocated directly
// by the make built-in function and won't be tracked by the pool.
func WithLargeAllocThreshold(threshold int) Option {
	return func(p *Pool) {
		p.largeAllocThreshold = threshold
	}
}
// NewPool creates a new pool with the default configuration, then applies
// the supplied options in order.
func NewPool(opts ...Option) *Pool {
	pool := &Pool{
		allocator:           stdAllocator{},
		blockSize:           defaultBlockSize,
		blockCache:          make(chan []byte, defaultPoolSize),
		largeAllocThreshold: defaultLargeAllocThreshold,
	}
	for _, option := range opts {
		option(pool)
	}
	return pool
}
// acquire returns a cached block if one is available, otherwise allocates a
// fresh blockSize-byte block (never blocks).
func (p *Pool) acquire() []byte {
	select {
	case b := <-p.blockCache:
		return b
	default:
		return p.allocator.Alloc(p.blockSize)
	}
}

// release returns a block to the cache, or frees it through the allocator
// when the cache is already full (never blocks).
func (p *Pool) release(b []byte) {
	select {
	case p.blockCache <- b:
	default:
		p.allocator.Free(b)
	}
}

// NewBuffer creates a new buffer in current pool.
// curBufIdx starts at -1 so the first AllocBytes triggers addBuf.
func (p *Pool) NewBuffer() *Buffer {
	return &Buffer{pool: p, bufs: make([][]byte, 0, 128), curBufIdx: -1}
}

// Destroy frees all buffers.
// NOTE(review): this closes blockCache, so any acquire/release after
// Destroy will misbehave — callers must ensure the pool is idle first.
func (p *Pool) Destroy() {
	close(p.blockCache)
	for b := range p.blockCache {
		p.allocator.Free(b)
	}
}

// TotalSize is the total memory size of this Pool.
func (p *Pool) TotalSize() int64 {
	return int64(len(p.blockCache) * p.blockSize)
}
// Buffer represents the reuse buffer.
// It carves small allocations out of pool-provided blocks; bufs holds every
// block ever acquired so Reset can rewind without releasing them.
type Buffer struct {
	pool      *Pool    // pool that blocks are acquired from / released to
	bufs      [][]byte // all blocks owned by this buffer
	curBuf    []byte   // block currently being carved up
	curIdx    int      // next free byte within curBuf
	curBufIdx int      // index of curBuf within bufs (-1 before first use)
	curBufLen int      // cached len(curBuf)
}

// addBuf adds buffer to Buffer.
// It first reuses an already-owned block past curBufIdx (after a Reset);
// only when none remain does it acquire a new block from the pool.
func (b *Buffer) addBuf() {
	if b.curBufIdx < len(b.bufs)-1 {
		b.curBufIdx++
		b.curBuf = b.bufs[b.curBufIdx]
	} else {
		buf := b.pool.acquire()
		b.bufs = append(b.bufs, buf)
		b.curBuf = buf
		b.curBufIdx = len(b.bufs) - 1
	}
	b.curBufLen = len(b.curBuf)
	b.curIdx = 0
}
// Reset resets the buffer.
// Previously acquired blocks are kept and will be reused by addBuf; the
// bytes handed out before the Reset are therefore invalidated.
func (b *Buffer) Reset() {
	if len(b.bufs) > 0 {
		b.curBuf = b.bufs[0]
		b.curBufLen = len(b.bufs[0])
		b.curBufIdx = 0
		b.curIdx = 0
	}
}

// Destroy frees all buffers.
// Every owned block is handed back to the pool and the buffer is emptied.
func (b *Buffer) Destroy() {
	for _, buf := range b.bufs {
		b.pool.release(buf)
	}
	b.bufs = nil
}

// TotalSize represents the total memory size of this Buffer.
func (b *Buffer) TotalSize() int64 {
	return int64(len(b.bufs) * b.pool.blockSize)
}

// AllocBytes allocates bytes with the given length.
// Requests above largeAllocThreshold bypass the pool entirely; smaller ones
// are carved from the current block, starting a new block when it is full.
// NOTE(review): this assumes n <= blockSize whenever n <= largeAllocThreshold;
// if an Option sets largeAllocThreshold above blockSize, a mid-sized request
// could overrun a block — confirm the options are constrained accordingly.
func (b *Buffer) AllocBytes(n int) []byte {
	if n > b.pool.largeAllocThreshold {
		return make([]byte, n)
	}
	if b.curIdx+n > b.curBufLen {
		b.addBuf()
	}
	idx := b.curIdx
	b.curIdx += n
	// Full-slice expression caps capacity so callers can't stomp later
	// allocations via append.
	return b.curBuf[idx:b.curIdx:b.curIdx]
}

// AddBytes adds the bytes into this Buffer.
func (b *Buffer) AddBytes(bytes []byte) []byte {
	buf := b.AllocBytes(len(bytes))
	copy(buf, bytes)
	return buf
}
|
package main
import (
"fmt"
"github.com/kwo/bigmux"
"gopkg.in/yaml.v2"
"io/ioutil"
"log"
"os"
"path"
)
// main loads the YAML configuration named on the command line, validates it,
// and starts the bigmux service; any failure prints a message and exits 1.
func main() {
	if len(os.Args) != 2 {
		fmt.Printf("Usage: %s <config-file>\n", path.Base(os.Args[0]))
		os.Exit(1)
	}
	raw, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		fmt.Printf("Cannot read configuration file: %s\n", err.Error())
		os.Exit(1)
	}
	mux := &bigmux.BigMux{}
	if err := yaml.Unmarshal(raw, mux); err != nil {
		fmt.Printf("Cannot parse configuration file: %s\n", err.Error())
		os.Exit(1)
	}
	if err := mux.CheckConfiguration(true); err != nil {
		fmt.Printf("Bad configuration file: %s\n", err.Error())
		os.Exit(1)
	}
	if err := mux.Start(); err != nil {
		log.Printf("Error: %s", err.Error())
		os.Exit(1)
	}
}
|
/*
Package kinematics calculates forward and inverse kinematics for robotic arm
systems.
Forward kinematics takes joint angles and returns an XyzWxyz coordinate of the
end effector. Inverse kinematics takes an XyzWxyz coordinate and returns joint
angles that move the end effector to that coordinate.
ForwardKinematics (joint angles -> xyzwxyz)
InverseKinematics (xyzwxyz -> joint angles)
*/
package kinematics
import (
"errors"
"gonum.org/v1/gonum/mat"
"gonum.org/v1/gonum/optimize"
"math"
"math/rand"
)
// DhParameters stand for "Denavit-Hartenberg Parameters". These parameters
// define a robotic arm for input into forward or reverse kinematics.
// Each array holds one value per joint, ordered J1..J6; angles are in
// radians (they are fed to math.Sin/math.Cos in ForwardKinematics).
type DhParameters struct {
	ThetaOffsets [6]float64 // per-joint angle offsets added to commanded angles
	AlphaValues  [6]float64 // link twist angles
	AValues      [6]float64 // link lengths (a parameters)
	DValues      [6]float64 // link offsets (d parameters)
}
// StepperTheta represents angles of the joint stepper motors.
type StepperTheta struct {
J1 float64
J2 float64
J3 float64
J4 float64
J5 float64
J6 float64
}
// toFloat flattens the six joint angles into a []float64, ordered J1..J6.
func (st *StepperTheta) toFloat() []float64 {
	angles := make([]float64, 0, 6)
	angles = append(angles, st.J1, st.J2, st.J3, st.J4, st.J5, st.J6)
	return angles
}
// XyzWxyz represents an Xyz Qw-Qx-Qy-Qz coordinate, where Qw-Qx-Qy-Qz are
// quaternion coordinates for the rotation of a given end effector.
type XyzWxyz struct {
	X  float64 // position
	Y  float64
	Z  float64
	Qx float64 // rotation quaternion components
	Qy float64
	Qz float64
	Qw float64
}
// ForwardKinematics calculates the end effector XyzWxyz coordinates given
// joint angles and robotic arm parameters.
func ForwardKinematics(thetas StepperTheta, dhParameters DhParameters) XyzWxyz {
	// First, setup variables. We use 4 variables - theta, alpha, a and d to calculate a matrix
	// which is then multiplied to an accumulator matrix.
	thetaArray := []float64{thetas.J1, thetas.J2, thetas.J3, thetas.J4, thetas.J5, thetas.J6}
	var theta float64
	var alpha float64
	var a float64
	var d float64
	// Setup accumulator matrix - an identity matrix.
	accumulatortMat := mat.NewDense(4, 4, []float64{1, 0, 0, 0,
		0, 1, 0, 0,
		0, 0, 1, 0,
		0, 0, 0, 1,
	})
	// Iterate through each joint and built a new
	// matrix, multiplying it against the accumulator.
	for jointIdx := 0; jointIdx < 6; jointIdx++ {
		// Effective joint angle = commanded angle + the joint's fixed offset.
		theta = thetaArray[jointIdx]
		theta = theta + dhParameters.ThetaOffsets[jointIdx]
		alpha = dhParameters.AlphaValues[jointIdx]
		a = dhParameters.AValues[jointIdx]
		d = dhParameters.DValues[jointIdx]
		// Per-joint homogeneous Denavit-Hartenberg transform.
		tMat := mat.NewDense(4, 4, []float64{
			// First row
			math.Cos(theta),
			-math.Sin(theta) * math.Cos(alpha),
			math.Sin(theta) * math.Sin(alpha),
			a * math.Cos(theta),
			// Second row
			math.Sin(theta),
			math.Cos(theta) * math.Cos(alpha),
			-math.Cos(theta) * math.Sin(alpha),
			a * math.Sin(theta),
			// Third row
			0,
			math.Sin(alpha),
			math.Cos(alpha),
			d,
			// Forth row
			0,
			0,
			0,
			1,
		})
		// Multiply tMat against accumulatortMat
		x := mat.NewDense(4, 4, nil)
		x.Mul(accumulatortMat, tMat)
		accumulatortMat = x
	}
	// Now that we have the final accumulatorMatrix, lets figure out the euler angles.
	// Translation is the last column of the transform; the rotation is
	// extracted as a quaternion from the upper-left 3x3 block.
	var output XyzWxyz
	output.X = accumulatortMat.At(0, 3)
	output.Y = accumulatortMat.At(1, 3)
	output.Z = accumulatortMat.At(2, 3)
	output.Qw, output.Qx, output.Qy, output.Qz = matrixToQuaterian(accumulatortMat)
	return output
}
// InverseKinematics calculates joint angles to achieve an XyzWxyz end effector
// position given the desired XyzWxyz coordinates and the robotic arm
// parameters. It minimizes a combined positional/rotational error and
// retries from random seeds (up to 100 times) until the residual is below
// 1e-6; it returns an error when the optimizer fails or never converges.
func InverseKinematics(desiredEndEffector XyzWxyz, dhParameters DhParameters) (StepperTheta, error) {
	thetasInit := StepperTheta{0, 0, 0, 0, 0, 0}
	// Objective: squared xyz offset plus squared quaternion geodesic
	// distance, scaled by 0.25.
	objectiveFunction := func(s []float64) float64 {
		stepperThetaTest := StepperTheta{s[0], s[1], s[2], s[3], s[4], s[5]}
		currentEndEffector := ForwardKinematics(stepperThetaTest, dhParameters)
		// Get XYZ offsets
		xOffset := desiredEndEffector.X - currentEndEffector.X
		yOffset := desiredEndEffector.Y - currentEndEffector.Y
		zOffset := desiredEndEffector.Z - currentEndEffector.Z
		// Rotational offset, equivalent to numpy's
		// arccos(clip(2*dot(target_quat, source_quat)**2 - 1, -1, 1)).
		dotOffset := (desiredEndEffector.Qw * currentEndEffector.Qw) + (desiredEndEffector.Qx * currentEndEffector.Qx) + (desiredEndEffector.Qy * currentEndEffector.Qy) + (desiredEndEffector.Qz * currentEndEffector.Qz)
		dotOffset = (2*(dotOffset*dotOffset) - 1)
		// Clamp both ends into acos' domain against floating-point drift
		// (the original clamped only the upper bound).
		if dotOffset > 1 {
			dotOffset = 1
		} else if dotOffset < -1 {
			dotOffset = -1
		}
		rotationalOffset := math.Acos(dotOffset)
		// Get the error vector
		errorVector := ((xOffset * xOffset) + (yOffset * yOffset) + (zOffset * zOffset) + (rotationalOffset * rotationalOffset)) * 0.25
		return errorVector
	}
	// Setup problem and method for solving
	problem := optimize.Problem{Func: objectiveFunction}
	// Solve
	result, err := optimize.Minimize(problem, thetasInit.toFloat(), nil, nil)
	if err != nil {
		return StepperTheta{}, err
	}
	f := result.Location.F
	// If the results aren't up to spec, queue up another theta seed and test again.
	// We arbitrarily choose 10e-6 because that is small enough that the errors do not matter.
	for i := 0; f > 0.000001; i++ {
		// Get a random seed
		randTheta := func() float64 {
			return 360 * rand.Float64()
		}
		randomSeed := StepperTheta{randTheta(), randTheta(), randTheta(), randTheta(), randTheta(), randTheta()}
		// Assign (=) into the outer result: the original declared a new
		// `result` with := here, so every retry's solution was discarded
		// and the initial solve's angles were returned below.
		result, err = optimize.Minimize(problem, randomSeed.toFloat(), nil, nil)
		if err != nil {
			return StepperTheta{}, err
		}
		f = result.Location.F
		if i == 100 {
			return StepperTheta{}, errors.New("failed to converge after 100 attempts")
		}
	}
	r := result.Location.X
	return StepperTheta{r[0], r[1], r[2], r[3], r[4], r[5]}, nil
}
// matrixToQuaterian converts the rotation part of a 4x4 homogeneous
// transform to a quaternion (qw, qx, qy, qz), following
// http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/
// The branch is selected on the largest diagonal term for numerical
// stability.
func matrixToQuaterian(accumulatortMat *mat.Dense) (float64, float64, float64, float64) {
	var qw float64
	var qx float64
	var qy float64
	var qz float64
	var tr float64
	var s float64
	tr = accumulatortMat.At(0, 0) + accumulatortMat.At(1, 1) + accumulatortMat.At(2, 2)
	switch {
	case tr > 0:
		s = math.Sqrt(tr+1.0) * 2
		qw = 0.25 * s
		qx = (accumulatortMat.At(2, 1) - accumulatortMat.At(1, 2)) / s
		qy = (accumulatortMat.At(0, 2) - accumulatortMat.At(2, 0)) / s
		qz = (accumulatortMat.At(1, 0) - accumulatortMat.At(0, 1)) / s
	case accumulatortMat.At(0, 0) > accumulatortMat.At(1, 1) && accumulatortMat.At(0, 0) > accumulatortMat.At(2, 2):
		s = math.Sqrt(1.0+accumulatortMat.At(0, 0)-accumulatortMat.At(1, 1)-accumulatortMat.At(2, 2)) * 2
		qw = (accumulatortMat.At(2, 1) - accumulatortMat.At(1, 2)) / s
		qx = 0.25 * s
		qy = (accumulatortMat.At(0, 1) + accumulatortMat.At(1, 0)) / s
		qz = (accumulatortMat.At(0, 2) + accumulatortMat.At(2, 0)) / s
	case accumulatortMat.At(1, 1) > accumulatortMat.At(2, 2):
		s = math.Sqrt(1.0+accumulatortMat.At(1, 1)-accumulatortMat.At(0, 0)-accumulatortMat.At(2, 2)) * 2
		qw = (accumulatortMat.At(0, 2) - accumulatortMat.At(2, 0)) / s
		qx = (accumulatortMat.At(0, 1) + accumulatortMat.At(1, 0)) / s
		qy = 0.25 * s
		qz = (accumulatortMat.At(2, 1) + accumulatortMat.At(1, 2)) / s
	default:
		s = math.Sqrt(1.0+accumulatortMat.At(2, 2)-accumulatortMat.At(0, 0)-accumulatortMat.At(1, 1)) * 2
		// BUG FIX: the original computed qw = (m01 - m10) with neither the
		// division by s nor the (m10 - m01) orientation the reference
		// algorithm specifies, producing a non-normalized, wrongly-signed
		// quaternion whenever this branch was taken.
		qw = (accumulatortMat.At(1, 0) - accumulatortMat.At(0, 1)) / s
		qx = (accumulatortMat.At(0, 2) + accumulatortMat.At(2, 0)) / s
		qy = (accumulatortMat.At(2, 1) + accumulatortMat.At(1, 2)) / s
		qz = 0.25 * s
	}
	return qw, qx, qy, qz
}
|
package main
import (
"fmt"
"math/rand"
)
// Demonstrates Go switch statements: tagged, tagless, and with an init
// statement. Cases break by default; `fallthrough` forces the next case
// body to run as well. Ends with a number-guessing loop on stdin.
func main() {
	num := 6
	// Tagged switch: fallthrough chains the first three cases.
	switch num {
	case 0:
		fmt.Println("值为0")
		fallthrough
	case 1:
		fmt.Println("值为1")
		fallthrough
	case 2:
		fmt.Println("值为2")
	case 3, 4, 5, 6, 7, 8:
		fmt.Println("值在3, 4, 5, 6, 7, 8之间")
	default:
		fmt.Println("值未知")
	}
	// Tagless switch: the first true case wins.
	switch {
	case num > 0 && num < 5:
		fmt.Println("a > 0 && a < 5")
	case num >= 5 && num < 10:
		fmt.Println("a >= 5 && a < 10")
	default:
		fmt.Println("default")
	}
	// Switch with an init statement (note the required semicolon).
	switch num = 3 * num; {
	case num > 0 && num < 5:
		fmt.Println("a > 0 && a < 5")
	case num >= 5 && num < 10:
		fmt.Println("a >= 5 && a < 10")
	default:
		fmt.Println("a >= 10")
	}
	// Guessing game: keep reading integers until one matches the target.
	target := rand.Intn(100)
	for {
		var guess int
		fmt.Scanf("%d", &guess)
		matched := false
		switch {
		case guess == target:
			matched = true
			fmt.Println("you are right!!!!!")
		case guess > target:
			fmt.Println("bigger")
		case guess < target:
			fmt.Println("less")
		}
		if matched {
			break
		}
	}
}
|
package main
import (
"context"
"errors"
"fmt"
"io"
"log"
"os"
"github.com/akito0107/xsqlparser"
"github.com/akito0107/xsqlparser/dialect"
"github.com/urfave/cli"
"github.com/xo/dburl"
"github.com/akito0107/xmigrate"
)
// main configures the pginverse CLI, validates the db url argument, and
// dispatches to diffAction.
func main() {
	app := cli.NewApp()
	app.Name = "pginverse"
	app.Usage = "postgres db migration utility"
	app.UsageText = "pginverse [db url] [OPTIONS]"
	app.Flags = []cli.Flag{
		cli.StringFlag{Name: "in,i", Value: "stdin", Usage: "input file name (default = stdin)"},
		cli.StringFlag{Name: "out,o", Value: "stdout", Usage: "output file name (default = stdout)"},
	}
	app.Action = func(c *cli.Context) error {
		target := c.Args().Get(0)
		if target == "" {
			return errors.New("db url is required")
		}
		parsed, err := dburl.Parse(target)
		if err != nil {
			return err
		}
		return diffAction(c, parsed)
	}
	if err := app.Run(os.Args); err != nil {
		log.Fatalf("%+v", err)
	}
}
// diffAction parses migration DSL statements from --in (default stdin),
// diffs them against the live schema at u, and writes the inverse
// migration SQL to --out (default stdout).
func diffAction(c *cli.Context, u *dburl.URL) error {
	ctx := context.Background()
	insrc := c.GlobalString("in")
	outsrc := c.GlobalString("out")
	var in io.Reader
	if insrc == "stdin" {
		in = os.Stdin
	} else {
		f, err := os.Open(insrc)
		if err != nil {
			return err
		}
		defer f.Close()
		in = f
	}
	var out io.Writer
	if outsrc == "stdout" {
		out = os.Stdout
	} else {
		// BUG FIX: the original used os.Open, which returns a read-only
		// handle (and fails when the file does not exist); writing the
		// output requires os.Create.
		f, err := os.Create(outsrc)
		if err != nil {
			return err
		}
		defer f.Close()
		out = f
	}
	parser, err := xsqlparser.NewParser(in, &dialect.PostgresqlDialect{})
	if err != nil {
		return err
	}
	stmts, err := parser.ParseSQL()
	if err != nil {
		return err
	}
	diffs, err := xmigrate.DSLToDiff(stmts)
	if err != nil {
		return err
	}
	// Dump the current schema once, then invert each diff against it.
	dumper := xmigrate.NewPGDumpFromURL(u)
	current, err := dumper.Dump(ctx)
	if err != nil {
		return err
	}
	for _, d := range diffs {
		inv, err := xmigrate.Inverse(d, current)
		if err != nil {
			return err
		}
		fmt.Fprintf(out, "%s\n", inv.Spec.ToSQLString())
	}
	return nil
}
|
package main
import (
"bufio"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"regexp"
"strings"
)
// iPInfo mirrors the JSON payload of the internal IP-lookup service used
// by getIPInfo: {"Data": ["<description>", ...]}.
type iPInfo struct {
	Data []string
}
// main reads the file named as the sole argument line by line and prints
// each line, numbered, annotated via trans with lookup info for any IPv4
// addresses it contains.
func main() {
	if len(os.Args) != 2 {
		fmt.Println("Need a file name")
		return
	}
	inputFile, inputError := os.Open(os.Args[1]) // handle for the input file
	if inputError != nil {
		fmt.Printf("An error occurred on opening the inputfile\n")
		return
	}
	defer inputFile.Close()
	inputReader := bufio.NewReader(inputFile)
	lineCounter := 0
	for {
		inputString, readerError := inputReader.ReadString('\n')
		if readerError != nil && readerError != io.EOF {
			// The original only recognized EOF; surface real read errors.
			fmt.Printf("read error: %s\n", readerError)
			return
		}
		// Process any data returned alongside EOF so a final line without a
		// trailing newline is not silently dropped (the original skipped it).
		if len(inputString) > 0 {
			lineCounter++
			fmt.Printf("%d : %s\r\n", lineCounter, trans(strings.TrimSpace(inputString)))
		}
		if readerError == io.EOF {
			return
		}
	}
}
// ipv4Pattern matches dotted-quad IPv4 addresses. Compiled once at package
// level instead of on every trans call (the original also discarded the
// compile error).
var ipv4Pattern = regexp.MustCompile(`\d+\.\d+\.\d+\.\d+`)

// trans annotates a line with lookup info for every IPv4 address in it.
// The result is "<line>\t\t<info1>\t<info2>\t..." (just "<line>\t\t" when
// the line contains no address).
func trans(line string) string {
	var desc strings.Builder
	for _, ip := range ipv4Pattern.FindAllString(line, -1) {
		desc.WriteString(getIPInfo(ip))
		desc.WriteString("\t")
	}
	return fmt.Sprintf("%s\t\t%s", line, desc.String())
}
// getIPInfo queries the internal lookup service for the given IP and
// returns the first description it reports, or "" on any failure.
func getIPInfo(ip string) string {
	resp, err := http.Get("http://192.168.202.81:12101/find?ip=" + ip)
	if err != nil {
		// BUG FIX: the original ignored this error and then deferred
		// resp.Body.Close() on a nil response, which panics.
		return ""
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// Read failure: nothing usable to decode.
		return ""
	}
	var iPJson iPInfo
	err = json.Unmarshal(body, &iPJson)
	if err != nil {
		fmt.Println("Decode json fail")
	}
	if len(iPJson.Data) > 0 {
		return iPJson.Data[0]
	}
	return ""
}
|
package main
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"os/exec"
"strings"
"sync"
)
// handler serves the static home page from disk.
func handler(w http.ResponseWriter, r *http.Request) {
	log.Print("Home page request.")
	page, err := ioutil.ReadFile("static/index.html")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Write(page)
}
// testHandler runs the smlbench binary, tees its stdout/stderr to the
// server's own streams, and returns the captured stdout to the client.
// Only POST is accepted.
func testHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		http.Error(w, "404 not found.", http.StatusNotFound)
		return
	}
	trace := r.Header.Get("X-Cloud-Trace-Context")
	log.Print("Test request ", trace)
	cmd := exec.Command("./smlbench")
	cmd.Dir = "/home/testground/testground/plans/smlbench/cmd"
	var stdoutBuf, stderrBuf bytes.Buffer
	// BUG FIX: the original discarded pipe-creation errors with `_`.
	stdoutIn, err := cmd.StdoutPipe()
	if err != nil {
		http.Error(w, "500 Exception", http.StatusInternalServerError)
		log.Print(err)
		return
	}
	stderrIn, err := cmd.StderrPipe()
	if err != nil {
		http.Error(w, "500 Exception", http.StatusInternalServerError)
		log.Print(err)
		return
	}
	stdout := io.MultiWriter(os.Stdout, &stdoutBuf)
	stderr := io.MultiWriter(os.Stderr, &stderrBuf)
	if err := cmd.Start(); err != nil {
		http.Error(w, "500 Exception", http.StatusInternalServerError)
		log.Print(err)
		return
	}
	// Drain stdout concurrently with stderr so neither pipe can block the
	// child process.
	var wg sync.WaitGroup
	wg.Add(1)
	var errStdout, errStderr error
	go func() {
		defer wg.Done()
		_, errStdout = io.Copy(stdout, stdoutIn)
	}()
	_, errStderr = io.Copy(stderr, stderrIn)
	wg.Wait()
	if err := cmd.Wait(); err != nil || errStdout != nil || errStderr != nil {
		http.Error(w, "500 Exception", http.StatusInternalServerError)
		log.Print(err)
		return
	}
	// Split/Join preserves the text exactly; the original's extra
	// element-by-element copy into `lines` was a no-op and is dropped.
	output := strings.Split(stdoutBuf.String(), "\n")
	io.WriteString(w, strings.Join(output, "\n"))
}
// main registers the HTTP handlers and serves on $PORT (default 8099).
func main() {
	http.HandleFunc("/", handler)
	http.HandleFunc("/test", testHandler)
	port := os.Getenv("PORT")
	if port == "" {
		port = "8099"
	}
	log.Printf("Web server started on port: %s\n", port)
	addr := fmt.Sprintf(":%s", port)
	log.Fatal(http.ListenAndServe(addr, nil))
}
|
package main
import (
bootstrap "code-cadets-2021/lecture_2/05_offerfeed/cmd/bootstrap"
"code-cadets-2021/lecture_2/05_offerfeed/internal/tasks"
)
// main wires up the offer-feed application — a signal handler, the Axilis
// feed, an ordered queue, and the service that pumps the feed into the
// queue — then runs them all.
func main() {
	handler := bootstrap.NewSignalHandler()
	offerFeed := bootstrap.NewAxilisOfferFeed()
	orderedQueue := bootstrap.NewOrderedQueue()
	service := bootstrap.FeedProcessingService(offerFeed, orderedQueue)
	// Blocking call: runs "the application" until it stops.
	tasks.RunTasks(handler, offerFeed, orderedQueue, service)
}
|
package Models
import (
"fmt"
_ "github.com/go-sql-driver/mysql"
"resource-api/Config"
)
// GetAllClients fetches every client row into the provided slice.
func GetAllClients(client *[]Client) (err error) {
	return Config.DB.Find(client).Error
}
// CreateClient inserts a new client row.
func CreateClient(client *Client) (err error) {
	return Config.DB.Create(client).Error
}
// GetClientByID loads the single client with the given id into client.
func GetClientByID(client *Client, id string) (err error) {
	return Config.DB.Where("id = ?", id).First(client).Error
}
// UpdateClient persists the given client record and reports any database
// error (the original discarded it and always returned nil).
// NOTE(review): the id parameter is unused — the record's own primary key
// drives the save; confirm callers rely on that.
func UpdateClient(client *Client, id string) (err error) {
	fmt.Println(client) // debug trace retained (sole use of the fmt import)
	return Config.DB.Save(client).Error
}
//DeleteClient ... Delete client
func DeleteClient(client *Client, id string) (err error) {
Config.DB.Where("id = ?", id).Delete(client)
return nil
} |
package types
import (
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestMessageInfoHandlesMultipleCoins checks that a MessageInfo carrying
// several coins survives a JSON round trip unchanged.
func TestMessageInfoHandlesMultipleCoins(t *testing.T) {
	info := MessageInfo{
		Sender: "foobar",
		Funds: []Coin{
			{Denom: "peth", Amount: "12345"},
			{Denom: "uatom", Amount: "789876"},
		},
	}
	bz, err := json.Marshal(info)
	require.NoError(t, err)
	// we can unmarshal it properly into struct
	// (renamed from `recover`, which shadowed the builtin of that name)
	var decoded MessageInfo
	err = json.Unmarshal(bz, &decoded)
	require.NoError(t, err)
	assert.Equal(t, info, decoded)
}
// TestMessageInfoHandlesMissingCoins checks that a MessageInfo with no
// funds round-trips and that its JSON encodes "funds" as [] rather than
// null.
func TestMessageInfoHandlesMissingCoins(t *testing.T) {
	info := MessageInfo{
		Sender: "baz",
	}
	bz, err := json.Marshal(info)
	require.NoError(t, err)
	// we can unmarshal it properly into struct
	// (renamed from `recover`, which shadowed the builtin of that name)
	var decoded MessageInfo
	err = json.Unmarshal(bz, &decoded)
	require.NoError(t, err)
	assert.Equal(t, info, decoded)
	// make sure "funds":[] is in JSON
	var raw map[string]json.RawMessage
	err = json.Unmarshal(bz, &raw)
	require.NoError(t, err)
	funds, ok := raw["funds"]
	require.True(t, ok)
	assert.Equal(t, "[]", string(funds))
}
// TestBlockInfoSerialization checks the exact JSON emitted for both a
// populated and a zero-valued BlockInfo (time is encoded as a string).
func TestBlockInfoSerialization(t *testing.T) {
	cases := []struct {
		block BlockInfo
		want  string
	}{
		{
			BlockInfo{Height: 123, Time: 1578939743_987654321, ChainID: "foobar"},
			`{"height":123,"time":"1578939743987654321","chain_id":"foobar"}`,
		},
		{
			BlockInfo{Height: 0, Time: 0, ChainID: ""},
			`{"height":0,"time":"0","chain_id":""}`,
		},
	}
	for _, tc := range cases {
		bz, err := json.Marshal(tc.block)
		require.NoError(t, err)
		assert.Equal(t, tc.want, string(bz))
	}
}
// TestBlockInfoDeserialization checks decoding of populated and zero
// payloads, and that an empty time string is rejected (it must be a
// stringified uint64).
func TestBlockInfoDeserialization(t *testing.T) {
	var block BlockInfo
	// Fully populated payload.
	err := json.Unmarshal([]byte(`{"height":123,"time":"1578939743987654321","chain_id":"foobar"}`), &block)
	require.NoError(t, err)
	assert.Equal(t, BlockInfo{
		Height:  123,
		Time:    1578939743_987654321,
		ChainID: "foobar",
	}, block)
	// All zero values.
	err = json.Unmarshal([]byte(`{"height":0,"time":"0","chain_id":""}`), &block)
	require.NoError(t, err)
	assert.Equal(t, BlockInfo{}, block)
	// Empty string is not a valid uint64 string.
	err = json.Unmarshal([]byte(`{"height":0,"time":"","chain_id":""}`), &block)
	require.ErrorContains(t, err, "invalid use of ,string struct tag, trying to unmarshal \"\" into uint64")
}
|
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"os"
"os/signal"
"sync"
"syscall"
"github.com/bobrovka/calendar/internal"
"github.com/bobrovka/calendar/internal/consumer"
"github.com/bobrovka/calendar/internal/models"
"github.com/heetch/confita"
"github.com/heetch/confita/backend/file"
flag "github.com/spf13/pflag"
"github.com/streadway/amqp"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// configPath holds the value of the --config/-c flag: the path to the
// configuration file loaded by getConfig.
var configPath string

// init registers the command-line flags before flag.Parse runs in main.
func init() {
	flag.StringVarP(&configPath, "config", "c", "", "path to config file")
}
// main runs the notification sender: it loads configuration, builds a zap
// logger, and consumes calendar events from RabbitMQ until SIGINT or
// SIGTERM triggers a graceful shutdown.
func main() {
	flag.Parse()
	cfg := getConfig()
	logCfg := zap.NewDevelopmentConfig()
	logCfg.EncoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
	logCfg.EncoderConfig.EncodeTime = zapcore.EpochMillisTimeEncoder
	logCfg.OutputPaths = []string{cfg.LogFileSender}
	logger, err := logCfg.Build()
	failOnError(err, "cant create logger")
	defer logger.Sync()
	mqConsumer := consumer.NewConsumer("", fmt.Sprintf(
		"amqp://%s:%s@%s:%d",
		cfg.RabbitUser,
		cfg.RabbitPassword,
		cfg.RabbitHost,
		cfg.RabbitPort,
	), "event.exchange", "direct", "event.queue", "event.notification")
	ctx, cancel := context.WithCancel(context.Background())
	wg := &sync.WaitGroup{}
	go func() {
		err := mqConsumer.Handle(ctx, wg, func(msgs <-chan amqp.Delivery) {
			for {
				select {
				case msg, ok := <-msgs:
					// Channel closed: stop this handler.
					if !ok {
						return
					}
					var e models.Event
					err := json.Unmarshal(msg.Body, &e)
					if err != nil {
						logger.Warn(fmt.Sprintf("got invalid message %s", msg.Body))
						msg.Reject(false)
					} else {
						logger.Info(fmt.Sprintf("Notification to %s\n%s at %v", e.User, e.Title, e.StartAt))
						msg.Ack(false)
					}
				case <-ctx.Done():
					// Program is shutting down: stop this handler.
					return
				}
			}
		}, 3)
		failOnError(err, "handling error")
	}()
	// BUG FIX: signal.Notify requires a buffered channel — with the
	// original unbuffered one, a signal delivered while main is not yet
	// blocked on the receive below would be dropped.
	termChan := make(chan os.Signal, 1)
	signal.Notify(termChan, syscall.SIGINT, syscall.SIGTERM)
	<-termChan
	cancel()
	wg.Wait()
	fmt.Println("Sender stopped")
}
// failOnError aborts the process with msg and the error when err is
// non-nil; it is a no-op otherwise.
func failOnError(err error, msg string) {
	if err == nil {
		return
	}
	log.Fatalf("%s: %s", msg, err)
}
// getConfig loads the service configuration from configPath, starting
// from defaults for the HTTP listen address and log level. It aborts the
// process when no config file was given or loading fails.
func getConfig() *internal.Config {
	if configPath == "" {
		log.Fatal("no config file")
	}
	cfg := &internal.Config{
		HTTPListen: "127.0.0.1:50051",
		LogLevel:   "debug",
	}
	backend := file.NewBackend(configPath)
	err := confita.NewLoader(backend).Load(context.Background(), cfg)
	failOnError(err, "cannot read config")
	fmt.Println(cfg)
	return cfg
}
|
package main
import (
f "fmt"
)
// Colour codes usable as Warna values, starting at PUTIH (white) = 0.
// The order must match the name table in Warna.String.
const (
	PUTIH = iota
	HITAM
	BIRU
	MERAH
	KUNING
)
// Warna is a colour code (one of the PUTIH..KUNING constants).
type Warna byte

// Kotak is a box with three dimensions and a colour.
type Kotak struct {
	lebar, tinggi, panjang float64 // width, height, length
	warna                  Warna
}

// DaftarKotak is a list of boxes.
type DaftarKotak []Kotak
// volume returns the box volume (width × length × height).
func (k Kotak) volume() float64 {
	return k.lebar * k.panjang * k.tinggi
}
// SetWarna changes the box colour in place (pointer receiver, so the
// caller's box is mutated).
func (k *Kotak) SetWarna(w Warna) {
	k.warna = w
}
// WarnaTertinggi returns the colour of the largest box in the list, or
// PUTIH when the list is empty. The volume is computed once per box
// (the original computed it twice).
func (dk DaftarKotak) WarnaTertinggi() Warna {
	v := 0.0
	k := Warna(PUTIH)
	for _, ko := range dk {
		vol := ko.volume()
		if vol > v {
			v = vol
			k = ko.warna
		}
	}
	return k
}
// WarnaiKeHitam paints every box in the list HITAM, in place.
// (Uses `for i := range`; the original's `for i, _ :=` form is flagged by
// golint/gofmt.)
func (dk DaftarKotak) WarnaiKeHitam() {
	for i := range dk {
		dk[i].SetWarna(HITAM)
	}
}
// String returns the constant name for the colour code. It panics for
// values outside PUTIH..KUNING, like the original.
func (w Warna) String() string {
	names := [...]string{"PUTIH", "HITAM", "BIRU", "MERAH", "KUNING"}
	return names[w]
}
// main demonstrates the Kotak value/pointer receiver methods on a small
// list of boxes: printing, volume, colour lookup, and in-place recolour.
func main() {
	kotaks := DaftarKotak{
		Kotak{4, 4, 4, MERAH},
		Kotak{10, 10, 1, KUNING},
		Kotak{1, 1, 20, HITAM},
		Kotak{10, 10, 1, BIRU},
		Kotak{10, 30, 1, PUTIH},
		Kotak{20, 20, 20, KUNING},
	}
	// Print each box with its index.
	for k, v := range kotaks {
		f.Println(k, v)
	}
	f.Printf("jumlah kotak yang ada = %d \n", len(kotaks))
	f.Println("Volume kotak pertama", kotaks[0].volume(), "cm3")
	f.Println("Warna kotak terkahir", kotaks[len(kotaks)-1].warna.String())
	f.Println("Yang paling besar", kotaks.WarnaTertinggi().String())
	f.Println("Warnai semua kotak dengan warna HITAM")
	// Recolours in place, so subsequent queries see HITAM everywhere.
	kotaks.WarnaiKeHitam()
	f.Println("Warna kotak kedua : ", kotaks[1].warna.String())
	f.Println("Sekarang warna terbesar adalah ", kotaks.WarnaTertinggi().String())
}
package main
import (
"fmt"
"net/http"
"os"
"strings"
)
// main fetches the URL given as the first argument (prefixing "http://"
// when missing) and prints the HTTP status code; failures exit with 1.
func main() {
	input := os.Args[1:]
	if len(input) == 0 {
		fmt.Fprintf(os.Stderr, "No args found\n")
		os.Exit(1)
	}
	url := input[0]
	if !strings.HasPrefix(url, "http://") {
		url = "http://" + url
	}
	resp, err := http.Get(url)
	if err != nil {
		fmt.Fprintf(os.Stderr, "fetch error: %s", err)
		os.Exit(1)
	}
	// Close the body so the connection is released (the original leaked it).
	defer resp.Body.Close()
	fmt.Printf("response code: %d", resp.StatusCode)
}
|
package main
import (
"fmt"
)
// insertionSort sorts a in place in ascending order: each element is
// swapped leftward until its left neighbour is no larger.
func insertionSort(a []int) {
	for i := 1; i < len(a); i++ {
		for j := i; j > 0 && a[j-1] > a[j]; j-- {
			a[j-1], a[j] = a[j], a[j-1]
		}
	}
}

// main demonstrates insertion sort on a fixed slice.
// BUG FIX: the algorithm is insertion sort (adjacent-swap shifting), not
// selection sort; the printed labels said "Selection sort".
func main() {
	fmt.Println("Insertion sort: start")
	array := []int{4, 9, 1, 3, 1, 5, 7, 0, 11, 2, 5, 12, 10}
	fmt.Println(array)
	insertionSort(array)
	fmt.Println(array)
	fmt.Println("Insertion sort: end")
}
package controller
import (
"context"
"errors"
"cloud.google.com/go/firestore"
"google.golang.org/api/iterator"
)
// findRepo is an (empty) marker interface for find-style repositories.
// NOTE(review): it declares no methods and nothing in this file uses it —
// confirm whether it is implemented elsewhere or is dead code.
type findRepo interface {
}

// DBRepoStruct is a Firestore-backed document repository.
type DBRepoStruct struct {
	client *firestore.Client
}
// NewDBRepo returns a DBRepoStruct backed by the given Firestore client.
func NewDBRepo(client *firestore.Client) *DBRepoStruct {
	repo := &DBRepoStruct{client: client}
	return repo
}
// FindOneByField queries the collection for documents whose field equals
// value and returns the first match's data (with the document ID injected
// under "ID"). It returns an error with message "NO RESULT" when nothing
// matches, and the iterator's error on failure.
func (x *DBRepoStruct) FindOneByField(collection string, field string, value string) (map[string]interface{}, error) {
	ctx := context.Background()
	iter := x.client.Collection(collection).Where(field, "==", value).Documents(ctx)
	var dataFound map[string]interface{}
	for {
		doc, err := iter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return map[string]interface{}{}, err
		}
		dataFound = doc.Data()
		dataFound["ID"] = doc.Ref.ID
		// Only the first matching document is wanted.
		break
	}
	if dataFound == nil {
		return nil, errors.New("NO RESULT")
	}
	return dataFound, nil
}
// FindOneByID fetches the single document with the given ID from the
// collection and returns its data; "NO RESULT" is returned for a nil
// snapshot.
func (x *DBRepoStruct) FindOneByID(collection string, ID string) (map[string]interface{}, error) {
	ctx := context.Background()
	snapshot, err := x.client.Collection(collection).Doc(ID).Get(ctx)
	if err != nil {
		return map[string]interface{}{}, err
	}
	if snapshot == nil {
		return nil, errors.New("NO RESULT")
	}
	return snapshot.Data(), nil
}
// FindAllByField returns the data of every document in the collection
// whose field equals value; "NO RESULT" is returned when nothing matched.
// Note: unlike FindOneByField, document IDs are not injected here.
func (x *DBRepoStruct) FindAllByField(collection string, field string, value string) ([]map[string]interface{}, error) {
	ctx := context.Background()
	iter := x.client.Collection(collection).Where(field, "==", value).Documents(ctx)
	var result []map[string]interface{}
	for {
		doc, err := iter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return []map[string]interface{}{}, err
		}
		result = append(result, doc.Data())
	}
	if result == nil {
		return nil, errors.New("NO RESULT")
	}
	return result, nil
}
// FindAllSubColByID returns the data of every document in the
// collectionTwo subcollection of collectionOne/ID; "NO RESULT" is
// returned when the subcollection is empty.
func (x *DBRepoStruct) FindAllSubColByID(collectionOne string, ID string, collectionTwo string) ([]map[string]interface{}, error) {
	ctx := context.Background()
	iter := x.client.Collection(collectionOne).Doc(ID).Collection(collectionTwo).Documents(ctx)
	var result []map[string]interface{}
	for {
		doc, err := iter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return []map[string]interface{}{}, err
		}
		result = append(result, doc.Data())
	}
	if result == nil {
		return nil, errors.New("NO RESULT")
	}
	return result, nil
}
// FindAllSubColByIDField returns the data of every document in the
// collectionTwo subcollection of collectionOne/ID whose field equals
// value; "NO RESULT" is returned when nothing matched.
func (x *DBRepoStruct) FindAllSubColByIDField(collectionOne string, ID string, field string, value string, collectionTwo string) ([]map[string]interface{}, error) {
	ctx := context.Background()
	iter := x.client.Collection(collectionOne).Doc(ID).Collection(collectionTwo).Where(field, "==", value).Documents(ctx)
	var result []map[string]interface{}
	for {
		doc, err := iter.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return []map[string]interface{}{}, err
		}
		result = append(result, doc.Data())
	}
	if result == nil {
		return nil, errors.New("NO RESULT")
	}
	return result, nil
}
|
package main
import (
"fmt"
"sync"
)
// DEPTH is the maximum crawl depth. ENDINGDEPTH is the depth at which a
// spawned crawler signals completion on its channel (see checkEnd).
const (
	DEPTH       = 4
	ENDINGDEPTH = DEPTH - 1
)
// Fetcher abstracts page retrieval so the crawl can run against canned
// data (see fakeFetcher below).
type Fetcher interface {
	// Fetch returns the body of URL and
	// a slice of URLs found on that page.
	Fetch(url string) (body string, urls []string, err error)
}
// Crawl uses fetcher to recursively crawl
// pages starting with url, to a maximum of depth. The root page's links
// are crawled concurrently; Crawl blocks until each spawned crawler has
// signalled completion on its channel. (The original ended with a
// redundant bare `return`, dropped here.)
func Crawl(url string, depth int, fetcher Fetcher) {
	var visited []string
	var mutex = sync.Mutex{}
	var channels []chan int
	visited = append(visited, url)
	doCrawling(url, depth, &visited, &mutex, nil, true, &channels)
	// One receive per spawned root-level crawler (see rootCrawlerTask).
	for _, ch := range channels {
		<-ch
	}
}
// Crawler crawls one subtree synchronously and then signals on ch when
// this call is at the signalling depth (see checkEnd). (The original
// ended with a redundant bare `return`, dropped here.)
func Crawler(url string, depth int, visited *[]string, mutex *sync.Mutex, ch chan int) {
	doCrawling(url, depth, visited, mutex, ch, false, nil)
	checkEnd(depth, ch)
}
// doCrawling fetches url (when depth allows), prints what it found, and
// schedules a crawl for every outgoing link. With isRootCrawler true each
// link gets its own goroutine plus a completion channel appended to
// *channels; otherwise links are crawled synchronously on this goroutine.
func doCrawling(url string, depth int, visited *[]string, mutex *sync.Mutex, ch chan int, isRootCrawler bool, channels *[]chan int) {
	if depth <= 0 {
		return
	}
	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("Found: %s %v\n", url, body)
	for _, u := range urls {
		f := getCrawlerFunction(isRootCrawler, u, depth, visited, mutex, ch, channels)
		f()
	}
}
func getCrawlerFunction(isRootCrawler bool, u string, depth int, visited *[]string, mutex *sync.Mutex, ch chan int, channels *[]chan int) func() {
if !isRootCrawler {
return concurrentCrawlerTask(u, depth, visited, mutex, ch)
} else {
return rootCrawlerTask(u, depth, visited, mutex, channels)
}
}
func concurrentCrawlerTask(u string, depth int, visited *[]string, mutex *sync.Mutex, ch chan int) func() {
return func() {
if !contains(u, visited, mutex) {
*visited = append(*visited, u)
Crawler(u, depth-1, visited, mutex, ch)
}
}
}
// rootCrawlerTask returns a closure that registers a completion channel,
// records u as visited, and crawls it in a new goroutine.
// BUG FIX: the append to *visited is now taken under the mutex — the
// goroutines spawned for earlier links append to the same slice
// concurrently, so the original unguarded append was a data race.
func rootCrawlerTask(u string, depth int, visited *[]string, mutex *sync.Mutex, channels *[]chan int) func() {
	return func() {
		var ch = make(chan int)
		*channels = append(*channels, ch)
		mutex.Lock()
		*visited = append(*visited, u)
		mutex.Unlock()
		go Crawler(u, depth-1, visited, mutex, ch)
	}
}
// checkEnd sends one completion signal on ch, but only from the call
// whose depth equals ENDINGDEPTH — i.e. the root-spawned crawler itself,
// not its deeper synchronous descendants. This matches the single receive
// per channel in Crawl.
func checkEnd(depth int, ch chan int) {
	if depth == ENDINGDEPTH {
		ch <- 1
	}
}
func contains(url string, visited *[]string, mutex *sync.Mutex) bool {
mutex.Lock()
for _, element := range *visited {
if element == url {
defer mutex.Unlock()
return true
}
}
defer mutex.Unlock()
return false
}
// main crawls the canned golang.org link graph to DEPTH levels.
func main() {
	Crawl("http://golang.org/", DEPTH, fetcher)
}
// fakeFetcher is Fetcher that returns canned results from an in-memory map.
type fakeFetcher map[string]*fakeResult

// fakeResult holds the canned body and outgoing links for one URL.
type fakeResult struct {
	body string
	urls []string
}

// Fetch looks the URL up in the map; unknown URLs yield an error.
func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	res, ok := f[url]
	if !ok {
		return "", nil, fmt.Errorf("not found: %s", url)
	}
	return res.body, res.urls, nil
}
// fetcher is a populated fakeFetcher: a small link graph rooted at
// http://golang.org/ (pkg/ and cmd/ pages, with fmt/ and os/ below pkg/;
// cmd/ is intentionally absent from the map so fetching it errors).
var fetcher = fakeFetcher{
	"http://golang.org/": &fakeResult{
		"The Go Programming Language",
		[]string{
			"http://golang.org/pkg/",
			"http://golang.org/cmd/",
		},
	},
	"http://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"http://golang.org/",
			"http://golang.org/cmd/",
			"http://golang.org/pkg/fmt/",
			"http://golang.org/pkg/os/",
		},
	},
	"http://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"http://golang.org/",
			"http://golang.org/pkg/",
		},
	},
	"http://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"http://golang.org/",
			"http://golang.org/pkg/",
		},
	},
}
|
package main
import (
"Mmx/Modules"
"Mmx/Router"
)
// main boots the application: configuration, then global state, then the
// HTTP router. NOTE(review): the order appears significant — later steps
// presumably read state set up by earlier ones; confirm against the
// Modules package.
func main() {
	Modules.Config.Init()
	Modules.Global.Init()
	Router.InitRouter()
}
|
package main
import (
"log"
"os"
"strings"
"github.com/bmaupin/go-epub"
)
// genetatePdfFile builds a PDF (or a print-oriented PDF when forPrint is
// set) for the given book project: it assembles an intermediate EPUB with
// the appropriate CSS and metadata, then shells out to Calibre's
// ebook-convert with project-specific fonts and margins. It returns the
// base output name "<Project>-<version>.pdf"; the files actually written
// may carry -ForPrinting/-KaiTi/-SongTi suffixes.
// NOTE(review): the name ("genetate") is a typo but is kept — renaming
// would break callers elsewhere in the program.
func genetatePdfFile(bookProjectDir, bookVersion, coverImagePath string, forPrint bool) string {
	var e *epub.Epub
	var outFilename string
	var indexArticleTitle string
	var bookWebsite string
	var engVersion bool
	target := "pdf"
	css := PdfCSS
	ext := ".pdf"
	if forPrint {
		target = "print"
		css = PrintCSS
	}
	// Per-project metadata: title, author, TOC title, website, output name.
	projectName := confirmBookProjectName(bookProjectDir)
	switch projectName {
	default:
		log.Fatal("unknow book porject: ", projectName)
	case "Go101":
		e = epub.NewEpub("Go 101")
		e.SetAuthor("Tapir Liu")
		indexArticleTitle = "Contents"
		bookWebsite = "https://go101.org"
		engVersion = true
		outFilename = "Go101-" + bookVersion + ext
	case "Golang101":
		e = epub.NewEpub("Go语言101")
		e.SetAuthor("老貘")
		indexArticleTitle = "目录"
		bookWebsite = "https://gfw.go101.org"
		engVersion = false
		outFilename = "Golang101-" + bookVersion + ext
	}
	// The CSS is written to a temp file only so it can be registered with
	// the epub builder; it is removed afterwards.
	cssFilename := "all.css"
	tempCssFile := mustCreateTempFile("all*.css", []byte(css))
	defer os.Remove(tempCssFile)
	cssPath, err := e.AddCSS(tempCssFile, cssFilename)
	if err != nil {
		log.Fatalln("add css", cssFilename, "failed:", err)
	}
	// ...
	// Intermediate EPUB (temp file), with the cover page stripped before
	// conversion.
	tempOutFilename := outFilename + "*.epub"
	tempOutFilename = mustCreateTempFile(tempOutFilename, nil)
	defer os.Remove(tempOutFilename)
	//tempOutFilename := outFilename + ".epub"
	writeEpub_Go101(tempOutFilename, e, -1, bookWebsite, projectName, indexArticleTitle, bookProjectDir, coverImagePath, cssPath, target, engVersion)
	removePagesFromEpub(tempOutFilename, "EPUB/xhtml/cover.xhtml")
	// epub2pdf invokes Calibre's ebook-convert with the common parameter
	// set plus forPrint-dependent margins/TOC handling.
	epub2pdf := func(serifFont, fontSize, inputFilename, outputFilename string) {
		conversionParameters := make([]string, 0, 32)
		pushParams := func(params ...string) {
			conversionParameters = append(conversionParameters, params...)
		}
		pushParams(inputFilename, outputFilename)
		pushParams("--toc-title", indexArticleTitle)
		pushParams("--pdf-header-template", `<p style="text-align: center; font-size: small;">_SECTION_</p>`)
		pushParams("--pdf-footer-template", `<p style="text-align: center; font-size: small;">_PAGENUM_</p>`)
		//pushParams("--pdf-page-numbers")
		pushParams("--paper-size", "a4")
		pushParams("--pdf-serif-family", serifFont)
		//pushParams("--pdf-sans-family", serifFont)
		pushParams("--pdf-mono-family", "Liberation Mono")
		pushParams("--pdf-default-font-size", fontSize)
		pushParams("--pdf-mono-font-size", "15")
		pushParams("--pdf-page-margin-top", "36")
		pushParams("--pdf-page-margin-bottom", "36")
		if forPrint {
			pushParams("--pdf-add-toc")
			pushParams("--pdf-page-margin-left", "72")
			pushParams("--pdf-page-margin-right", "72")
		} else {
			pushParams("--pdf-page-margin-left", "36")
			pushParams("--pdf-page-margin-right", "36")
		}
		pushParams("--preserve-cover-aspect-ratio")
		runShellCommand(".", "ebook-convert", conversionParameters...)
		log.Println("Create", outputFilename, "done!")
	}
	// Print builds get one file with a -ForPrinting suffix; the Chinese
	// screen build is produced twice, once per typeface.
	if forPrint {
		outFilenameForPrinting := strings.Replace(outFilename, ".pdf", ".pdf-ForPrinting.pdf", 1)
		if projectName == "Go101" {
			epub2pdf("Liberation Serif", "17", tempOutFilename, outFilenameForPrinting)
		} else if projectName == "Golang101" {
			epub2pdf("AR PL SungtiL GB", "16", tempOutFilename, outFilenameForPrinting)
		}
	} else {
		if projectName == "Go101" {
			epub2pdf("Liberation Serif", "17", tempOutFilename, outFilename)
		} else if projectName == "Golang101" {
			outFilenameKaiTi := strings.Replace(outFilename, ".pdf", ".pdf-KaiTi.pdf", 1)
			epub2pdf("AR PL KaitiM GB", "16", tempOutFilename, outFilenameKaiTi)
			outFilenameSongTi := strings.Replace(outFilename, ".pdf", ".pdf-SongTi.pdf", 1)
			epub2pdf("AR PL SungtiL GB", "16", tempOutFilename, outFilenameSongTi)
		}
	}
	return outFilename
}
|
package data
import(
"database/sql"
"fmt"
_ "github.com/go-sql-driver/mysql"
"time"
)
// DATABASE_NAME is the MySQL schema this package creates and uses.
const DATABASE_NAME = "danmu"
// createDB creates the database and its tables; if the CREATE DATABASE
// fails (typically because it already exists) the database is opened
// directly instead.
// BUG FIX: the original passed err.Error() as the format string to
// fmt.Printf (go vet: non-constant format string — any '%' in the message
// would be misinterpreted); errors are now printed with fmt.Println.
// NOTE(review): neither db handle is ever closed and Exec errors on "use"
// are ignored — consider revisiting.
func createDB(database_name string) {
	db, err := sql.Open(driver_name, database_url)
	if err != nil {
		fmt.Println(err)
	}
	_, err = db.Exec("create database " + database_name)
	if err != nil {
		// Creation failed — assume the database exists and open it directly.
		fmt.Println(err)
		db, err := sql.Open(driver_name, database_url+database_name)
		if err != nil {
			println(err.Error())
		} else {
			db.Exec("use " + database_name)
			createTable(db)
		}
	} else {
		db.Exec("use " + database_name)
		createTable(db)
	}
}
// createTable creates the application tables (user, topic, commit, like);
// the DDL strings are defined elsewhere in the package. Exec errors are
// ignored, so already-existing tables are silently kept.
func createTable(db *sql.DB) {
	db.Exec(create_table_user)
	db.Exec(create_table_topic)
	db.Exec(create_table_commit)
	db.Exec(create_table_like)
}
// InitDB creates the danmu database and its tables if necessary.
func InitDB() {
	createDB(DATABASE_NAME)
}
// insertUserData prints the current time in nanoseconds.
// BUG FIX: the original used string(cur), which converts the int64 to a
// single (invalid) Unicode rune instead of formatting the number.
func insertUserData() {
	cur := time.Now().UnixNano()
	fmt.Printf("%d\n", cur)
}
// InsertUser opens a fresh connection and inserts the user row.
// NOTE(review): an sql.Open error silently skips the insert, the Exec
// error is discarded, and the db handle is never closed; InsertSql
// presumably builds the statement by string concatenation — verify it
// escapes its inputs (SQL-injection risk).
func InsertUser(user UserInfo) {
	db, err := sql.Open(driver_name, database_url+DATABASE_NAME)
	if err == nil {
		db.Exec(user.InsertSql())
	}
}
// Find_with_openid looks the user up via its FindWithUserid query and
// fills ID, Nickname, AvatarUrl and Gender from the last matching row; on
// any error the input user is returned unchanged.
// NOTE(review): the db handle and rows are never closed, Scan errors are
// ignored, and FindWithUserid presumably concatenates SQL — verify it
// escapes its input.
func Find_with_openid(user UserInfo) UserInfo {
	db, err := sql.Open(driver_name, database_url+DATABASE_NAME)
	if err == nil {
		rows, err := db.Query(user.FindWithUserid())
		if err == nil {
			for rows.Next() {
				rows.Scan(&user.ID, &user.Nickname, &user.AvatarUrl, &user.Gender)
			}
			return user
		}
	}
	return user
}
// TestData is a connectivity smoke test: it connects to the local MySQL
// server, then creates a throwaway database and table.
//
// Fixes: the error text is no longer used as a Printf format string,
// and the function returns early when Open fails instead of continuing
// with an unusable handle.
//
// NOTE(review): credentials are hard-coded in the DSN — move them to
// configuration before this ships.
func TestData() {
	fmt.Printf("hello world")
	db, err := sql.Open("mysql", "root:nbin312@tcp(localhost:3307)/")
	if err != nil {
		fmt.Println(err.Error())
		return
	}
	defer db.Close()

	db.Exec("create database Bus")
	db.Exec("CREATE TABLE example ( id integer, name varchar(32) )")
}
|
package report
import (
	"encoding/json"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strconv"

	"github.com/gorilla/mux"
	"github.com/reynaldipane/toko-ijah/appcontext"
	helper "github.com/reynaldipane/toko-ijah/helpers"
	orders "github.com/reynaldipane/toko-ijah/order"
	productstock "github.com/reynaldipane/toko-ijah/product_stock"
)
var reportService ServiceInterface
// getService lazily constructs and caches the package-level report
// service.
//
// Fix: the original built a new Service when reportService was nil but
// never assigned it to reportService, so the cache never populated and
// every call reconstructed the service (and all its repositories).
//
// NOTE(review): this lazy init is not goroutine-safe; if handlers run
// concurrently before first initialization, consider sync.Once.
func getService() ServiceInterface {
	if reportService == nil {
		reportService = &Service{
			repo: initRepository(appcontext.GetDB()),
			productStockService: &productstock.Service{
				Repo: productstock.InitRepository(appcontext.GetDB()),
			},
			orderService: &orders.Service{
				Repo: orders.InitRepository(appcontext.GetDB()),
			},
		}
	}
	return reportService
}
// GenerateProductValuesReport will return product values report
func GenerateProductValuesReport(w http.ResponseWriter, r *http.Request) {
result, err := getService().getProductValuesReport()
if err != nil {
helper.BuildResponseWithError(w, helper.ContentJSON, 404, "report not found")
return
}
response, err := json.Marshal(result)
helper.BuildResponse(w, helper.ContentJSON, 200, string(response))
}
//ExportProductValuesReportCsv will return link to exported product values report to download
func ExportProductValuesReportCsv(w http.ResponseWriter, r *http.Request) {
fileName := getService().convertProductValuesReportToCsv()
response, _ := json.Marshal(fileName)
helper.BuildResponse(w, helper.ContentJSON, 200, string(response))
}
// GenerateProductSalesDetailReport will return product sales detail report
func GenerateProductSalesDetailReport(w http.ResponseWriter, r *http.Request) {
result, err := getService().getSalesReport()
if err != nil {
helper.BuildResponseWithError(w, helper.ContentJSON, 404, "report not found")
return
}
response, err := json.Marshal(result)
helper.BuildResponse(w, helper.ContentJSON, 200, string(response))
}
//ExportSalesReportCsv will return link to exported sales report to download
func ExportSalesReportCsv(w http.ResponseWriter, r *http.Request) {
fileName := getService().convertSalesReportDetailToCsv()
response, _ := json.Marshal(fileName)
helper.BuildResponse(w, helper.ContentJSON, 200, string(response))
}
// DownloadProductValuesReport will return exported csv file to client
func DownloadProductValuesReport(w http.ResponseWriter, r *http.Request) {
vars := mux.Vars(r)
fileName := vars["fileName"]
if fileName == "" {
helper.BuildResponseWithError(w, helper.ContentJSON, 400, "report not found")
return
}
filePath := "./csv_exports/" + fileName
Openfile, err := os.Open(filePath)
defer Openfile.Close()
if err != nil {
helper.BuildResponseWithError(w, helper.ContentJSON, 400, "report not found")
return
}
FileHeader := make([]byte, 512)
Openfile.Read(FileHeader)
FileContentType := http.DetectContentType(FileHeader)
FileStat, _ := Openfile.Stat()
FileSize := strconv.FormatInt(FileStat.Size(), 10)
w.Header().Set("Content-Disposition", "attachment; filename="+fileName)
w.Header().Set("Content-Type", FileContentType)
w.Header().Set("Content-Length", FileSize)
Openfile.Seek(0, 0)
io.Copy(w, Openfile)
return
}
|
package msaevents
import (
"fmt"
"reflect"
"strconv"
"strings"
)
// CustomFieldMapLabelValue pairs a display label with its underlying
// string value.
type CustomFieldMapLabelValue struct {
	Label string
	Value string
}
// CustomField couples a custom field's configuration with the data
// submitted for it. Data is nil when no value has been provided.
type CustomField struct {
	Field CustomFieldConfig `json:"field"`
	Data  *CustomFieldData  `json:"data"`
}
// StringValue renders the field's submitted data as a string; a field
// with no data renders as the empty string.
func (c *CustomField) StringValue() string {
	if c.Data == nil {
		return ""
	}
	return c.Data.StringValue()
}
// CustomFieldData is the submitted value for a custom field. Value is
// decoded from JSON, so at runtime it may hold a string, a number
// (typically float64), a slice, or nil.
type CustomFieldData struct {
	FieldId    int64       `json:"field_id"`
	FieldIdStr string      `json:"field_id_str"`
	Value      interface{} `json:"value"`
}

// StringValue renders Value as a string:
//   - strings are returned as-is
//   - slices/arrays are joined with ","
//   - ints and floats are rendered as truncated decimal integers
//   - anything else (including nil) renders as ""
//
// Fix: the original called v.IsNil() unconditionally, which panics for
// non-nilable kinds (string, int, float64, ...) — i.e. for most values
// a decoded JSON payload actually carries. IsNil is now consulted only
// for the kinds where it is defined.
func (c *CustomFieldData) StringValue() string {
	if c == nil || c.Value == nil {
		return ""
	}
	v := reflect.ValueOf(c.Value)
	switch v.Kind() {
	case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		if v.IsNil() {
			return ""
		}
	}
	switch v.Kind() {
	case reflect.String:
		return v.String()
	case reflect.Slice, reflect.Array:
		output := make([]string, 0, v.Len())
		for i := 0; i < v.Len(); i++ {
			output = append(output, fmt.Sprint(v.Index(i)))
		}
		return strings.Join(output, ",")
	case reflect.Int, reflect.Int64:
		return strconv.Itoa(int(v.Int()))
	case reflect.Float32, reflect.Float64:
		// Truncates the fractional part, matching the original's intent.
		return strconv.Itoa(int(v.Float()))
	default:
		return ""
	}
}
// CustomFieldConfig is the definition side of a custom field: its
// identity, localized labels, type, and display metadata.
type CustomFieldConfig struct {
	Id          int64
	IdStr       string
	Labels      map[string]string // label text keyed by language code
	Enabled     bool
	FieldType   string
	Position    *int // display order; presumably nil when unset — TODO confirm
	Description string
}
// Label returns the field's label for lang, falling back to the
// package-wide fallbackLanguage, and finally to "" when neither key is
// present.
func (c *CustomFieldConfig) Label(lang string) string {
	for _, key := range []string{lang, fallbackLanguage} {
		if label, ok := c.Labels[key]; ok {
			return label
		}
	}
	return ""
}
|
package e2e
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"path/filepath"
"strconv"
"strings"
"sync"
"time"
"github.com/blang/semver/v4"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gstruct"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
authorizationv1 "k8s.io/api/authorization/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
k8sjson "k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
"k8s.io/client-go/util/retry"
"sigs.k8s.io/controller-runtime/pkg/client"
opver "github.com/operator-framework/api/pkg/lib/version"
operatorsv1 "github.com/operator-framework/api/pkg/operators/v1"
operatorsv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1"
"github.com/operator-framework/operator-lifecycle-manager/pkg/api/client/clientset/versioned"
"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/operators/catalog"
"github.com/operator-framework/operator-lifecycle-manager/pkg/controller/registry"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/kubernetes/pkg/apis/rbac"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil"
"github.com/operator-framework/operator-lifecycle-manager/test/e2e/ctx"
"github.com/operator-framework/operator-lifecycle-manager/test/e2e/util"
)
const (
	// deprecatedCRDDir is the testdata subdirectory holding the CRD and
	// CR fixtures used by the deprecated-resource InstallPlan tests.
	deprecatedCRDDir = "deprecated-crd"
)
var _ = Describe("Install Plan", func() {
var (
c operatorclient.ClientInterface
crc versioned.Interface
ns corev1.Namespace
)
BeforeEach(func() {
namespaceName := genName("install-plan-e2e-")
og := operatorsv1.OperatorGroup{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-operatorgroup", namespaceName),
Namespace: namespaceName,
},
}
ns = SetupGeneratedTestNamespaceWithOperatorGroup(namespaceName, og)
c = ctx.Ctx().KubeClient()
crc = ctx.Ctx().OperatorClient()
})
AfterEach(func() {
TeardownNamespace(ns.GetName())
})
When("an InstallPlan step contains a deprecated resource version", func() {
var (
csv operatorsv1alpha1.ClusterServiceVersion
plan operatorsv1alpha1.InstallPlan
deprecated client.Object
manifest string
counter float64
)
BeforeEach(func() {
dc, err := discovery.NewDiscoveryClientForConfig(ctx.Ctx().RESTConfig())
Expect(err).ToNot(HaveOccurred())
v, err := dc.ServerVersion()
Expect(err).ToNot(HaveOccurred())
if minor, err := strconv.Atoi(v.Minor); err == nil && minor < 16 {
Skip("test is dependent on CRD v1 introduced at 1.16")
}
})
BeforeEach(func() {
counter = 0
for _, metric := range getMetricsFromPod(ctx.Ctx().KubeClient(), getPodWithLabel(ctx.Ctx().KubeClient(), "app=catalog-operator")) {
if metric.Family == "installplan_warnings_total" {
counter = metric.Value
}
}
deprecatedCRD, err := util.DecodeFile(filepath.Join(testdataDir, deprecatedCRDDir, "deprecated.crd.yaml"), &apiextensionsv1.CustomResourceDefinition{})
Expect(err).NotTo(HaveOccurred())
Expect(ctx.Ctx().Client().Create(context.Background(), deprecatedCRD)).To(Succeed())
csv = newCSV(genName("test-csv-"), ns.GetName(), "", semver.Version{}, nil, nil, nil)
Expect(ctx.Ctx().Client().Create(context.Background(), &csv)).To(Succeed())
deprecated, err = util.DecodeFile(filepath.Join(testdataDir, deprecatedCRDDir, "deprecated.cr.yaml"), &unstructured.Unstructured{}, util.WithNamespace(ns.GetName()))
Expect(err).NotTo(HaveOccurred())
scheme := runtime.NewScheme()
{
var b bytes.Buffer
Expect(k8sjson.NewSerializer(k8sjson.DefaultMetaFactory, scheme, scheme, false).Encode(deprecated, &b)).To(Succeed())
manifest = b.String()
}
plan = operatorsv1alpha1.InstallPlan{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns.GetName(),
Name: genName("test-plan-"),
},
Spec: operatorsv1alpha1.InstallPlanSpec{
Approval: operatorsv1alpha1.ApprovalAutomatic,
Approved: true,
ClusterServiceVersionNames: []string{},
},
}
Expect(ctx.Ctx().Client().Create(context.Background(), &plan)).To(Succeed())
plan.Status = operatorsv1alpha1.InstallPlanStatus{
Phase: operatorsv1alpha1.InstallPlanPhaseInstalling,
CatalogSources: []string{},
Plan: []*operatorsv1alpha1.Step{
{
Resolving: csv.GetName(),
Status: operatorsv1alpha1.StepStatusUnknown,
Resource: operatorsv1alpha1.StepResource{
Name: deprecated.GetName(),
Version: "v1",
Kind: "Deprecated",
Manifest: manifest,
},
},
},
}
Expect(ctx.Ctx().Client().Status().Update(context.Background(), &plan)).To(Succeed())
Eventually(func() (*operatorsv1alpha1.InstallPlan, error) {
return &plan, ctx.Ctx().Client().Get(context.Background(), client.ObjectKeyFromObject(&plan), &plan)
}).Should(HavePhase(operatorsv1alpha1.InstallPlanPhaseComplete))
})
AfterEach(func() {
Eventually(func() error {
return client.IgnoreNotFound(ctx.Ctx().Client().Delete(context.Background(), &csv))
}).Should(Succeed())
Eventually(func() error {
deprecatedCRD := &apiextensionsv1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: "deprecateds.operators.io.operator-framework",
},
}
return client.IgnoreNotFound(ctx.Ctx().Client().Delete(context.Background(), deprecatedCRD))
}).Should(Succeed())
})
It("creates an Event surfacing the deprecation warning", func() {
Eventually(func() ([]corev1.Event, error) {
var events corev1.EventList
if err := ctx.Ctx().Client().List(context.Background(), &events, client.InNamespace(ns.GetName())); err != nil {
return nil, err
}
var result []corev1.Event
for _, item := range events.Items {
result = append(result, corev1.Event{
InvolvedObject: corev1.ObjectReference{
APIVersion: item.InvolvedObject.APIVersion,
Kind: item.InvolvedObject.Kind,
Namespace: item.InvolvedObject.Namespace,
Name: item.InvolvedObject.Name,
FieldPath: item.InvolvedObject.FieldPath,
},
Reason: item.Reason,
Message: item.Message,
})
}
return result, nil
}).Should(ContainElement(corev1.Event{
InvolvedObject: corev1.ObjectReference{
APIVersion: operatorsv1alpha1.InstallPlanAPIVersion,
Kind: operatorsv1alpha1.InstallPlanKind,
Namespace: ns.GetName(),
Name: plan.GetName(),
FieldPath: "status.plan[0]",
},
Reason: "AppliedWithWarnings",
Message: fmt.Sprintf("1 warning(s) generated during installation of operator \"%s\" (Deprecated \"%s\"): operators.io.operator-framework/v1 Deprecated is deprecated", csv.GetName(), deprecated.GetName()),
}))
})
It("increments a metric counting the warning", func() {
Eventually(func() []Metric {
return getMetricsFromPod(ctx.Ctx().KubeClient(), getPodWithLabel(ctx.Ctx().KubeClient(), "app=catalog-operator"))
}).Should(ContainElement(LikeMetric(
WithFamily("installplan_warnings_total"),
WithValueGreaterThan(counter),
)))
})
})
When("a CustomResourceDefinition step resolved from a bundle is applied", func() {
var (
crd apiextensionsv1.CustomResourceDefinition
manifest string
)
BeforeEach(func() {
csv := newCSV("test-csv", ns.GetName(), "", semver.Version{}, nil, nil, nil)
Expect(ctx.Ctx().Client().Create(context.Background(), &csv)).To(Succeed())
crd = apiextensionsv1.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: "tests.example.com",
},
TypeMeta: metav1.TypeMeta{
Kind: "CustomResourceDefinition",
APIVersion: apiextensionsv1.SchemeGroupVersion.String(),
},
Spec: apiextensionsv1.CustomResourceDefinitionSpec{
Group: "example.com",
Scope: apiextensionsv1.ClusterScoped,
Names: apiextensionsv1.CustomResourceDefinitionNames{
Plural: "tests",
Singular: "test",
Kind: "Test",
ListKind: "TestList",
},
Versions: []apiextensionsv1.CustomResourceDefinitionVersion{{
Name: "v1",
Served: true,
Storage: true,
Schema: &apiextensionsv1.CustomResourceValidation{
OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{
Type: "object",
},
},
}},
},
}
scheme := runtime.NewScheme()
Expect(corev1.AddToScheme(scheme)).To(Succeed())
{
var b bytes.Buffer
Expect(k8sjson.NewSerializer(k8sjson.DefaultMetaFactory, scheme, scheme, false).Encode(&crd, &b)).To(Succeed())
manifest = b.String()
}
plan := operatorsv1alpha1.InstallPlan{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns.GetName(),
Name: "test-plan",
},
Spec: operatorsv1alpha1.InstallPlanSpec{
Approval: operatorsv1alpha1.ApprovalAutomatic,
Approved: true,
ClusterServiceVersionNames: []string{},
},
}
Expect(ctx.Ctx().Client().Create(context.Background(), &plan)).To(Succeed())
plan.Status = operatorsv1alpha1.InstallPlanStatus{
Phase: operatorsv1alpha1.InstallPlanPhaseInstalling,
CatalogSources: []string{},
Plan: []*operatorsv1alpha1.Step{
{
Resolving: "test-csv",
Status: operatorsv1alpha1.StepStatusUnknown,
Resource: operatorsv1alpha1.StepResource{
Name: crd.GetName(),
Version: apiextensionsv1.SchemeGroupVersion.String(),
Kind: "CustomResourceDefinition",
Manifest: manifest,
},
},
},
}
Expect(ctx.Ctx().Client().Status().Update(context.Background(), &plan)).To(Succeed())
Eventually(func() (*operatorsv1alpha1.InstallPlan, error) {
return &plan, ctx.Ctx().Client().Get(context.Background(), client.ObjectKeyFromObject(&plan), &plan)
}).Should(HavePhase(operatorsv1alpha1.InstallPlanPhaseComplete))
})
AfterEach(func() {
Eventually(func() error {
return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), crd.GetName(), metav1.DeleteOptions{}))
}).Should(Succeed())
})
It("is annotated with a reference to its associated ClusterServiceVersion", func() {
Eventually(func() (map[string]string, error) {
if err := ctx.Ctx().Client().Get(context.Background(), client.ObjectKeyFromObject(&crd), &crd); err != nil {
return nil, err
}
return crd.GetAnnotations(), nil
}).Should(HaveKeyWithValue(
HavePrefix("operatorframework.io/installed-alongside-"),
fmt.Sprintf("%s/test-csv", ns.GetName()),
))
})
When("a second plan includes the same CustomResourceDefinition", func() {
var (
csv operatorsv1alpha1.ClusterServiceVersion
plan operatorsv1alpha1.InstallPlan
)
BeforeEach(func() {
csv = newCSV("test-csv-two", ns.GetName(), "", semver.Version{}, nil, nil, nil)
Expect(ctx.Ctx().Client().Create(context.Background(), &csv)).To(Succeed())
plan = operatorsv1alpha1.InstallPlan{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns.GetName(),
Name: "test-plan-two",
},
Spec: operatorsv1alpha1.InstallPlanSpec{
Approval: operatorsv1alpha1.ApprovalAutomatic,
Approved: true,
ClusterServiceVersionNames: []string{},
},
}
Expect(ctx.Ctx().Client().Create(context.Background(), &plan)).To(Succeed())
plan.Status = operatorsv1alpha1.InstallPlanStatus{
Phase: operatorsv1alpha1.InstallPlanPhaseInstalling,
CatalogSources: []string{},
Plan: []*operatorsv1alpha1.Step{
{
Resolving: "test-csv-two",
Status: operatorsv1alpha1.StepStatusUnknown,
Resource: operatorsv1alpha1.StepResource{
Name: crd.GetName(),
Version: apiextensionsv1.SchemeGroupVersion.String(),
Kind: "CustomResourceDefinition",
Manifest: manifest,
},
},
},
}
Expect(ctx.Ctx().Client().Status().Update(context.Background(), &plan)).To(Succeed())
Eventually(func() (*operatorsv1alpha1.InstallPlan, error) {
return &plan, ctx.Ctx().Client().Get(context.Background(), client.ObjectKeyFromObject(&plan), &plan)
}).Should(HavePhase(operatorsv1alpha1.InstallPlanPhaseComplete))
})
AfterEach(func() {
Eventually(func() error {
return client.IgnoreNotFound(ctx.Ctx().Client().Delete(context.Background(), &csv))
}).Should(Succeed())
Eventually(func() error {
return client.IgnoreNotFound(ctx.Ctx().Client().Delete(context.Background(), &plan))
}).Should(Succeed())
})
It("has one annotation for each ClusterServiceVersion", func() {
Eventually(func() ([]struct{ Key, Value string }, error) {
if err := ctx.Ctx().Client().Get(context.Background(), client.ObjectKeyFromObject(&crd), &crd); err != nil {
return nil, err
}
var pairs []struct{ Key, Value string }
for k, v := range crd.GetAnnotations() {
pairs = append(pairs, struct{ Key, Value string }{Key: k, Value: v})
}
return pairs, nil
}).Should(ConsistOf(
MatchFields(IgnoreExtras, Fields{
"Key": HavePrefix("operatorframework.io/installed-alongside-"),
"Value": Equal(fmt.Sprintf("%s/test-csv", ns.GetName())),
}),
MatchFields(IgnoreExtras, Fields{
"Key": HavePrefix("operatorframework.io/installed-alongside-"),
"Value": Equal(fmt.Sprintf("%s/test-csv-two", ns.GetName())),
}),
))
})
})
})
When("an error is encountered during InstallPlan step execution", func() {
var (
plan *operatorsv1alpha1.InstallPlan
owned *corev1.ConfigMap
)
BeforeEach(func() {
// It's hard to reliably generate transient
// errors in an uninstrumented end-to-end
// test, so simulate it by constructing an
// error state that can be easily corrected
// during a test.
owned = &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns.GetName(),
Name: "test-owned",
OwnerReferences: []metav1.OwnerReference{{
APIVersion: "operators.coreos.com/v1alpha1",
Kind: "ClusterServiceVersion",
Name: "test-owner", // Does not exist!
UID: "", // catalog-operator populates this if the named CSV exists.
}},
},
}
scheme := runtime.NewScheme()
Expect(corev1.AddToScheme(scheme)).To(Succeed())
var manifest bytes.Buffer
Expect(k8sjson.NewSerializer(k8sjson.DefaultMetaFactory, scheme, scheme, false).Encode(owned, &manifest)).To(Succeed())
plan = &operatorsv1alpha1.InstallPlan{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns.GetName(),
Name: "test-plan",
},
Spec: operatorsv1alpha1.InstallPlanSpec{
Approval: operatorsv1alpha1.ApprovalAutomatic,
Approved: true,
ClusterServiceVersionNames: []string{},
},
}
Expect(ctx.Ctx().Client().Create(context.Background(), plan)).To(Succeed())
plan.Status = operatorsv1alpha1.InstallPlanStatus{
Phase: operatorsv1alpha1.InstallPlanPhaseInstalling,
CatalogSources: []string{},
Plan: []*operatorsv1alpha1.Step{
{
Status: operatorsv1alpha1.StepStatusUnknown,
Resource: operatorsv1alpha1.StepResource{
Name: owned.GetName(),
Version: "v1",
Kind: "ConfigMap",
Manifest: manifest.String(),
},
},
},
}
Expect(ctx.Ctx().Client().Status().Update(context.Background(), plan)).To(Succeed())
})
AfterEach(func() {
Expect(ctx.Ctx().Client().Delete(context.Background(), owned)).To(Or(
Succeed(),
WithTransform(apierrors.IsNotFound, BeTrue()),
))
Expect(ctx.Ctx().Client().Delete(context.Background(), plan)).To(Or(
Succeed(),
WithTransform(apierrors.IsNotFound, BeTrue()),
))
})
It("times out if the error persists", func() {
Eventually(
func() (*operatorsv1alpha1.InstallPlan, error) {
return plan, ctx.Ctx().Client().Get(context.Background(), client.ObjectKeyFromObject(plan), plan)
},
90*time.Second,
).Should(HavePhase(operatorsv1alpha1.InstallPlanPhaseFailed))
})
It("succeeds if there is no error on a later attempt", func() {
owner := newCSV("test-owner", ns.GetName(), "", semver.Version{}, nil, nil, nil)
Expect(ctx.Ctx().Client().Create(context.Background(), &owner)).To(Succeed())
Eventually(func() (*operatorsv1alpha1.InstallPlan, error) {
return plan, ctx.Ctx().Client().Get(context.Background(), client.ObjectKeyFromObject(plan), plan)
}).Should(HavePhase(operatorsv1alpha1.InstallPlanPhaseComplete))
})
})
When("an InstallPlan transfers ownership of a ServiceAccount to a new ClusterServiceVersion", func() {
var (
csv1, csv2 operatorsv1alpha1.ClusterServiceVersion
sa corev1.ServiceAccount
plan operatorsv1alpha1.InstallPlan
)
BeforeEach(func() {
csv1 = newCSV("test-csv-old", ns.GetName(), "", semver.Version{}, nil, nil, nil)
Expect(ctx.Ctx().Client().Create(context.Background(), &csv1)).To(Succeed())
csv2 = newCSV("test-csv-new", ns.GetName(), "", semver.Version{}, nil, nil, nil)
Expect(ctx.Ctx().Client().Create(context.Background(), &csv2)).To(Succeed())
sa = corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns.GetName(),
Name: "test-serviceaccount",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: operatorsv1alpha1.SchemeGroupVersion.String(),
Kind: operatorsv1alpha1.ClusterServiceVersionKind,
Name: csv1.GetName(),
UID: csv1.GetUID(),
},
},
},
}
Expect(ctx.Ctx().Client().Create(context.Background(), &sa)).To(Succeed())
scheme := runtime.NewScheme()
Expect(corev1.AddToScheme(scheme)).To(Succeed())
var manifest bytes.Buffer
Expect(k8sjson.NewSerializer(k8sjson.DefaultMetaFactory, scheme, scheme, false).Encode(&corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns.GetName(),
Name: "test-serviceaccount",
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: operatorsv1alpha1.SchemeGroupVersion.String(),
Kind: operatorsv1alpha1.ClusterServiceVersionKind,
Name: csv2.GetName(),
},
},
},
}, &manifest)).To(Succeed())
plan = operatorsv1alpha1.InstallPlan{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns.GetName(),
Name: "test-plan",
},
Spec: operatorsv1alpha1.InstallPlanSpec{
Approval: operatorsv1alpha1.ApprovalAutomatic,
Approved: true,
ClusterServiceVersionNames: []string{},
},
}
Expect(ctx.Ctx().Client().Create(context.Background(), &plan)).To(Succeed())
plan.Status = operatorsv1alpha1.InstallPlanStatus{
Phase: operatorsv1alpha1.InstallPlanPhaseInstalling,
CatalogSources: []string{},
Plan: []*operatorsv1alpha1.Step{
{
Status: operatorsv1alpha1.StepStatusUnknown,
Resource: operatorsv1alpha1.StepResource{
Name: sa.GetName(),
Version: "v1",
Kind: "ServiceAccount",
Manifest: manifest.String(),
},
},
},
}
Expect(ctx.Ctx().Client().Status().Update(context.Background(), &plan)).To(Succeed())
})
AfterEach(func() {
Expect(ctx.Ctx().Client().Delete(context.Background(), &sa)).To(Or(
Succeed(),
WithTransform(apierrors.IsNotFound, BeTrue()),
))
Expect(ctx.Ctx().Client().Delete(context.Background(), &csv1)).To(Or(
Succeed(),
WithTransform(apierrors.IsNotFound, BeTrue()),
))
Expect(ctx.Ctx().Client().Delete(context.Background(), &csv2)).To(Or(
Succeed(),
WithTransform(apierrors.IsNotFound, BeTrue()),
))
Expect(ctx.Ctx().Client().Delete(context.Background(), &plan)).To(Or(
Succeed(),
WithTransform(apierrors.IsNotFound, BeTrue()),
))
})
It("preserves owner references to both the old and the new ClusterServiceVersion", func() {
Eventually(func() ([]metav1.OwnerReference, error) {
if err := ctx.Ctx().Client().Get(context.Background(), client.ObjectKeyFromObject(&sa), &sa); err != nil {
return nil, err
}
return sa.GetOwnerReferences(), nil
}).Should(ContainElements([]metav1.OwnerReference{
{
APIVersion: operatorsv1alpha1.SchemeGroupVersion.String(),
Kind: operatorsv1alpha1.ClusterServiceVersionKind,
Name: csv1.GetName(),
UID: csv1.GetUID(),
},
{
APIVersion: operatorsv1alpha1.SchemeGroupVersion.String(),
Kind: operatorsv1alpha1.ClusterServiceVersionKind,
Name: csv2.GetName(),
UID: csv2.GetUID(),
},
}))
})
})
When("a ClusterIP service exists", func() {
var (
service *corev1.Service
)
BeforeEach(func() {
service = &corev1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Namespace: ns.GetName(),
Name: "test-service",
},
Spec: corev1.ServiceSpec{
Type: corev1.ServiceTypeClusterIP,
Ports: []corev1.ServicePort{
{
Port: 12345,
},
},
},
}
Expect(ctx.Ctx().Client().Create(context.Background(), service.DeepCopy())).To(Succeed())
})
AfterEach(func() {
Expect(ctx.Ctx().Client().Delete(context.Background(), service)).To(Succeed())
})
It("it can be updated by an InstallPlan step", func() {
scheme := runtime.NewScheme()
Expect(corev1.AddToScheme(scheme)).To(Succeed())
var manifest bytes.Buffer
Expect(k8sjson.NewSerializer(k8sjson.DefaultMetaFactory, scheme, scheme, false).Encode(service, &manifest)).To(Succeed())
plan := &operatorsv1alpha1.InstallPlan{
ObjectMeta: metav1.ObjectMeta{
Namespace: ns.GetName(),
Name: "test-plan",
},
Spec: operatorsv1alpha1.InstallPlanSpec{
Approval: operatorsv1alpha1.ApprovalAutomatic,
Approved: true,
ClusterServiceVersionNames: []string{},
},
}
Expect(ctx.Ctx().Client().Create(context.Background(), plan)).To(Succeed())
plan.Status = operatorsv1alpha1.InstallPlanStatus{
Phase: operatorsv1alpha1.InstallPlanPhaseInstalling,
CatalogSources: []string{},
Plan: []*operatorsv1alpha1.Step{
{
Status: operatorsv1alpha1.StepStatusUnknown,
Resource: operatorsv1alpha1.StepResource{
Name: service.Name,
Version: "v1",
Kind: "Service",
Manifest: manifest.String(),
},
},
},
}
Expect(ctx.Ctx().Client().Status().Update(context.Background(), plan)).To(Succeed())
key := client.ObjectKeyFromObject(plan)
Eventually(func() (*operatorsv1alpha1.InstallPlan, error) {
return plan, ctx.Ctx().Client().Get(context.Background(), key, plan)
}).Should(HavePhase(operatorsv1alpha1.InstallPlanPhaseComplete))
Expect(client.IgnoreNotFound(ctx.Ctx().Client().Delete(context.Background(), plan))).To(Succeed())
})
})
It("with CSVs across multiple catalog sources", func() {
log := func(s string) {
GinkgoT().Logf("%s: %s", time.Now().Format("15:04:05.9999"), s)
}
mainPackageName := genName("nginx-")
dependentPackageName := genName("nginxdep-")
mainPackageStable := fmt.Sprintf("%s-stable", mainPackageName)
dependentPackageStable := fmt.Sprintf("%s-stable", dependentPackageName)
stableChannel := "stable"
dependentCRD := newCRD(genName("ins-"))
mainCSV := newCSV(mainPackageStable, ns.GetName(), "", semver.MustParse("0.1.0"), nil, []apiextensions.CustomResourceDefinition{dependentCRD}, nil)
dependentCSV := newCSV(dependentPackageStable, ns.GetName(), "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{dependentCRD}, nil, nil)
defer func() {
require.NoError(GinkgoT(), crc.OperatorsV1alpha1().Subscriptions(ns.GetName()).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{}))
}()
dependentCatalogName := genName("mock-ocs-dependent-")
mainCatalogName := genName("mock-ocs-main-")
// Create separate manifests for each CatalogSource
mainManifests := []registry.PackageManifest{
{
PackageName: mainPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: mainPackageStable},
},
DefaultChannelName: stableChannel,
},
}
dependentManifests := []registry.PackageManifest{
{
PackageName: dependentPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: dependentPackageStable},
},
DefaultChannelName: stableChannel,
},
}
// Defer CRD clean up
defer func() {
Eventually(func() error {
return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), dependentCRD.GetName(), metav1.DeleteOptions{}))
}).Should(Succeed())
}()
// Create the catalog sources
require.NotEqual(GinkgoT(), "", ns.GetName())
_, cleanupDependentCatalogSource := createInternalCatalogSource(c, crc, dependentCatalogName, ns.GetName(), dependentManifests, []apiextensions.CustomResourceDefinition{dependentCRD}, []operatorsv1alpha1.ClusterServiceVersion{dependentCSV})
defer cleanupDependentCatalogSource()
// Attempt to get the catalog source before creating install plan
_, err := fetchCatalogSourceOnStatus(crc, dependentCatalogName, ns.GetName(), catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
_, cleanupMainCatalogSource := createInternalCatalogSource(c, crc, mainCatalogName, ns.GetName(), mainManifests, nil, []operatorsv1alpha1.ClusterServiceVersion{mainCSV})
defer cleanupMainCatalogSource()
// Attempt to get the catalog source before creating install plan
_, err = fetchCatalogSourceOnStatus(crc, mainCatalogName, ns.GetName(), catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
// Create expected install plan step sources
expectedStepSources := map[registry.ResourceKey]registry.ResourceKey{
{Name: dependentCRD.Name, Kind: "CustomResourceDefinition"}: {Name: dependentCatalogName, Namespace: ns.GetName()},
{Name: dependentPackageStable, Kind: operatorsv1alpha1.ClusterServiceVersionKind}: {Name: dependentCatalogName, Namespace: ns.GetName()},
{Name: mainPackageStable, Kind: operatorsv1alpha1.ClusterServiceVersionKind}: {Name: mainCatalogName, Namespace: ns.GetName()},
{Name: strings.Join([]string{dependentPackageStable, dependentCatalogName, ns.GetName()}, "-"), Kind: operatorsv1alpha1.SubscriptionKind}: {Name: dependentCatalogName, Namespace: ns.GetName()},
}
subscriptionName := genName("sub-nginx-")
subscriptionCleanup := createSubscriptionForCatalog(crc, ns.GetName(), subscriptionName, mainCatalogName, mainPackageName, stableChannel, "", operatorsv1alpha1.ApprovalAutomatic)
defer subscriptionCleanup()
subscription, err := fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
installPlanName := subscription.Status.InstallPlanRef.Name
// Wait for InstallPlan to be status: Complete before checking resource presence
fetchedInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, installPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete))
require.NoError(GinkgoT(), err)
log(fmt.Sprintf("Install plan %s fetched with status %s", fetchedInstallPlan.GetName(), fetchedInstallPlan.Status.Phase))
require.Equal(GinkgoT(), operatorsv1alpha1.InstallPlanPhaseComplete, fetchedInstallPlan.Status.Phase)
// Fetch installplan again to check for unnecessary control loops
fetchedInstallPlan, err = fetchInstallPlan(GinkgoT(), crc, fetchedInstallPlan.GetName(), ns.GetName(), func(fip *operatorsv1alpha1.InstallPlan) bool {
// Don't compare object meta as labels can be applied by the operator controller.
Expect(equality.Semantic.DeepEqual(fetchedInstallPlan.Spec, fip.Spec)).Should(BeTrue(), diff.ObjectDiff(fetchedInstallPlan, fip))
Expect(equality.Semantic.DeepEqual(fetchedInstallPlan.Status, fip.Status)).Should(BeTrue(), diff.ObjectDiff(fetchedInstallPlan, fip))
return true
})
require.NoError(GinkgoT(), err)
require.Equal(GinkgoT(), len(expectedStepSources), len(fetchedInstallPlan.Status.Plan), "Number of resolved steps matches the number of expected steps")
// Ensure resolved step resources originate from the correct catalog sources
log(fmt.Sprintf("%#v", expectedStepSources))
for _, step := range fetchedInstallPlan.Status.Plan {
log(fmt.Sprintf("checking %s", step.Resource))
key := registry.ResourceKey{Name: step.Resource.Name, Kind: step.Resource.Kind}
expectedSource, ok := expectedStepSources[key]
require.True(GinkgoT(), ok, "didn't find %v", key)
require.Equal(GinkgoT(), expectedSource.Name, step.Resource.CatalogSource)
require.Equal(GinkgoT(), expectedSource.Namespace, step.Resource.CatalogSourceNamespace)
// delete
}
EXPECTED:
for key := range expectedStepSources {
for _, step := range fetchedInstallPlan.Status.Plan {
if step.Resource.Name == key.Name && step.Resource.Kind == key.Kind {
continue EXPECTED
}
}
GinkgoT().Fatalf("expected step %s not found in %#v", key, fetchedInstallPlan.Status.Plan)
}
log("All expected resources resolved")
// Verify that the dependent subscription is in a good state
dependentSubscription, err := fetchSubscription(crc, ns.GetName(), strings.Join([]string{dependentPackageStable, dependentCatalogName, ns.GetName()}, "-"), subscriptionStateAtLatestChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), dependentSubscription)
require.NotNil(GinkgoT(), dependentSubscription.Status.InstallPlanRef)
require.Equal(GinkgoT(), dependentCSV.GetName(), dependentSubscription.Status.CurrentCSV)
// Verify CSV is created
_, err = awaitCSV(crc, ns.GetName(), dependentCSV.GetName(), csvAnyChecker)
require.NoError(GinkgoT(), err)
// Update dependent subscription in catalog and wait for csv to update
updatedDependentCSV := newCSV(dependentPackageStable+"-v2", ns.GetName(), dependentPackageStable, semver.MustParse("0.1.1"), []apiextensions.CustomResourceDefinition{dependentCRD}, nil, nil)
dependentManifests = []registry.PackageManifest{
{
PackageName: dependentPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: updatedDependentCSV.GetName()},
},
DefaultChannelName: stableChannel,
},
}
updateInternalCatalog(GinkgoT(), c, crc, dependentCatalogName, ns.GetName(), []apiextensions.CustomResourceDefinition{dependentCRD}, []operatorsv1alpha1.ClusterServiceVersion{dependentCSV, updatedDependentCSV}, dependentManifests)
// Wait for subscription to update
updatedDepSubscription, err := fetchSubscription(crc, ns.GetName(), strings.Join([]string{dependentPackageStable, dependentCatalogName, ns.GetName()}, "-"), subscriptionHasCurrentCSV(updatedDependentCSV.GetName()))
require.NoError(GinkgoT(), err)
// Verify installplan created and installed
fetchedUpdatedDepInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, updatedDepSubscription.Status.InstallPlanRef.Name, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete))
require.NoError(GinkgoT(), err)
log(fmt.Sprintf("Install plan %s fetched with status %s", fetchedUpdatedDepInstallPlan.GetName(), fetchedUpdatedDepInstallPlan.Status.Phase))
require.NotEqual(GinkgoT(), fetchedInstallPlan.GetName(), fetchedUpdatedDepInstallPlan.GetName())
// Wait for csv to update
_, err = awaitCSV(crc, ns.GetName(), updatedDependentCSV.GetName(), csvAnyChecker)
require.NoError(GinkgoT(), err)
})
Context("creation with pre existing CRD owners", func() {
	// OnePreExistingCRDOwner verifies that when the dependent CRD and a CSV
	// that owns it already exist on-cluster, resolving an install plan for a
	// package requiring that CRD does NOT re-install the dependent operator:
	// only the main package's CRD and CSV appear as resolved steps.
	It("OnePreExistingCRDOwner", func() {
		mainPackageName := genName("nginx-")
		dependentPackageName := genName("nginx-dep-")

		mainPackageStable := fmt.Sprintf("%s-stable", mainPackageName)
		mainPackageBeta := fmt.Sprintf("%s-beta", mainPackageName)
		dependentPackageStable := fmt.Sprintf("%s-stable", dependentPackageName)
		dependentPackageBeta := fmt.Sprintf("%s-beta", dependentPackageName)

		stableChannel := "stable"
		betaChannel := "beta"

		// Create manifests
		mainManifests := []registry.PackageManifest{
			{
				PackageName: mainPackageName,
				Channels: []registry.PackageChannel{
					{Name: stableChannel, CurrentCSVName: mainPackageStable},
				},
				DefaultChannelName: stableChannel,
			},
			{
				PackageName: dependentPackageName,
				Channels: []registry.PackageChannel{
					{Name: stableChannel, CurrentCSVName: dependentPackageStable},
					{Name: betaChannel, CurrentCSVName: dependentPackageBeta},
				},
				DefaultChannelName: stableChannel,
			},
		}

		// Create new CRDs
		mainCRD := newCRD(genName("ins-"))
		dependentCRD := newCRD(genName("ins-"))

		// Create new CSVs: the main CSVs own mainCRD and require dependentCRD;
		// the dependent CSVs own dependentCRD.
		mainStableCSV := newCSV(mainPackageStable, ns.GetName(), "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{mainCRD}, []apiextensions.CustomResourceDefinition{dependentCRD}, nil)
		mainBetaCSV := newCSV(mainPackageBeta, ns.GetName(), mainPackageStable, semver.MustParse("0.2.0"), []apiextensions.CustomResourceDefinition{mainCRD}, []apiextensions.CustomResourceDefinition{dependentCRD}, nil)
		dependentStableCSV := newCSV(dependentPackageStable, ns.GetName(), "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{dependentCRD}, nil, nil)
		dependentBetaCSV := newCSV(dependentPackageBeta, ns.GetName(), dependentPackageStable, semver.MustParse("0.2.0"), []apiextensions.CustomResourceDefinition{dependentCRD}, nil, nil)

		// Defer CRD clean up
		defer func() {
			Eventually(func() error {
				return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), mainCRD.GetName(), metav1.DeleteOptions{}))
			}).Should(Succeed())
			Eventually(func() error {
				return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), dependentCRD.GetName(), metav1.DeleteOptions{}))
			}).Should(Succeed())
		}()

		defer func() {
			require.NoError(GinkgoT(), crc.OperatorsV1alpha1().Subscriptions(ns.GetName()).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{}))
		}()

		// Create the catalog source
		mainCatalogSourceName := genName("mock-ocs-main-" + strings.ToLower(K8sSafeCurrentTestDescription()) + "-")
		_, cleanupCatalogSource := createInternalCatalogSource(c, crc, mainCatalogSourceName, ns.GetName(), mainManifests, []apiextensions.CustomResourceDefinition{dependentCRD, mainCRD}, []operatorsv1alpha1.ClusterServiceVersion{dependentBetaCSV, dependentStableCSV, mainStableCSV, mainBetaCSV})
		defer cleanupCatalogSource()

		// Attempt to get the catalog source before creating install plan(s)
		_, err := fetchCatalogSourceOnStatus(crc, mainCatalogSourceName, ns.GetName(), catalogSourceRegistryPodSynced)
		require.NoError(GinkgoT(), err)

		// Only the main package's resources should be resolved; the dependent
		// CRD/CSV are created below BEFORE the subscription, so the resolver
		// must treat them as pre-existing owners and omit them from the plan.
		expectedSteps := map[registry.ResourceKey]struct{}{
			{Name: mainCRD.Name, Kind: "CustomResourceDefinition"}:                       {},
			{Name: mainPackageStable, Kind: operatorsv1alpha1.ClusterServiceVersionKind}: {},
		}

		// Create the preexisting CRD and CSV
		cleanupCRD, err := createCRD(c, dependentCRD)
		require.NoError(GinkgoT(), err)
		defer cleanupCRD()
		cleanupCSV, err := createCSV(c, crc, dependentBetaCSV, ns.GetName(), true, false)
		require.NoError(GinkgoT(), err)
		defer cleanupCSV()
		GinkgoT().Log("Dependent CRD and preexisting CSV created")

		subscriptionName := genName("sub-nginx-")
		subscriptionCleanup := createSubscriptionForCatalog(crc, ns.GetName(), subscriptionName, mainCatalogSourceName, mainPackageName, stableChannel, "", operatorsv1alpha1.ApprovalAutomatic)
		defer subscriptionCleanup()

		subscription, err := fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanChecker)
		require.NoError(GinkgoT(), err)
		require.NotNil(GinkgoT(), subscription)

		installPlanName := subscription.Status.InstallPlanRef.Name

		// Wait for InstallPlan to be status: Complete or Failed before checking resource presence
		fetchedInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, installPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete, operatorsv1alpha1.InstallPlanPhaseFailed))
		require.NoError(GinkgoT(), err)
		GinkgoT().Logf("Install plan %s fetched with status %s", fetchedInstallPlan.GetName(), fetchedInstallPlan.Status.Phase)
		require.Equal(GinkgoT(), operatorsv1alpha1.InstallPlanPhaseComplete, fetchedInstallPlan.Status.Phase)

		// Fetch installplan again to check for unnecessary control loops.
		fetchedInstallPlan, err = fetchInstallPlan(GinkgoT(), crc, fetchedInstallPlan.GetName(), ns.GetName(), func(fip *operatorsv1alpha1.InstallPlan) bool {
			// Don't compare object meta as labels can be applied by the
			// operator controller between fetches; comparing the whole object
			// (as this check previously did) is flaky. Compare Spec and Status
			// separately, matching the equivalent check in the sibling test.
			Expect(equality.Semantic.DeepEqual(fetchedInstallPlan.Spec, fip.Spec)).Should(BeTrue(), diff.ObjectDiff(fetchedInstallPlan, fip))
			Expect(equality.Semantic.DeepEqual(fetchedInstallPlan.Status, fip.Status)).Should(BeTrue(), diff.ObjectDiff(fetchedInstallPlan, fip))
			return true
		})
		require.NoError(GinkgoT(), err)

		for _, step := range fetchedInstallPlan.Status.Plan {
			GinkgoT().Logf("%#v", step)
		}
		// require.Equal's signature is (t, expected, actual): pass the expected
		// count first so a mismatch message reads correctly.
		require.Equal(GinkgoT(), len(expectedSteps), len(fetchedInstallPlan.Status.Plan), "number of expected steps does not match installed")
		GinkgoT().Logf("Number of resolved steps matches the number of expected steps")

		for _, step := range fetchedInstallPlan.Status.Plan {
			key := registry.ResourceKey{
				Name: step.Resource.Name,
				Kind: step.Resource.Kind,
			}
			_, ok := expectedSteps[key]
			require.True(GinkgoT(), ok)

			// Remove the entry from the expected steps set (to ensure no duplicates in resolved plan)
			delete(expectedSteps, key)
		}

		// Should have removed every matching step
		require.Equal(GinkgoT(), 0, len(expectedSteps), "Actual resource steps do not match expected")

		// Delete CRDs
		Expect(client.IgnoreNotFound(ctx.Ctx().Client().Delete(context.Background(), &mainCRD))).To(Succeed())
		Expect(client.IgnoreNotFound(ctx.Ctx().Client().Delete(context.Background(), &dependentCRD))).To(Succeed())
	})
})
Describe("with CRD schema change", func() {
	// schemaPayload describes one CRD-upgrade scenario for the table-driven
	// test below: the CRD shipped with the old CSV, an optional intermediate
	// CRD, the CRD shipped with the new CSV, and the install plan phase the
	// upgrade is expected to reach.
	type schemaPayload struct {
		name            string
		expectedPhase   operatorsv1alpha1.InstallPlanPhase
		oldCRD          *apiextensions.CustomResourceDefinition
		intermediateCRD *apiextensions.CustomResourceDefinition
		newCRD          *apiextensions.CustomResourceDefinition
	}

	// Validation bounds used by the schemas below. newMax (50) is tighter
	// than max (256), so switching max -> newMax invalidates the existing
	// CR (spec.scalar = 100) in the backwards-incompatible scenario.
	var min float64 = 2
	var max float64 = 256
	var newMax float64 = 50

	// generated outside of the test table so that the same naming can be used for both old and new CSVs
	mainCRDPlural := genName("testcrd-")

	// excluded: new CRD, same version, same schema - won't trigger a CRD update
	tableEntries := []TableEntry{
		// New CRD keeps the served v1alpha1 version (now non-storage) and adds
		// v1alpha2 as storage; the existing CR stays valid -> plan completes.
		Entry("all existing versions are present, different (backwards compatible) schema", schemaPayload{
			name:          "all existing versions are present, different (backwards compatible) schema",
			expectedPhase: operatorsv1alpha1.InstallPlanPhaseComplete,
			oldCRD: func() *apiextensions.CustomResourceDefinition {
				oldCRD := newCRD(mainCRDPlural + "a")
				oldCRD.Spec.Versions = []apiextensions.CustomResourceDefinitionVersion{
					{
						Name:    "v1alpha1",
						Served:  true,
						Storage: true,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
								Type: "object",
								Properties: map[string]apiextensions.JSONSchemaProps{
									"spec": {
										Type:        "object",
										Description: "Spec of a test object.",
										Properties: map[string]apiextensions.JSONSchemaProps{
											"scalar": {
												Type:        "number",
												Description: "Scalar value that should have a min and max.",
												Minimum:     &min,
												Maximum:     &max,
											},
										},
									},
								},
							},
						},
					},
				}
				return &oldCRD
			}(),
			newCRD: func() *apiextensions.CustomResourceDefinition {
				newCRD := newCRD(mainCRDPlural + "a")
				newCRD.Spec.Versions = []apiextensions.CustomResourceDefinitionVersion{
					{
						Name:    "v1alpha1",
						Served:  true,
						Storage: false,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
								Type: "object",
								Properties: map[string]apiextensions.JSONSchemaProps{
									"spec": {
										Type:        "object",
										Description: "Spec of a test object.",
										Properties: map[string]apiextensions.JSONSchemaProps{
											"scalar": {
												Type:        "number",
												Description: "Scalar value that should have a min and max.",
												Minimum:     &min,
												Maximum:     &max,
											},
										},
									},
								},
							},
						},
					},
					{
						Name:    "v1alpha2",
						Served:  true,
						Storage: true,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
								Type: "object",
								Properties: map[string]apiextensions.JSONSchemaProps{
									"spec": {
										Type:        "object",
										Description: "Spec of a test object.",
										Properties: map[string]apiextensions.JSONSchemaProps{
											"scalar": {
												Type:        "number",
												Description: "Scalar value that should have a min and max.",
												Minimum:     &min,
												Maximum:     &max,
											},
										},
									},
								},
							},
						},
					},
				}
				return &newCRD
			}(),
		}),
		// New CRD tightens Maximum to newMax (50) below the existing CR's
		// scalar (100), so validation of existing data fails -> plan fails.
		Entry("all existing versions are present, different (backwards incompatible) schema", schemaPayload{name: "all existing versions are present, different (backwards incompatible) schema",
			expectedPhase: operatorsv1alpha1.InstallPlanPhaseFailed,
			oldCRD: func() *apiextensions.CustomResourceDefinition {
				oldCRD := newCRD(mainCRDPlural + "b")
				oldCRD.Spec.Versions = []apiextensions.CustomResourceDefinitionVersion{
					{
						Name:    "v1alpha1",
						Served:  true,
						Storage: true,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
								Type: "object",
								Properties: map[string]apiextensions.JSONSchemaProps{
									"spec": {
										Type:        "object",
										Description: "Spec of a test object.",
										Properties: map[string]apiextensions.JSONSchemaProps{
											"scalar": {
												Type:        "number",
												Description: "Scalar value that should have a min and max.",
											},
										},
									},
								},
							},
						},
					},
				}
				return &oldCRD
			}(),
			newCRD: func() *apiextensions.CustomResourceDefinition {
				newCRD := newCRD(mainCRDPlural + "b")
				newCRD.Spec.Versions = []apiextensions.CustomResourceDefinitionVersion{
					{
						Name:    "v1alpha1",
						Served:  true,
						Storage: true,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
								Type: "object",
								Properties: map[string]apiextensions.JSONSchemaProps{
									"spec": {
										Type:        "object",
										Description: "Spec of a test object.",
										Properties: map[string]apiextensions.JSONSchemaProps{
											"scalar": {
												Type:        "number",
												Description: "Scalar value that should have a min and max.",
												Minimum:     &min,
												Maximum:     &newMax,
											},
										},
									},
								},
							},
						},
					},
				}
				return &newCRD
			}(),
		}),
		// New CRD drops the old v1alpha2 version and introduces v1; the
		// upgrade is still expected to complete.
		Entry("missing existing versions in new CRD", schemaPayload{name: "missing existing versions in new CRD",
			expectedPhase: operatorsv1alpha1.InstallPlanPhaseComplete,
			oldCRD: func() *apiextensions.CustomResourceDefinition {
				oldCRD := newCRD(mainCRDPlural + "c")
				oldCRD.Spec.Versions = []apiextensions.CustomResourceDefinitionVersion{
					{
						Name:    "v1alpha1",
						Served:  true,
						Storage: true,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
								Type:        "object",
								Description: "my crd schema",
							},
						},
					},
					{
						Name:    "v1alpha2",
						Served:  true,
						Storage: false,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
								Type:        "object",
								Description: "my crd schema",
							},
						},
					},
				}
				return &oldCRD
			}(),
			newCRD: func() *apiextensions.CustomResourceDefinition {
				newCRD := newCRD(mainCRDPlural + "c")
				newCRD.Spec.Versions = []apiextensions.CustomResourceDefinitionVersion{
					{
						Name:    "v1alpha1",
						Served:  true,
						Storage: true,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
								Type: "object",
								Properties: map[string]apiextensions.JSONSchemaProps{
									"spec": {
										Type:        "object",
										Description: "Spec of a test object.",
										Properties: map[string]apiextensions.JSONSchemaProps{
											"scalar": {
												Type:        "number",
												Description: "Scalar value that should have a min and max.",
												Minimum:     &min,
												Maximum:     &max,
											},
										},
									},
								},
							},
						},
					},
					{
						Name:    "v1",
						Served:  true,
						Storage: false,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
								Type: "object",
								Properties: map[string]apiextensions.JSONSchemaProps{
									"spec": {
										Type:        "object",
										Description: "Spec of a test object.",
										Properties: map[string]apiextensions.JSONSchemaProps{
											"scalar": {
												Type:        "number",
												Description: "Scalar value that should have a min and max.",
												Minimum:     &min,
												Maximum:     &max,
											},
										},
									},
								},
							},
						},
					},
				}
				return &newCRD
			}()}),
		// New CRD keeps v1alpha1 and adds a deprecated (unserved, non-storage)
		// v1alpha3 version; the upgrade is expected to complete.
		Entry("existing version is present in new CRD (deprecated field)", schemaPayload{name: "existing version is present in new CRD (deprecated field)",
			expectedPhase: operatorsv1alpha1.InstallPlanPhaseComplete,
			oldCRD: func() *apiextensions.CustomResourceDefinition {
				oldCRD := newCRD(mainCRDPlural + "d")
				return &oldCRD
			}(),
			newCRD: func() *apiextensions.CustomResourceDefinition {
				newCRD := newCRD(mainCRDPlural + "d")
				newCRD.Spec.Versions = []apiextensions.CustomResourceDefinitionVersion{
					{
						Name:    "v1alpha1",
						Served:  true,
						Storage: true,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
								Type: "object",
								Properties: map[string]apiextensions.JSONSchemaProps{
									"spec": {
										Type:        "object",
										Description: "Spec of a test object.",
										Properties: map[string]apiextensions.JSONSchemaProps{
											"scalar": {
												Type:        "number",
												Description: "Scalar value that should have a min and max.",
												Minimum:     &min,
												Maximum:     &max,
											},
										},
									},
								},
							},
						},
					},
					{
						Name:    "v1alpha3",
						Served:  false,
						Storage: false,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{Type: "object"},
						},
					},
				}
				return &newCRD
			}()}),
	}
DescribeTable("Test", func(tt schemaPayload) {
mainPackageName := genName("nginx-")
mainPackageStable := fmt.Sprintf("%s-stable", mainPackageName)
mainPackageBeta := fmt.Sprintf("%s-beta", mainPackageName)
stableChannel := "stable"
betaChannel := "beta"
// Create manifests
mainManifests := []registry.PackageManifest{
{
PackageName: mainPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: mainPackageStable},
{Name: betaChannel, CurrentCSVName: mainPackageBeta},
},
DefaultChannelName: stableChannel,
},
}
// Create new CSVs
mainStableCSV := newCSV(mainPackageStable, ns.GetName(), "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{*tt.oldCRD}, nil, nil)
mainBetaCSV := newCSV(mainPackageBeta, ns.GetName(), mainPackageStable, semver.MustParse("0.2.0"), []apiextensions.CustomResourceDefinition{*tt.oldCRD}, nil, nil)
// Defer CRD clean up
defer func() {
Eventually(func() error {
return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), tt.oldCRD.GetName(), metav1.DeleteOptions{}))
}).Should(Succeed())
Eventually(func() error {
return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), tt.newCRD.GetName(), metav1.DeleteOptions{}))
}).Should(Succeed())
if tt.intermediateCRD != nil {
Eventually(func() error {
return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), tt.intermediateCRD.GetName(), metav1.DeleteOptions{}))
}).Should(Succeed())
}
}()
// Existing custom resource
existingCR := &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": "cluster.com/v1alpha1",
"kind": tt.oldCRD.Spec.Names.Kind,
"metadata": map[string]interface{}{
"namespace": ns.GetName(),
"name": "my-cr-1",
},
"spec": map[string]interface{}{
"scalar": 100,
},
},
}
// Create the catalog source
mainCatalogSourceName := genName("mock-ocs-main-")
_, cleanupCatalogSource := createInternalCatalogSource(c, crc, mainCatalogSourceName, ns.GetName(), mainManifests, []apiextensions.CustomResourceDefinition{*tt.oldCRD}, []operatorsv1alpha1.ClusterServiceVersion{mainStableCSV, mainBetaCSV})
defer cleanupCatalogSource()
// Attempt to get the catalog source before creating install plan(s)
_, err := fetchCatalogSourceOnStatus(crc, mainCatalogSourceName, ns.GetName(), catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
subscriptionName := genName("sub-nginx-alpha-")
cleanupSubscription := createSubscriptionForCatalog(crc, ns.GetName(), subscriptionName, mainCatalogSourceName, mainPackageName, stableChannel, "", operatorsv1alpha1.ApprovalAutomatic)
defer cleanupSubscription()
subscription, err := fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
installPlanName := subscription.Status.InstallPlanRef.Name
// Wait for InstallPlan to be status: Complete or failed before checking resource presence
completeOrFailedFunc := buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete, operatorsv1alpha1.InstallPlanPhaseFailed)
fetchedInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, installPlanName, ns.GetName(), completeOrFailedFunc)
require.NoError(GinkgoT(), err)
GinkgoT().Logf("Install plan %s fetched with status %s", fetchedInstallPlan.GetName(), fetchedInstallPlan.Status.Phase)
require.Equal(GinkgoT(), operatorsv1alpha1.InstallPlanPhaseComplete, fetchedInstallPlan.Status.Phase)
// Ensure that the desired resources have been created
expectedSteps := map[registry.ResourceKey]struct{}{
{Name: tt.oldCRD.Name, Kind: "CustomResourceDefinition"}: {},
{Name: mainPackageStable, Kind: operatorsv1alpha1.ClusterServiceVersionKind}: {},
}
require.Equal(GinkgoT(), len(expectedSteps), len(fetchedInstallPlan.Status.Plan), "number of expected steps does not match installed")
for _, step := range fetchedInstallPlan.Status.Plan {
key := registry.ResourceKey{
Name: step.Resource.Name,
Kind: step.Resource.Kind,
}
_, ok := expectedSteps[key]
require.True(GinkgoT(), ok, "couldn't find %v in expected steps: %#v", key, expectedSteps)
// Remove the entry from the expected steps set (to ensure no duplicates in resolved plan)
delete(expectedSteps, key)
}
// Should have removed every matching step
require.Equal(GinkgoT(), 0, len(expectedSteps), "Actual resource steps do not match expected")
// Create initial CR
cleanupCR, err := createCR(c, existingCR, "cluster.com", "v1alpha1", ns.GetName(), tt.oldCRD.Spec.Names.Plural, "my-cr-1")
require.NoError(GinkgoT(), err)
defer cleanupCR()
updateInternalCatalog(GinkgoT(), c, crc, mainCatalogSourceName, ns.GetName(), []apiextensions.CustomResourceDefinition{*tt.newCRD}, []operatorsv1alpha1.ClusterServiceVersion{mainStableCSV, mainBetaCSV}, mainManifests)
// Attempt to get the catalog source before creating install plan(s)
_, err = fetchCatalogSourceOnStatus(crc, mainCatalogSourceName, ns.GetName(), catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
// Update the subscription resource to point to the beta CSV
err = retry.RetryOnConflict(retry.DefaultBackoff, func() error {
subscription, err = fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
subscription.Spec.Channel = betaChannel
subscription, err = crc.OperatorsV1alpha1().Subscriptions(ns.GetName()).Update(context.Background(), subscription, metav1.UpdateOptions{})
return err
})
// Wait for subscription to have a new installplan
subscription, err = fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanDifferentChecker(fetchedInstallPlan.GetName()))
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
installPlanName = subscription.Status.InstallPlanRef.Name
// Wait for InstallPlan to be status: Complete or Failed before checking resource presence
fetchedInstallPlan, err = fetchInstallPlan(GinkgoT(), crc, installPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(tt.expectedPhase))
require.NoError(GinkgoT(), err)
GinkgoT().Logf("Install plan %s fetched with status %s", fetchedInstallPlan.GetName(), fetchedInstallPlan.Status.Phase)
require.Equal(GinkgoT(), tt.expectedPhase, fetchedInstallPlan.Status.Phase)
// Ensure correct in-cluster resource(s)
fetchedCSV, err := fetchCSV(crc, mainBetaCSV.GetName(), ns.GetName(), csvAnyChecker)
require.NoError(GinkgoT(), err)
GinkgoT().Logf("All expected resources resolved %s", fetchedCSV.Status.Phase)
}, tableEntries)
})
Describe("with deprecated version CRD", func() {
	// generated outside of the test table so that the same naming can be used for both old and new CSVs
	mainCRDPlural := genName("ins")

	// schemaPayload describes one three-stage CRD upgrade: old -> intermediate
	// -> new, plus the install plan phase the final upgrade should reach.
	type schemaPayload struct {
		name            string
		expectedPhase   operatorsv1alpha1.InstallPlanPhase
		oldCRD          *apiextensions.CustomResourceDefinition
		intermediateCRD *apiextensions.CustomResourceDefinition
		newCRD          *apiextensions.CustomResourceDefinition
	}

	// excluded: new CRD, same version, same schema - won't trigger a CRD update
	tableEntries := []TableEntry{
		// v1alpha1 starts as the served storage version, is deprecated
		// (unserved, non-storage) in the intermediate CRD, and remains
		// deprecated while v1alpha2/v1beta1 are served in the final CRD.
		Entry("upgrade CRD with deprecated version", schemaPayload{
			name:          "upgrade CRD with deprecated version",
			expectedPhase: operatorsv1alpha1.InstallPlanPhaseComplete,
			oldCRD: func() *apiextensions.CustomResourceDefinition {
				oldCRD := newCRD(mainCRDPlural)
				oldCRD.Spec.Versions = []apiextensions.CustomResourceDefinitionVersion{
					{
						Name:    "v1alpha1",
						Served:  true,
						Storage: true,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
								Type:        "object",
								Description: "my crd schema",
							},
						},
					},
				}
				return &oldCRD
			}(),
			intermediateCRD: func() *apiextensions.CustomResourceDefinition {
				intermediateCRD := newCRD(mainCRDPlural)
				intermediateCRD.Spec.Versions = []apiextensions.CustomResourceDefinitionVersion{
					{
						Name:    "v1alpha2",
						Served:  true,
						Storage: true,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
								Type:        "object",
								Description: "my crd schema",
							},
						},
					},
					{
						Name:    "v1alpha1",
						Served:  false,
						Storage: false,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
								Type:        "object",
								Description: "my crd schema",
							},
						},
					},
				}
				return &intermediateCRD
			}(),
			newCRD: func() *apiextensions.CustomResourceDefinition {
				newCRD := newCRD(mainCRDPlural)
				newCRD.Spec.Versions = []apiextensions.CustomResourceDefinitionVersion{
					{
						Name:    "v1alpha2",
						Served:  true,
						Storage: true,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
								Type:        "object",
								Description: "my crd schema",
							},
						},
					},
					{
						Name:    "v1beta1",
						Served:  true,
						Storage: false,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
								Type:        "object",
								Description: "my crd schema",
							},
						},
					},
					{
						Name:    "v1alpha1",
						Served:  false,
						Storage: false,
						Schema: &apiextensions.CustomResourceValidation{
							OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
								Type:        "object",
								Description: "my crd schema",
							},
						},
					},
				}
				return &newCRD
			}(),
		}),
	}
DescribeTable("Test", func(tt schemaPayload) {
mainPackageName := genName("nginx-")
mainPackageStable := fmt.Sprintf("%s-stable", mainPackageName)
mainPackageBeta := fmt.Sprintf("%s-beta", mainPackageName)
mainPackageDelta := fmt.Sprintf("%s-delta", mainPackageName)
stableChannel := "stable"
// Create manifests
mainManifests := []registry.PackageManifest{
{
PackageName: mainPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: mainPackageStable},
},
DefaultChannelName: stableChannel,
},
}
// Create new CSVs
mainStableCSV := newCSV(mainPackageStable, ns.GetName(), "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{*tt.oldCRD}, nil, nil)
mainBetaCSV := newCSV(mainPackageBeta, ns.GetName(), mainPackageStable, semver.MustParse("0.2.0"), []apiextensions.CustomResourceDefinition{*tt.intermediateCRD}, nil, nil)
mainDeltaCSV := newCSV(mainPackageDelta, ns.GetName(), mainPackageBeta, semver.MustParse("0.3.0"), []apiextensions.CustomResourceDefinition{*tt.newCRD}, nil, nil)
// Defer CRD clean up
defer func() {
Eventually(func() error {
return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), tt.oldCRD.GetName(), metav1.DeleteOptions{}))
}).Should(Succeed())
Eventually(func() error {
return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), tt.newCRD.GetName(), metav1.DeleteOptions{}))
}).Should(Succeed())
if tt.intermediateCRD != nil {
Eventually(func() error {
return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), tt.intermediateCRD.GetName(), metav1.DeleteOptions{}))
}).Should(Succeed())
}
}()
// Defer crd clean up
defer func() {
Expect(client.IgnoreNotFound(ctx.Ctx().Client().Delete(context.Background(), tt.newCRD))).To(Succeed())
Expect(client.IgnoreNotFound(ctx.Ctx().Client().Delete(context.Background(), tt.oldCRD))).To(Succeed())
Expect(client.IgnoreNotFound(ctx.Ctx().Client().Delete(context.Background(), tt.intermediateCRD))).To(Succeed())
}()
// Create the catalog source
mainCatalogSourceName := genName("mock-ocs-main-")
_, cleanupCatalogSource := createInternalCatalogSource(c, crc, mainCatalogSourceName, ns.GetName(), mainManifests, []apiextensions.CustomResourceDefinition{*tt.oldCRD}, []operatorsv1alpha1.ClusterServiceVersion{mainStableCSV})
defer cleanupCatalogSource()
// Attempt to get the catalog source before creating install plan(s)
_, err := fetchCatalogSourceOnStatus(crc, mainCatalogSourceName, ns.GetName(), catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
subscriptionName := genName("sub-nginx-")
// this subscription will be cleaned up below without the clean up function
createSubscriptionForCatalog(crc, ns.GetName(), subscriptionName, mainCatalogSourceName, mainPackageName, stableChannel, "", operatorsv1alpha1.ApprovalAutomatic)
subscription, err := fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
installPlanName := subscription.Status.InstallPlanRef.Name
// Wait for InstallPlan to be status: Complete or failed before checking resource presence
completeOrFailedFunc := buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete, operatorsv1alpha1.InstallPlanPhaseFailed)
fetchedInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, installPlanName, ns.GetName(), completeOrFailedFunc)
require.NoError(GinkgoT(), err)
GinkgoT().Logf("Install plan %s fetched with status %s", fetchedInstallPlan.GetName(), fetchedInstallPlan.Status.Phase)
require.Equal(GinkgoT(), operatorsv1alpha1.InstallPlanPhaseComplete, fetchedInstallPlan.Status.Phase)
// Ensure CRD versions are accurate
expectedVersions := map[string]struct{}{
"v1alpha1": {},
}
validateCRDVersions(GinkgoT(), c, tt.oldCRD.GetName(), expectedVersions)
// Update the manifest
mainManifests = []registry.PackageManifest{
{
PackageName: mainPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: mainPackageBeta},
},
DefaultChannelName: stableChannel,
},
}
updateInternalCatalog(GinkgoT(), c, crc, mainCatalogSourceName, ns.GetName(), []apiextensions.CustomResourceDefinition{*tt.intermediateCRD}, []operatorsv1alpha1.ClusterServiceVersion{mainStableCSV, mainBetaCSV}, mainManifests)
// Attempt to get the catalog source before creating install plan(s)
_, err = fetchCatalogSourceOnStatus(crc, mainCatalogSourceName, ns.GetName(), catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
subscription, err = fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanDifferentChecker(installPlanName))
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
installPlanName = subscription.Status.InstallPlanRef.Name
// Wait for InstallPlan to be status: Complete or Failed before checking resource presence
fetchedInstallPlan, err = fetchInstallPlan(GinkgoT(), crc, installPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete, operatorsv1alpha1.InstallPlanPhaseFailed))
require.NoError(GinkgoT(), err)
GinkgoT().Logf("Install plan %s fetched with status %s", fetchedInstallPlan.GetName(), fetchedInstallPlan.Status.Phase)
require.Equal(GinkgoT(), tt.expectedPhase, fetchedInstallPlan.Status.Phase)
// Ensure correct in-cluster resource(s)
fetchedCSV, err := fetchCSV(crc, mainBetaCSV.GetName(), ns.GetName(), csvSucceededChecker)
require.NoError(GinkgoT(), err)
// Ensure CRD versions are accurate
expectedVersions = map[string]struct{}{
"v1alpha1": {},
"v1alpha2": {},
}
validateCRDVersions(GinkgoT(), c, tt.oldCRD.GetName(), expectedVersions)
// Update the manifest
mainManifests = []registry.PackageManifest{
{
PackageName: mainPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: mainPackageDelta},
},
DefaultChannelName: stableChannel,
},
}
updateInternalCatalog(GinkgoT(), c, crc, mainCatalogSourceName, ns.GetName(), []apiextensions.CustomResourceDefinition{*tt.newCRD}, []operatorsv1alpha1.ClusterServiceVersion{mainStableCSV, mainBetaCSV, mainDeltaCSV}, mainManifests)
// Attempt to get the catalog source before creating install plan(s)
_, err = fetchCatalogSourceOnStatus(crc, mainCatalogSourceName, ns.GetName(), catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
subscription, err = fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanDifferentChecker(installPlanName))
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
installPlanName = subscription.Status.InstallPlanRef.Name
// Wait for InstallPlan to be status: Complete or Failed before checking resource presence
fetchedInstallPlan, err = fetchInstallPlan(GinkgoT(), crc, installPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete, operatorsv1alpha1.InstallPlanPhaseFailed))
require.NoError(GinkgoT(), err)
GinkgoT().Logf("Install plan %s fetched with status %s", fetchedInstallPlan.GetName(), fetchedInstallPlan.Status.Phase)
require.Equal(GinkgoT(), tt.expectedPhase, fetchedInstallPlan.Status.Phase)
// Ensure correct in-cluster resource(s)
fetchedCSV, err = fetchCSV(crc, mainDeltaCSV.GetName(), ns.GetName(), csvSucceededChecker)
require.NoError(GinkgoT(), err)
// Ensure CRD versions are accurate
expectedVersions = map[string]struct{}{
"v1alpha2": {},
"v1beta1": {},
"v1alpha1": {},
}
validateCRDVersions(GinkgoT(), c, tt.oldCRD.GetName(), expectedVersions)
GinkgoT().Logf("All expected resources resolved %s", fetchedCSV.Status.Phase)
}, tableEntries)
})
Describe("update catalog for subscription", func() {
// crdVersionKey uniquely identifies a version within a CRD.
// Used as a map key to compare the set of versions on a fetched CRD
// against the set declared in the catalog's updated CRD.
type crdVersionKey struct {
	name    string // version name, e.g. "v1alpha1"
	served  bool   // whether the API server serves this version
	storage bool   // whether this version is the storage version
}
// AmplifyPermissions: publish an updated CSV that requires MORE RBAC
// (extra permission and clusterPermission rules) and verify OLM resolves a
// new InstallPlan and rolls the operator forward to the amplified CSV.
It("AmplifyPermissions", func() {
	defer func() {
		require.NoError(GinkgoT(), crc.OperatorsV1alpha1().Subscriptions(ns.GetName()).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{}))
	}()

	// Build initial catalog
	mainPackageName := genName("nginx-amplify-")
	mainPackageStable := fmt.Sprintf("%s-stable", mainPackageName)
	stableChannel := "stable"
	crdPlural := genName("ins-amplify-")
	crdName := crdPlural + ".cluster.com"
	mainCRD := apiextensions.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Name: crdName,
		},
		Spec: apiextensions.CustomResourceDefinitionSpec{
			Group: "cluster.com",
			Versions: []apiextensions.CustomResourceDefinitionVersion{
				{
					Name:    "v1alpha1",
					Served:  true,
					Storage: true,
					Schema: &apiextensions.CustomResourceValidation{
						OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
							Type:        "object",
							Description: "my crd schema",
						},
					},
				},
			},
			Names: apiextensions.CustomResourceDefinitionNames{
				Plural:   crdPlural,
				Singular: crdPlural,
				Kind:     crdPlural,
				ListKind: "list" + crdPlural,
			},
			Scope: apiextensions.NamespaceScoped,
		},
	}

	// Generate namespaced permissions for the initial CSV
	serviceAccountName := genName("nginx-sa")
	permissions := []operatorsv1alpha1.StrategyDeploymentPermissions{
		{
			ServiceAccountName: serviceAccountName,
			Rules: []rbacv1.PolicyRule{
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{"cluster.com"},
					Resources: []string{crdPlural},
				},
			},
		},
	}
	// Generate cluster-scoped permissions for the initial CSV
	clusterPermissions := []operatorsv1alpha1.StrategyDeploymentPermissions{
		{
			ServiceAccountName: serviceAccountName,
			Rules: []rbacv1.PolicyRule{
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{"cluster.com"},
					Resources: []string{crdPlural},
				},
			},
		},
	}

	// Create the catalog sources
	mainNamedStrategy := newNginxInstallStrategy(genName("dep-"), permissions, clusterPermissions)
	mainCSV := newCSV(mainPackageStable, ns.GetName(), "", semver.MustParse("0.1.0"), nil, nil, &mainNamedStrategy)
	mainCatalogName := genName("mock-ocs-amplify-")
	mainManifests := []registry.PackageManifest{
		{
			PackageName: mainPackageName,
			Channels: []registry.PackageChannel{
				{Name: stableChannel, CurrentCSVName: mainCSV.GetName()},
			},
			DefaultChannelName: stableChannel,
		},
	}

	// Defer CRD clean up
	defer func() {
		Eventually(func() error {
			return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), mainCRD.GetName(), metav1.DeleteOptions{}))
		}).Should(Succeed())
	}()

	_, cleanupMainCatalogSource := createInternalCatalogSource(c, crc, mainCatalogName, ns.GetName(), mainManifests, []apiextensions.CustomResourceDefinition{mainCRD}, []operatorsv1alpha1.ClusterServiceVersion{mainCSV})
	defer cleanupMainCatalogSource()

	// Attempt to get the catalog source before creating install plan
	_, err := fetchCatalogSourceOnStatus(crc, mainCatalogName, ns.GetName(), catalogSourceRegistryPodSynced)
	require.NoError(GinkgoT(), err)

	subscriptionName := genName("sub-nginx-update-perms1")
	subscriptionCleanup := createSubscriptionForCatalog(crc, ns.GetName(), subscriptionName, mainCatalogName, mainPackageName, stableChannel, "", operatorsv1alpha1.ApprovalAutomatic)
	defer subscriptionCleanup()

	subscription, err := fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanChecker)
	require.NoError(GinkgoT(), err)
	require.NotNil(GinkgoT(), subscription)
	require.NotNil(GinkgoT(), subscription.Status.InstallPlanRef)
	require.Equal(GinkgoT(), mainCSV.GetName(), subscription.Status.CurrentCSV)

	installPlanName := subscription.Status.InstallPlanRef.Name

	// Wait for InstallPlan to be status: Complete before checking resource presence
	fetchedInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, installPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete))
	require.NoError(GinkgoT(), err)
	require.Equal(GinkgoT(), operatorsv1alpha1.InstallPlanPhaseComplete, fetchedInstallPlan.Status.Phase)

	// Verify CSV is created
	_, err = awaitCSV(crc, ns.GetName(), mainCSV.GetName(), csvSucceededChecker)
	require.NoError(GinkgoT(), err)

	// Update CatalogSource with a new CSV that requires additional permissions
	updatedPermissions := []operatorsv1alpha1.StrategyDeploymentPermissions{
		{
			ServiceAccountName: serviceAccountName,
			Rules: []rbacv1.PolicyRule{
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{"cluster.com"},
					Resources: []string{crdPlural},
				},
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{"local.cluster.com"},
					Resources: []string{"locals"},
				},
			},
		},
	}
	updatedClusterPermissions := []operatorsv1alpha1.StrategyDeploymentPermissions{
		{
			ServiceAccountName: serviceAccountName,
			Rules: []rbacv1.PolicyRule{
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{"cluster.com"},
					Resources: []string{crdPlural},
				},
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{"two.cluster.com"},
					Resources: []string{"twos"},
				},
			},
		},
	}

	// Create the updated CSV, replacing the stable CSV in the same channel
	updatedNamedStrategy := newNginxInstallStrategy(genName("dep-"), updatedPermissions, updatedClusterPermissions)
	updatedCSV := newCSV(mainPackageStable+"-next", ns.GetName(), mainCSV.GetName(), semver.MustParse("0.2.0"), []apiextensions.CustomResourceDefinition{mainCRD}, nil, &updatedNamedStrategy)
	updatedManifests := []registry.PackageManifest{
		{
			PackageName: mainPackageName,
			Channels: []registry.PackageChannel{
				{Name: stableChannel, CurrentCSVName: updatedCSV.GetName()},
			},
			DefaultChannelName: stableChannel,
		},
	}

	// Update catalog with updated CSV with more permissions
	updateInternalCatalog(GinkgoT(), c, crc, mainCatalogName, ns.GetName(), []apiextensions.CustomResourceDefinition{mainCRD}, []operatorsv1alpha1.ClusterServiceVersion{mainCSV, updatedCSV}, updatedManifests)

	// BUGFIX: reassign the re-fetched subscription instead of discarding it.
	// Previously the result was dropped (`_, err = ...`) and the stale
	// subscription's InstallPlanRef (the ORIGINAL install plan name) was read
	// below, so the test waited on the already-complete old plan rather than
	// the new one generated for the updated CSV.
	subscription, err = fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanDifferentChecker(fetchedInstallPlan.GetName()))
	require.NoError(GinkgoT(), err)
	require.NotNil(GinkgoT(), subscription)

	updatedInstallPlanName := subscription.Status.InstallPlanRef.Name

	// Wait for the NEW InstallPlan to be status: Complete before checking resource presence
	fetchedUpdatedInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, updatedInstallPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete))
	require.NoError(GinkgoT(), err)
	require.Equal(GinkgoT(), operatorsv1alpha1.InstallPlanPhaseComplete, fetchedUpdatedInstallPlan.Status.Phase)

	// Wait for csv to update
	_, err = awaitCSV(crc, ns.GetName(), updatedCSV.GetName(), csvSucceededChecker)
	require.NoError(GinkgoT(), err)
	// If the CSV is succeeded, we successfully rolled out the RBAC changes
})
// AttenuatePermissions: publish an updated CSV that requires FEWER RBAC
// rules and verify OLM rolls the operator forward, prunes the service
// account's removed access, and does not churn the namespace's secrets.
It("AttenuatePermissions", func() {
	defer func() {
		require.NoError(GinkgoT(), crc.OperatorsV1alpha1().Subscriptions(ns.GetName()).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{}))
	}()

	// Build initial catalog
	mainPackageName := genName("nginx-attenuate-")
	mainPackageStable := fmt.Sprintf("%s-stable", mainPackageName)
	stableChannel := "stable"
	crdPlural := genName("ins-attenuate-")
	crdName := crdPlural + ".cluster.com"
	mainCRD := apiextensions.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Name: crdName,
		},
		Spec: apiextensions.CustomResourceDefinitionSpec{
			Group: "cluster.com",
			Versions: []apiextensions.CustomResourceDefinitionVersion{
				{
					Name:    "v1alpha1",
					Served:  true,
					Storage: true,
					Schema: &apiextensions.CustomResourceValidation{
						OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
							Type:        "object",
							Description: "my crd schema",
						},
					},
				},
			},
			Names: apiextensions.CustomResourceDefinitionNames{
				Plural:   crdPlural,
				Singular: crdPlural,
				Kind:     crdPlural,
				ListKind: "list" + crdPlural,
			},
			Scope: apiextensions.NamespaceScoped,
		},
	}

	// Generate namespaced permissions for the initial CSV (two rules)
	serviceAccountName := genName("nginx-sa")
	permissions := []operatorsv1alpha1.StrategyDeploymentPermissions{
		{
			ServiceAccountName: serviceAccountName,
			Rules: []rbacv1.PolicyRule{
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{"cluster.com"},
					Resources: []string{crdPlural},
				},
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{"local.cluster.com"},
					Resources: []string{"locals"},
				},
			},
		},
	}
	// Generate cluster-scoped permissions for the initial CSV (two rules)
	clusterPermissions := []operatorsv1alpha1.StrategyDeploymentPermissions{
		{
			ServiceAccountName: serviceAccountName,
			Rules: []rbacv1.PolicyRule{
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{"cluster.com"},
					Resources: []string{crdPlural},
				},
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{"two.cluster.com"},
					Resources: []string{"twos"},
				},
			},
		},
	}

	// Create the catalog sources
	mainNamedStrategy := newNginxInstallStrategy(genName("dep-"), permissions, clusterPermissions)
	mainCSV := newCSV(mainPackageStable, ns.GetName(), "", semver.MustParse("0.1.0"), nil, nil, &mainNamedStrategy)
	mainCatalogName := genName("mock-ocs-main-update-perms1-")
	mainManifests := []registry.PackageManifest{
		{
			PackageName: mainPackageName,
			Channels: []registry.PackageChannel{
				{Name: stableChannel, CurrentCSVName: mainCSV.GetName()},
			},
			DefaultChannelName: stableChannel,
		},
	}

	// Defer CRD clean up
	defer func() {
		Eventually(func() error {
			return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), mainCRD.GetName(), metav1.DeleteOptions{}))
		}).Should(Succeed())
	}()

	_, cleanupMainCatalogSource := createInternalCatalogSource(c, crc, mainCatalogName, ns.GetName(), mainManifests, []apiextensions.CustomResourceDefinition{mainCRD}, []operatorsv1alpha1.ClusterServiceVersion{mainCSV})
	defer cleanupMainCatalogSource()

	// Attempt to get the catalog source before creating install plan
	_, err := fetchCatalogSourceOnStatus(crc, mainCatalogName, ns.GetName(), catalogSourceRegistryPodSynced)
	require.NoError(GinkgoT(), err)

	subscriptionName := genName("sub-nginx-update-perms1")
	subscriptionCleanup := createSubscriptionForCatalog(crc, ns.GetName(), subscriptionName, mainCatalogName, mainPackageName, stableChannel, "", operatorsv1alpha1.ApprovalAutomatic)
	defer subscriptionCleanup()

	subscription, err := fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanChecker)
	require.NoError(GinkgoT(), err)
	require.NotNil(GinkgoT(), subscription)
	require.NotNil(GinkgoT(), subscription.Status.InstallPlanRef)
	require.Equal(GinkgoT(), mainCSV.GetName(), subscription.Status.CurrentCSV)

	installPlanName := subscription.Status.InstallPlanRef.Name

	// Wait for InstallPlan to be status: Complete before checking resource presence
	fetchedInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, installPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete))
	require.NoError(GinkgoT(), err)
	require.Equal(GinkgoT(), operatorsv1alpha1.InstallPlanPhaseComplete, fetchedInstallPlan.Status.Phase)

	// Verify CSV is created
	_, err = awaitCSV(crc, ns.GetName(), mainCSV.GetName(), csvSucceededChecker)
	require.NoError(GinkgoT(), err)

	// Update CatalogSource with a new CSV requiring FEWER (attenuated) permissions:
	// the "cluster.com" rules present in the initial CSV are dropped.
	updatedPermissions := []operatorsv1alpha1.StrategyDeploymentPermissions{
		{
			ServiceAccountName: serviceAccountName,
			Rules: []rbacv1.PolicyRule{
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{"local.cluster.com"},
					Resources: []string{"locals"},
				},
			},
		},
	}
	updatedClusterPermissions := []operatorsv1alpha1.StrategyDeploymentPermissions{
		{
			ServiceAccountName: serviceAccountName,
			Rules: []rbacv1.PolicyRule{
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{"two.cluster.com"},
					Resources: []string{"twos"},
				},
			},
		},
	}

	// Snapshot secrets so we can assert the update did not create/recreate any
	oldSecrets, err := c.KubernetesInterface().CoreV1().Secrets(ns.GetName()).List(context.Background(), metav1.ListOptions{})
	require.NoError(GinkgoT(), err, "error listing secrets")

	// Create the updated CSV, replacing the stable CSV in the same channel
	updatedNamedStrategy := newNginxInstallStrategy(genName("dep-"), updatedPermissions, updatedClusterPermissions)
	updatedCSV := newCSV(mainPackageStable+"-next", ns.GetName(), mainCSV.GetName(), semver.MustParse("0.2.0"), []apiextensions.CustomResourceDefinition{mainCRD}, nil, &updatedNamedStrategy)
	updatedManifests := []registry.PackageManifest{
		{
			PackageName: mainPackageName,
			Channels: []registry.PackageChannel{
				{Name: stableChannel, CurrentCSVName: updatedCSV.GetName()},
			},
			DefaultChannelName: stableChannel,
		},
	}

	// Update catalog with updated CSV with fewer permissions
	updateInternalCatalog(GinkgoT(), c, crc, mainCatalogName, ns.GetName(), []apiextensions.CustomResourceDefinition{mainCRD}, []operatorsv1alpha1.ClusterServiceVersion{mainCSV, updatedCSV}, updatedManifests)

	// BUGFIX: reassign the re-fetched subscription instead of discarding it.
	// Previously the result was dropped (`_, err = ...`) and the stale
	// subscription's InstallPlanRef (the ORIGINAL install plan name) was read
	// below, so the test waited on the already-complete old plan rather than
	// the new one generated for the updated CSV.
	subscription, err = fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanDifferentChecker(fetchedInstallPlan.GetName()))
	require.NoError(GinkgoT(), err)
	require.NotNil(GinkgoT(), subscription)

	updatedInstallPlanName := subscription.Status.InstallPlanRef.Name

	// Wait for the NEW InstallPlan to be status: Complete before checking resource presence
	fetchedUpdatedInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, updatedInstallPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete))
	require.NoError(GinkgoT(), err)
	require.Equal(GinkgoT(), operatorsv1alpha1.InstallPlanPhaseComplete, fetchedUpdatedInstallPlan.Status.Phase)

	// Wait for csv to update
	_, err = awaitCSV(crc, ns.GetName(), updatedCSV.GetName(), csvSucceededChecker)
	require.NoError(GinkgoT(), err)

	newSecrets, err := c.KubernetesInterface().CoreV1().Secrets(ns.GetName()).List(context.Background(), metav1.ListOptions{})
	require.NoError(GinkgoT(), err, "error listing secrets")
	// Assert that the number of secrets is not increased from updating service account as part of the install plan,
	assert.EqualValues(GinkgoT(), len(oldSecrets.Items), len(newSecrets.Items))
	// And that the secret list is indeed updated.
	assert.Equal(GinkgoT(), oldSecrets.Items, newSecrets.Items)

	// Wait for ServiceAccount to not have access anymore to the dropped
	// "cluster.com" group via a SubjectAccessReview.
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		res, err := c.KubernetesInterface().AuthorizationV1().SubjectAccessReviews().Create(context.Background(), &authorizationv1.SubjectAccessReview{
			Spec: authorizationv1.SubjectAccessReviewSpec{
				User: "system:serviceaccount:" + ns.GetName() + ":" + serviceAccountName,
				ResourceAttributes: &authorizationv1.ResourceAttributes{
					Group:    "cluster.com",
					Version:  "v1alpha1",
					Resource: crdPlural,
					Verb:     rbac.VerbAll,
				},
			},
		}, metav1.CreateOptions{})
		if err != nil {
			return false, err
		}
		if res == nil {
			return false, nil
		}
		GinkgoT().Log("checking serviceaccount for permission")
		// should not be allowed
		return !res.Status.Allowed, nil
	})
})
// StopOnCSVModifications: hand-edit an installed CSV's deployment strategy
// (add an env var), then publish an update whose CSV carries the ORIGINAL
// strategy, and verify the catalog-driven update stomps the manual change.
It("StopOnCSVModifications", func() {
	defer func() {
		require.NoError(GinkgoT(), crc.OperatorsV1alpha1().Subscriptions(ns.GetName()).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{}))
	}()

	// Build initial catalog
	mainPackageName := genName("nginx-amplify-")
	mainPackageStable := fmt.Sprintf("%s-stable", mainPackageName)
	stableChannel := "stable"
	crdPlural := genName("ins-amplify-")
	crdName := crdPlural + ".cluster.com"
	mainCRD := apiextensions.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Name: crdName,
		},
		Spec: apiextensions.CustomResourceDefinitionSpec{
			Group: "cluster.com",
			Versions: []apiextensions.CustomResourceDefinitionVersion{
				{
					Name:    "v1alpha1",
					Served:  true,
					Storage: true,
					Schema: &apiextensions.CustomResourceValidation{
						OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
							Type:        "object",
							Description: "my crd schema",
						},
					},
				},
			},
			Names: apiextensions.CustomResourceDefinitionNames{
				Plural:   crdPlural,
				Singular: crdPlural,
				Kind:     crdPlural,
				ListKind: "list" + crdPlural,
			},
			Scope: apiextensions.NamespaceScoped,
		},
	}

	// Generate namespaced permissions
	serviceAccountName := genName("nginx-sa")
	permissions := []operatorsv1alpha1.StrategyDeploymentPermissions{
		{
			ServiceAccountName: serviceAccountName,
			Rules: []rbacv1.PolicyRule{
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{"cluster.com"},
					Resources: []string{crdPlural},
				},
			},
		},
	}
	// Generate cluster-scoped permissions
	clusterPermissions := []operatorsv1alpha1.StrategyDeploymentPermissions{
		{
			ServiceAccountName: serviceAccountName,
			Rules: []rbacv1.PolicyRule{
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{"cluster.com"},
					Resources: []string{crdPlural},
				},
			},
		},
	}

	// Create the catalog sources; the deployment name is fixed so we can
	// inspect the same deployment before and after the update.
	deploymentName := genName("dep-")
	mainNamedStrategy := newNginxInstallStrategy(deploymentName, permissions, clusterPermissions)
	mainCSV := newCSV(mainPackageStable, ns.GetName(), "", semver.MustParse("0.1.0"), nil, nil, &mainNamedStrategy)
	mainCatalogName := genName("mock-ocs-stomper-")
	mainManifests := []registry.PackageManifest{
		{
			PackageName: mainPackageName,
			Channels: []registry.PackageChannel{
				{Name: stableChannel, CurrentCSVName: mainCSV.GetName()},
			},
			DefaultChannelName: stableChannel,
		},
	}

	// Defer CRD clean up
	defer func() {
		Eventually(func() error {
			return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), mainCRD.GetName(), metav1.DeleteOptions{}))
		}).Should(Succeed())
	}()

	_, cleanupMainCatalogSource := createInternalCatalogSource(c, crc, mainCatalogName, ns.GetName(), mainManifests, []apiextensions.CustomResourceDefinition{mainCRD}, []operatorsv1alpha1.ClusterServiceVersion{mainCSV})
	defer cleanupMainCatalogSource()

	// Attempt to get the catalog source before creating install plan
	_, err := fetchCatalogSourceOnStatus(crc, mainCatalogName, ns.GetName(), catalogSourceRegistryPodSynced)
	require.NoError(GinkgoT(), err)

	subscriptionName := genName("sub-nginx-stompy-")
	subscriptionCleanup := createSubscriptionForCatalog(crc, ns.GetName(), subscriptionName, mainCatalogName, mainPackageName, stableChannel, "", operatorsv1alpha1.ApprovalAutomatic)
	defer subscriptionCleanup()

	subscription, err := fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanChecker)
	require.NoError(GinkgoT(), err)
	require.NotNil(GinkgoT(), subscription)
	require.NotNil(GinkgoT(), subscription.Status.InstallPlanRef)
	require.Equal(GinkgoT(), mainCSV.GetName(), subscription.Status.CurrentCSV)

	installPlanName := subscription.Status.InstallPlanRef.Name

	// Wait for InstallPlan to be status: Complete before checking resource presence
	fetchedInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, installPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete))
	require.NoError(GinkgoT(), err)
	require.Equal(GinkgoT(), operatorsv1alpha1.InstallPlanPhaseComplete, fetchedInstallPlan.Status.Phase)

	// Verify CSV is created
	csv, err := awaitCSV(crc, ns.GetName(), mainCSV.GetName(), csvSucceededChecker)
	require.NoError(GinkgoT(), err)

	// Manually modify the installed CSV's strategy to inject an env var
	addedEnvVar := corev1.EnvVar{Name: "EXAMPLE", Value: "value"}
	modifiedDetails := operatorsv1alpha1.StrategyDetailsDeployment{
		DeploymentSpecs: []operatorsv1alpha1.StrategyDeploymentSpec{
			{
				Name: deploymentName,
				Spec: appsv1.DeploymentSpec{
					Selector: &metav1.LabelSelector{
						MatchLabels: map[string]string{"app": "nginx"},
					},
					Replicas: &singleInstance,
					Template: corev1.PodTemplateSpec{
						ObjectMeta: metav1.ObjectMeta{
							Labels: map[string]string{"app": "nginx"},
						},
						Spec: corev1.PodSpec{Containers: []corev1.Container{
							{
								Name:            genName("nginx"),
								Image:           *dummyImage,
								Ports:           []corev1.ContainerPort{{ContainerPort: 80}},
								ImagePullPolicy: corev1.PullIfNotPresent,
								Env:             []corev1.EnvVar{addedEnvVar},
							},
						}},
					},
				},
			},
		},
		Permissions:        permissions,
		ClusterPermissions: clusterPermissions,
	}
	csv.Spec.InstallStrategy = operatorsv1alpha1.NamedInstallStrategy{
		StrategyName: operatorsv1alpha1.InstallStrategyNameDeployment,
		StrategySpec: modifiedDetails,
	}
	_, err = crc.OperatorsV1alpha1().ClusterServiceVersions(ns.GetName()).Update(context.Background(), csv, metav1.UpdateOptions{})
	require.NoError(GinkgoT(), err)

	// Wait for csv to reconcile the manual modification
	_, err = awaitCSV(crc, ns.GetName(), csv.GetName(), csvSucceededChecker)
	require.NoError(GinkgoT(), err)

	// The deployment should now carry the injected env var
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		dep, err := c.GetDeployment(ns.GetName(), deploymentName)
		if err != nil {
			return false, nil
		}
		if len(dep.Spec.Template.Spec.Containers[0].Env) == 0 {
			return false, nil
		}
		for _, envVar := range dep.Spec.Template.Spec.Containers[0].Env {
			if envVar == addedEnvVar {
				return true, nil
			}
		}
		return false, nil
	})
	require.NoError(GinkgoT(), err)

	// Create the catalog sources
	// Updated csv has the same deployment strategy as main (no env var),
	// so applying it should stomp the manual change.
	updatedCSV := newCSV(mainPackageStable+"-next", ns.GetName(), mainCSV.GetName(), semver.MustParse("0.2.0"), []apiextensions.CustomResourceDefinition{mainCRD}, nil, &mainNamedStrategy)
	updatedManifests := []registry.PackageManifest{
		{
			PackageName: mainPackageName,
			Channels: []registry.PackageChannel{
				{Name: stableChannel, CurrentCSVName: updatedCSV.GetName()},
			},
			DefaultChannelName: stableChannel,
		},
	}

	// Update catalog with the updated CSV
	updateInternalCatalog(GinkgoT(), c, crc, mainCatalogName, ns.GetName(), []apiextensions.CustomResourceDefinition{mainCRD}, []operatorsv1alpha1.ClusterServiceVersion{mainCSV, updatedCSV}, updatedManifests)

	// BUGFIX: reassign the re-fetched subscription instead of discarding it.
	// Previously the result was dropped (`_, err = ...`) and the stale
	// subscription's InstallPlanRef (the ORIGINAL install plan name) was read
	// below, so the test waited on the already-complete old plan rather than
	// the new one generated for the updated CSV.
	subscription, err = fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanDifferentChecker(fetchedInstallPlan.GetName()))
	require.NoError(GinkgoT(), err)
	require.NotNil(GinkgoT(), subscription)

	updatedInstallPlanName := subscription.Status.InstallPlanRef.Name

	// Wait for the NEW InstallPlan to be status: Complete before checking resource presence
	fetchedUpdatedInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, updatedInstallPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete))
	require.NoError(GinkgoT(), err)
	require.Equal(GinkgoT(), operatorsv1alpha1.InstallPlanPhaseComplete, fetchedUpdatedInstallPlan.Status.Phase)

	// Wait for csv to update
	_, err = awaitCSV(crc, ns.GetName(), updatedCSV.GetName(), csvSucceededChecker)
	require.NoError(GinkgoT(), err)

	// Should have created deployment and stomped on the env changes
	updatedDep, err := c.GetDeployment(ns.GetName(), deploymentName)
	require.NoError(GinkgoT(), err)
	require.NotNil(GinkgoT(), updatedDep)

	// The manually-added env var must be GONE after the catalog update
	for _, envVar := range updatedDep.Spec.Template.Spec.Containers[0].Env {
		require.False(GinkgoT(), envVar == addedEnvVar)
	}
})
// UpdateSingleExistingCRDOwner: upgrade the single CSV that owns a CRD to a
// replacement CSV carrying an expanded CRD (new v1alpha2 version), and verify
// the in-cluster CRD is updated to the new set of versions.
It("UpdateSingleExistingCRDOwner", func() {
	mainPackageName := genName("nginx-update-")
	mainPackageStable := fmt.Sprintf("%s-stable", mainPackageName)
	mainPackageBeta := fmt.Sprintf("%s-beta", mainPackageName)
	stableChannel := "stable"
	crdPlural := genName("ins-update-")
	crdName := crdPlural + ".cluster.com"
	// Initial CRD: single version v1alpha1 (served, storage)
	mainCRD := apiextensions.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Name: crdName,
		},
		Spec: apiextensions.CustomResourceDefinitionSpec{
			Group: "cluster.com",
			Versions: []apiextensions.CustomResourceDefinitionVersion{
				{
					Name:    "v1alpha1",
					Served:  true,
					Storage: true,
					Schema: &apiextensions.CustomResourceValidation{
						OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
							Type:        "object",
							Description: "my crd schema",
						},
					},
				},
			},
			Names: apiextensions.CustomResourceDefinitionNames{
				Plural:   crdPlural,
				Singular: crdPlural,
				Kind:     crdPlural,
				ListKind: "list" + crdPlural,
			},
			Scope: apiextensions.NamespaceScoped,
		},
	}
	// Updated CRD: same name, adds served (non-storage) v1alpha2
	updatedCRD := apiextensions.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Name: crdName,
		},
		Spec: apiextensions.CustomResourceDefinitionSpec{
			Group: "cluster.com",
			Versions: []apiextensions.CustomResourceDefinitionVersion{
				{
					Name:    "v1alpha1",
					Served:  true,
					Storage: true,
					Schema: &apiextensions.CustomResourceValidation{
						OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
							Type:        "object",
							Description: "my crd schema",
						},
					},
				},
				{
					Name:    "v1alpha2",
					Served:  true,
					Storage: false,
					Schema: &apiextensions.CustomResourceValidation{
						OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
							Type:        "object",
							Description: "my crd schema",
						},
					},
				},
			},
			Names: apiextensions.CustomResourceDefinitionNames{
				Plural:   crdPlural,
				Singular: crdPlural,
				Kind:     crdPlural,
				ListKind: "list" + crdPlural,
			},
			Scope: apiextensions.NamespaceScoped,
		},
	}
	// betaCSV replaces mainCSV and owns the updated CRD
	mainCSV := newCSV(mainPackageStable, ns.GetName(), "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{mainCRD}, nil, nil)
	betaCSV := newCSV(mainPackageBeta, ns.GetName(), mainPackageStable, semver.MustParse("0.2.0"), []apiextensions.CustomResourceDefinition{updatedCRD}, nil, nil)

	// Defer CRD clean up (both names resolve to the same CRD object)
	defer func() {
		Eventually(func() error {
			return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), mainCRD.GetName(), metav1.DeleteOptions{}))
		}).Should(Succeed())
		Eventually(func() error {
			return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), updatedCRD.GetName(), metav1.DeleteOptions{}))
		}).Should(Succeed())
	}()
	defer func() {
		require.NoError(GinkgoT(), crc.OperatorsV1alpha1().Subscriptions(ns.GetName()).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{}))
	}()

	mainCatalogName := genName("mock-ocs-main-update-")

	// Create separate manifests for each CatalogSource
	mainManifests := []registry.PackageManifest{
		{
			PackageName: mainPackageName,
			Channels: []registry.PackageChannel{
				{Name: stableChannel, CurrentCSVName: mainPackageStable},
			},
			DefaultChannelName: stableChannel,
		},
	}

	// Create the catalog sources
	_, cleanupMainCatalogSource := createInternalCatalogSource(c, crc, mainCatalogName, ns.GetName(), mainManifests, []apiextensions.CustomResourceDefinition{mainCRD}, []operatorsv1alpha1.ClusterServiceVersion{mainCSV})
	defer cleanupMainCatalogSource()

	// Attempt to get the catalog source before creating install plan
	_, err := fetchCatalogSourceOnStatus(crc, mainCatalogName, ns.GetName(), catalogSourceRegistryPodSynced)
	require.NoError(GinkgoT(), err)

	subscriptionName := genName("sub-nginx-update-")
	createSubscriptionForCatalog(crc, ns.GetName(), subscriptionName, mainCatalogName, mainPackageName, stableChannel, "", operatorsv1alpha1.ApprovalAutomatic)

	subscription, err := fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanChecker)
	require.NoError(GinkgoT(), err)
	require.NotNil(GinkgoT(), subscription)
	require.NotNil(GinkgoT(), subscription.Status.InstallPlanRef)
	require.Equal(GinkgoT(), mainCSV.GetName(), subscription.Status.CurrentCSV)

	installPlanName := subscription.Status.InstallPlanRef.Name

	// Wait for InstallPlan to be status: Complete before checking resource presence
	fetchedInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, installPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete))
	require.NoError(GinkgoT(), err)
	require.Equal(GinkgoT(), operatorsv1alpha1.InstallPlanPhaseComplete, fetchedInstallPlan.Status.Phase)

	// Fetch installplan again to check for unnecessary control loops:
	// the object must not have been rewritten between the two fetches.
	fetchedInstallPlan, err = fetchInstallPlan(GinkgoT(), crc, fetchedInstallPlan.GetName(), ns.GetName(), func(fip *operatorsv1alpha1.InstallPlan) bool {
		Expect(equality.Semantic.DeepEqual(fetchedInstallPlan, fip)).Should(BeTrue(), diff.ObjectDiff(fetchedInstallPlan, fip))
		return true
	})
	require.NoError(GinkgoT(), err)

	// Verify CSV is created
	_, err = awaitCSV(crc, ns.GetName(), mainCSV.GetName(), csvAnyChecker)
	require.NoError(GinkgoT(), err)

	// Point the channel at the beta CSV and push the updated CRD + both CSVs
	mainManifests = []registry.PackageManifest{
		{
			PackageName: mainPackageName,
			Channels: []registry.PackageChannel{
				{Name: stableChannel, CurrentCSVName: mainPackageBeta},
			},
			DefaultChannelName: stableChannel,
		},
	}

	updateInternalCatalog(GinkgoT(), c, crc, mainCatalogName, ns.GetName(), []apiextensions.CustomResourceDefinition{updatedCRD}, []operatorsv1alpha1.ClusterServiceVersion{mainCSV, betaCSV}, mainManifests)

	// Wait for subscription to update
	updatedSubscription, err := fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanDifferentChecker(fetchedInstallPlan.GetName()))
	require.NoError(GinkgoT(), err)

	// Verify installplan created and installed
	fetchedUpdatedInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, updatedSubscription.Status.InstallPlanRef.Name, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete))
	require.NoError(GinkgoT(), err)
	require.NotEqual(GinkgoT(), fetchedInstallPlan.GetName(), fetchedUpdatedInstallPlan.GetName())

	// Wait for csv to update
	_, err = awaitCSV(crc, ns.GetName(), betaCSV.GetName(), csvAnyChecker)
	require.NoError(GinkgoT(), err)

	// Get the CRD to see if it is updated
	fetchedCRD, err := c.ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), crdName, metav1.GetOptions{})
	require.NoError(GinkgoT(), err)
	require.Equal(GinkgoT(), len(fetchedCRD.Spec.Versions), len(updatedCRD.Spec.Versions), "The CRD versions counts don't match")

	// Compare the fetched CRD's version set against the expected set using
	// crdVersionKey (name, served, storage) as the identity.
	fetchedCRDVersions := map[crdVersionKey]struct{}{}
	for _, version := range fetchedCRD.Spec.Versions {
		key := crdVersionKey{
			name:    version.Name,
			served:  version.Served,
			storage: version.Storage,
		}
		fetchedCRDVersions[key] = struct{}{}
	}
	for _, version := range updatedCRD.Spec.Versions {
		key := crdVersionKey{
			name:    version.Name,
			served:  version.Served,
			storage: version.Storage,
		}
		_, ok := fetchedCRDVersions[key]
		require.True(GinkgoT(), ok, "couldn't find %v in fetched CRD versions: %#v", key, fetchedCRDVersions)
	}
})
It("UpdatePreexistingCRDFailed", func() {
defer func() {
require.NoError(GinkgoT(), crc.OperatorsV1alpha1().Subscriptions(ns.GetName()).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{}))
}()
mainPackageName := genName("nginx-update2-")
mainPackageStable := fmt.Sprintf("%s-stable", mainPackageName)
stableChannel := "stable"
crdPlural := genName("ins-update2-")
crdName := crdPlural + ".cluster.com"
mainCRD := apiextensions.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: crdName,
},
Spec: apiextensions.CustomResourceDefinitionSpec{
Group: "cluster.com",
Versions: []apiextensions.CustomResourceDefinitionVersion{
{
Name: "v1alpha1",
Served: true,
Storage: true,
Schema: &apiextensions.CustomResourceValidation{
OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
Type: "object",
Description: "my crd schema",
},
},
},
},
Names: apiextensions.CustomResourceDefinitionNames{
Plural: crdPlural,
Singular: crdPlural,
Kind: crdPlural,
ListKind: "list" + crdPlural,
},
Scope: apiextensions.NamespaceScoped,
},
}
updatedCRD := apiextensions.CustomResourceDefinition{
ObjectMeta: metav1.ObjectMeta{
Name: crdName,
},
Spec: apiextensions.CustomResourceDefinitionSpec{
Group: "cluster.com",
Versions: []apiextensions.CustomResourceDefinitionVersion{
{
Name: "v1alpha1",
Served: true,
Storage: true,
Schema: &apiextensions.CustomResourceValidation{
OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
Type: "object",
Description: "my crd schema",
},
},
},
{
Name: "v1alpha2",
Served: true,
Storage: false,
Schema: &apiextensions.CustomResourceValidation{
OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
Type: "object",
Description: "my crd schema",
},
},
},
},
Names: apiextensions.CustomResourceDefinitionNames{
Plural: crdPlural,
Singular: crdPlural,
Kind: crdPlural,
ListKind: "list" + crdPlural,
},
Scope: apiextensions.NamespaceScoped,
},
}
expectedCRDVersions := map[crdVersionKey]struct{}{}
for _, version := range mainCRD.Spec.Versions {
key := crdVersionKey{
name: version.Name,
served: version.Served,
storage: version.Storage,
}
expectedCRDVersions[key] = struct{}{}
}
// Create the initial CSV
cleanupCRD, err := createCRD(c, mainCRD)
require.NoError(GinkgoT(), err)
defer cleanupCRD()
mainCSV := newCSV(mainPackageStable, ns.GetName(), "", semver.MustParse("0.1.0"), nil, nil, nil)
mainCatalogName := genName("mock-ocs-main-update2-")
// Create separate manifests for each CatalogSource
mainManifests := []registry.PackageManifest{
{
PackageName: mainPackageName,
Channels: []registry.PackageChannel{
{Name: stableChannel, CurrentCSVName: mainPackageStable},
},
DefaultChannelName: stableChannel,
},
}
// Create the catalog sources
_, cleanupMainCatalogSource := createInternalCatalogSource(c, crc, mainCatalogName, ns.GetName(), mainManifests, []apiextensions.CustomResourceDefinition{updatedCRD}, []operatorsv1alpha1.ClusterServiceVersion{mainCSV})
defer cleanupMainCatalogSource()
// Attempt to get the catalog source before creating install plan
_, err = fetchCatalogSourceOnStatus(crc, mainCatalogName, ns.GetName(), catalogSourceRegistryPodSynced)
require.NoError(GinkgoT(), err)
subscriptionName := genName("sub-nginx-update2-")
subscriptionCleanup := createSubscriptionForCatalog(crc, ns.GetName(), subscriptionName, mainCatalogName, mainPackageName, stableChannel, "", operatorsv1alpha1.ApprovalAutomatic)
defer subscriptionCleanup()
subscription, err := fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanChecker)
require.NoError(GinkgoT(), err)
require.NotNil(GinkgoT(), subscription)
require.NotNil(GinkgoT(), subscription.Status.InstallPlanRef)
require.Equal(GinkgoT(), mainCSV.GetName(), subscription.Status.CurrentCSV)
installPlanName := subscription.Status.InstallPlanRef.Name
// Wait for InstallPlan to be status: Complete before checking resource presence
fetchedInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, installPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete))
require.NoError(GinkgoT(), err)
require.Equal(GinkgoT(), operatorsv1alpha1.InstallPlanPhaseComplete, fetchedInstallPlan.Status.Phase)
// Fetch installplan again to check for unnecessary control loops
fetchedInstallPlan, err = fetchInstallPlan(GinkgoT(), crc, fetchedInstallPlan.GetName(), ns.GetName(), func(fip *operatorsv1alpha1.InstallPlan) bool {
Expect(equality.Semantic.DeepEqual(fetchedInstallPlan, fip)).Should(BeTrue(), diff.ObjectDiff(fetchedInstallPlan, fip))
return true
})
require.NoError(GinkgoT(), err)
// Verify CSV is created
_, err = awaitCSV(crc, ns.GetName(), mainCSV.GetName(), csvAnyChecker)
require.NoError(GinkgoT(), err)
// Get the CRD to see if it is updated
fetchedCRD, err := c.ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), crdName, metav1.GetOptions{})
require.NoError(GinkgoT(), err)
require.Equal(GinkgoT(), len(fetchedCRD.Spec.Versions), len(mainCRD.Spec.Versions), "The CRD versions counts don't match")
fetchedCRDVersions := map[crdVersionKey]struct{}{}
for _, version := range fetchedCRD.Spec.Versions {
key := crdVersionKey{
name: version.Name,
served: version.Served,
storage: version.Storage,
}
fetchedCRDVersions[key] = struct{}{}
}
for _, version := range mainCRD.Spec.Versions {
key := crdVersionKey{
name: version.Name,
served: version.Served,
storage: version.Storage,
}
_, ok := fetchedCRDVersions[key]
require.True(GinkgoT(), ok, "couldn't find %v in fetched CRD versions: %#v", key, fetchedCRDVersions)
}
})
})
// This It spec creates an InstallPlan with a CSV containing a set of permissions to be resolved.
It("creation with permissions", func() {
	packageName := genName("nginx")
	stableChannel := "stable"
	stableCSVName := packageName + "-stable"

	// Create manifests
	manifests := []registry.PackageManifest{
		{
			PackageName: packageName,
			Channels: []registry.PackageChannel{
				{
					Name:           stableChannel,
					CurrentCSVName: stableCSVName,
				},
			},
			DefaultChannelName: stableChannel,
		},
	}

	// Create new CRDs
	crdPlural := genName("ins")
	crd := newCRD(crdPlural)

	// Defer CRD clean up
	defer func() {
		Eventually(func() error {
			return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), crd.GetName(), metav1.DeleteOptions{}))
		}).Should(Succeed())
	}()

	// Generate permissions for the operator's service account.
	serviceAccountName := genName("nginx-sa")
	permissions := []operatorsv1alpha1.StrategyDeploymentPermissions{
		{
			ServiceAccountName: serviceAccountName,
			Rules: []rbacv1.PolicyRule{
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{"cluster.com"},
					Resources: []string{crdPlural},
				},
				// Permissions must be different than ClusterPermissions defined below if OLM is going to lift role/rolebindings to cluster level.
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{corev1.GroupName},
					Resources: []string{corev1.ResourceConfigMaps.String()},
				},
			},
		},
	}

	// Generate cluster-scoped permissions for the same service account.
	clusterPermissions := []operatorsv1alpha1.StrategyDeploymentPermissions{
		{
			ServiceAccountName: serviceAccountName,
			Rules: []rbacv1.PolicyRule{
				{
					Verbs:     []string{rbac.VerbAll},
					APIGroups: []string{"cluster.com"},
					Resources: []string{crdPlural},
				},
			},
		},
	}

	// Create a new NamedInstallStrategy
	namedStrategy := newNginxInstallStrategy(genName("dep-"), permissions, clusterPermissions)

	// Create new CSVs
	stableCSV := newCSV(stableCSVName, ns.GetName(), "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{crd}, nil, &namedStrategy)

	defer func() {
		require.NoError(GinkgoT(), crc.OperatorsV1alpha1().Subscriptions(ns.GetName()).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{}))
	}()

	// Create CatalogSource
	mainCatalogSourceName := genName("nginx-catalog")
	_, cleanupCatalogSource := createInternalCatalogSource(c, crc, mainCatalogSourceName, ns.GetName(), manifests, []apiextensions.CustomResourceDefinition{crd}, []operatorsv1alpha1.ClusterServiceVersion{stableCSV})
	defer cleanupCatalogSource()

	// Attempt to get CatalogSource
	_, err := fetchCatalogSourceOnStatus(crc, mainCatalogSourceName, ns.GetName(), catalogSourceRegistryPodSynced)
	require.NoError(GinkgoT(), err)

	subscriptionName := genName("sub-nginx-")
	subscriptionCleanup := createSubscriptionForCatalog(crc, ns.GetName(), subscriptionName, mainCatalogSourceName, packageName, stableChannel, "", operatorsv1alpha1.ApprovalAutomatic)
	defer subscriptionCleanup()

	subscription, err := fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanChecker)
	require.NoError(GinkgoT(), err)
	require.NotNil(GinkgoT(), subscription)

	installPlanName := subscription.Status.InstallPlanRef.Name

	// Attempt to get InstallPlan
	fetchedInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, installPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseFailed, operatorsv1alpha1.InstallPlanPhaseComplete))
	require.NoError(GinkgoT(), err)
	require.NotEqual(GinkgoT(), operatorsv1alpha1.InstallPlanPhaseFailed, fetchedInstallPlan.Status.Phase, "InstallPlan failed")

	// Expect correct RBAC resources to be resolved and created
	expectedSteps := map[registry.ResourceKey]struct{}{
		{Name: crd.Name, Kind: "CustomResourceDefinition"}:   {},
		{Name: stableCSVName, Kind: "ClusterServiceVersion"}: {},
		{Name: serviceAccountName, Kind: "ServiceAccount"}:   {},
		{Name: stableCSVName, Kind: "Role"}:                  {},
		{Name: stableCSVName, Kind: "RoleBinding"}:           {},
		{Name: stableCSVName, Kind: "ClusterRole"}:           {},
		{Name: stableCSVName, Kind: "ClusterRoleBinding"}:    {},
	}

	require.Equal(GinkgoT(), len(expectedSteps), len(fetchedInstallPlan.Status.Plan), "number of expected steps does not match installed")

	for _, step := range fetchedInstallPlan.Status.Plan {
		key := registry.ResourceKey{
			Name: step.Resource.Name,
			Kind: step.Resource.Kind,
		}
		for expected := range expectedSteps {
			if expected == key {
				delete(expectedSteps, expected)
			} else if strings.HasPrefix(key.Name, expected.Name) && key.Kind == expected.Kind {
				// Generated RBAC resource names carry a suffix, so match on prefix + kind.
				delete(expectedSteps, expected)
			} else {
				GinkgoT().Logf("%v, %v: %v && %v", key, expected, strings.HasPrefix(key.Name, expected.Name), key.Kind == expected.Kind)
			}
		}

		// This operator was installed into a global operator group, so the roles should have been lifted to clusterroles
		if step.Resource.Kind == "Role" {
			err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
				_, err = c.GetClusterRole(step.Resource.Name)
				if err != nil {
					if apierrors.IsNotFound(err) {
						return false, nil
					}
					return false, err
				}
				return true, nil
			})
			require.NoError(GinkgoT(), err)
		}
		if step.Resource.Kind == "RoleBinding" {
			err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
				_, err = c.GetClusterRoleBinding(step.Resource.Name)
				if err != nil {
					if apierrors.IsNotFound(err) {
						return false, nil
					}
					return false, err
				}
				return true, nil
			})
			require.NoError(GinkgoT(), err)
		}
	}

	// Should have removed every matching step
	require.Equal(GinkgoT(), 0, len(expectedSteps), "Actual resource steps do not match expected: %#v", expectedSteps)

	// the test from here out verifies created RBAC is removed after CSV deletion
	createdClusterRoles, err := c.KubernetesInterface().RbacV1().ClusterRoles().List(context.Background(), metav1.ListOptions{LabelSelector: fmt.Sprintf("%v=%v", ownerutil.OwnerKey, stableCSVName)})
	// Previously unchecked: a failed List would silently yield an empty set and
	// make the deletion verification below vacuous.
	require.NoError(GinkgoT(), err)
	createdClusterRoleNames := map[string]struct{}{}
	for _, role := range createdClusterRoles.Items {
		createdClusterRoleNames[role.GetName()] = struct{}{}
		GinkgoT().Logf("Monitoring cluster role %v", role.GetName())
	}

	createdClusterRoleBindings, err := c.KubernetesInterface().RbacV1().ClusterRoleBindings().List(context.Background(), metav1.ListOptions{LabelSelector: fmt.Sprintf("%v=%v", ownerutil.OwnerKey, stableCSVName)})
	// Previously unchecked for the same reason as the ClusterRoles List above.
	require.NoError(GinkgoT(), err)
	createdClusterRoleBindingNames := map[string]struct{}{}
	for _, binding := range createdClusterRoleBindings.Items {
		createdClusterRoleBindingNames[binding.GetName()] = struct{}{}
		GinkgoT().Logf("Monitoring cluster role binding %v", binding.GetName())
	}

	crWatcher, err := c.KubernetesInterface().RbacV1().ClusterRoles().Watch(context.Background(), metav1.ListOptions{LabelSelector: fmt.Sprintf("%v=%v", ownerutil.OwnerKey, stableCSVName)})
	require.NoError(GinkgoT(), err)
	// Stop the watches on spec exit so their connections are not leaked.
	defer crWatcher.Stop()
	crbWatcher, err := c.KubernetesInterface().RbacV1().ClusterRoleBindings().Watch(context.Background(), metav1.ListOptions{LabelSelector: fmt.Sprintf("%v=%v", ownerutil.OwnerKey, stableCSVName)})
	require.NoError(GinkgoT(), err)
	defer crbWatcher.Stop()

	done := make(chan struct{})
	errExit := make(chan error)
	// Watch for deletions of the owned cluster roles/bindings; signal done once
	// both tracking sets are empty (or the poll duration elapses).
	go func() {
		defer GinkgoRecover()
		for {
			select {
			case evt, ok := <-crWatcher.ResultChan():
				if !ok {
					errExit <- errors.New("cr watch channel closed unexpectedly")
					return
				}
				if evt.Type == watch.Deleted {
					cr, ok := evt.Object.(*rbacv1.ClusterRole)
					if !ok {
						continue
					}
					delete(createdClusterRoleNames, cr.GetName())
					if len(createdClusterRoleNames) == 0 && len(createdClusterRoleBindingNames) == 0 {
						done <- struct{}{}
						return
					}
				}
			case evt, ok := <-crbWatcher.ResultChan():
				if !ok {
					errExit <- errors.New("crb watch channel closed unexpectedly")
					return
				}
				if evt.Type == watch.Deleted {
					crb, ok := evt.Object.(*rbacv1.ClusterRoleBinding)
					if !ok {
						continue
					}
					delete(createdClusterRoleBindingNames, crb.GetName())
					if len(createdClusterRoleNames) == 0 && len(createdClusterRoleBindingNames) == 0 {
						done <- struct{}{}
						return
					}
				}
			case <-time.After(pollDuration):
				// Give up waiting; the Emptyf assertions below will report
				// whichever resources were not observed as deleted.
				done <- struct{}{}
				return
			}
		}
	}()

	GinkgoT().Logf("Deleting CSV '%v' in namespace %v", stableCSVName, ns.GetName())
	require.NoError(GinkgoT(), crc.OperatorsV1alpha1().ClusterServiceVersions(ns.GetName()).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{}))
	select {
	case <-done:
		break
	case err := <-errExit:
		GinkgoT().Fatal(err)
	}

	require.Emptyf(GinkgoT(), createdClusterRoleNames, "unexpected cluster role remain: %v", createdClusterRoleNames)
	require.Emptyf(GinkgoT(), createdClusterRoleBindingNames, "unexpected cluster role binding remain: %v", createdClusterRoleBindingNames)

	// The operator's ServiceAccount must be garbage-collected along with the CSV.
	Eventually(func() error {
		_, err := c.GetServiceAccount(ns.GetName(), serviceAccountName)
		if err == nil {
			return fmt.Errorf("The %v/%v ServiceAccount should have been deleted", ns.GetName(), serviceAccountName)
		}
		if !apierrors.IsNotFound(err) {
			return err
		}
		return nil
	}, timeout, interval).Should(BeNil())
})
It("CRD validation", func() {
	// Tests if CRD validation works with the "minimum" property after being
	// pulled from a CatalogSource's operator-registry.
	crdPlural := genName("ins")
	crdName := crdPlural + ".cluster.com"
	// Bounds for the "scalar" property; these must survive the round trip
	// through the registry for the InstallPlan to apply the CRD successfully.
	var min float64 = 2
	var max float64 = 256

	// Create CRD with offending property
	crd := apiextensions.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Name: crdName,
		},
		Spec: apiextensions.CustomResourceDefinitionSpec{
			Group: "cluster.com",
			Versions: []apiextensions.CustomResourceDefinitionVersion{
				{
					Name:    "v1alpha1",
					Served:  true,
					Storage: true,
					Schema: &apiextensions.CustomResourceValidation{
						OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
							Type: "object",
							Properties: map[string]apiextensions.JSONSchemaProps{
								"spec": {
									Type:        "object",
									Description: "Spec of a test object.",
									Properties: map[string]apiextensions.JSONSchemaProps{
										"scalar": {
											Type:        "number",
											Description: "Scalar value that should have a min and max.",
											Minimum:     &min,
											Maximum:     &max,
										},
									},
								},
							},
						},
					},
				},
			},
			Names: apiextensions.CustomResourceDefinitionNames{
				Plural:   crdPlural,
				Singular: crdPlural,
				Kind:     crdPlural,
				ListKind: "list" + crdPlural,
			},
			Scope: apiextensions.NamespaceScoped,
		},
	}

	// Defer CRD clean up
	defer func() {
		Eventually(func() error {
			return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), crd.GetName(), metav1.DeleteOptions{}))
		}).Should(Succeed())
	}()

	// Create CSV referencing the CRD above.
	packageName := genName("nginx-")
	stableChannel := "stable"
	packageNameStable := packageName + "-" + stableChannel
	csv := newCSV(packageNameStable, ns.GetName(), "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{crd}, nil, nil)

	// Create PackageManifests
	manifests := []registry.PackageManifest{
		{
			PackageName: packageName,
			Channels: []registry.PackageChannel{
				{Name: stableChannel, CurrentCSVName: packageNameStable},
			},
			DefaultChannelName: stableChannel,
		},
	}

	// Create the CatalogSource serving the CRD and CSV defined above.
	catalogSourceName := genName("mock-nginx-")
	_, cleanupCatalogSource := createInternalCatalogSource(c, crc, catalogSourceName, ns.GetName(), manifests, []apiextensions.CustomResourceDefinition{crd}, []operatorsv1alpha1.ClusterServiceVersion{csv})
	defer cleanupCatalogSource()

	// Attempt to get the catalog source before creating install plan
	_, err := fetchCatalogSourceOnStatus(crc, catalogSourceName, ns.GetName(), catalogSourceRegistryPodSynced)
	require.NoError(GinkgoT(), err)

	// Subscribing triggers resolution and creation of the InstallPlan.
	subscriptionName := genName("sub-nginx-")
	cleanupSubscription := createSubscriptionForCatalog(crc, ns.GetName(), subscriptionName, catalogSourceName, packageName, stableChannel, "", operatorsv1alpha1.ApprovalAutomatic)
	defer cleanupSubscription()

	subscription, err := fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanChecker)
	require.NoError(GinkgoT(), err)
	require.NotNil(GinkgoT(), subscription)

	installPlanName := subscription.Status.InstallPlanRef.Name

	// Wait for InstallPlan to be status: Complete before checking resource presence.
	// Failed is also accepted by the fetch so that a failing plan surfaces quickly
	// instead of timing out; the Equal assertion below then rejects it.
	fetchedInstallPlan, err := fetchInstallPlan(GinkgoT(), crc, installPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete, operatorsv1alpha1.InstallPlanPhaseFailed))
	require.NoError(GinkgoT(), err)
	GinkgoT().Logf("Install plan %s fetched with status %s", fetchedInstallPlan.GetName(), fetchedInstallPlan.Status.Phase)

	require.Equal(GinkgoT(), operatorsv1alpha1.InstallPlanPhaseComplete, fetchedInstallPlan.Status.Phase)
})
// This It spec verifies that, in cases where there are multiple options to fulfil a dependency
// across multiple catalogs, we only generate one installplan with one set of resolved resources.
//issue: https://github.com/operator-framework/operator-lifecycle-manager/issues/2633
It("[FLAKE] consistent generation", func() {
	// Configure catalogs:
	//  - one catalog with a package that has a dependency
	//  - several duplicate catalog with a package that satisfies the dependency
	// Install the package from the main catalog
	// Should see only 1 installplan created
	// Should see the main CSV installed

	// log prefixes messages with a timestamp to help correlate with operator logs.
	log := func(s string) {
		GinkgoT().Logf("%s: %s", time.Now().Format("15:04:05.9999"), s)
	}

	ns := &corev1.Namespace{}
	ns.SetName(genName("ns-"))

	// Create a namespace an OperatorGroup
	ns, err := c.KubernetesInterface().CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)
	deleteOpts := &metav1.DeleteOptions{}
	defer func() {
		require.NoError(GinkgoT(), c.KubernetesInterface().CoreV1().Namespaces().Delete(context.Background(), ns.GetName(), *deleteOpts))
	}()

	og := &operatorsv1.OperatorGroup{}
	og.SetName("og")
	_, err = crc.OperatorsV1().OperatorGroups(ns.GetName()).Create(context.Background(), og, metav1.CreateOptions{})
	require.NoError(GinkgoT(), err)

	mainPackageName := genName("nginx-")
	dependentPackageName := genName("nginxdep-")

	mainPackageStable := fmt.Sprintf("%s-stable", mainPackageName)
	dependentPackageStable := fmt.Sprintf("%s-stable", dependentPackageName)

	stableChannel := "stable"

	// The main CSV requires the dependent CRD; the dependent CSV owns it.
	dependentCRD := newCRD(genName("ins-"))
	mainCSV := newCSV(mainPackageStable, ns.GetName(), "", semver.MustParse("0.1.0"), nil, []apiextensions.CustomResourceDefinition{dependentCRD}, nil)
	dependentCSV := newCSV(dependentPackageStable, ns.GetName(), "", semver.MustParse("0.1.0"), []apiextensions.CustomResourceDefinition{dependentCRD}, nil, nil)

	defer func() {
		require.NoError(GinkgoT(), crc.OperatorsV1alpha1().Subscriptions(ns.GetName()).DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{}))
	}()

	dependentCatalogName := genName("mock-ocs-dependent-")
	mainCatalogName := genName("mock-ocs-main-")

	// Create separate manifests for each CatalogSource
	mainManifests := []registry.PackageManifest{
		{
			PackageName: mainPackageName,
			Channels: []registry.PackageChannel{
				{Name: stableChannel, CurrentCSVName: mainPackageStable},
			},
			DefaultChannelName: stableChannel,
		},
	}

	dependentManifests := []registry.PackageManifest{
		{
			PackageName: dependentPackageName,
			Channels: []registry.PackageChannel{
				{Name: stableChannel, CurrentCSVName: dependentPackageStable},
			},
			DefaultChannelName: stableChannel,
		},
	}

	// Defer CRD clean up
	defer func() {
		Eventually(func() error {
			return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), dependentCRD.GetName(), metav1.DeleteOptions{}))
		}).Should(Succeed())
	}()

	// Create the dependent catalog source
	_, cleanupDependentCatalogSource := createInternalCatalogSource(c, crc, dependentCatalogName, ns.GetName(), dependentManifests, []apiextensions.CustomResourceDefinition{dependentCRD}, []operatorsv1alpha1.ClusterServiceVersion{dependentCSV})
	defer cleanupDependentCatalogSource()

	// Attempt to get the catalog source before creating install plan
	dependentCatalogSource, err := fetchCatalogSourceOnStatus(crc, dependentCatalogName, ns.GetName(), catalogSourceRegistryPodSynced)
	require.NoError(GinkgoT(), err)

	// Create the alt dependent catalog sources
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ { // Creating more increases the odds that the race condition will be triggered
		wg.Add(1)
		go func(i int) {
			defer GinkgoRecover()
			// Deferred so wg.Wait() below cannot deadlock if an assertion in
			// this goroutine fails (the original called wg.Done() last, which
			// never ran when require aborted the goroutine).
			defer wg.Done()
			// Create a CatalogSource pointing to the grpc pod
			addressSource := &operatorsv1alpha1.CatalogSource{
				TypeMeta: metav1.TypeMeta{
					Kind:       operatorsv1alpha1.CatalogSourceKind,
					APIVersion: operatorsv1alpha1.CatalogSourceCRDAPIVersion,
				},
				Spec: operatorsv1alpha1.CatalogSourceSpec{
					SourceType: operatorsv1alpha1.SourceTypeGrpc,
					Address:    dependentCatalogSource.Status.RegistryServiceStatus.Address(),
					GrpcPodConfig: &operatorsv1alpha1.GrpcPodConfig{
						SecurityContextConfig: operatorsv1alpha1.Restricted,
					},
				},
			}
			addressSource.SetName(genName("alt-dep-"))

			_, err := crc.OperatorsV1alpha1().CatalogSources(ns.GetName()).Create(context.Background(), addressSource, metav1.CreateOptions{})
			require.NoError(GinkgoT(), err)

			// Attempt to get the catalog source before creating install plan
			_, err = fetchCatalogSourceOnStatus(crc, addressSource.GetName(), ns.GetName(), catalogSourceRegistryPodSynced)
			require.NoError(GinkgoT(), err)
		}(i)
	}
	wg.Wait()

	// Create the main catalog source
	_, cleanupMainCatalogSource := createInternalCatalogSource(c, crc, mainCatalogName, ns.GetName(), mainManifests, nil, []operatorsv1alpha1.ClusterServiceVersion{mainCSV})
	defer cleanupMainCatalogSource()

	// Attempt to get the catalog source before creating install plan
	_, err = fetchCatalogSourceOnStatus(crc, mainCatalogName, ns.GetName(), catalogSourceRegistryPodSynced)
	require.NoError(GinkgoT(), err)

	subscriptionName := genName("sub-nginx-")
	subscriptionCleanup := createSubscriptionForCatalog(crc, ns.GetName(), subscriptionName, mainCatalogName, mainPackageName, stableChannel, "", operatorsv1alpha1.ApprovalAutomatic)
	defer subscriptionCleanup()

	subscription, err := fetchSubscription(crc, ns.GetName(), subscriptionName, subscriptionHasInstallPlanChecker)
	require.NoError(GinkgoT(), err)
	require.NotNil(GinkgoT(), subscription)

	installPlanName := subscription.Status.InstallPlanRef.Name

	// Wait for InstallPlan to be status: Complete before checking resource presence
	fetchedInstallPlan, err := fetchInstallPlanWithNamespace(GinkgoT(), crc, installPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseComplete))
	require.NoError(GinkgoT(), err)
	log(fmt.Sprintf("Install plan %s fetched with status %s", fetchedInstallPlan.GetName(), fetchedInstallPlan.Status.Phase))
	require.Equal(GinkgoT(), operatorsv1alpha1.InstallPlanPhaseComplete, fetchedInstallPlan.Status.Phase)

	// Verify CSV is created
	_, err = awaitCSV(crc, ns.GetName(), mainCSV.GetName(), csvSucceededChecker)
	require.NoError(GinkgoT(), err)

	// Make sure to clean up the installed CRD
	defer func() {
		require.NoError(GinkgoT(), c.ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), dependentCRD.GetName(), *deleteOpts))
	}()

	// ensure there is only one installplan
	ips, err := crc.OperatorsV1alpha1().InstallPlans(ns.GetName()).List(context.Background(), metav1.ListOptions{})
	require.NoError(GinkgoT(), err)
	require.Equal(GinkgoT(), 1, len(ips.Items), "If this test fails it should be taken seriously and not treated as a flake. \n%v", ips.Items)
})
When("an InstallPlan is created with no valid OperatorGroup present", func() {
	var (
		installPlanName string
		ns              *corev1.Namespace
	)

	BeforeEach(func() {
		ns = &corev1.Namespace{}
		ns.SetName(genName("ns-"))

		// Create a namespace
		Eventually(func() error {
			return ctx.Ctx().Client().Create(context.Background(), ns)
		}, timeout, interval).Should(Succeed(), "could not create Namespace")

		// Create an InstallPlan already in the Installing phase, with no
		// OperatorGroup in the namespace to manage it.
		installPlanName = "ip"
		ip := newInstallPlanWithDummySteps(installPlanName, ns.GetName(), operatorsv1alpha1.InstallPlanPhaseInstalling)
		outIP, err := crc.OperatorsV1alpha1().InstallPlans(ns.GetName()).Create(context.Background(), ip, metav1.CreateOptions{})
		Expect(err).NotTo(HaveOccurred())
		Expect(outIP).NotTo(BeNil())

		// The status gets ignored on create so we need to update it else the InstallPlan sync ignores
		// InstallPlans without any steps or bundle lookups
		outIP.Status = ip.Status
		_, err = crc.OperatorsV1alpha1().InstallPlans(ns.GetName()).UpdateStatus(context.Background(), outIP, metav1.UpdateOptions{})
		Expect(err).NotTo(HaveOccurred())
	})

	AfterEach(func() {
		// Remove the InstallPlan and its namespace created in BeforeEach.
		err := crc.OperatorsV1alpha1().InstallPlans(ns.GetName()).Delete(context.Background(), installPlanName, metav1.DeleteOptions{})
		Expect(err).NotTo(HaveOccurred())
		err = c.KubernetesInterface().CoreV1().Namespaces().Delete(context.Background(), ns.GetName(), metav1.DeleteOptions{})
		Expect(err).ToNot(HaveOccurred())
	})

	// issue: https://github.com/operator-framework/operator-lifecycle-manager/issues/2636
	It("[FLAKE] should clear up the condition in the InstallPlan status that contains an error message when a valid OperatorGroup is created", func() {
		// first wait for a condition with a message exists
		cond := operatorsv1alpha1.InstallPlanCondition{Type: operatorsv1alpha1.InstallPlanInstalled, Status: corev1.ConditionFalse, Reason: operatorsv1alpha1.InstallPlanReasonInstallCheckFailed,
			Message: "no operator group found that is managing this namespace"}

		Eventually(func() bool {
			fetchedInstallPlan, err := fetchInstallPlanWithNamespace(GinkgoT(), crc, installPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseInstalling))
			if err != nil || fetchedInstallPlan == nil {
				return false
			}
			if fetchedInstallPlan.Status.Phase != operatorsv1alpha1.InstallPlanPhaseInstalling {
				return false
			}
			return hasCondition(fetchedInstallPlan, cond)
		}, 5*time.Minute, interval).Should(BeTrue())

		// Create an operatorgroup for the same namespace
		og := &operatorsv1.OperatorGroup{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "og",
				Namespace: ns.GetName(),
			},
			Spec: operatorsv1.OperatorGroupSpec{
				TargetNamespaces: []string{ns.GetName()},
			},
		}
		Eventually(func() error {
			return ctx.Ctx().Client().Create(context.Background(), og)
		}, timeout, interval).Should(Succeed(), "could not create OperatorGroup")

		// Wait for the OperatorGroup to be synced; its status.namespaces must
		// include the target namespace before the condition can be cleared.
		Eventually(
			func() ([]string, error) {
				err := ctx.Ctx().Client().Get(context.Background(), client.ObjectKeyFromObject(og), og)
				ctx.Ctx().Logf("Waiting for OperatorGroup(%v) to be synced with status.namespaces: %v", og.Name, og.Status.Namespaces)
				return og.Status.Namespaces, err
			},
			1*time.Minute,
			interval,
		).Should(ContainElement(ns.GetName()))

		// check that the condition has been cleared up
		Eventually(func() (bool, error) {
			fetchedInstallPlan, err := fetchInstallPlanWithNamespace(GinkgoT(), crc, installPlanName, ns.GetName(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseInstalling))
			if err != nil {
				return false, err
			}
			if fetchedInstallPlan == nil {
				return false, err
			}
			if hasCondition(fetchedInstallPlan, cond) {
				return false, nil
			}
			return true, nil
		}).Should(BeTrue())
	})
})
It("compresses installplan step resource manifests to configmap references", func() {
	// Test ensures that all steps for index-based catalogs are references to configmaps. This avoids the problem
	// of installplans growing beyond the etcd size limit when manifests are written to the ip status.
	ns, err := c.KubernetesInterface().CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: genName("ns-"),
		},
	}, metav1.CreateOptions{})
	Expect(err).ToNot(HaveOccurred())

	og := &operatorsv1.OperatorGroup{}
	og.SetName("og")
	_, err = crc.OperatorsV1().OperatorGroups(ns.GetName()).Create(context.Background(), og, metav1.CreateOptions{})
	Expect(err).ToNot(HaveOccurred())

	deleteOpts := &metav1.DeleteOptions{}
	defer c.KubernetesInterface().CoreV1().Namespaces().Delete(context.Background(), ns.GetName(), *deleteOpts)

	// Image-based (index) catalog — the compressed-manifest path under test
	// applies to catalogs resolved from bundle images.
	catsrc := &operatorsv1alpha1.CatalogSource{
		ObjectMeta: metav1.ObjectMeta{
			Name:      genName("kiali-"),
			Namespace: ns.GetName(),
			Labels:    map[string]string{"olm.catalogSource": "kaili-catalog"},
		},
		Spec: operatorsv1alpha1.CatalogSourceSpec{
			Image:      "quay.io/operator-framework/ci-index:latest",
			SourceType: operatorsv1alpha1.SourceTypeGrpc,
			GrpcPodConfig: &operatorsv1alpha1.GrpcPodConfig{
				SecurityContextConfig: operatorsv1alpha1.Restricted,
			},
		},
	}
	catsrc, err = crc.OperatorsV1alpha1().CatalogSources(catsrc.GetNamespace()).Create(context.Background(), catsrc, metav1.CreateOptions{})
	Expect(err).ToNot(HaveOccurred())

	// Wait for the CatalogSource to be ready
	catsrc, err = fetchCatalogSourceOnStatus(crc, catsrc.GetName(), catsrc.GetNamespace(), catalogSourceRegistryPodSynced)
	Expect(err).ToNot(HaveOccurred())

	// Generate a Subscription
	subName := genName("kiali-")
	cleanUpSubscriptionFn := createSubscriptionForCatalog(crc, catsrc.GetNamespace(), subName, catsrc.GetName(), "kiali", stableChannel, "", operatorsv1alpha1.ApprovalAutomatic)
	defer cleanUpSubscriptionFn()

	sub, err := fetchSubscription(crc, catsrc.GetNamespace(), subName, subscriptionHasInstallPlanChecker)
	Expect(err).ToNot(HaveOccurred())

	// Wait for the expected InstallPlan's execution to either fail or succeed
	ipName := sub.Status.InstallPlanRef.Name
	ip, err := waitForInstallPlan(crc, ipName, sub.GetNamespace(), buildInstallPlanPhaseCheckFunc(operatorsv1alpha1.InstallPlanPhaseFailed, operatorsv1alpha1.InstallPlanPhaseComplete))
	Expect(err).ToNot(HaveOccurred())
	Expect(operatorsv1alpha1.InstallPlanPhaseComplete).To(Equal(ip.Status.Phase), "InstallPlan not complete")

	// Ensure the InstallPlan contains the steps resolved from the bundle image
	operatorName := "kiali-operator"
	expectedSteps := map[registry.ResourceKey]struct{}{
		{Name: operatorName, Kind: "ClusterServiceVersion"}:                                  {},
		{Name: "kialis.kiali.io", Kind: "CustomResourceDefinition"}:                          {},
		{Name: "monitoringdashboards.monitoring.kiali.io", Kind: "CustomResourceDefinition"}: {},
		{Name: operatorName, Kind: "ServiceAccount"}:                                         {},
		{Name: operatorName, Kind: "ClusterRole"}:                                            {},
		{Name: operatorName, Kind: "ClusterRoleBinding"}:                                     {},
	}
	Expect(ip.Status.Plan).To(HaveLen(len(expectedSteps)), "number of expected steps does not match installed: %v", ip.Status.Plan)

	for _, step := range ip.Status.Plan {
		key := registry.ResourceKey{
			Name: step.Resource.Name,
			Kind: step.Resource.Kind,
		}
		// Prefix match: generated resource names may carry a hash suffix.
		for expected := range expectedSteps {
			if strings.HasPrefix(key.Name, expected.Name) && key.Kind == expected.Kind {
				delete(expectedSteps, expected)
			}
		}
	}
	Expect(expectedSteps).To(HaveLen(0), "Actual resource steps do not match expected: %#v", expectedSteps)

	// Ensure that all the steps have a configmap based reference: each step's
	// manifest field must deserialize to an unpacked-bundle ConfigMap reference
	// rather than an inlined resource manifest.
	for _, step := range ip.Status.Plan {
		manifest := step.Resource.Manifest
		var ref catalog.UnpackedBundleReference
		err := json.Unmarshal([]byte(manifest), &ref)
		Expect(err).ToNot(HaveOccurred())
		Expect(ref.Kind).To(Equal("ConfigMap"))
	}
})
// Verifies InstallPlan attenuation under an OperatorGroup-scoped serviceaccount
// whose bound ClusterRole carries no rules: a plan containing only a CRD step
// still reaches Complete, while a second plan that must create a Service (not
// permitted to the SA) ends in Failed.
It("limits installed resources if the scoped serviceaccount has no permissions", func() {
	By("creating a scoped serviceaccount specified in the operatorgroup")
	ns, err := c.KubernetesInterface().CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: genName("ns-"),
		},
	}, metav1.CreateOptions{})
	Expect(err).ToNot(HaveOccurred())
	defer c.KubernetesInterface().CoreV1().Namespaces().Delete(context.Background(), ns.GetName(), metav1.DeleteOptions{})
	// create SA
	sa := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Name:      genName("sa-"),
			Namespace: ns.GetName(),
		},
	}
	_, err = c.KubernetesInterface().CoreV1().ServiceAccounts(ns.GetName()).Create(context.Background(), sa, metav1.CreateOptions{})
	Expect(err).ToNot(HaveOccurred())
	// Create token secret for the serviceaccount
	_, cleanupSE := newTokenSecret(c, ns.GetName(), sa.GetName())
	defer cleanupSE()
	// role has no explicit permissions
	role := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name: genName("role-"),
		},
		Rules: []rbacv1.PolicyRule{},
	}
	// bind role to SA
	rb := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: genName("rb-"),
		},
		RoleRef: rbacv1.RoleRef{
			Name:     role.GetName(),
			Kind:     "ClusterRole",
			APIGroup: "rbac.authorization.k8s.io",
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      sa.GetName(),
				APIGroup:  "",
				Namespace: sa.GetNamespace(),
			},
		},
	}
	_, err = c.KubernetesInterface().RbacV1().ClusterRoleBindings().Create(context.Background(), rb, metav1.CreateOptions{})
	Expect(err).ToNot(HaveOccurred())
	// NOTE(review): unlike the sibling test below, the ClusterRole itself is
	// never Created via the API here (only the binding is), so this deferred
	// delete is a no-op. The SA ends up with no permissions either way —
	// confirm whether the omission is intentional.
	defer c.KubernetesInterface().RbacV1().ClusterRoles().Delete(context.Background(), role.GetName(), metav1.DeleteOptions{})
	// create operator group referencing the SA
	og := &operatorsv1.OperatorGroup{
		ObjectMeta: metav1.ObjectMeta{
			Name:      genName("og-"),
			Namespace: ns.GetName(),
		},
		Spec: operatorsv1.OperatorGroupSpec{
			ServiceAccountName: sa.GetName(),
		},
	}
	_, err = crc.OperatorsV1().OperatorGroups(ns.GetName()).Create(context.Background(), og, metav1.CreateOptions{})
	Expect(err).ToNot(HaveOccurred())
	// Wait for the OperatorGroup to be synced and have a status.ServiceAccountRef
	// before moving on. Otherwise the catalog operator treats it as an invalid OperatorGroup
	// and the InstallPlan is resynced
	Eventually(func() (*corev1.ObjectReference, error) {
		outOG, err := crc.OperatorsV1().OperatorGroups(ns.GetName()).Get(context.Background(), og.Name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		ctx.Ctx().Logf("[DEBUG] Operator Group Status: %+v\n", outOG.Status)
		return outOG.Status.ServiceAccountRef, nil
	}).ShouldNot(BeNil())
	// A minimal namespaced CRD used as the install payload for the first plan.
	crd := apiextensionsv1.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Name: "ins" + ".cluster.com",
		},
		TypeMeta: metav1.TypeMeta{
			Kind:       "CustomResourceDefinition",
			APIVersion: "v1",
		},
		Spec: apiextensionsv1.CustomResourceDefinitionSpec{
			Group: "cluster.com",
			Names: apiextensionsv1.CustomResourceDefinitionNames{
				Plural:   "ins",
				Singular: "ins",
				Kind:     "ins",
				ListKind: "ins" + "list",
			},
			Scope: apiextensionsv1.NamespaceScoped,
			Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
				{
					Name:    "v1alpha1",
					Served:  true,
					Storage: true,
					Schema: &apiextensionsv1.CustomResourceValidation{
						OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{
							Type:        "object",
							Description: "my crd schema",
						},
					},
				},
			},
		},
	}
	// Defer CRD clean up
	defer func() {
		Eventually(func() error {
			return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), crd.GetName(), metav1.DeleteOptions{}))
		}).Should(Succeed())
	}()
	// Serialize the CRD into a manifest string for the InstallPlan step.
	scheme := runtime.NewScheme()
	Expect(apiextensionsv1.AddToScheme(scheme)).To(Succeed())
	var crdManifest bytes.Buffer
	Expect(k8sjson.NewSerializer(k8sjson.DefaultMetaFactory, scheme, scheme, false).Encode(&crd, &crdManifest)).To(Succeed())
	By("using the OLM client to create the CRD")
	plan := &operatorsv1alpha1.InstallPlan{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns.GetName(),
			Name:      genName("ip-"),
		},
		Spec: operatorsv1alpha1.InstallPlanSpec{
			Approval:                   operatorsv1alpha1.ApprovalAutomatic,
			Approved:                   true,
			ClusterServiceVersionNames: []string{},
		},
	}
	Expect(ctx.Ctx().Client().Create(context.Background(), plan)).To(Succeed())
	// Hand-craft the plan's status (normally set by the resolver) so that the
	// catalog operator executes a single CRD step under the attenuated SA.
	plan.Status = operatorsv1alpha1.InstallPlanStatus{
		AttenuatedServiceAccountRef: &corev1.ObjectReference{
			Name:      sa.GetName(),
			Namespace: sa.GetNamespace(),
			Kind:      "ServiceAccount",
		},
		Phase:          operatorsv1alpha1.InstallPlanPhaseInstalling,
		CatalogSources: []string{},
		Plan: []*operatorsv1alpha1.Step{
			{
				Status: operatorsv1alpha1.StepStatusUnknown,
				Resource: operatorsv1alpha1.StepResource{
					Name:     crd.GetName(),
					Version:  "v1",
					Kind:     "CustomResourceDefinition",
					Manifest: crdManifest.String(),
				},
			},
		},
	}
	Expect(ctx.Ctx().Client().Status().Update(context.Background(), plan)).To(Succeed())
	key := client.ObjectKeyFromObject(plan)
	Eventually(func() (*operatorsv1alpha1.InstallPlan, error) {
		return plan, ctx.Ctx().Client().Get(context.Background(), key, plan)
	}).Should(HavePhase(operatorsv1alpha1.InstallPlanPhaseComplete))
	// delete installplan, then create one with an additional resource that the SA does not have permissions to create
	// expect installplan to fail
	By("failing to install resources that are not explicitly allowed in the SA")
	err = crc.OperatorsV1alpha1().InstallPlans(ns.GetName()).Delete(context.Background(), plan.GetName(), metav1.DeleteOptions{})
	Expect(err).ToNot(HaveOccurred())
	service := &corev1.Service{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Service",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns.GetName(),
			Name:      "test-service",
		},
		Spec: corev1.ServiceSpec{
			Type: corev1.ServiceTypeClusterIP,
			Ports: []corev1.ServicePort{
				{
					Port: 12345,
				},
			},
		},
	}
	Expect(corev1.AddToScheme(scheme)).To(Succeed())
	var manifest bytes.Buffer
	Expect(k8sjson.NewSerializer(k8sjson.DefaultMetaFactory, scheme, scheme, false).Encode(service, &manifest)).To(Succeed())
	newPlan := &operatorsv1alpha1.InstallPlan{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns.GetName(),
			Name:      genName("ip-"),
		},
		Spec: operatorsv1alpha1.InstallPlanSpec{
			Approval:                   operatorsv1alpha1.ApprovalAutomatic,
			Approved:                   true,
			ClusterServiceVersionNames: []string{},
		},
	}
	Expect(ctx.Ctx().Client().Create(context.Background(), newPlan)).To(Succeed())
	newPlan.Status = operatorsv1alpha1.InstallPlanStatus{
		StartTime: &metav1.Time{Time: time.Unix(0, 0)}, // disable retries
		AttenuatedServiceAccountRef: &corev1.ObjectReference{
			Name:      sa.GetName(),
			Namespace: sa.GetNamespace(),
			Kind:      "ServiceAccount",
		},
		Phase:          operatorsv1alpha1.InstallPlanPhaseInstalling,
		CatalogSources: []string{},
		Plan: []*operatorsv1alpha1.Step{
			{
				Status: operatorsv1alpha1.StepStatusUnknown,
				Resource: operatorsv1alpha1.StepResource{
					Name:     service.Name,
					Version:  "v1",
					Kind:     "Service",
					Manifest: manifest.String(),
				},
			},
		},
	}
	Expect(ctx.Ctx().Client().Status().Update(context.Background(), newPlan)).To(Succeed())
	newKey := client.ObjectKeyFromObject(newPlan)
	// The Service step runs under the scoped SA, which cannot create Services,
	// so the plan must land in Failed.
	Eventually(func() (*operatorsv1alpha1.InstallPlan, error) {
		return newPlan, ctx.Ctx().Client().Get(context.Background(), newKey, newPlan)
	}).Should(HavePhase(operatorsv1alpha1.InstallPlanPhaseFailed))
	Expect(client.IgnoreNotFound(ctx.Ctx().Client().Delete(context.Background(), &crd))).To(Succeed())
	Eventually(func() error {
		return client.IgnoreNotFound(ctx.Ctx().Client().Delete(context.Background(), ns))
	}, timeout, interval).Should(Succeed(), "could not delete Namespace")
})
// Verifies that the catalog operator picks the correct client per resource
// kind when executing an InstallPlan under a scoped serviceaccount: the SA's
// role deliberately allows only get/list/watch on CRDs (no create), yet a plan
// containing both a CSV and a CRD completes — so the CRD must have been
// applied with OLM's own client — and a follow-up CSV-only plan also
// completes using the scoped client.
It("uses the correct client when installing resources from an installplan", func() {
	// Fixed typo in the step description ("specifified" -> "specified").
	By("creating a scoped serviceaccount specified in the operatorgroup")
	ns, err := c.KubernetesInterface().CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: genName("ns-"),
		},
	}, metav1.CreateOptions{})
	Expect(err).ToNot(HaveOccurred())
	defer c.KubernetesInterface().CoreV1().Namespaces().Delete(context.Background(), ns.GetName(), metav1.DeleteOptions{})
	// create SA
	sa := &corev1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Name:      genName("sa-"),
			Namespace: ns.GetName(),
		},
	}
	_, err = c.KubernetesInterface().CoreV1().ServiceAccounts(ns.GetName()).Create(context.Background(), sa, metav1.CreateOptions{})
	Expect(err).ToNot(HaveOccurred())
	// Create token secret for the serviceaccount
	_, cleanupSE := newTokenSecret(c, ns.GetName(), sa.GetName())
	defer cleanupSE()
	// see https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/scoped-operator-install.md
	role := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name: genName("role-"),
		},
		Rules: []rbacv1.PolicyRule{
			{
				APIGroups: []string{"operators.coreos.com"},
				Resources: []string{"subscriptions", "clusterserviceversions"},
				Verbs:     []string{"get", "create", "update", "patch"},
			},
			{
				APIGroups: []string{""},
				Resources: []string{"services", "serviceaccounts", "configmaps", "endpoints", "events", "persistentvolumeclaims", "pods"},
				Verbs:     []string{"create", "delete", "get", "list", "update", "patch", "watch"},
			},
			{
				APIGroups: []string{"apps"},
				Resources: []string{"deployments", "replicasets", "statefulsets"},
				Verbs:     []string{"list", "watch", "get", "create", "update", "patch", "delete"},
			},
			{
				// ability to get and list CRDs, but not create CRDs
				APIGroups: []string{"apiextensions.k8s.io"},
				Resources: []string{"customresourcedefinitions"},
				Verbs:     []string{"get", "list", "watch"},
			},
		},
	}
	_, err = c.KubernetesInterface().RbacV1().ClusterRoles().Create(context.Background(), role, metav1.CreateOptions{})
	Expect(err).ToNot(HaveOccurred())
	// bind role to SA
	rb := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: genName("rb-"),
		},
		RoleRef: rbacv1.RoleRef{
			Name:     role.GetName(),
			Kind:     "ClusterRole",
			APIGroup: "rbac.authorization.k8s.io",
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      sa.GetName(),
				APIGroup:  "",
				Namespace: sa.GetNamespace(),
			},
		},
	}
	_, err = c.KubernetesInterface().RbacV1().ClusterRoleBindings().Create(context.Background(), rb, metav1.CreateOptions{})
	Expect(err).ToNot(HaveOccurred())
	defer c.KubernetesInterface().RbacV1().ClusterRoles().Delete(context.Background(), role.GetName(), metav1.DeleteOptions{})
	// create operator group referencing the SA
	og := &operatorsv1.OperatorGroup{
		ObjectMeta: metav1.ObjectMeta{
			Name:      genName("og-"),
			Namespace: ns.GetName(),
		},
		Spec: operatorsv1.OperatorGroupSpec{
			ServiceAccountName: sa.GetName(),
		},
	}
	_, err = crc.OperatorsV1().OperatorGroups(ns.GetName()).Create(context.Background(), og, metav1.CreateOptions{})
	Expect(err).ToNot(HaveOccurred())
	// Wait for the OperatorGroup to be synced and have a status.ServiceAccountRef
	// before moving on. Otherwise the catalog operator treats it as an invalid OperatorGroup
	// and the InstallPlan is resynced
	Eventually(func() (*corev1.ObjectReference, error) {
		outOG, err := crc.OperatorsV1().OperatorGroups(ns.GetName()).Get(context.Background(), og.Name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		ctx.Ctx().Logf("[DEBUG] Operator Group Status: %+v\n", outOG.Status)
		return outOG.Status.ServiceAccountRef, nil
	}).ShouldNot(BeNil())
	By("using the OLM client to install CRDs from the installplan and the scoped client for other resources")
	crd := apiextensionsv1.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Name: "ins" + ".cluster.com",
		},
		TypeMeta: metav1.TypeMeta{
			Kind:       "CustomResourceDefinition",
			APIVersion: "v1",
		},
		Spec: apiextensionsv1.CustomResourceDefinitionSpec{
			Group: "cluster.com",
			Names: apiextensionsv1.CustomResourceDefinitionNames{
				Plural:   "ins",
				Singular: "ins",
				Kind:     "ins",
				ListKind: "ins" + "list",
			},
			Scope: apiextensionsv1.NamespaceScoped,
			Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
				{
					Name:    "v1alpha1",
					Served:  true,
					Storage: true,
					Schema: &apiextensionsv1.CustomResourceValidation{
						OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{
							Type:        "object",
							Description: "my crd schema",
						},
					},
				},
			},
		},
	}
	csv := newCSV("stable", ns.GetName(), "", semver.MustParse("0.1.0"), nil, nil, nil)
	// Defer CRD clean up
	defer func() {
		Eventually(func() error {
			return client.IgnoreNotFound(ctx.Ctx().KubeClient().ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Delete(context.Background(), crd.GetName(), metav1.DeleteOptions{}))
		}).Should(Succeed())
		Eventually(func() error {
			return client.IgnoreNotFound(ctx.Ctx().Client().Delete(context.Background(), &csv))
		}).Should(Succeed())
	}()
	// Serialize the CRD and CSV into manifest strings for the InstallPlan steps.
	scheme := runtime.NewScheme()
	Expect(apiextensionsv1.AddToScheme(scheme)).To(Succeed())
	Expect(operatorsv1alpha1.AddToScheme(scheme)).To(Succeed())
	var crdManifest, csvManifest bytes.Buffer
	Expect(k8sjson.NewSerializer(k8sjson.DefaultMetaFactory, scheme, scheme, false).Encode(&crd, &crdManifest)).To(Succeed())
	Expect(k8sjson.NewSerializer(k8sjson.DefaultMetaFactory, scheme, scheme, false).Encode(&csv, &csvManifest)).To(Succeed())
	plan := &operatorsv1alpha1.InstallPlan{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns.GetName(),
			Name:      genName("ip-"),
		},
		Spec: operatorsv1alpha1.InstallPlanSpec{
			Approval:                   operatorsv1alpha1.ApprovalAutomatic,
			Approved:                   true,
			ClusterServiceVersionNames: []string{csv.GetName()},
		},
	}
	Expect(ctx.Ctx().Client().Create(context.Background(), plan)).To(Succeed())
	// Hand-craft the plan's status (normally set by the resolver) with a CSV
	// step and a CRD step; the CRD step can only succeed via OLM's own client.
	plan.Status = operatorsv1alpha1.InstallPlanStatus{
		AttenuatedServiceAccountRef: &corev1.ObjectReference{
			Name:      sa.GetName(),
			Namespace: sa.GetNamespace(),
			Kind:      "ServiceAccount",
		},
		Phase:          operatorsv1alpha1.InstallPlanPhaseInstalling,
		CatalogSources: []string{},
		Plan: []*operatorsv1alpha1.Step{
			{
				Status: operatorsv1alpha1.StepStatusUnknown,
				Resource: operatorsv1alpha1.StepResource{
					Name:     csv.GetName(),
					Version:  "v1alpha1",
					Kind:     "ClusterServiceVersion",
					Manifest: csvManifest.String(),
				},
			},
			{
				Status: operatorsv1alpha1.StepStatusUnknown,
				Resource: operatorsv1alpha1.StepResource{
					Name:     crd.GetName(),
					Version:  "v1",
					Kind:     "CustomResourceDefinition",
					Manifest: crdManifest.String(),
				},
			},
		},
	}
	Expect(ctx.Ctx().Client().Status().Update(context.Background(), plan)).To(Succeed())
	key := client.ObjectKeyFromObject(plan)
	Eventually(func() (*operatorsv1alpha1.InstallPlan, error) {
		return plan, ctx.Ctx().Client().Get(context.Background(), key, plan)
	}).Should(HavePhase(operatorsv1alpha1.InstallPlanPhaseComplete))
	// delete installplan, and create one with just a CSV resource which should succeed
	By("installing additional resources that are allowed in the SA")
	err = crc.OperatorsV1alpha1().InstallPlans(ns.GetName()).Delete(context.Background(), plan.GetName(), metav1.DeleteOptions{})
	Expect(err).ToNot(HaveOccurred())
	newPlan := &operatorsv1alpha1.InstallPlan{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ns.GetName(),
			Name:      genName("ip-"),
		},
		Spec: operatorsv1alpha1.InstallPlanSpec{
			Approval:                   operatorsv1alpha1.ApprovalAutomatic,
			Approved:                   true,
			ClusterServiceVersionNames: []string{csv.GetName()},
		},
	}
	Expect(ctx.Ctx().Client().Create(context.Background(), newPlan)).To(Succeed())
	newPlan.Status = operatorsv1alpha1.InstallPlanStatus{
		AttenuatedServiceAccountRef: &corev1.ObjectReference{
			Name:      sa.GetName(),
			Namespace: sa.GetNamespace(),
			Kind:      "ServiceAccount",
		},
		Phase:          operatorsv1alpha1.InstallPlanPhaseInstalling,
		CatalogSources: []string{},
		Plan: []*operatorsv1alpha1.Step{
			{
				Status: operatorsv1alpha1.StepStatusUnknown,
				Resource: operatorsv1alpha1.StepResource{
					Name:     csv.GetName(),
					Version:  "v1alpha1",
					Kind:     "ClusterServiceVersion",
					Manifest: csvManifest.String(),
				},
			},
		},
	}
	Expect(ctx.Ctx().Client().Status().Update(context.Background(), newPlan)).To(Succeed())
	newKey := client.ObjectKeyFromObject(newPlan)
	Eventually(func() (*operatorsv1alpha1.InstallPlan, error) {
		return newPlan, ctx.Ctx().Client().Get(context.Background(), newKey, newPlan)
	}).Should(HavePhase(operatorsv1alpha1.InstallPlanPhaseComplete))
})
})
// checkInstallPlanFunc is a predicate over a fetched InstallPlan, used by the
// polling helpers to decide when to stop waiting.
type checkInstallPlanFunc func(fip *operatorsv1alpha1.InstallPlan) bool
// validateCRDVersions fetches the named CRD and asserts that its spec.versions
// exactly matches expectedVersions: same count, every served version present in
// the set, and no leftovers. The expectedVersions map is consumed (entries are
// deleted) as versions are matched.
func validateCRDVersions(t GinkgoTInterface, c operatorclient.ClientInterface, name string, expectedVersions map[string]struct{}) {
	// Retrieve CRD information
	crd, err := c.ApiextensionsInterface().ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), name, metav1.GetOptions{})
	require.NoError(t, err)
	// Fixed garbled double negative in the assertion message ("don't not match").
	require.Equal(t, len(expectedVersions), len(crd.Spec.Versions), "number of CRD versions doesn't match installed")
	for _, version := range crd.Spec.Versions {
		_, ok := expectedVersions[version.Name]
		require.True(t, ok, "couldn't find %v in expected versions: %#v", version.Name, expectedVersions)
		// Remove the entry from the expected steps set (to ensure no duplicates in resolved plan)
		delete(expectedVersions, version.Name)
	}
	// Should have removed every matching version
	require.Equal(t, 0, len(expectedVersions), "Actual CRD versions do not match expected")
}
// buildInstallPlanPhaseCheckFunc returns a predicate that reports whether an
// InstallPlan is in any one of the given phases, logging the observed phase on
// every invocation.
func buildInstallPlanPhaseCheckFunc(phases ...operatorsv1alpha1.InstallPlanPhase) checkInstallPlanFunc {
	return func(fip *operatorsv1alpha1.InstallPlan) bool {
		ctx.Ctx().Logf("installplan %v is in phase %v", fip.GetName(), fip.Status.Phase)
		for _, p := range phases {
			if fip.Status.Phase == p {
				return true
			}
		}
		return false
	}
}
// buildInstallPlanCleanupFunc returns a cleanup closure that deletes every
// ClusterServiceVersion referenced by the InstallPlan's resolved steps,
// deletes the InstallPlan itself, and then waits for the InstallPlan to
// disappear. Errors are printed rather than failing the test, since cleanup
// is best-effort.
func buildInstallPlanCleanupFunc(crc versioned.Interface, namespace string, installPlan *operatorsv1alpha1.InstallPlan) cleanupFunc {
	return func() {
		opts := metav1.DeleteOptions{}
		// Remove any CSVs the plan installed.
		for _, step := range installPlan.Status.Plan {
			if step.Resource.Kind != operatorsv1alpha1.ClusterServiceVersionKind {
				continue
			}
			if err := crc.OperatorsV1alpha1().ClusterServiceVersions(namespace).Delete(context.Background(), step.Resource.Name, opts); err != nil {
				fmt.Println(err)
			}
		}
		// Remove the InstallPlan and wait until the API server no longer returns it.
		if err := crc.OperatorsV1alpha1().InstallPlans(namespace).Delete(context.Background(), installPlan.GetName(), opts); err != nil {
			fmt.Println(err)
		}
		if err := waitForDelete(func() error {
			_, err := crc.OperatorsV1alpha1().InstallPlans(namespace).Get(context.Background(), installPlan.GetName(), metav1.GetOptions{})
			return err
		}); err != nil {
			fmt.Println(err)
		}
	}
}
// fetchInstallPlan polls for the named InstallPlan until checkPhase is
// satisfied; it is a thin convenience alias for fetchInstallPlanWithNamespace.
func fetchInstallPlan(t GinkgoTInterface, c versioned.Interface, name string, namespace string, checkPhase checkInstallPlanFunc) (*operatorsv1alpha1.InstallPlan, error) {
	return fetchInstallPlanWithNamespace(t, c, name, namespace, checkPhase)
}
// fetchInstallPlanWithNamespace polls the given namespace for the named
// InstallPlan until checkPhase returns true, returning the last fetched
// object along with any polling error. Any Get error (including NotFound)
// aborts the poll immediately.
func fetchInstallPlanWithNamespace(t GinkgoTInterface, c versioned.Interface, name string, namespace string, checkPhase checkInstallPlanFunc) (*operatorsv1alpha1.InstallPlan, error) {
	var plan *operatorsv1alpha1.InstallPlan
	pollErr := wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		var getErr error
		plan, getErr = c.OperatorsV1alpha1().InstallPlans(namespace).Get(context.Background(), name, metav1.GetOptions{})
		if getErr != nil || plan == nil {
			return false, getErr
		}
		return checkPhase(plan), nil
	})
	return plan, pollErr
}
// waitForInstallPlan polls for the named InstallPlan until checkPhase is
// satisfied. Unlike fetchInstallPlanWithNamespace, it tolerates the
// InstallPlan not existing yet: a NotFound error keeps the poll going rather
// than aborting it.
func waitForInstallPlan(c versioned.Interface, name string, namespace string, checkPhase checkInstallPlanFunc) (*operatorsv1alpha1.InstallPlan, error) {
	var fetchedInstallPlan *operatorsv1alpha1.InstallPlan
	var err error
	err = wait.Poll(pollInterval, pollDuration, func() (bool, error) {
		fetchedInstallPlan, err = c.OperatorsV1alpha1().InstallPlans(namespace).Get(context.Background(), name, metav1.GetOptions{})
		if err != nil {
			if apierrors.IsNotFound(err) {
				// Not created yet: keep polling. Previously checkPhase was
				// invoked with a nil InstallPlan here, which panics in check
				// funcs that dereference it (e.g. fip.GetName()).
				return false, nil
			}
			return false, err
		}
		return checkPhase(fetchedInstallPlan), nil
	})
	return fetchedInstallPlan, err
}
// newNginxInstallStrategy builds a deployment-based install strategy that runs
// a single container (image taken from the dummyImage flag) exposing port 80,
// labeled app=nginx, with the supplied namespaced and cluster-scoped
// permissions attached.
func newNginxInstallStrategy(name string, permissions []operatorsv1alpha1.StrategyDeploymentPermissions, clusterPermissions []operatorsv1alpha1.StrategyDeploymentPermissions) operatorsv1alpha1.NamedInstallStrategy {
	podSpec := corev1.PodSpec{
		Containers: []corev1.Container{
			{
				Name:            genName("nginx"),
				Image:           *dummyImage,
				Ports:           []corev1.ContainerPort{{ContainerPort: 80}},
				ImagePullPolicy: corev1.PullIfNotPresent,
			},
		},
	}
	deployment := operatorsv1alpha1.StrategyDeploymentSpec{
		Name: name,
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "nginx"},
			},
			Replicas: &singleInstance,
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"app": "nginx"},
				},
				Spec: podSpec,
			},
		},
	}
	return operatorsv1alpha1.NamedInstallStrategy{
		StrategyName: operatorsv1alpha1.InstallStrategyNameDeployment,
		StrategySpec: operatorsv1alpha1.StrategyDetailsDeployment{
			DeploymentSpecs:    []operatorsv1alpha1.StrategyDeploymentSpec{deployment},
			Permissions:        permissions,
			ClusterPermissions: clusterPermissions,
		},
	}
}
// newCRD returns a namespace-scoped CustomResourceDefinition in the
// cluster.com group with a single served+storage v1alpha1 version; all of its
// names (plural, singular, kind, listkind) are derived from the given plural.
func newCRD(plural string) apiextensions.CustomResourceDefinition {
	version := apiextensions.CustomResourceDefinitionVersion{
		Name:    "v1alpha1",
		Served:  true,
		Storage: true,
		Schema: &apiextensions.CustomResourceValidation{
			OpenAPIV3Schema: &apiextensions.JSONSchemaProps{
				Type:        "object",
				Description: "my crd schema",
			},
		},
	}
	return apiextensions.CustomResourceDefinition{
		ObjectMeta: metav1.ObjectMeta{
			Name: plural + ".cluster.com",
		},
		Spec: apiextensions.CustomResourceDefinitionSpec{
			Group:    "cluster.com",
			Versions: []apiextensions.CustomResourceDefinitionVersion{version},
			Names: apiextensions.CustomResourceDefinitionNames{
				Plural:   plural,
				Singular: plural,
				Kind:     plural,
				ListKind: plural + "list",
			},
			Scope: apiextensions.NamespaceScoped,
		},
	}
}
func newCSV(name, namespace, replaces string, version semver.Version, owned []apiextensions.CustomResourceDefinition, required []apiextensions.CustomResourceDefinition, namedStrategy *operatorsv1alpha1.NamedInstallStrategy) operatorsv1alpha1.ClusterServiceVersion {
csvType = metav1.TypeMeta{
Kind: operatorsv1alpha1.ClusterServiceVersionKind,
APIVersion: operatorsv1alpha1.SchemeGroupVersion.String(),
}
// set a simple default strategy if none given
var strategy operatorsv1alpha1.NamedInstallStrategy
if namedStrategy == nil {
strategy = newNginxInstallStrategy(genName("dep"), nil, nil)
} else {
strategy = *namedStrategy
}
csv := operatorsv1alpha1.ClusterServiceVersion{
TypeMeta: csvType,
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: operatorsv1alpha1.ClusterServiceVersionSpec{
Replaces: replaces,
Version: opver.OperatorVersion{Version: version},
MinKubeVersion: "0.0.0",
InstallModes: []operatorsv1alpha1.InstallMode{
{
Type: operatorsv1alpha1.InstallModeTypeOwnNamespace,
Supported: true,
},
{
Type: operatorsv1alpha1.InstallModeTypeSingleNamespace,
Supported: true,
},
{
Type: operatorsv1alpha1.InstallModeTypeMultiNamespace,
Supported: true,
},
{
Type: operatorsv1alpha1.InstallModeTypeAllNamespaces,
Supported: true,
},
},
InstallStrategy: strategy,
CustomResourceDefinitions: operatorsv1alpha1.CustomResourceDefinitions{
Owned: nil,
Required: nil,
},
},
}
// Populate owned and required
for _, crd := range owned {
crdVersion := "v1alpha1"
for _, v := range crd.Spec.Versions {
if v.Served && v.Storage {
crdVersion = v.Name
break
}
}
desc := operatorsv1alpha1.CRDDescription{
Name: crd.GetName(),
Version: crdVersion,
Kind: crd.Spec.Names.Plural,
DisplayName: crd.GetName(),
Description: crd.GetName(),
}
csv.Spec.CustomResourceDefinitions.Owned = append(csv.Spec.CustomResourceDefinitions.Owned, desc)
}
for _, crd := range required {
crdVersion := "v1alpha1"
for _, v := range crd.Spec.Versions {
if v.Served && v.Storage {
crdVersion = v.Name
break
}
}
desc := operatorsv1alpha1.CRDDescription{
Name: crd.GetName(),
Version: crdVersion,
Kind: crd.Spec.Names.Plural,
DisplayName: crd.GetName(),
Description: crd.GetName(),
}
csv.Spec.CustomResourceDefinitions.Required = append(csv.Spec.CustomResourceDefinitions.Required, desc)
}
return csv
}
// newInstallPlanWithDummySteps constructs an approved, automatic InstallPlan
// whose status is pre-populated with the given phase and a single unresolved
// dummy step ("Foo"/"bar") sourced from a catalog named "catalog".
func newInstallPlanWithDummySteps(name, namespace string, phase operatorsv1alpha1.InstallPlanPhase) *operatorsv1alpha1.InstallPlan {
	dummyStep := &operatorsv1alpha1.Step{
		Resource: operatorsv1alpha1.StepResource{
			CatalogSource:          "catalog",
			CatalogSourceNamespace: namespace,
			Group:                  "",
			Version:                "v1",
			Kind:                   "Foo",
			Name:                   "bar",
		},
		Status: operatorsv1alpha1.StepStatusUnknown,
	}
	plan := &operatorsv1alpha1.InstallPlan{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: operatorsv1alpha1.InstallPlanSpec{
			ClusterServiceVersionNames: []string{"foobar"},
			Approval:                   operatorsv1alpha1.ApprovalAutomatic,
			Approved:                   true,
		},
	}
	plan.Status = operatorsv1alpha1.InstallPlanStatus{
		CatalogSources: []string{"catalog"},
		Phase:          phase,
		Plan:           []*operatorsv1alpha1.Step{dummyStep},
	}
	return plan
}
// hasCondition reports whether ip carries a status condition whose type,
// message, and status all match expectedCondition.
func hasCondition(ip *operatorsv1alpha1.InstallPlan, expectedCondition operatorsv1alpha1.InstallPlanCondition) bool {
	for i := range ip.Status.Conditions {
		c := ip.Status.Conditions[i]
		if c.Type != expectedCondition.Type {
			continue
		}
		if c.Message == expectedCondition.Message && c.Status == expectedCondition.Status {
			return true
		}
	}
	return false
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package importintotest
import (
"context"
"fmt"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/docker/go-units"
"github.com/fsouza/fake-gcs-server/fakestorage"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/br/pkg/utils"
"github.com/pingcap/tidb/disttask/framework/proto"
"github.com/pingcap/tidb/disttask/framework/scheduler"
"github.com/pingcap/tidb/disttask/framework/storage"
"github.com/pingcap/tidb/disttask/importinto"
"github.com/pingcap/tidb/executor"
"github.com/pingcap/tidb/executor/importer"
"github.com/pingcap/tidb/parser/auth"
"github.com/pingcap/tidb/planner/core"
"github.com/pingcap/tidb/session"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/util/dbterror/exeerrors"
)
// compareJobInfoWithoutTime asserts that one row of SHOW IMPORT JOB(S) output
// matches the expected JobInfo, skipping time-valued columns. Columns checked
// here: 0=job ID, 1=file location, 2=db.table, 3=table ID, 4=step, 5=status,
// 6=source file size, 7=imported rows, 8=error message, 12=created-by user.
func (s *mockGCSSuite) compareJobInfoWithoutTime(jobInfo *importer.JobInfo, row []interface{}) {
	s.Equal(strconv.Itoa(int(jobInfo.ID)), row[0])
	// Compare the file-location URLs structurally rather than as raw strings.
	urlExpected, err := url.Parse(jobInfo.Parameters.FileLocation)
	s.NoError(err)
	urlGot, err := url.Parse(fmt.Sprintf("%v", row[1]))
	s.NoError(err)
	// order of query parameters might change
	s.Equal(urlExpected.Query(), urlGot.Query())
	// then compare the remainder with the query strings stripped
	urlExpected.RawQuery, urlGot.RawQuery = "", ""
	s.Equal(urlExpected.String(), urlGot.String())
	s.Equal(utils.EncloseDBAndTable(jobInfo.TableSchema, jobInfo.TableName), row[2])
	s.Equal(strconv.Itoa(int(jobInfo.TableID)), row[3])
	s.Equal(jobInfo.Step, row[4])
	s.Equal(jobInfo.Status, row[5])
	s.Equal(units.HumanSize(float64(jobInfo.SourceFileSize)), row[6])
	if jobInfo.Summary == nil {
		// no summary yet: the column renders as the literal string "<nil>"
		s.Equal("<nil>", row[7].(string))
	} else {
		s.Equal(strconv.Itoa(int(jobInfo.Summary.ImportedRows)), row[7])
	}
	// error message is matched as a regexp to tolerate variable detail
	s.Regexp(jobInfo.ErrorMessage, row[8])
	s.Equal(jobInfo.CreatedBy, row[12])
}
// TestShowJob exercises SHOW IMPORT JOB(S) visibility and redaction rules:
// a non-privileged user sees only its own jobs, root sees all, granting SUPER
// extends visibility to all jobs, and credentials embedded in the job's file
// location (and in SHOW PROCESSLIST output) are redacted. It also observes a
// running two-subtask import mid-flight via a scheduler sync failpoint.
func (s *mockGCSSuite) TestShowJob() {
	s.tk.MustExec("delete from mysql.tidb_import_jobs")
	s.prepareAndUseDB("test_show_job")
	s.tk.MustExec("CREATE TABLE t1 (i INT PRIMARY KEY);")
	s.tk.MustExec("CREATE TABLE t2 (i INT PRIMARY KEY);")
	s.tk.MustExec("CREATE TABLE t3 (i INT PRIMARY KEY);")
	s.server.CreateObject(fakestorage.Object{
		ObjectAttrs: fakestorage.ObjectAttrs{BucketName: "test-show-job", Name: "t.csv"},
		Content:     []byte("1\n2"),
	})
	// Restore the root session at the end, since the test switches users.
	s.T().Cleanup(func() {
		_ = s.tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost"}, nil, nil, nil)
	})
	// create 2 user which don't have system table privileges
	s.tk.MustExec(`DROP USER IF EXISTS 'test_show_job1'@'localhost';`)
	s.tk.MustExec(`CREATE USER 'test_show_job1'@'localhost';`)
	s.tk.MustExec(`GRANT SELECT,UPDATE,INSERT,DELETE,ALTER on test_show_job.* to 'test_show_job1'@'localhost'`)
	s.tk.MustExec(`DROP USER IF EXISTS 'test_show_job2'@'localhost';`)
	s.tk.MustExec(`CREATE USER 'test_show_job2'@'localhost';`)
	s.tk.MustExec(`GRANT SELECT,UPDATE,INSERT,DELETE,ALTER on test_show_job.* to 'test_show_job2'@'localhost'`)
	do, err := session.GetDomain(s.store)
	s.NoError(err)
	tableID1 := do.MustGetTableID(s.T(), "test_show_job", "t1")
	tableID2 := do.MustGetTableID(s.T(), "test_show_job", "t2")
	tableID3 := do.MustGetTableID(s.T(), "test_show_job", "t3")
	// show non-exists job
	err = s.tk.QueryToErr("show import job 9999999999")
	s.ErrorIs(err, exeerrors.ErrLoadDataJobNotFound)
	// test show job by id using test_show_job1
	// Failpoints: record the last job/task IDs so the test can query them, and
	// force URL redaction so credential checks below are deterministic.
	s.enableFailpoint("github.com/pingcap/tidb/executor/importer/setLastImportJobID", `return(true)`)
	s.enableFailpoint("github.com/pingcap/tidb/disttask/framework/storage/testSetLastTaskID", "return(true)")
	s.enableFailpoint("github.com/pingcap/tidb/parser/ast/forceRedactURL", "return(true)")
	s.NoError(s.tk.Session().Auth(&auth.UserIdentity{Username: "test_show_job1", Hostname: "localhost"}, nil, nil, nil))
	result1 := s.tk.MustQuery(fmt.Sprintf(`import into t1 FROM 'gs://test-show-job/t.csv?access-key=aaaaaa&secret-access-key=bbbbbb&endpoint=%s'`,
		gcsEndpoint)).Rows()
	s.Len(result1, 1)
	s.tk.MustQuery("select * from t1").Check(testkit.Rows("1", "2"))
	rows := s.tk.MustQuery(fmt.Sprintf("show import job %d", importer.TestLastImportJobID.Load())).Rows()
	s.Len(rows, 1)
	s.Equal(result1, rows)
	// Expected job row: note access-key/secret-access-key are redacted to
	// "xxxxxx" in the stored file location.
	jobInfo := &importer.JobInfo{
		ID:          importer.TestLastImportJobID.Load(),
		TableSchema: "test_show_job",
		TableName:   "t1",
		TableID:     tableID1,
		CreatedBy:   "test_show_job1@localhost",
		Parameters: importer.ImportParameters{
			FileLocation: fmt.Sprintf(`gs://test-show-job/t.csv?access-key=xxxxxx&secret-access-key=xxxxxx&endpoint=%s`, gcsEndpoint),
			Format:       importer.DataFormatCSV,
		},
		SourceFileSize: 3,
		Status:         "finished",
		Step:           "",
		Summary: &importer.JobSummary{
			ImportedRows: 2,
		},
		ErrorMessage: "",
	}
	s.compareJobInfoWithoutTime(jobInfo, rows[0])
	// test show job by id using test_show_job2
	s.NoError(s.tk.Session().Auth(&auth.UserIdentity{Username: "test_show_job2", Hostname: "localhost"}, nil, nil, nil))
	result2 := s.tk.MustQuery(fmt.Sprintf(`import into t2 FROM 'gs://test-show-job/t.csv?endpoint=%s'`, gcsEndpoint)).Rows()
	s.tk.MustQuery("select * from t2").Check(testkit.Rows("1", "2"))
	rows = s.tk.MustQuery(fmt.Sprintf("show import job %d", importer.TestLastImportJobID.Load())).Rows()
	s.Len(rows, 1)
	s.Equal(result2, rows)
	jobInfo.ID = importer.TestLastImportJobID.Load()
	jobInfo.TableName = "t2"
	jobInfo.TableID = tableID2
	jobInfo.CreatedBy = "test_show_job2@localhost"
	jobInfo.Parameters.FileLocation = fmt.Sprintf(`gs://test-show-job/t.csv?endpoint=%s`, gcsEndpoint)
	s.compareJobInfoWithoutTime(jobInfo, rows[0])
	// test_show_job2 sees only its own job in the plural form.
	rows = s.tk.MustQuery("show import jobs").Rows()
	s.Len(rows, 1)
	s.Equal(result2, rows)
	// show import jobs with root
	checkJobsMatch := func(rows [][]interface{}) {
		s.GreaterOrEqual(len(rows), 2) // other cases may create import jobs
		var matched int
		for _, r := range rows {
			if r[0] == result1[0][0] {
				s.Equal(result1[0], r)
				matched++
			}
			if r[0] == result2[0][0] {
				s.Equal(result2[0], r)
				matched++
			}
		}
		s.Equal(2, matched)
	}
	s.NoError(s.tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost"}, nil, nil, nil))
	rows = s.tk.MustQuery("show import jobs").Rows()
	checkJobsMatch(rows)
	// show import job by id with root
	rows = s.tk.MustQuery(fmt.Sprintf("show import job %d", importer.TestLastImportJobID.Load())).Rows()
	s.Len(rows, 1)
	s.Equal(result2, rows)
	jobInfo.ID = importer.TestLastImportJobID.Load()
	jobInfo.TableName = "t2"
	jobInfo.TableID = tableID2
	jobInfo.CreatedBy = "test_show_job2@localhost"
	jobInfo.Parameters.FileLocation = fmt.Sprintf(`gs://test-show-job/t.csv?endpoint=%s`, gcsEndpoint)
	s.compareJobInfoWithoutTime(jobInfo, rows[0])
	// grant SUPER to test_show_job2, now it can see all jobs
	s.tk.MustExec(`GRANT SUPER on *.* to 'test_show_job2'@'localhost'`)
	s.NoError(s.tk.Session().Auth(&auth.UserIdentity{Username: "test_show_job2", Hostname: "localhost"}, nil, nil, nil))
	rows = s.tk.MustQuery("show import jobs").Rows()
	checkJobsMatch(rows)
	// show running jobs with 2 subtasks
	// The failpoint pauses the scheduler after each subtask; the goroutine
	// below inspects the job mid-import and unblocks via TestSyncChan.
	s.enableFailpoint("github.com/pingcap/tidb/disttask/framework/scheduler/syncAfterSubtaskFinish", `return(true)`)
	s.server.CreateObject(fakestorage.Object{
		ObjectAttrs: fakestorage.ObjectAttrs{BucketName: "test-show-job", Name: "t2.csv"},
		Content:     []byte("3\n4"),
	})
	wg := sync.WaitGroup{}
	wg.Add(1)
	go func() {
		defer wg.Done()
		// wait first subtask finish
		<-scheduler.TestSyncChan
		jobInfo = &importer.JobInfo{
			ID:          importer.TestLastImportJobID.Load(),
			TableSchema: "test_show_job",
			TableName:   "t3",
			TableID:     tableID3,
			CreatedBy:   "test_show_job2@localhost",
			Parameters: importer.ImportParameters{
				FileLocation: fmt.Sprintf(`gs://test-show-job/t*.csv?access-key=xxxxxx&secret-access-key=xxxxxx&endpoint=%s`, gcsEndpoint),
				Format:       importer.DataFormatCSV,
			},
			SourceFileSize: 6,
			Status:         "running",
			Step:           "importing",
			Summary: &importer.JobSummary{
				ImportedRows: 2,
			},
			ErrorMessage: "",
		}
		// Use a second session: s.tk is blocked executing the IMPORT INTO.
		tk2 := testkit.NewTestKit(s.T(), s.store)
		rows = tk2.MustQuery(fmt.Sprintf("show import job %d", importer.TestLastImportJobID.Load())).Rows()
		s.Len(rows, 1)
		s.compareJobInfoWithoutTime(jobInfo, rows[0])
		// show processlist, should be redacted too
		procRows := tk2.MustQuery("show full processlist").Rows()
		var got bool
		for _, r := range procRows {
			user := r[1].(string)
			sql := r[7].(string)
			if user == "test_show_job2" && strings.Contains(sql, "IMPORT INTO") {
				s.Contains(sql, "access-key=xxxxxx")
				s.Contains(sql, "secret-access-key=xxxxxx")
				s.NotContains(sql, "aaaaaa")
				s.NotContains(sql, "bbbbbb")
				got = true
			}
		}
		s.True(got)
		// resume the scheduler
		scheduler.TestSyncChan <- struct{}{}
		// wait second subtask finish
		<-scheduler.TestSyncChan
		rows = tk2.MustQuery(fmt.Sprintf("show import job %d", importer.TestLastImportJobID.Load())).Rows()
		s.Len(rows, 1)
		jobInfo.Summary.ImportedRows = 4
		s.compareJobInfoWithoutTime(jobInfo, rows[0])
		// resume the scheduler, need disable failpoint first, otherwise the post-process subtask will be blocked
		s.NoError(failpoint.Disable("github.com/pingcap/tidb/disttask/framework/scheduler/syncAfterSubtaskFinish"))
		scheduler.TestSyncChan <- struct{}{}
	}()
	s.tk.MustQuery(fmt.Sprintf(`import into t3 FROM 'gs://test-show-job/t*.csv?access-key=aaaaaa&secret-access-key=bbbbbb&endpoint=%s' with thread=1, __max_engine_size='1'`, gcsEndpoint))
	wg.Wait()
	s.tk.MustQuery("select * from t3").Sort().Check(testkit.Rows("1", "2", "3", "4"))
}
// TestShowDetachedJob verifies SHOW IMPORT JOB output for IMPORT INTO ... WITH
// DETACHED across three outcomes: a job that finishes successfully, a job that
// fails checksum validation, and a job whose import subtask errors out.
func (s *mockGCSSuite) TestShowDetachedJob() {
	s.prepareAndUseDB("show_detached_job")
	s.tk.MustExec("CREATE TABLE t1 (i INT PRIMARY KEY);")
	s.tk.MustExec("CREATE TABLE t2 (i INT PRIMARY KEY);")
	s.tk.MustExec("CREATE TABLE t3 (i INT PRIMARY KEY);")
	s.server.CreateObject(fakestorage.Object{
		ObjectAttrs: fakestorage.ObjectAttrs{BucketName: "test-show-detached-job", Name: "t.csv"},
		Content:     []byte("1\n2"),
	})
	// t2.csv holds a duplicated value for the PRIMARY KEY column; importing it
	// into t2 is expected to fail later with a checksum mismatch.
	s.server.CreateObject(fakestorage.Object{
		ObjectAttrs: fakestorage.ObjectAttrs{BucketName: "test-show-detached-job", Name: "t2.csv"},
		Content:     []byte("1\n1"),
	})
	do, err := session.GetDomain(s.store)
	s.NoError(err)
	tableID1 := do.MustGetTableID(s.T(), "show_detached_job", "t1")
	tableID2 := do.MustGetTableID(s.T(), "show_detached_job", "t2")
	tableID3 := do.MustGetTableID(s.T(), "show_detached_job", "t3")
	// Expected SHOW IMPORT JOB row right after submission (status "pending").
	jobInfo := &importer.JobInfo{
		TableSchema: "show_detached_job",
		TableName:   "t1",
		TableID:     tableID1,
		CreatedBy:   "root@%",
		Parameters: importer.ImportParameters{
			FileLocation: fmt.Sprintf(`gs://test-show-detached-job/t.csv?endpoint=%s`, gcsEndpoint),
			Format:       importer.DataFormatCSV,
		},
		SourceFileSize: 3,
		Status:         "pending",
		Step:           "",
	}
	s.NoError(s.tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost"}, nil, nil, nil))
	result1 := s.tk.MustQuery(fmt.Sprintf(`import into t1 FROM 'gs://test-show-detached-job/t.csv?endpoint=%s' with detached`,
		gcsEndpoint)).Rows()
	s.Len(result1, 1)
	jobID1, err := strconv.Atoi(result1[0][0].(string))
	s.NoError(err)
	jobInfo.ID = int64(jobID1)
	s.compareJobInfoWithoutTime(jobInfo, result1[0])
	// Wait for the detached job to finish; column 5 of the result is the status.
	s.Require().Eventually(func() bool {
		rows := s.tk.MustQuery(fmt.Sprintf("show import job %d", jobID1)).Rows()
		return rows[0][5] == "finished"
	}, 20*time.Second, 500*time.Millisecond)
	rows := s.tk.MustQuery(fmt.Sprintf("show import job %d", jobID1)).Rows()
	s.Len(rows, 1)
	jobInfo.Status = "finished"
	jobInfo.Summary = &importer.JobSummary{
		ImportedRows: 2,
	}
	s.compareJobInfoWithoutTime(jobInfo, rows[0])
	s.tk.MustQuery("select * from t1").Check(testkit.Rows("1", "2"))
	// job fail with checksum mismatch
	result2 := s.tk.MustQuery(fmt.Sprintf(`import into t2 FROM 'gs://test-show-detached-job/t2.csv?endpoint=%s' with detached`,
		gcsEndpoint)).Rows()
	s.Len(result2, 1)
	jobID2, err := strconv.Atoi(result2[0][0].(string))
	s.NoError(err)
	jobInfo = &importer.JobInfo{
		ID:          int64(jobID2),
		TableSchema: "show_detached_job",
		TableName:   "t2",
		TableID:     tableID2,
		CreatedBy:   "root@%",
		Parameters: importer.ImportParameters{
			FileLocation: fmt.Sprintf(`gs://test-show-detached-job/t2.csv?endpoint=%s`, gcsEndpoint),
			Format:       importer.DataFormatCSV,
		},
		SourceFileSize: 3,
		Status:         "pending",
		Step:           "",
	}
	s.compareJobInfoWithoutTime(jobInfo, result2[0])
	s.Require().Eventually(func() bool {
		rows = s.tk.MustQuery(fmt.Sprintf("show import job %d", jobID2)).Rows()
		return rows[0][5] == "failed"
	}, 10*time.Second, 500*time.Millisecond)
	rows = s.tk.MustQuery(fmt.Sprintf("show import job %d", jobID2)).Rows()
	s.Len(rows, 1)
	jobInfo.Status = "failed"
	jobInfo.Step = importer.JobStepValidating
	// NOTE(review): the trailing ".*" suggests the comparison helper matches
	// this as a regexp — confirm in compareJobInfoWithoutTime.
	jobInfo.ErrorMessage = `\[Lighting:Restore:ErrChecksumMismatch]checksum mismatched remote vs local.*`
	s.compareJobInfoWithoutTime(jobInfo, rows[0])
	// subtask fail with error
	s.enableFailpoint("github.com/pingcap/tidb/disttask/importinto/errorWhenSortChunk", "return(true)")
	result3 := s.tk.MustQuery(fmt.Sprintf(`import into t3 FROM 'gs://test-show-detached-job/t.csv?endpoint=%s' with detached`,
		gcsEndpoint)).Rows()
	s.Len(result3, 1)
	jobID3, err := strconv.Atoi(result3[0][0].(string))
	s.NoError(err)
	jobInfo = &importer.JobInfo{
		ID:          int64(jobID3),
		TableSchema: "show_detached_job",
		TableName:   "t3",
		TableID:     tableID3,
		CreatedBy:   "root@%",
		Parameters: importer.ImportParameters{
			FileLocation: fmt.Sprintf(`gs://test-show-detached-job/t.csv?endpoint=%s`, gcsEndpoint),
			Format:       importer.DataFormatCSV,
		},
		SourceFileSize: 3,
		Status:         "pending",
		Step:           "",
	}
	s.compareJobInfoWithoutTime(jobInfo, result3[0])
	s.Require().Eventually(func() bool {
		rows = s.tk.MustQuery(fmt.Sprintf("show import job %d", jobID3)).Rows()
		return rows[0][5] == "failed"
	}, 10*time.Second, 500*time.Millisecond)
	rows = s.tk.MustQuery(fmt.Sprintf("show import job %d", jobID3)).Rows()
	s.Len(rows, 1)
	jobInfo.Status = "failed"
	jobInfo.Step = importer.JobStepImporting
	jobInfo.ErrorMessage = `occur an error when sort chunk.*`
	s.compareJobInfoWithoutTime(jobInfo, rows[0])
}
// TestCancelJob covers CANCEL IMPORT JOB: cancelling a running job as its
// creator, privilege checks when another user (or root) cancels, the error on
// double-cancel, and cancelling a job that has reached the post-process phase.
func (s *mockGCSSuite) TestCancelJob() {
	s.prepareAndUseDB("test_cancel_job")
	s.tk.MustExec("CREATE TABLE t1 (i INT PRIMARY KEY);")
	s.tk.MustExec("CREATE TABLE t2 (i INT PRIMARY KEY);")
	s.server.CreateObject(fakestorage.Object{
		ObjectAttrs: fakestorage.ObjectAttrs{BucketName: "test_cancel_job", Name: "t.csv"},
		Content:     []byte("1\n2"),
	})
	// Restore root authentication for whatever test runs next.
	s.T().Cleanup(func() {
		_ = s.tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost"}, nil, nil, nil)
	})
	// Two non-root users to exercise the ownership/privilege checks.
	s.tk.MustExec(`DROP USER IF EXISTS 'test_cancel_job1'@'localhost';`)
	s.tk.MustExec(`CREATE USER 'test_cancel_job1'@'localhost';`)
	s.tk.MustExec(`GRANT SELECT,UPDATE,INSERT,DELETE,ALTER on test_cancel_job.* to 'test_cancel_job1'@'localhost'`)
	s.tk.MustExec(`DROP USER IF EXISTS 'test_cancel_job2'@'localhost';`)
	s.tk.MustExec(`CREATE USER 'test_cancel_job2'@'localhost';`)
	s.tk.MustExec(`GRANT SELECT,UPDATE,INSERT,DELETE,ALTER on test_cancel_job.* to 'test_cancel_job2'@'localhost'`)
	do, err := session.GetDomain(s.store)
	s.NoError(err)
	tableID1 := do.MustGetTableID(s.T(), "test_cancel_job", "t1")
	tableID2 := do.MustGetTableID(s.T(), "test_cancel_job", "t2")
	// cancel non-exists job
	err = s.tk.ExecToErr("cancel import job 9999999999")
	s.ErrorIs(err, exeerrors.ErrLoadDataJobNotFound)
	// getTask looks up the dist-framework task that backs an import job.
	getTask := func(jobID int64) *proto.Task {
		globalTaskManager, err := storage.GetTaskManager()
		s.NoError(err)
		taskKey := importinto.TaskKey(jobID)
		globalTask, err := globalTaskManager.GetGlobalTaskByKey(taskKey)
		s.NoError(err)
		return globalTask
	}
	// cancel a running job created by self
	s.enableFailpoint("github.com/pingcap/tidb/disttask/importinto/waitBeforeSortChunk", "return(true)")
	s.enableFailpoint("github.com/pingcap/tidb/disttask/importinto/syncAfterJobStarted", "return(true)")
	s.NoError(s.tk.Session().Auth(&auth.UserIdentity{Username: "test_cancel_job1", Hostname: "localhost"}, nil, nil, nil))
	result1 := s.tk.MustQuery(fmt.Sprintf(`import into t1 FROM 'gs://test_cancel_job/t.csv?endpoint=%s' with detached`,
		gcsEndpoint)).Rows()
	s.Len(result1, 1)
	jobID1, err := strconv.Atoi(result1[0][0].(string))
	s.NoError(err)
	// wait job started
	<-importinto.TestSyncChan
	// dist framework has bug, the cancelled status might be overridden by running status,
	// so we wait it turn running before cancel, see https://github.com/pingcap/tidb/issues/44443
	time.Sleep(3 * time.Second)
	s.tk.MustExec(fmt.Sprintf("cancel import job %d", jobID1))
	rows := s.tk.MustQuery(fmt.Sprintf("show import job %d", jobID1)).Rows()
	s.Len(rows, 1)
	// Expected job row after a mid-import cancellation.
	jobInfo := &importer.JobInfo{
		ID:          int64(jobID1),
		TableSchema: "test_cancel_job",
		TableName:   "t1",
		TableID:     tableID1,
		CreatedBy:   "test_cancel_job1@localhost",
		Parameters: importer.ImportParameters{
			FileLocation: fmt.Sprintf(`gs://test_cancel_job/t.csv?endpoint=%s`, gcsEndpoint),
			Format:       importer.DataFormatCSV,
		},
		SourceFileSize: 3,
		Status:         "cancelled",
		Step:           importer.JobStepImporting,
		ErrorMessage:   "cancelled by user",
	}
	s.compareJobInfoWithoutTime(jobInfo, rows[0])
	s.Require().Eventually(func() bool {
		task := getTask(int64(jobID1))
		return task.State == proto.TaskStateReverted
	}, 10*time.Second, 500*time.Millisecond)
	// cancel again, should fail
	s.ErrorIs(s.tk.ExecToErr(fmt.Sprintf("cancel import job %d", jobID1)), exeerrors.ErrLoadDataInvalidOperation)
	// cancel a job created by test_cancel_job1 using test_cancel_job2, should fail
	s.NoError(s.tk.Session().Auth(&auth.UserIdentity{Username: "test_cancel_job2", Hostname: "localhost"}, nil, nil, nil))
	s.ErrorIs(s.tk.ExecToErr(fmt.Sprintf("cancel import job %d", jobID1)), core.ErrSpecificAccessDenied)
	// cancel by root, should pass privilege check (but still fail: already cancelled)
	s.NoError(s.tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost"}, nil, nil, nil))
	s.ErrorIs(s.tk.ExecToErr(fmt.Sprintf("cancel import job %d", jobID1)), exeerrors.ErrLoadDataInvalidOperation)
	// cancel job in post-process phase, using test_cancel_job2
	s.NoError(s.tk.Session().Auth(&auth.UserIdentity{Username: "test_cancel_job2", Hostname: "localhost"}, nil, nil, nil))
	s.NoError(failpoint.Disable("github.com/pingcap/tidb/disttask/importinto/waitBeforeSortChunk"))
	s.NoError(failpoint.Disable("github.com/pingcap/tidb/disttask/importinto/syncAfterJobStarted"))
	s.enableFailpoint("github.com/pingcap/tidb/disttask/importinto/syncBeforePostProcess", "return(true)")
	s.enableFailpoint("github.com/pingcap/tidb/disttask/importinto/waitCtxDone", "return(true)")
	result2 := s.tk.MustQuery(fmt.Sprintf(`import into t2 FROM 'gs://test_cancel_job/t.csv?endpoint=%s' with detached`,
		gcsEndpoint)).Rows()
	s.Len(result2, 1)
	jobID2, err := strconv.Atoi(result2[0][0].(string))
	s.NoError(err)
	// wait job reach post-process phase
	<-importinto.TestSyncChan
	s.tk.MustExec(fmt.Sprintf("cancel import job %d", jobID2))
	// resume the job
	importinto.TestSyncChan <- struct{}{}
	rows2 := s.tk.MustQuery(fmt.Sprintf("show import job %d", jobID2)).Rows()
	s.Len(rows2, 1)
	jobInfo = &importer.JobInfo{
		ID:          int64(jobID2),
		TableSchema: "test_cancel_job",
		TableName:   "t2",
		TableID:     tableID2,
		CreatedBy:   "test_cancel_job2@localhost",
		Parameters: importer.ImportParameters{
			FileLocation: fmt.Sprintf(`gs://test_cancel_job/t.csv?endpoint=%s`, gcsEndpoint),
			Format:       importer.DataFormatCSV,
		},
		SourceFileSize: 3,
		Status:         "cancelled",
		Step:           importer.JobStepValidating,
		ErrorMessage:   "cancelled by user",
	}
	s.compareJobInfoWithoutTime(jobInfo, rows2[0])
	globalTaskManager, err := storage.GetTaskManager()
	s.NoError(err)
	taskKey := importinto.TaskKey(int64(jobID2))
	s.NoError(err)
	s.Require().Eventually(func() bool {
		globalTask, err2 := globalTaskManager.GetGlobalTaskByKey(taskKey)
		s.NoError(err2)
		subtasks, err2 := globalTaskManager.GetSubtasksByStep(globalTask.ID, importinto.StepPostProcess)
		s.NoError(err2)
		s.Len(subtasks, 2) // framework will generate a subtask when canceling
		var cancelled bool
		for _, st := range subtasks {
			if st.State == proto.TaskStateCanceled {
				cancelled = true
				break
			}
		}
		return globalTask.State == proto.TaskStateReverted && cancelled
	}, 5*time.Second, 1*time.Second)
	// todo: enable it when https://github.com/pingcap/tidb/issues/44443 fixed
	//// cancel a pending job created by test_cancel_job2 using root
	//s.NoError(failpoint.Disable("github.com/pingcap/tidb/disttask/importinto/syncAfterJobStarted"))
	//s.enableFailpoint("github.com/pingcap/tidb/disttask/importinto/syncBeforeJobStarted", "return(true)")
	//result2 := s.tk.MustQuery(fmt.Sprintf(`import into t2 FROM 'gs://test_cancel_job/t.csv?endpoint=%s' with detached`,
	//	gcsEndpoint)).Rows()
	//s.Len(result2, 1)
	//jobID2, err := strconv.Atoi(result2[0][0].(string))
	//s.NoError(err)
	//// wait job reached to the point before job started
	//<-loaddata.TestSyncChan
	//s.NoError(s.tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost"}, nil, nil, nil))
	//s.tk.MustExec(fmt.Sprintf("cancel import job %d", jobID2))
	//// resume the job
	//loaddata.TestSyncChan <- struct{}{}
	//rows = s.tk.MustQuery(fmt.Sprintf("show import job %d", jobID2)).Rows()
	//s.Len(rows, 1)
	//jobInfo = &importer.JobInfo{
	//	ID:          int64(jobID2),
	//	TableSchema: "test_cancel_job",
	//	TableName:   "t2",
	//	TableID:     tableID2,
	//	CreatedBy:   "test_cancel_job2@localhost",
	//	Parameters: importer.ImportParameters{
	//		FileLocation: fmt.Sprintf(`gs://test_cancel_job/t.csv?endpoint=%s`, gcsEndpoint),
	//		Format:       importer.DataFormatCSV,
	//	},
	//	SourceFileSize: 3,
	//	Status:         "cancelled",
	//	Step:           "",
	//	ErrorMessage:   "cancelled by user",
	//}
	//s.compareJobInfoWithoutTime(jobInfo, rows[0])
	//s.Require().Eventually(func() bool {
	//	task := getTask(int64(jobID2))
	//	return task.State == proto.TaskStateReverted
	//}, 10*time.Second, 500*time.Millisecond)
}
// TestJobFailWhenDispatchSubtask checks that an error injected while
// dispatching the post-process subtask marks the import job "failed" at the
// validating step, with the injected message surfaced by SHOW IMPORT JOB.
func (s *mockGCSSuite) TestJobFailWhenDispatchSubtask() {
	s.prepareAndUseDB("fail_job_after_import")
	s.tk.MustExec("CREATE TABLE t1 (i INT PRIMARY KEY);")
	s.server.CreateObject(fakestorage.Object{
		ObjectAttrs: fakestorage.ObjectAttrs{BucketName: "fail_job_after_import", Name: "t.csv"},
		Content:     []byte("1\n2"),
	})
	do, err := session.GetDomain(s.store)
	s.NoError(err)
	tableID1 := do.MustGetTableID(s.T(), "fail_job_after_import", "t1")
	// Expected final job row after the injected dispatch failure.
	jobInfo := &importer.JobInfo{
		TableSchema: "fail_job_after_import",
		TableName:   "t1",
		TableID:     tableID1,
		CreatedBy:   "root@%",
		Parameters: importer.ImportParameters{
			FileLocation: fmt.Sprintf(`gs://fail_job_after_import/t.csv?endpoint=%s`, gcsEndpoint),
			Format:       importer.DataFormatCSV,
		},
		SourceFileSize: 3,
		Status:         "failed",
		Step:           importer.JobStepValidating,
		ErrorMessage:   "injected error after StepImport",
	}
	s.enableFailpoint("github.com/pingcap/tidb/disttask/importinto/failWhenDispatchPostProcessSubtask", "return(true)")
	s.enableFailpoint("github.com/pingcap/tidb/executor/importer/setLastImportJobID", `return(true)`)
	s.NoError(s.tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost"}, nil, nil, nil))
	// Non-detached import: the injected error is returned to the client.
	err = s.tk.QueryToErr(fmt.Sprintf(`import into t1 FROM 'gs://fail_job_after_import/t.csv?endpoint=%s'`, gcsEndpoint))
	s.ErrorContains(err, "injected error after StepImport")
	result1 := s.tk.MustQuery(fmt.Sprintf("show import job %d", importer.TestLastImportJobID.Load())).Rows()
	s.Len(result1, 1)
	jobID1, err := strconv.Atoi(result1[0][0].(string))
	s.NoError(err)
	jobInfo.ID = int64(jobID1)
	s.compareJobInfoWithoutTime(jobInfo, result1[0])
}
// TestKillBeforeFinish cancels a running IMPORT INTO via the executor's test
// cancel hook while the first chunk is being sorted, then verifies the job
// ends up "cancelled" and the dist-framework task is reverted.
func (s *mockGCSSuite) TestKillBeforeFinish() {
	s.cleanupSysTables()
	s.tk.MustExec("DROP DATABASE IF EXISTS kill_job;")
	s.tk.MustExec("CREATE DATABASE kill_job;")
	s.tk.MustExec(`CREATE TABLE kill_job.t (a INT, b INT, c int);`)
	s.server.CreateObject(fakestorage.Object{
		ObjectAttrs: fakestorage.ObjectAttrs{BucketName: "test-load", Name: "t-1.tsv"},
		Content:     []byte("1,11,111"),
	})
	s.enableFailpoint("github.com/pingcap/tidb/disttask/importinto/syncBeforeSortChunk", "return(true)")
	s.enableFailpoint("github.com/pingcap/tidb/executor/cancellableCtx", "return(true)")
	s.enableFailpoint("github.com/pingcap/tidb/executor/importer/setLastImportJobID", `return(true)`)
	wg := sync.WaitGroup{}
	wg.Add(1)
	// Run the (blocking) IMPORT INTO in the background; it should fail with
	// context.Canceled once the test cancel hook fires.
	go func() {
		defer wg.Done()
		sql := fmt.Sprintf(`IMPORT INTO kill_job.t FROM 'gs://test-load/t-*.tsv?endpoint=%s'`, gcsEndpoint)
		err := s.tk.QueryToErr(sql)
		s.ErrorIs(errors.Cause(err), context.Canceled)
	}()
	// wait for the task reach sort chunk
	<-importinto.TestSyncChan
	// cancel the job
	executor.TestCancelFunc()
	// continue the execution
	importinto.TestSyncChan <- struct{}{}
	wg.Wait()
	jobID := importer.TestLastImportJobID.Load()
	rows := s.tk.MustQuery(fmt.Sprintf("show import job %d", jobID)).Rows()
	s.Len(rows, 1)
	s.Equal("cancelled", rows[0][5])
	globalTaskManager, err := storage.GetTaskManager()
	s.NoError(err)
	taskKey := importinto.TaskKey(jobID)
	s.NoError(err)
	s.Require().Eventually(func() bool {
		globalTask, err2 := globalTaskManager.GetGlobalTaskByKey(taskKey)
		s.NoError(err2)
		return globalTask.State == proto.TaskStateReverted
	}, 5*time.Second, 1*time.Second)
}
|
package internal
import (
"fmt"
"os"
flag "github.com/spf13/pflag"
)
// options holds every command-line option accepted by the tool.
type options struct {
	BlobNameOrPrefix string // blob name or prefix to process (-b)
	ContainerName    string // storage container name (-c)
	AccountName      string // storage account name (-a), falls back to the ACCOUNT_NAME env var
	AccountKey       string // storage account key (-k), falls back to the ACCOUNT_KEY env var
	SetBlobMD5       bool   // set the blob's Content-MD5 property to the computed hash (-m)
	FileSource       string // local file name or pattern to hash (-f)
	ShowVersion      bool   // display the current version and exit (-v)
}
// Options is the package-wide option set, populated by Init and flag parsing.
var Options = &options{}
// Environment variables used as fallbacks for the storage credentials.
const storageAccountKeyEnvVar = "ACCOUNT_KEY"
const storageAccountNameEnvVar = "ACCOUNT_NAME"

// Usage strings shared by flag registration and the custom usage printer.
const (
	blobNameMsg      = "Blob name (e.g. myblob.txt) or prefix."
	containerNameMsg = "Container name (e.g. mycontainer)."
	accountNameMsg   = "Storage account name (e.g. mystorage).\n\tCan also be specified via the " + storageAccountNameEnvVar + " environment variable."
	accountKeyMsg    = "Storage account key string.\n\tCan also be specified via the " + storageAccountKeyEnvVar + " environment variable."
	setBlobMD5Msg    = "Set Content-MD5 property of the blob with the calculated value"
	fileSourceMsg    = "File name or pattern. If set, the MD5 hash will be calculated for the files that match the criteria"
	showVersionMsg   = "Display current version"
)
// Init registers all command-line flags and installs a custom usage printer
// that lists the flags in a fixed order.
func (o *options) Init() {
	flag.Usage = func() {
		// Same entries, same order as the registrations below.
		entries := []struct {
			short, long, def, msg string
		}{
			{"b", "blob-name-or-prefix", "", blobNameMsg},
			{"c", "container-name", "", containerNameMsg},
			{"a", "account-name", "", accountNameMsg},
			{"k", "account-key", "", accountKeyMsg},
			{"m", "set-blob-md5", "", setBlobMD5Msg},
			{"f", "file-source-pattern", "", fileSourceMsg},
			{"v", "version", "", showVersionMsg},
		}
		for _, e := range entries {
			printUsageDefaults(e.short, e.long, e.def, e.msg)
		}
	}
	flag.BoolVarP(&o.SetBlobMD5, "set-blob-md5", "m", false, setBlobMD5Msg)
	flag.BoolVarP(&o.ShowVersion, "version", "v", false, showVersionMsg)
	flag.StringVarP(&o.BlobNameOrPrefix, "blob-name-or-prefix", "b", "", blobNameMsg)
	flag.StringVarP(&o.ContainerName, "container-name", "c", "", containerNameMsg)
	flag.StringVarP(&o.AccountName, "account-name", "a", os.Getenv(storageAccountNameEnvVar), accountNameMsg)
	flag.StringVarP(&o.AccountKey, "account-key", "k", os.Getenv(storageAccountKeyEnvVar), accountKeyMsg)
	flag.StringVarP(&o.FileSource, "file-source-pattern", "f", "", fileSourceMsg)
}
// Validate parses the command-line flags and reports which input sources are
// usable: blobSource is true when the blob-storage options are complete,
// fileSource is true when a file pattern was given. An error is returned only
// when neither source is valid; it aggregates both validation failures.
func (o *options) Validate() (blobSource bool, fileSource bool, err error) {
	flag.Parse()
	errBlobSource := o.validateBlobSource()
	errFileSource := o.validateFileSource()
	if errBlobSource != nil && errFileSource != nil {
		// Neither source is usable: surface both validation errors.
		// (Fixed message: was " Invalid options. A file source or a blob source be set.")
		return false, false, fmt.Errorf("invalid options: a file source or a blob source must be set.\nFile Source:\n%v\nBlobSource:\n%v", errFileSource, errBlobSource)
	}
	if errBlobSource != nil {
		// Only the file source is valid.
		return false, true, nil
	}
	// Blob source is valid; the file source may or may not be.
	return true, errFileSource == nil, nil
}
// validateFileSource reports whether a file source pattern was supplied,
// returning a descriptive error when it is missing.
func (o *options) validateFileSource() error {
	if o.FileSource != "" {
		return nil
	}
	return fmt.Errorf("File source pattern not specified via option -f")
}
// validateBlobSource checks that every option required to read from blob
// storage (account key, account name, container name) is set. It returns nil
// when all are present; otherwise the error lists each missing option, one per
// line. The previous implementation formatted a possibly-nil error with %v,
// which appended a literal "<nil>" line whenever only some options were
// missing.
func (o *options) validateBlobSource() error {
	// prepend stacks msg on top of any accumulated error without ever
	// formatting a nil error into the message.
	prepend := func(err error, msg string) error {
		if err == nil {
			return fmt.Errorf("%s", msg)
		}
		return fmt.Errorf("%s\n%v", msg, err)
	}
	var err error
	if o.AccountKey == "" {
		err = prepend(err, "Storage account key is not set")
	}
	if o.AccountName == "" {
		err = prepend(err, "Storage account name is not set")
	}
	if o.ContainerName == "" {
		err = prepend(err, "Container name is missing")
	}
	return err
}
// printUsageDefaults writes a single flag's usage entry to stderr in the form
// "-s, --long :\n\t<description>[\n\tDefault value: <default>]".
func printUsageDefaults(shortflag string, longflag string, defaultVal string, description string) {
	defaultMsg := ""
	if defaultVal != "" {
		defaultMsg = fmt.Sprintf("\n\tDefault value: %v", defaultVal)
	}
	// Single Fprintf instead of Fprintln(Sprintf(...)); output is identical.
	fmt.Fprintf(os.Stderr, "-%v, --%v :\n\t%v%v\n", shortflag, longflag, description, defaultMsg)
}
|
package sleepytcp
import "sync"
// waitingCaller represents a caller blocked waiting for a client connection
// (or an error). ready is closed exactly once, when a result is delivered or
// the wait is cancelled.
type waitingCaller struct {
	ready chan struct{}
	mu    sync.Mutex // protects conn, err, close(ready)
	conn  *clientConn
	err   error
}
// waiting reports whether w is still waiting for an answer (connection or error).
func (w *waitingCaller) waiting() bool {
	select {
	case <-w.ready:
	default:
		// ready is still open: nothing has been delivered or cancelled yet.
		return true
	}
	return false
}
// tryDeliver attempts to deliver conn, err to w and reports whether it succeeded.
// Only the first delivery wins; later attempts return false.
func (w *waitingCaller) tryDeliver(conn *clientConn, err error) bool {
	w.mu.Lock()
	defer w.mu.Unlock()
	alreadyAnswered := w.conn != nil || w.err != nil
	if alreadyAnswered {
		return false
	}
	w.conn, w.err = conn, err
	close(w.ready) // wake the waiting caller
	return true
}
// cancel marks w as no longer wanting a result (for example, due to cancellation).
// If a connection has been delivered already, cancel returns it with c.tryRecycleClientConn.
func (w *waitingCaller) cancel(c *HostClient, err error) {
	w.mu.Lock()
	if w.conn == nil && w.err == nil {
		close(w.ready) // catch misbehavior in future delivery
	}
	// Detach any delivered connection under the lock; recycle it afterwards,
	// outside the critical section.
	conn := w.conn
	w.conn = nil
	w.err = err
	w.mu.Unlock()
	if conn != nil {
		c.tryRecycleClientConn(conn)
	}
}
// waitingCallerQueue is a FIFO queue of waitingCallers.
type waitingCallerQueue struct {
	// This is a queue, not a deque.
	// It is split into two stages - head[headPos:] and tail.
	// popFront is trivial (headPos++) on the first stage, and
	// pushBack is trivial (append) on the second stage.
	// If the first stage is empty, popFront can swap the
	// first and second stages to remedy the situation.
	//
	// This two-stage split is analogous to the use of two lists
	// in Okasaki's purely functional queue but without the
	// overhead of reversing the list when swapping stages.
	head    []*waitingCaller
	headPos int
	tail    []*waitingCaller
}
// len returns the number of items in the queue across both stages.
func (q *waitingCallerQueue) len() int {
	inHead := len(q.head) - q.headPos
	return inHead + len(q.tail)
}
// pushBack adds w to the back of the queue (the tail stage, where append is cheap).
func (q *waitingCallerQueue) pushBack(w *waitingCaller) {
	q.tail = append(q.tail, w)
}
// popFront removes and returns the waitingCaller at the front of the queue,
// or nil if the queue is empty.
func (q *waitingCallerQueue) popFront() *waitingCaller {
	if q.headPos >= len(q.head) {
		// Head stage exhausted: promote the tail to become the new head.
		if len(q.tail) == 0 {
			return nil
		}
		q.head, q.headPos, q.tail = q.tail, 0, q.head[:0]
	}
	front := q.head[q.headPos]
	q.head[q.headPos] = nil // drop the reference so it can be collected
	q.headPos++
	return front
}
// peekFront returns the waitingCaller at the front of the queue without removing it,
// or nil if the queue is empty.
func (q *waitingCallerQueue) peekFront() *waitingCaller {
	if q.headPos >= len(q.head) {
		// Head stage is empty; the front element, if any, is first in tail.
		if len(q.tail) > 0 {
			return q.tail[0]
		}
		return nil
	}
	return q.head[q.headPos]
}
// clearFront pops any waitingCallers that are no longer waiting from the head
// of the queue, reporting whether any were popped.
func (q *waitingCallerQueue) clearFront() (cleaned bool) {
	for {
		w := q.peekFront()
		if w == nil || w.waiting() {
			return cleaned
		}
		q.popFront()
		cleaned = true
	}
}
|
package venti
import (
"crypto/sha1"
"errors"
"fmt"
"io"
)
// TODO: when should scores be pointers vs values?
// ScoreSize is the length in bytes of a venti score (a SHA-1 digest).
const ScoreSize = sha1.Size

// Score is the SHA-1 fingerprint identifying a block of data.
type Score [ScoreSize]byte

// ZeroScore returns the score of the empty block, i.e. SHA-1("").
func ZeroScore() Score {
	return Score{
		0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 0x55,
		0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 0x07, 0x09,
	}
}

// ReadScore fills s with exactly ScoreSize bytes read from r.
// It uses io.ReadFull so readers that legally return fewer bytes per Read
// call still work; the previous implementation issued a single Read and could
// report a spurious "short read" on such readers.
func ReadScore(s *Score, r io.Reader) error {
	if _, err := io.ReadFull(r, s[:]); err != nil {
		if err == io.EOF || err == io.ErrUnexpectedEOF {
			return errors.New("short read")
		}
		return err
	}
	return nil
}

// ParseScore decodes a 40-character hexadecimal string (upper or lower case)
// into a Score. It returns an error for wrong lengths or non-hex characters.
func ParseScore(s string) (Score, error) {
	if len(s) != ScoreSize*2 {
		return Score{}, fmt.Errorf("bad score size: %d", len(s))
	}
	var score Score
	for i := 0; i < ScoreSize*2; i++ {
		var c int
		if s[i] >= '0' && s[i] <= '9' {
			c = int(s[i]) - '0'
		} else if s[i] >= 'a' && s[i] <= 'f' {
			c = int(s[i]) - 'a' + 10
		} else if s[i] >= 'A' && s[i] <= 'F' {
			c = int(s[i]) - 'A' + 10
		} else {
			return Score{}, fmt.Errorf("invalid byte: %d", s[i])
		}
		// Even positions are the high nibble of each output byte.
		if i&1 == 0 {
			c <<= 4
		}
		score[i>>1] |= uint8(c)
	}
	return score, nil
}

// Fingerprint computes the score (SHA-1 digest) of data.
func Fingerprint(data []byte) Score {
	return sha1.Sum(data)
}

// String renders the score as 40 lowercase hexadecimal characters.
func (s *Score) String() string {
	return fmt.Sprintf("%x", [ScoreSize]byte(*s))
}

// Bytes returns the score's underlying bytes as a slice (no copy).
func (s *Score) Bytes() []byte {
	return s[:]
}
|
package setr
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document05700102 is the XML document wrapper for the ISO 20022
// setr.057.001.02 (OrderConfirmationStatusReport) message.
type Document05700102 struct {
	XMLName xml.Name                          `xml:"urn:iso:std:iso:20022:tech:xsd:setr.057.001.02 Document"`
	Message *OrderConfirmationStatusReportV02 `xml:"OrdrConfStsRpt"`
}
// AddMessage allocates the message payload, attaches it to the document, and
// returns it for further population.
func (d *Document05700102) AddMessage() *OrderConfirmationStatusReportV02 {
	msg := new(OrderConfirmationStatusReportV02)
	d.Message = msg
	return msg
}
// Scope
// The OrderConfirmationStatusReport message is sent by an instructing party, for example, an investment manager or its authorised representative, to the executing party, for example, a transfer agent, to report the status of an order confirmation or an order confirmation amendment.
// Usage
// The OrderConfirmationStatusReport message is used to report on the status of one or more individual:
// - subscription confirmations,
// - subscription confirmation amendments,
// - redemption confirmations,
// - redemption confirmation amendments,
// - switch order confirmations,
// - switch order confirmation amendments.
// One of the following statuses can be reported:
// - confirmation rejected, or,
// - amendment rejected, or,
// - sent to next party, or,
// - communication problem with next party, or,
// - confirmation accepted, or,
// - confirmation received.
// It is likely that the OrderConfirmationStatusReport is only sent by the order instructing party to the order executing party to reject an order confirmation or to reject an order confirmation amendment, although if an intermediary party is used, the statuses sent to next party and communication problem with next party are also likely be used. The statuses confirmation accepted and confirmation received would only be used in the event the order executing party sends a RequestForOrderConfirmationStatusReport message and one of the other statuses does not apply.
// If the status being reported is either confirmation rejected or amendment rejected, then a reason for the rejection must be given.
// The individual order confirmation or confirmation amendment for which the status is given is identified with its order reference. The message identification of the message in which the individual order confirmation or confirmation amendment was conveyed may also be quoted in RelatedReference, but this is not recommended.
//
// OrderConfirmationStatusReportV02 is the message body of the report.
type OrderConfirmationStatusReportV02 struct {
	// Reference that uniquely identifies the message from a business application standpoint.
	MessageIdentification *iso20022.MessageIdentification1 `xml:"MsgId"`
	// Reference to the message or communication that was previously received.
	Reference *iso20022.References61Choice `xml:"Ref,omitempty"`
	// Status report details of an individual order confirmation.
	IndividualOrderConfirmationDetailsReport []*iso20022.IndividualOrderConfirmationStatusAndReason2 `xml:"IndvOrdrConfDtlsRpt"`
	// Additional information that cannot be captured in the structured elements and/or any other specific block.
	Extension []*iso20022.Extension1 `xml:"Xtnsn,omitempty"`
}
// AddMessageIdentification allocates the message identification, stores it on
// the report, and returns it.
func (o *OrderConfirmationStatusReportV02) AddMessageIdentification() *iso20022.MessageIdentification1 {
	id := new(iso20022.MessageIdentification1)
	o.MessageIdentification = id
	return id
}
// AddReference allocates the related-reference choice, stores it on the
// report, and returns it.
func (o *OrderConfirmationStatusReportV02) AddReference() *iso20022.References61Choice {
	ref := new(iso20022.References61Choice)
	o.Reference = ref
	return ref
}
// AddIndividualOrderConfirmationDetailsReport appends a new, empty status
// entry to the report list and returns it for population.
func (o *OrderConfirmationStatusReportV02) AddIndividualOrderConfirmationDetailsReport() *iso20022.IndividualOrderConfirmationStatusAndReason2 {
	entry := new(iso20022.IndividualOrderConfirmationStatusAndReason2)
	o.IndividualOrderConfirmationDetailsReport = append(o.IndividualOrderConfirmationDetailsReport, entry)
	return entry
}
// AddExtension appends a new, empty extension block to the report and returns
// it for population.
func (o *OrderConfirmationStatusReportV02) AddExtension() *iso20022.Extension1 {
	ext := new(iso20022.Extension1)
	o.Extension = append(o.Extension, ext)
	return ext
}
|
package lark
import (
"testing"
)
// TestCrypto decrypts a known ciphertext with a fixed key and checks the
// recovered plaintext. (Fixes the "plant"/"plan" typos and the confusing
// "===" failure message of the original.)
func TestCrypto(t *testing.T) {
	want := "hello world"
	cd := NewCrypto("test key")
	plaintext, err := cd.DecryptString("P37w+VZImNgPEO1RBhJ6RtKl7n6zymIbEG1pReEzghk=")
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("plaintext: %q", plaintext)
	if plaintext != want {
		t.Errorf("mismatched result: got %q, want %q", plaintext, want)
	}
}
|
// +build ignore
package main
import (
"fmt"
"os"
"os/signal"
"github.com/naveego/live-api"
"github.com/Sirupsen/logrus"
)
// SERVER_ADDR is the websocket endpoint of the local live-api server this
// example connects to.
const (
	SERVER_ADDR = "ws://127.0.0.1:8888"
)
// main connects to the live-api websocket server and stays connected until
// the process receives an interrupt signal (Ctrl-C).
func main() {
	logrus.SetLevel(logrus.DebugLevel)

	client, err := live.NewWebSocketClient(SERVER_ADDR, "23432", "TEST")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	interrupts := make(chan os.Signal, 1)
	finished := make(chan bool, 1)
	signal.Notify(interrupts, os.Interrupt)

	// Signal completion once an interrupt arrives.
	go func() {
		received := <-interrupts
		fmt.Println()
		fmt.Println(received)
		finished <- true
	}()

	fmt.Println("Connected")
	<-finished
	fmt.Println("Exiting")
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
"strings"
)
// xy_tuple is a board position plus the axis of the move that reached it.
type xy_tuple struct {
	x   int32 // row index
	y   int32 // column index
	dir direction
}

// direction labels the axis of a move.
type direction int

const (
	UPDOWN    direction = 0
	LEFTRIGHT direction = 1
	INVALID   direction = 2
)

// minimumMoves returns the minimum number of straight-line moves needed to
// travel from (startX, startY) to (goalX, goalY) on grid, where '.' cells are
// free and a single move slides any number of cells horizontally or
// vertically. Coordinates are (row, column). It returns -1 when the goal is
// unreachable.
//
// This is a layered breadth-first search over cells: every cell reachable by
// one straight slide from the current frontier is exactly one move away, so
// the BFS layer index is the move count. (The previous implementation had no
// visited set and counted moves by comparing dequeue directions, so it could
// loop forever or return wrong answers; its debug prints are also removed.)
func minimumMoves(grid []string, startX int32, startY int32, goalX int32, goalY int32) int32 {
	if len(grid) == 0 || len(grid[0]) == 0 {
		return -1
	}
	rows, cols := int32(len(grid)), int32(len(grid[0]))
	visited := make([][]bool, rows)
	for i := range visited {
		visited[i] = make([]bool, cols)
	}
	frontier := []xy_tuple{{startX, startY, INVALID}}
	visited[startX][startY] = true
	// The four slide directions as (dx, dy) row/column deltas.
	deltas := [4][2]int32{{1, 0}, {-1, 0}, {0, 1}, {0, -1}}
	for moves := int32(0); len(frontier) > 0; moves++ {
		var next []xy_tuple
		for _, cur := range frontier {
			if cur.x == goalX && cur.y == goalY {
				return moves
			}
			for _, d := range deltas {
				axis := UPDOWN
				if d[0] == 0 {
					axis = LEFTRIGHT
				}
				// Slide until we leave the board or hit a blocked cell,
				// enqueueing every free cell passed along the way.
				for x, y := cur.x+d[0], cur.y+d[1]; x >= 0 && x < rows && y >= 0 && y < cols && grid[x][y] == '.'; x, y = x+d[0], y+d[1] {
					if !visited[x][y] {
						visited[x][y] = true
						next = append(next, xy_tuple{x, y, axis})
					}
				}
			}
		}
		frontier = next
	}
	return -1 // goal not reachable from start
}

// enqueue appends pos to the back of queue and returns the extended slice.
func enqueue(queue []xy_tuple, pos xy_tuple) []xy_tuple {
	return append(queue, pos)
}

// dequeue removes and returns the front element; it panics on an empty queue.
func dequeue(queue []xy_tuple) ([]xy_tuple, xy_tuple) {
	ret := queue[0]
	queue = queue[1:]
	return queue, ret
}

// isEmpty reports whether queue has no elements.
func isEmpty(queue []xy_tuple) bool {
	return len(queue) == 0
}
// main wires up HackerRank-style I/O: it reads the grid size, the grid rows,
// and the start/goal coordinates from stdin, then writes the answer to the
// file named by the OUTPUT_PATH environment variable.
func main() {
	reader := bufio.NewReaderSize(os.Stdin, 1024 * 1024)
	stdout, err := os.Create(os.Getenv("OUTPUT_PATH"))
	checkError(err)
	defer stdout.Close()
	writer := bufio.NewWriterSize(stdout, 1024 * 1024)
	nTemp, err := strconv.ParseInt(readLine(reader), 10, 64)
	checkError(err)
	n := int32(nTemp)
	var grid []string
	for i := 0; i < int(n); i++ {
		gridItem := readLine(reader)
		grid = append(grid, gridItem)
	}
	// NOTE(review): all four coordinates are split from a single input line,
	// i.e. "startX startY goalX goalY" — confirm against the expected input
	// format, which sometimes puts start and goal on separate lines.
	startXStartY := strings.Split(readLine(reader), " ")
	startXTemp, err := strconv.ParseInt(startXStartY[0], 10, 64)
	checkError(err)
	startX := int32(startXTemp)
	startYTemp, err := strconv.ParseInt(startXStartY[1], 10, 64)
	checkError(err)
	startY := int32(startYTemp)
	goalXTemp, err := strconv.ParseInt(startXStartY[2], 10, 64)
	checkError(err)
	goalX := int32(goalXTemp)
	goalYTemp, err := strconv.ParseInt(startXStartY[3], 10, 64)
	checkError(err)
	goalY := int32(goalYTemp)
	result := minimumMoves(grid, startX, startY, goalX, goalY)
	fmt.Fprintf(writer, "%d\n", result)
	writer.Flush()
}
func readLine(reader *bufio.Reader) string {
str, _, err := reader.ReadLine()
if err == io.EOF {
return ""
}
return strings.TrimRight(string(str), "\r\n")
}
// checkError panics on any non-nil error; nil errors pass through silently.
func checkError(err error) {
	if err == nil {
		return
	}
	panic(err)
}
|
package coding
// Splitter reports the number of bits a single code point occupies in some
// SMS-style encoding.
type Splitter func(rune) int

var (
	// _7BitSplitter models a 7-bit alphabet: 7 bits per code point.
	_7BitSplitter Splitter = func(rune) int { return 7 }
	// _1ByteSplitter models single-byte encodings: 8 bits per code point.
	_1ByteSplitter Splitter = func(rune) int { return 8 }
	// _MultibyteSplitter charges one byte below 0x7F and two bytes otherwise.
	// NOTE(review): the boundary is r < 0x7F, so 0x7F (DEL) itself costs two
	// bytes — confirm this is the intended boundary.
	_MultibyteSplitter Splitter = func(r rune) int {
		if r < 0x7F {
			return 8
		}
		return 16
	}
	// _UTF16Splitter charges 16 bits for code points encodable in one UTF-16
	// unit and 32 bits for those needing a surrogate pair.
	_UTF16Splitter Splitter = func(r rune) int {
		if (r <= 0xD7FF) || ((r >= 0xE000) && (r <= 0xFFFF)) {
			return 16
		}
		return 32
	}
)

// Len returns the encoded size of input in bytes, rounding the total bit
// count up to a whole byte.
func (fn Splitter) Len(input string) (n int) {
	for _, point := range input {
		n += fn(point)
	}
	if n%8 != 0 {
		n += 8 - n%8 // pad the trailing partial byte
	}
	return n / 8
}

// Split breaks input into segments of at most limit bytes without splitting a
// code point across segments. A code point whose encoded size alone exceeds
// the limit is emitted as its own (oversized) segment; the previous
// implementation looped forever in that case, appending empty segments.
func (fn Splitter) Split(input string, limit int) (segments []string) {
	limitBits := limit * 8
	points := []rune(input)
	start, used := 0, 0
	for i := 0; i < len(points); i++ {
		bits := fn(points[i])
		if used+bits > limitBits {
			if i == start {
				// Single oversized code point: emit it alone so the loop
				// always makes progress.
				segments = append(segments, string(points[i:i+1]))
				start, used = i+1, 0
				continue
			}
			// Flush the full segment; the current point starts the next one.
			segments = append(segments, string(points[start:i]))
			start, used = i, 0
		}
		used += bits
	}
	if start < len(points) {
		segments = append(segments, string(points[start:]))
	}
	return
}
|
// Copyright 2019 The bigfile Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
package http
import "github.com/gin-gonic/gin"
// init puts the package into testing mode and silences gin's debug
// output for the whole test binary.
func init() {
	isTesting = true // package-level flag declared elsewhere in this package
	gin.SetMode(gin.ReleaseMode)
}
|
package objs
import (
"time"
)
// Event types reported by IPAS devices.
const (
	StartupEvent   = 1 // engine startup
	ShockEvent     = 2 // shock / impact
	SpeedingEvent  = 3 // speeding
	ProximityEvent = 4 // proximity
)
// IpasLog is a single device event record as stored and served by the
// API. Field order is significant for JSON marshaling — do not reorder.
type IpasLog struct {
	Ipas                // embedded base device fields (declared elsewhere)
	Date      time.Time `json:"date"`       // when the event occurred
	EventType int       `json:"event_type"` // one of the *Event constants above
	SessionId string    `json:"session_id"`
	Targets   string    `json:"targets"`
	Distance  int       `json:"distance"`
	Ip        uint32    `json:"ip"` // presumably an IPv4 packed into 32 bits — confirm
	RecvDate  time.Time `json:"recv_date"` // when the server received the event
	No        int64     `json:"no"`
	DateAgo   string    `json:"date_ago"` // human-readable relative time
}
// IpasMapLog is the flattened event view used for map display:
// device identity, location, event data, and a display label.
type IpasMapLog struct {
	OrgId     int       `json:"org_id"`
	EquipId   string    `json:"equip_id"`
	GroupId   int       `json:"group_id"`
	EquipType int       `json:"equip_type"`
	Speed     int       `json:"speed"`
	Latitude  string    `json:"latitude"`
	Longitude string    `json:"longitude"`
	OrgName   string    `json:"org_name"`
	GroupName string    `json:"group_name"`
	Date      time.Time `json:"date"`
	EventType int       `json:"event_type"`
	Targets   string    `json:"targets"`
	Distance  int       `json:"distance"`
	Label     string    `json:"text"` // marker text shown on the map
}
// LocTrack is one point in a device's location history.
type LocTrack struct {
	Date      time.Time `json:"date"`
	Latitude  float32   `json:"latitude"`
	Longitude float32   `json:"longitude"`
}
// 시동,충격,과속,근접
//type Ipas struct {
// OrgId int `json:"org_id"`
// EquipId string `json:"equip_id"`
// GroupId int `json:"group_id"`
// EquipType int `json:"equip_type"`
// Speed int `json:"spped"`
// Snr int `json:"snr"`
// Usim string `json:"usim"`
// Latitude float32 `json:"latitude"`
// Longitude float32 `json:"longitude"`
// Created time.Time `json:"created"`
// Updated time.Time `json:"updated"`
// OrgName string `json:"org_name"`
// GroupName string `json:"group_name"`
//}
// IpasFilter holds the query parameters accepted by event-log searches.
type IpasFilter struct {
	PagingFilter          // embedded paging/sort options (declared elsewhere)
	OrgId      []int    `form:"org_id"`
	GroupId    []int    `form:"group_id"`
	EventType  []int    `form:"event_type"`
	EquipType  int      `form:"equip_type"`
	Contents   string   `form:"contents"`
	EquipId    string   `form:"equip_id"`
	TagPattern string   `form:"tag_pattern"`
	StatsMode  bool     `form:"stats_mode"` // statistics mode
	EventMap   string   `form:"event_map"`
}
//
//func NewIpasFilter() *IpasFilter {
// filter := IpasFilter{}
// filter.FastPaging = "on"
// filter.Order = "asc"
// filter.Sort = "equip_id"
// filter.Limit = 99999
// filter.Offset = 0
//
// return &filter
//}
|
package fake
import (
"math/rand"
"github.com/goropikari/psqlittle/core"
)
// ColName generates fake ColName
func ColName() core.ColumnName {
	var col core.ColumnName
	col.TableName = RandString()
	col.Name = RandString()
	return col
}
// Value generates fake Value
func Value() core.Value {
	return core.Value(RandString())
}
// letterBytes is the alphabet RandString draws from.
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"

// RandString generates a random string
func RandString() string {
	const size = 15 // length of random string
	out := make([]byte, 0, size)
	for i := 0; i < size; i++ {
		out = append(out, letterBytes[rand.Intn(len(letterBytes))])
	}
	return string(out)
}
|
package cmd
import (
"fmt"
"os"
"strings"
"github.com/lingrino/vaku/vaku"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// folderMapCmd renders every path under a vault folder as an indented
// text tree: one line per new path segment, indented by its depth, with
// segments already printed for the previous path suppressed.
var folderMapCmd = &cobra.Command{
	Use:   "map [path]",
	Short: "Return a text map of the folder, with subfolders indented by depth",
	Long: `Takes in a path and returns a text map of paths and keys. Useful as a visual representation
of all data in vault.
Example:
vaku folder map secret/foo`,
	Args: cobra.ExactArgs(1),
	Run: func(cmd *cobra.Command, args []string) {
		input := vaku.NewPathInput(args[0])
		input.TrimPathPrefix = true
		list, err := vgc.FolderList(input)
		if err != nil {
			fmt.Printf("%s", errors.Wrapf(err, "Failed to list folder %s", args[0]))
			os.Exit(1)
		} else {
			var output []string
			var prevPS []string // path pieces of the previously emitted path
			var written bool    // true once any piece of the current path was emitted
			// Loop over each return path
			for _, path := range list {
				// Split the path and loop over each piece of the path
				ps := strings.Split(path, "/")
				for psi, word := range ps {
					// Don't write anything if we've already written the "parent" word
					// Once we write one part of a path, we should write all of it
					if len(prevPS) > psi && word == prevPS[psi] && !written {
						continue
					}
					// Unless this is the last word, add a "/" to the output
					if len(ps) != psi+1 {
						word = word + "/"
					}
					output = append(output, strings.Repeat(indentString, psi)+word)
					written = true
				}
				prevPS = ps
				written = false
			}
			// print is presumably a package-level output helper (shadows
			// the builtin) — defined elsewhere in this package.
			print(map[string]interface{}{
				args[0]: output,
			})
		}
	},
}
// init registers the "map" subcommand under the folder command and wires
// its indentation flag into the shared package-level indentString.
func init() {
	folderCmd.AddCommand(folderMapCmd)
	folderMapCmd.Flags().StringVarP(&indentString, "indent-string", "I", "    ", "The string to use for indenting the map")
}
|
// Copyright 2021 Clivern. All rights reserved.
// Use of this source code is governed by the MIT
// license that can be found in the LICENSE file.
package definition
import (
"fmt"
)
// Defaults used to build the httpbin docker-compose service definition.
const (
	// HttpbinService const
	HttpbinService = "httpbin"
	// HttpbinPort const
	HttpbinPort = "80"
	// HttpbinDockerImage const
	HttpbinDockerImage = "kennethreitz/httpbin"
	// HttpbinDockerImageVersion const
	HttpbinDockerImageVersion = "latest"
	// HttpbinRestartPolicy const
	HttpbinRestartPolicy = "unless-stopped"
)
// GetHttpbinConfig gets yaml definition object for a single httpbin
// service named name; an empty version falls back to the default image
// version.
func GetHttpbinConfig(name, version string) DockerComposeConfig {
	if version == "" {
		version = HttpbinDockerImageVersion
	}
	svc := Service{
		Image:   fmt.Sprintf("%s:%s", HttpbinDockerImage, version),
		Restart: HttpbinRestartPolicy,
		Ports:   []string{HttpbinPort},
	}
	return DockerComposeConfig{
		Version:  "3",
		Services: map[string]Service{name: svc},
	}
}
|
package renter
import (
"testing"
"time"
)
// TestEstimateTimeUntilComplete is a unit test that probes
// 'estimateTimeUntilComplete'
func TestEstimateTimeUntilComplete(t *testing.T) {
	t.Parallel()
	// took 100ms for the chunk to become available, using default Skynet EC
	// params
	timeUntilAvail := time.Duration(100 * time.Millisecond)
	minPieces := 1
	numPieces := 10
	timeUntilComplete := estimateTimeUntilComplete(timeUntilAvail, minPieces, numPieces)
	if timeUntilComplete.Milliseconds() != 990 {
		t.Fatal("unexpected", timeUntilComplete)
	}
	// took 120s for the chunk to become available, using default Skynet EC
	// params, expected maxWait to return the maxWait
	timeUntilAvail = time.Duration(120 * time.Second)
	timeUntilComplete = estimateTimeUntilComplete(timeUntilAvail, minPieces, numPieces)
	if timeUntilComplete != maxWaitForCompleteUpload {
		// The estimate is expected to be capped at the package-level
		// maxWaitForCompleteUpload ceiling for very slow chunks.
		t.Fatal("unexpected")
	}
	// took 200ms for the chunk to become available, using default Renter EC
	// params
	timeUntilAvail = time.Duration(200 * time.Millisecond)
	minPieces = 10
	numPieces = 30
	timeUntilComplete = estimateTimeUntilComplete(timeUntilAvail, minPieces, numPieces)
	if timeUntilComplete.Milliseconds() != 440 {
		t.Fatal("unexpected", timeUntilComplete)
	}
	// took 200ms for the chunk to become available, using custom Renter EC
	// params
	timeUntilAvail = time.Duration(200 * time.Millisecond)
	minPieces = 64
	numPieces = 96
	timeUntilComplete = estimateTimeUntilComplete(timeUntilAvail, minPieces, numPieces)
	if timeUntilComplete.Milliseconds() != 110 {
		t.Fatal("unexpected", timeUntilComplete)
	}
}
|
package util
// Mirror activity types.
const (
	MirrorActivityTypeInit        = "init"
	MirrorActivityTypeReconfigure = "reconfigure"
	MirrorActivityTypeLocked      = "locked"
	MirrorActivityTypeUnlocked    = "unlocked"
	MirrorActivityTypeCronSync    = "scheduled-sync"
	MirrorActivityTypeSync        = "sync"
)

// Mirror activity lifecycle statuses.
const (
	MirrorActivityStatusComplete  = "complete"
	MirrorActivityStatusPending   = "pending"
	MirrorActivityStatusExecuting = "executing"
	MirrorActivityStatusFailed    = "failed"
)
|
package gottle
import (
"reflect"
"testing"
"time"
"github.com/adelowo/onecache/filesystem"
)
// TestIp verifies that the IP option installs the given provider on the
// throttler.
func TestIp(t *testing.T) {
	ipProvider := NewRealIP()
	throttler := NewOneCacheThrottler(IP(ipProvider))
	if !reflect.DeepEqual(ipProvider, throttler.ipProvider) {
		// The original raw-string message printed literal `\n` sequences;
		// an interpreted string renders real newlines.
		t.Fatalf("IP providers differ\nExpected %v\nGot %v", ipProvider, throttler.ipProvider)
	}
}
// TestStore verifies that the Store option installs the given cache
// store on the throttler.
func TestStore(t *testing.T) {
	store := filesystem.MustNewFSStore("cache")
	defer store.Flush()
	throttler := NewOneCacheThrottler(Store(store))
	if !reflect.DeepEqual(store, throttler.store) {
		// Fixed: the raw-string message printed literal `\n` sequences.
		t.Fatalf("Cache store differs\nExpected %v\nGot %v", store, throttler.store)
	}
}
// TestKeyGenerator verifies that a custom key generator supplied via the
// KeyGenerator option is actually used for key derivation.
func TestKeyGenerator(t *testing.T) {
	customKeyGenerator := func(ip string) string {
		return "custom-" + ip
	}
	throttler := NewOneCacheThrottler(KeyGenerator(customKeyGenerator))
	ip := "123.456.789.000"
	expected := customKeyGenerator(ip)
	if actual := throttler.keyGenerator(ip); expected != actual {
		// Fixed: the raw-string message printed literal `\n` sequences.
		t.Fatalf("Generated key differs\nExpected %v\nGot %v", expected, actual)
	}
}
// TestThrottleCondition verifies that the ThrottleCondition option
// installs both the interval and the request ceiling.
func TestThrottleCondition(t *testing.T) {
	interval := time.Minute
	maxRequests := 60
	throttler := NewOneCacheThrottler(ThrottleCondition(interval, maxRequests))
	if !reflect.DeepEqual(interval, throttler.interval) {
		// Fixed: the raw-string messages printed literal `\n` sequences.
		t.Fatalf("Interval differs\nExpected %v\nGot %v", interval, throttler.interval)
	}
	if !reflect.DeepEqual(maxRequests, throttler.maxRequests) {
		t.Fatalf("Max requests differ\nExpected %d\nGot %d", maxRequests, throttler.maxRequests)
	}
}
|
package http
import (
"fmt"
_twitter "github.com/dghubble/go-twitter/twitter"
"github.com/dora1998/snail-bot/twitter"
"github.com/gin-gonic/gin"
"net/http"
)
// PostIFTTTWebHookRequest is the JSON payload IFTTT POSTs when a watched
// tweet is created.
type PostIFTTTWebHookRequest struct {
	Text        string `json:"text"`          // full tweet text
	UserName    string `json:"user_name"`     // author's screen name
	LinkToTweet string `json:"link_to_tweet"` // URL of the tweet; the status id is parsed from it
	CreatedAt   string `json:"created_at"`
}
// PostIFTTTWebHook handles tweet notifications relayed by IFTTT: it
// extracts the status id and command text from the payload and hands
// them to the command handler. Any failure aborts with 400 + JSON error.
func (s *Server) PostIFTTTWebHook(c *gin.Context) {
	callbackBody := &PostIFTTTWebHookRequest{}
	// callbackBody is already a pointer; the original passed &callbackBody
	// (a **PostIFTTTWebHookRequest), which worked but was needlessly
	// indirect.
	if err := c.BindJSON(callbackBody); err != nil {
		abortBadRequest(c, err)
		return
	}
	fmt.Printf("%#v\n", callbackBody)
	statusId, err := twitter.ExtractStatusIdFromUrl(callbackBody.LinkToTweet)
	if err != nil {
		abortBadRequest(c, err)
		return
	}
	text, err := twitter.ExtractBody(callbackBody.Text)
	if err != nil {
		abortBadRequest(c, err)
		return
	}
	if err := s.commandHandler.Resolve(text, callbackBody.UserName, statusId); err != nil {
		abortBadRequest(c, err)
		return
	}
	c.JSON(http.StatusOK, nil)
}

// abortBadRequest logs err and aborts the request with a 400 JSON body —
// the error-handling pattern previously repeated four times inline.
func abortBadRequest(c *gin.Context, err error) {
	fmt.Println(err.Error())
	c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
		"error": err.Error(),
	})
}
// GetCRCToken answers Twitter's webhook CRC challenge: it derives the
// response token from the crc_token query parameter via the twitter
// client and returns it as JSON.
func (s *Server) GetCRCToken(c *gin.Context) {
	crcToken := c.Query("crc_token")
	resToken := s.twitterClient.CreateCRCToken(crcToken)
	c.JSON(http.StatusOK, gin.H{"response_token": resToken})
}
// PostTwitterWebHookRequest is the Account Activity webhook payload;
// only tweet-create events are consumed.
type PostTwitterWebHookRequest struct {
	TweetCreateEvents []_twitter.Tweet `json:"tweet_create_events"`
}
// PostWebHook handles Twitter Account Activity webhook deliveries: for
// every new tweet that @-mentions the bot, it extracts the command text
// and dispatches it to the command handler. Processing errors are logged
// but still answered with 200 so Twitter does not retry.
func (s *Server) PostWebHook(c *gin.Context) {
	body := &PostTwitterWebHookRequest{}
	err := c.BindJSON(body)
	if err != nil {
		fmt.Println(err.Error())
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
			"error": err.Error(),
		})
		return
	}
	fmt.Printf("%#v\n", body)
	if len(body.TweetCreateEvents) == 0 {
		c.String(http.StatusOK, "")
		return
	}
	for _, t := range body.TweetCreateEvents {
		// Ignore anything that is not an @-tweet addressed to the bot
		// itself (excludes quote RTs, the bot's own replies, etc.).
		// NOTE(review): the bot's screen name is hard-coded here.
		if t.InReplyToScreenName != "assignment_bot" {
			continue
		}
		statusId := t.ID
		text, err := twitter.ExtractBody(t.Text)
		if err != nil {
			fmt.Println(err.Error())
			c.String(http.StatusOK, "")
			return
		}
		err = s.commandHandler.Resolve(text, t.User.ScreenName, statusId)
		if err != nil {
			fmt.Println(err.Error())
			c.String(http.StatusOK, "")
			return
		}
	}
	c.String(http.StatusOK, "")
}
|
package utils
import "testing"
import "reflect"
// divideIntoBatchesTestData is one table entry for TestDivideIntoBatches.
type divideIntoBatchesTestData struct {
	slice     []uint8   // input slice
	batchSize int       // requested batch size
	expected  [][]uint8 // expected partition (last batch may be short)
}
// TestDivideIntoBatches checks that DivideIntoBatches splits a slice into
// batchSize-sized chunks, with a shorter final chunk when needed.
func TestDivideIntoBatches(t *testing.T) {
	testData := [...]divideIntoBatchesTestData{
		{[]uint8{1, 1, 2, 2, 3, 3, 4}, 2, [][]uint8{{1, 1}, {2, 2}, {3, 3}, {4}}},
		{[]uint8{1, 1, 2, 2, 3, 3, 4, 4}, 2, [][]uint8{{1, 1}, {2, 2}, {3, 3}, {4, 4}}},
		{[]uint8{1, 2, 3, 4}, 1, [][]uint8{{1}, {2}, {3}, {4}}},
		// {[]uint8{1, 2, 3, 4}, 0, [][]uint8{}},
	}
	for i := range testData {
		result := DivideIntoBatches(testData[i].slice, testData[i].batchSize)
		if !reflect.DeepEqual(result, testData[i].expected) {
			// Fixed typos in the failure message ("Devide", "expetcted").
			t.Errorf("DivideIntoBatches result %v expected %v", result, testData[i].expected)
		}
	}
}
// TestInverseMap checks the happy path, and that InverseMap panics when
// two keys share a value (the inverse mapping would lose an entry).
func TestInverseMap(t *testing.T) {
	{
		result := InverseMap(map[string]int{"one": 1, "two": 2, "three": 3})
		expected := map[int]string{1: "one", 2: "two", 3: "three"}
		if !reflect.DeepEqual(result, expected) {
			t.Errorf("Test inverse map result %v expected %v", result, expected)
		}
	}
	{
		// The deferred recover swallows the panic InverseMap is expected to
		// raise on duplicate values; if it instead returns normally, the
		// Errorf below executes and fails the test.
		defer func() { recover() }()
		InverseMap(map[string]int{"one": 1, "two": 1, "three": 3})
		t.Errorf("Double values in map")
	}
}
// TestFilterMap checks that FilterMap drops the listed keys and leaves
// all other entries intact, including the empty-removal and empty-map
// edge cases.
func TestFilterMap(t *testing.T) {
	cases := []struct {
		input    map[string]int
		toRemove []string
		expected map[string]int
	}{
		{
			map[string]int{"one": 1, "two": 2, "three": 3, "four": 4},
			[]string{"two", "three"},
			map[string]int{"one": 1, "four": 4},
		},
		{
			map[string]int{"one": 1, "two": 2, "three": 3, "four": 4},
			[]string{},
			map[string]int{"one": 1, "two": 2, "three": 3, "four": 4},
		},
		{
			map[string]int{},
			[]string{"two", "three"},
			map[string]int{},
		},
	}
	for _, c := range cases {
		if result := FilterMap(c.input, c.toRemove); !reflect.DeepEqual(result, c.expected) {
			t.Errorf("Test filter map result %v expected %v", result, c.expected)
		}
	}
}
|
package ch01
import (
"sort"
"testing"
)
func TestEx15(t *testing.T) {
for _, c := range []struct {
in []int
x int
want []int
}{
{in: []int{0, 1, 2}, x: 3, want: []int{1,2}},
{in: []int{2, 1, 0}, x: 3, want: []int{1,2}},
{in: []int{2, 0, 1}, x: 3, want: []int{1,2}},
{in: []int{0, 1, 3, 3, 4, 5, 6, 7, 8, 2}, x: 3, want: []int{1,2}},
} {
intComparer := func(a []int, b []int) bool {
if len(a) != len(b) {
return false
}
for i, v := range a {
if v != b[i] {
return false
}
}
return true
}
got := Find2Sum(c.in, c.x)
// make it a bit more testable
sort.Ints(got)
if !intComparer(got, c.want) {
t.Errorf("Find2Sum(%v, %d) == %v want %v", c.in, c.x, got, c.want)
}
}
} |
package main
// escreva um programa que crie um loop utilizando a sintax for{}
// Utilizeo para demonstrar os anos que voce nasceu até hoje
import "fmt"
// main demonstrates the bare `for {}` loop form (per the exercise above)
// by printing every year from the birth year through the final year.
func main() {
	birthYear := 1980
	finalYear := 2020
	for {
		if birthYear > finalYear {
			break // loop exits only via this guard
		}
		fmt.Println(birthYear)
		birthYear++
	}
}
|
package p2exec
import (
"strings"
"testing"
)
// TestBuildWithArgs verifies CommandLine() output for a minimal argument
// set and for one with every option populated.
func TestBuildWithArgs(t *testing.T) {
	check := func(args P2ExecArgs, expected string) {
		if actual := strings.Join(args.CommandLine(), " "); actual != expected {
			t.Errorf("Expected args.BuildWithArgs() to return '%s', was '%s'", expected, actual)
		}
	}
	check(P2ExecArgs{
		Command: []string{"script"},
	}, "script")
	check(P2ExecArgs{
		Command:          []string{"script"},
		NoLimits:         true,
		User:             "some_user",
		EnvDirs:          []string{"some_dir", "other_dir"},
		ExtraEnv:         map[string]string{"FOO": "BAR"},
		CgroupConfigName: "some_cgroup_config_name",
		CgroupName:       "cgroup_name",
		RequireFile:      "require_file",
	}, "-n -u some_user -e some_dir -e other_dir --extra-env FOO=BAR -l some_cgroup_config_name -c cgroup_name --require-file require_file -- script")
}
|
package main
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/feature/s3/manager"
"github.com/aws/aws-sdk-go-v2/service/ec2"
"github.com/aws/aws-sdk-go-v2/service/s3"
"log"
"os"
"strings"
"time"
)
// Shared AWS clients and run-scoped state, populated by init.
var client *ec2.Client
var s3client *s3.Client
var azID map[string]string // AZ id (e.g. "use1-az1") -> AZ name
var now string             // RFC3339 timestamp of this run; also the CSV filename stem
var outputFileStr string   // accumulated CSV contents
// init builds the shared EC2/S3 clients (us-west-2), resolves the AZ-id
// to AZ-name mapping for every region of interest, and seeds the CSV
// header for this run.
func init() {
	cfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion("us-west-2"))
	if err != nil {
		log.Fatal(err)
	}
	client = ec2.NewFromConfig(cfg)
	s3client = s3.NewFromConfig(cfg)
	azID = make(map[string]string)
	regionNames := []string{"us-east-1", "us-east-2", "us-west-1", "us-west-2"}
	for _, region := range regionNames {
		regionCfg, err := config.LoadDefaultConfig(context.TODO(), config.WithRegion(region))
		if err != nil {
			// BUG FIX: this error was previously ignored — the config was
			// used before err was overwritten by the next call.
			log.Fatal(err)
		}
		c := ec2.NewFromConfig(regionCfg)
		input := ec2.DescribeAvailabilityZonesInput{}
		output, err := c.DescribeAvailabilityZones(context.TODO(), &input)
		if err != nil {
			panic(err)
		}
		for _, az := range output.AvailabilityZones {
			azID[aws.ToString(az.ZoneId)] = aws.ToString(az.ZoneName)
		}
	}
	now = fmt.Sprint(time.Now().Format(time.RFC3339))
	outputFileStr = "time,region,instance,azID,azName,capacity,single,score\n"
}
// spotPlacementScoresCase is one GetSpotPlacementScores query to run.
type spotPlacementScoresCase struct {
	instanceTypes          []string // instance types scored together
	regionNames            []string // candidate regions
	targetCapacity         int32    // number of instances requested
	singleAvailabilityZone bool     // score per-AZ instead of per-region
	name                   string   // label printed while running
}

// spotPlacementScoresCases is filled by main before execution.
var spotPlacementScoresCases []spotPlacementScoresCase
// querySpotPlacementScores runs one GetSpotPlacementScores call and
// appends a CSV row per returned score to the package-level outputFileStr.
func querySpotPlacementScores(targetCapacity int32, instanceTypes []string, regionNames []string, singleAvailabilityZone bool) {
	input := ec2.GetSpotPlacementScoresInput{
		TargetCapacity:         aws.Int32(targetCapacity),
		InstanceTypes:          instanceTypes,
		RegionNames:            regionNames,
		SingleAvailabilityZone: aws.Bool(singleAvailabilityZone),
	}
	output, err := client.GetSpotPlacementScores(context.TODO(), &input)
	if err != nil {
		panic(err)
	}
	for _, result := range output.SpotPlacementScores {
		// outputFileStr = "time,region,instance,azID,azName,capacity,single,score\n"
		// Instance types are joined with "+" so a multi-type query stays a
		// single CSV column.
		s := fmt.Sprintf("%s,%s,%s,%s,%s,%d,%v,%d\n",
			now,
			aws.ToString(result.Region), strings.Join(instanceTypes, "+"),
			aws.ToString(result.AvailabilityZoneId), azID[aws.ToString(result.AvailabilityZoneId)],
			targetCapacity, singleAvailabilityZone, aws.ToInt32(result.Score))
		outputFileStr += s
	}
}
// Query space: instance types, regions, AZ modes and capacities that
// main combines into the case list.
// var GPUInstanceTypes = []string{"p2.xlarge", "p2.8xlarge", "p2.16xlarge", "p3.2xlarge", "p3.8xlarge", "p3.16xlarge", "p3dn.24xlarge", "p4d.24xlarge"}
var GPUInstanceTypes = []string{"p2.xlarge", "p2.8xlarge", "p3.2xlarge", "p3.8xlarge"}
var regionNames = []string{"us-east-1", "us-east-2", "us-west-1", "us-west-2"}
var singleAvailabilityZoneRange = []bool{true, false}
var targetCapacityRange = []int32{1, 2, 4}
// saveResult writes the accumulated CSV both to a local file named after
// the run timestamp and to the S3 bucket named by the "bucket" env var.
func saveResult() {
	filename := now + ".csv"
	file, err := os.Create(filename)
	if err != nil {
		panic(err)
	}
	defer file.Close()
	_, err = file.WriteString(outputFileStr)
	if err != nil {
		panic(err)
	}
	uploader := manager.NewUploader(s3client)
	_, err = uploader.Upload(context.TODO(), &s3.PutObjectInput{
		Bucket: aws.String(os.Getenv("bucket")),
		Key:    aws.String(filename),
		Body:   strings.NewReader(outputFileStr),
	})
	if err != nil {
		panic(err)
	}
}
// main enumerates every spot-placement-score query to run — all GPU
// types scored together at several capacities, then each type
// individually across capacities and AZ modes — executes them, and
// uploads the accumulated CSV.
func main() {
	// Fixed cases: all GPU types together, single-AZ, capacities 1/2/4.
	spotPlacementScoresCases = []spotPlacementScoresCase{
		{
			instanceTypes:          GPUInstanceTypes,
			regionNames:            regionNames,
			targetCapacity:         int32(1),
			singleAvailabilityZone: true,
			name:                   "all_1_true",
		},
		{
			instanceTypes:          GPUInstanceTypes,
			regionNames:            regionNames,
			targetCapacity:         int32(2),
			singleAvailabilityZone: true,
			name:                   "all_2_true",
		},
		{
			instanceTypes:          GPUInstanceTypes,
			regionNames:            regionNames,
			targetCapacity:         int32(4),
			singleAvailabilityZone: true,
			name:                   "all_4_true",
		},
	}
	// Cartesian product: each instance type x AZ mode x capacity.
	for _, instance := range GPUInstanceTypes {
		for _, single := range singleAvailabilityZoneRange {
			for _, capacity := range targetCapacityRange {
				spotPlacementScoresCases = append(spotPlacementScoresCases, spotPlacementScoresCase{
					instanceTypes:          []string{instance},
					regionNames:            regionNames,
					targetCapacity:         capacity,
					singleAvailabilityZone: single,
					name:                   fmt.Sprintf("%s_%d_%v", instance, capacity, single),
				})
			}
		}
	}
	for _, spsCase := range spotPlacementScoresCases {
		fmt.Println(spsCase.name)
		querySpotPlacementScores(spsCase.targetCapacity, spsCase.instanceTypes, spsCase.regionNames, spsCase.singleAvailabilityZone)
	}
	saveResult()
}
|
package testutils
import "testing"
// ASSERT_STREQ fails the test immediately when actual != expected.
func ASSERT_STREQ(t *testing.T, actual string, expected string) {
	if expected != actual {
		// t.Fatalf already marks the test failed and stops it; the extra
		// t.Fail() the original called first was redundant and made this
		// helper inconsistent with its siblings.
		t.Fatalf("expected=%s, got=%s", expected, actual)
	}
}
// ASSERT_EQ fails the test immediately when the two ints differ.
func ASSERT_EQ(t *testing.T, actual int, expected int) {
	if expected == actual {
		return
	}
	t.Fatalf("expected=%d, got=%d", expected, actual)
}
// ASSERT_FLOAT32_EQ fails the test immediately unless the two float32
// values are exactly equal (no tolerance; see ASSERT_NEAR for that).
func ASSERT_FLOAT32_EQ(t *testing.T, actual float32, expected float32) {
	if expected == actual {
		return
	}
	t.Fatalf("expected=%e, got=%e", expected, actual)
}
// ASSERT_FLOAT64_EQ fails the test immediately unless the two float64
// values are exactly equal (no tolerance; see ASSERT_NEAR for that).
func ASSERT_FLOAT64_EQ(t *testing.T, actual float64, expected float64) {
	if expected == actual {
		return
	}
	t.Fatalf("expected=%e, got=%e", expected, actual)
}
// ASSERT_NEAR fails the test unless |expected - actual| < abs_error.
// NaN inputs always fail, as do non-positive tolerances.
func ASSERT_NEAR(t *testing.T, actual float64, expected float64, abs_error float64) {
	diff := expected - actual
	if diff < 0 {
		diff = -diff
	}
	if !(diff < abs_error) {
		t.Fatalf("expected=%e, got=%e", expected, actual)
	}
}
// ASSERT_TRUE fails the test immediately when condition is false.
func ASSERT_TRUE(t *testing.T, condition bool) {
	if condition {
		return
	}
	t.Fatalf("We got False, but want True.")
}
// ASSERT_FALSE fails the test immediately when condition is true.
func ASSERT_FALSE(t *testing.T, condition bool) {
	if !condition {
		return
	}
	t.Fatalf("We got True, but want False.")
}
|
package main
import (
"github.com/iotaledger/hive.go/node"
"github.com/iotaledger/wasp/packages/parameters"
_ "github.com/iotaledger/wasp/packages/vm/sandbox"
"github.com/iotaledger/wasp/plugins/banner"
"github.com/iotaledger/wasp/plugins/chains"
"github.com/iotaledger/wasp/plugins/cli"
"github.com/iotaledger/wasp/plugins/config"
"github.com/iotaledger/wasp/plugins/dashboard"
"github.com/iotaledger/wasp/plugins/database"
"github.com/iotaledger/wasp/plugins/dispatcher"
"github.com/iotaledger/wasp/plugins/dkg"
"github.com/iotaledger/wasp/plugins/globals"
"github.com/iotaledger/wasp/plugins/gracefulshutdown"
"github.com/iotaledger/wasp/plugins/logger"
"github.com/iotaledger/wasp/plugins/nodeconn"
"github.com/iotaledger/wasp/plugins/peering"
"github.com/iotaledger/wasp/plugins/publisher"
"github.com/iotaledger/wasp/plugins/registry"
"github.com/iotaledger/wasp/plugins/testplugins/nodeping"
"github.com/iotaledger/wasp/plugins/wasmtimevm"
"github.com/iotaledger/wasp/plugins/webapi"
"go.dedis.ch/kyber/v3/pairing"
)
// main assembles and runs the Wasp node. A single BN256 pairing suite is
// shared by every crypto-dependent plugin; flags are registered before
// the node starts so the config plugin can parse them.
// NOTE: plugin order matters — config/logger initialize before the
// plugins that consume them.
func main() {
	suite := pairing.NewSuiteBn256() // TODO: [KP] Single suite should be used in all the places.
	registry.InitFlags()
	parameters.InitFlags()
	plugins := node.Plugins(
		banner.Init(),
		config.Init(),
		logger.Init(),
		gracefulshutdown.Init(),
		webapi.Init(),
		cli.Init(),
		database.Init(),
		registry.Init(suite),
		peering.Init(suite),
		dkg.Init(suite),
		nodeconn.Init(),
		dispatcher.Init(),
		chains.Init(),
		publisher.Init(),
		dashboard.Init(),
		wasmtimevm.Init(),
		globals.Init(),
	)
	testPlugins := node.Plugins(
		nodeping.Init(),
	)
	node.Run(
		plugins,
		testPlugins,
	)
}
|
package main
import "fmt"
// main demonstrates lexical scoping: x is visible inside the inner
// block, while the inner block's own variable is not visible outside it.
func main() {
	x := "Shikamaru Nara"
	fmt.Println(x)
	{
		fmt.Println(x) // outer variable still in scope here
		quote := "Laziness is the mother of all bad habits, but ultimately she is a mother and we should respect her"
		fmt.Println(quote)
	}
	// fmt.Println(quote) // outside the scope of quote
}
// Shikamaru Nara
// Shikamaru Nara
// Laziness is the mother of all bad habits, but ultimately she is a mother and we should respect her
|
package main
import (
"fmt"
"unicode"
"bytes"
"strings"
)
// LetterChanges shifts every letter byte to the next one ('z' wraps to
// 'a', 'Z' to 'A') and uppercases any vowel (a/e/i/o/u) in the result;
// all other bytes pass through unchanged. Operates byte-by-byte, so it
// is only meaningful for ASCII input.
func LetterChanges(str string) string {
	var out bytes.Buffer
	for _, b := range []byte(str) {
		ch := string(b)
		if unicode.IsLetter(rune(b)) {
			switch ch {
			case "z":
				ch = "a"
			case "Z":
				ch = "A"
			default:
				ch = string(b + 1)
			}
		}
		if strings.Contains("aeiou", ch) {
			ch = strings.ToUpper(ch)
		}
		out.WriteString(ch)
	}
	return out.String()
}
// main runs LetterChanges over the Coderbyte sample inputs, printing the
// result next to the expected answer for eyeballing.
func main() {
	cases := []struct{ in, note string }{
		{"abcdz", "The correct answer is bcdEA"},
		{"hello world", "The correct answer is Ifmmp xpsmE"},
		{"sentence", "The correct answer is tfOUfOdf"},
		{"replace!*", "The correct answer is sfqmbdf!*"},
		{"coderbyte", "The correct answer is dpEfsczUf"},
		{"beautiful^", "The correct answer is cfbvUjgvm^"},
		{"oxford", "The correct answer is pygpsE"},
		{"123456789ae", "The correct answer is 123456789bf"},
		{"this long cake@&", "The correct answer is UIjt mpOh dblf@&"},
		{"a b c dee", "The correct answer is b c d Eff"},
		{"a confusing /:sentence:/[ this is not!!!!!!!~", "The correct answer is b dpOgvtjOh /:tfOUfOdf:/[ UIjt jt OpU!!!!!!!~"},
	}
	for _, c := range cases {
		fmt.Println(LetterChanges(c.in), c.note)
	}
}
|
package assembler
import (
"log"
"golang.org/x/sys/cpu"
)
// CPU feature flags detected at startup; see init below.
var useAVX2, useAVX, useSSE4 bool

// init probes the x86 CPU features, installs the optimized function
// implementations, and logs what was detected.
func init() {
	useSSE4 = cpu.X86.HasSSE41
	useAVX = cpu.X86.HasAVX
	useAVX2 = cpu.X86.HasAVX2
	Init(true)
	log.Printf("SSE4: %v", useSSE4)
	log.Printf("AVX: %v", useAVX)
	log.Printf("AVX2: %v", useAVX2)
}
// logging toggles package logging (not referenced in this chunk).
var logging bool = true

// Isamax and Ismax are function pointers bound by Init to either the
// assembly-optimized or pure-Go implementations.
// NOTE(review): semantics inferred from the BLAS-style names (index of
// max absolute value / max value) — confirm against the implementations.
var Isamax func(x []float32) int
var Ismax func(x []float32) int

// Init selects the assembly implementations when optimize is true,
// otherwise the pure-Go fallbacks.
func Init(optimize bool) {
	if optimize {
		Isamax = isamax_asm
		Ismax = ismax_asm
	} else {
		Isamax = isamax
		Ismax = ismax
	}
}
|
package testutil
import (
"reflect"
"sort"
)
// ints1k is a deterministic pseudo-shuffled input of 1000 ints;
// ints1ksorted is its sorted form, used as the expected result.
var ints1k [1000]int
var ints1ksorted [1000]int

// init fills both arrays: XOR with 0x2cc scrambles the sequence 0..999
// deterministically, then the copy is sorted for comparisons.
func init() {
	for i := 0; i < 1000; i++ {
		ints1k[i] = i ^ 0x2cc
		ints1ksorted[i] = ints1k[i]
	}
	sort.Ints(ints1ksorted[:])
}
// InputInts returns a small fixed unsorted slice — with duplicates,
// zeros and negatives — for exercising sort implementations.
func InputInts() []int {
	return []int{
		74, 59, 238, -784, 9845, 959, 905,
		0, 0, 42, 7586, -5467984, 7586,
	}
}
// InputInts1k returns the 1000-element scrambled input as a slice.
// Note: the slice aliases the package array, so callers share mutations.
func InputInts1k() []int {
	return ints1k[:]
}
// IsSortedInts reports whether ints equals the sorted form of
// InputInts. Despite the name it compares against that one fixed
// expected slice, not general sortedness.
func IsSortedInts(ints []int) bool {
	want := []int{-5467984, -784, 0, 0, 42, 59, 74, 238, 905, 959, 7586, 7586, 9845}
	return reflect.DeepEqual(want, ints)
}
// IsSortedInts1k reports whether ints equals the pre-sorted copy of the
// 1000-element fixture.
func IsSortedInts1k(ints []int) bool {
	return reflect.DeepEqual(ints, ints1ksorted[:])
}
|
package httpmanager
import (
"encoding/json"
"net/http"
)
// infoInputBody is the (currently empty) request body for InfoHandler.
type infoInputBody struct {
}

// infoOutputBody is the JSON response of InfoHandler.
type infoOutputBody struct {
	Used        int32 `json:"used"`
	Total       int32 `json:"total"`
	UnitRequest int32 `json:"unitRequest"`
	UnitPrice   int32 `json:"unitPrice"`
}
// InfoHandler returns the basic info of the cloud as JSON. Only POST is
// accepted; the response body is currently all zero values.
func (m *Manager) InfoHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPost {
		// 405 is the correct status for an unsupported method (was 400).
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
		return
	}
	// var req priceInputBody
	// err := json.NewDecoder(r.Body).Decode(&req)
	// if err != nil {
	//	http.Error(w, err.Error(), http.StatusBadRequest)
	//	return
	// }
	res := infoOutputBody{}
	jsonRet, err := json.Marshal(res)
	if err != nil {
		// A marshal failure is a server-side problem, not a bad request.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json;charset=utf-8")
	w.Write(jsonRet)
	// The trailing bare `return` the original had here was redundant.
}
|
package lib
import (
"fmt"
"github.com/go-piv/piv-go/piv"
"golang.org/x/crypto/ssh/terminal"
"strings"
)
// AskPin prompts on stdout and reads the PIN from the terminal on fd 0
// (stdin) without echoing it; surrounding whitespace is stripped.
func AskPin() (string, error) {
	fmt.Print("Enter PIN: ")
	pin, err := terminal.ReadPassword(0)
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(pin)), nil
}
// GetYubikey opens the first connected smart card whose name contains
// "yubikey". It returns the device, a cleanup func that closes it
// (always safe to call, even on failure), and an error.
//
// BUG FIX: previously, when no YubiKey was attached this returned
// (nil, close, nil) — a nil device with a nil error — which callers could
// not distinguish from success. That case now returns an explicit error.
func GetYubikey() (*piv.YubiKey, func(), error) {
	closer := func() {} // renamed from `close`, which shadowed the builtin
	cards, err := piv.Cards()
	if err != nil {
		return nil, closer, err
	}
	for _, card := range cards {
		if !strings.Contains(strings.ToLower(card), "yubikey") {
			continue
		}
		yk, err := piv.Open(card)
		if yk != nil {
			// Only install a real closer for a successfully opened device;
			// the original would call Close on a nil YubiKey if Open failed.
			closer = func() {
				if cerr := yk.Close(); cerr != nil {
					fmt.Printf("closing yubikey: %v\n", cerr)
				}
			}
		}
		return yk, closer, err
	}
	return nil, closer, fmt.Errorf("no yubikey found among %d smart card(s)", len(cards))
}
|
/*
This art-app is purely used to generate the HTML file
*/
package main
// Expects blockartlib.go to be in the ../blockartlib/ dir, relative to
// this art-app.go file
import (
"crypto/x509"
"encoding/hex"
"encoding/json"
"fmt"
"html/template"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
"./blockartlib"
)
////// TYPES FOR THE WEBSERVER ///////

// AddRequest is the JSON body POSTed by the web UI to add a shape.
type AddRequest struct {
	Fill   string `json:"fill"`
	Stroke string `json:"stroke"`
	Path   string `json:"path"`
}

// AddResponse is returned to the web UI after a successful AddShape.
type AddResponse struct {
	SVGString    string `json:"svg-string"`
	InkRemaining uint32 `json:"ink-remaining"`
	ShapeHash    string `json:"shape-hash"`
	BlockHash    string `json:"block-hash"`
}

// HistoryResponse lists previously drawn SVG paths.
type HistoryResponse struct {
	Paths []string `json:"paths"`
}
////// END OF TYPES FOR THE WEBSERVER ///////
// main wires up the blockart web app:
//  1. reads the miner address and private key from local config files,
//  2. opens the canvas once to dump the current blockchain as HTML,
//  3. reopens it and serves a small HTTP UI for adding shapes on :8888.
func main() {
	// Read file content and cast to string
	ipPortBytes, err := ioutil.ReadFile("./ip-ports.txt")
	checkError(err)
	ipPortString := string(ipPortBytes[:])
	keyPairsBytes, err := ioutil.ReadFile("./key-pairs.txt")
	checkError(err)
	keyPairsString := string(keyPairsBytes[:])
	// Parse ip-port and privKey from content string
	minerAddr := strings.Split(ipPortString, "\n")[0]
	privKeyString := strings.Split(keyPairsString, "\n")[0]
	privKeyBytes, err := hex.DecodeString(privKeyString)
	checkError(err)
	privKey, err := x509.ParseECPrivateKey(privKeyBytes)
	checkError(err)
	// Open a canvas.
	canvas, settings, err := blockartlib.OpenCanvas(minerAddr, *privKey)
	fmt.Println("OpenCanvas")
	checkError(err)
	fmt.Println(canvas)
	fmt.Println(settings)
	generateHTML(canvas, settings)
	// Close the canvas.
	ink1, err := canvas.CloseCanvas()
	fmt.Println("CloseCanvas")
	checkError(err)
	// BUG FIX: fmt.Println("%d", ink1) printed the literal "%d".
	fmt.Printf("%d\n", ink1)
	// Reopen canvas to poll for blockchain
	canvas, _, err = blockartlib.OpenCanvas(minerAddr, *privKey)
	// BUG FIX: the reopen error was previously never checked.
	checkError(err)
	handler := func(w http.ResponseWriter, r *http.Request) {
		if r.Method == "GET" {
			tpl, err := template.ParseFiles("blockart.html", "paths.html")
			if err != nil {
				fmt.Println(err.Error())
				http.Error(w, err.Error(), 500)
				// BUG FIX: return instead of executing a nil template below.
				return
			}
			err = tpl.ExecuteTemplate(w, "blockart.html", nil)
			if err != nil {
				fmt.Println(err.Error())
				http.Error(w, err.Error(), 500)
			}
		} else if r.Method == "POST" {
			var addReq AddRequest
			err = json.NewDecoder(r.Body).Decode(&addReq)
			if err != nil {
				fmt.Println(err.Error())
				fmt.Println("Error Marshalling/Decoding")
				http.Error(w, err.Error(), 500)
				return
			}
			shapeHash, blockHash, ink, err := canvas.AddShape(4, blockartlib.PATH, addReq.Path, addReq.Fill, addReq.Stroke)
			if err != nil {
				fmt.Println(err.Error())
				http.Error(w, err.Error(), 500)
				return
			}
			svgString, err := canvas.GetSvgString(shapeHash)
			if err != nil {
				fmt.Println(err.Error())
				http.Error(w, err.Error(), 500)
				return
			}
			addResp := AddResponse{
				SVGString:    svgString,
				InkRemaining: ink,
				ShapeHash:    shapeHash,
				BlockHash:    blockHash}
			resp, err := json.Marshal(addResp)
			// BUG FIX: the marshal error was silently ignored.
			if err != nil {
				fmt.Println(err.Error())
				http.Error(w, err.Error(), 500)
				return
			}
			w.Header().Set("Content-Type", "application/json")
			w.Write(resp)
			// w.WriteHeader(http.StatusOK)
		} else {
			w.WriteHeader(http.StatusBadRequest)
		}
	}
	// Serve the html file if its a GET
	http.HandleFunc("/", handler)
	// log.Fatal never returns, so the empty `for {}` spin loop the
	// original had after this line was unreachable and has been removed.
	log.Fatal(http.ListenAndServe(":8888", nil))
	// go func(canvas blockartlib.Canvas) {
	// 	for {
	// 		reader := bufio.NewReader(os.Stdin)
	// 		fmt.Println("For AddShape: ADD,[PATH],[FILL],[STROKE],[PATH|CIRCLE]")
	// 		fmt.Println("For DeleteShape: DELETE,[SHAPEHASH]")
	// 		fmt.Print("Enter text: ")
	// 		text, _ := reader.ReadString('\n')
	// 		fmt.Println(text)
	// 		tokens := strings.Split(text, ",")
	// 		OPTYPE := tokens[0]
	// 		fmt.Printf("Tokens: %+v\n", tokens)
	// 		validateNum := uint8(4)
	// 		if OPTYPE == "ADD" {
	// 			path := tokens[1]
	// 			fill := tokens[2]
	// 			stroke := tokens[3]
	// 			var pathType blockartlib.ShapeType
	// 			if tokens[4] == "PATH" {
	// 				pathType = blockartlib.PATH
	// 			} else if tokens[4] == "CIRCLE" {
	// 				pathType = blockartlib.CIRCLE
	// 			} else {
	// 				continue
	// 			}
	// 			fmt.Println("Adding from command line: ")
	// 			shapeHash, blockHash, inkRemaining, err := canvas.AddShape(validateNum, pathType, path, fill, stroke)
	// 			fmt.Println("Adding completed: ")
	// 			if err != nil {
	// 				checkError(err)
	// 			} else {
	// 				fmt.Println("Add is: %s, BlockHash: %s, InkRemaining: %d", shapeHash, blockHash, inkRemaining)
	// 			}
	// 		} else if OPTYPE == "DELETE" {
	// 			shapeHash := tokens[1]
	// 			fmt.Println("Deleting from command line: ")
	// 			_, err := canvas.DeleteShape(validateNum, shapeHash)
	// 			fmt.Println("Deleting completed: ")
	// 			if err != nil {
	// 				checkError(err)
	// 			}
	// 		}
	// 	}
	// }(canvas)
}
// checkError prints err to stderr when non-nil. It does NOT abort the
// program; callers continue after a failure.
func checkError(err error) {
	if err == nil {
		return
	}
	fmt.Fprintln(os.Stderr, "Error ", err.Error())
}
// getLongestBlockchain recursively returns the longest chain of block
// hashes starting at currBlockHash: the current hash followed by the
// longest chain among its children.
func getLongestBlockchain(currBlockHash string, canvas blockartlib.Canvas) []string {
	// Add current block hash to longest chain
	longestBlockchain := []string{}
	longestBlockchain = append(longestBlockchain, currBlockHash)
	// Iterate through children of current block if any exist,
	// Adding the longest of them all to the longest blockchain
	children, err := canvas.GetChildren(currBlockHash)
	checkError(err)
	longestChildBlockchain := []string{}
	for _, child := range children {
		childBlockchain := getLongestBlockchain(child, canvas)
		if len(childBlockchain) > len(longestChildBlockchain) {
			longestChildBlockchain = childBlockchain
		}
	}
	return append(longestBlockchain, longestChildBlockchain...)
}
// generateHTML renders the SVG strings found on the longest blockchain
// of canvas into ./art-app.html (a complete HTML document) and
// ./paths.html (bare SVG strings, one per line).
func generateHTML(canvas blockartlib.Canvas, settings blockartlib.CanvasSettings) {
	// Create a blank HTML file.
	HTML, err := os.Create("./art-app.html")
	checkError(err)
	dir, err := os.Getwd()
	// BUG FIX: the Getwd error was silently discarded.
	checkError(err)
	// BUG FIX: Println does not interpret format verbs; use Printf.
	fmt.Printf("Currently working directory is: %s\n", dir)
	pathsHTML, err := os.Create("./paths.html")
	checkError(err)
	defer HTML.Close()
	defer pathsHTML.Close()

	// Append starting HTML tags.
	pre := []byte("<!DOCTYPE html>\n<html>\n<head>\n\t<title>HTML SVG Output</title>\n</head>\n")
	bodyString := fmt.Sprintf("<body>\n\t<svg xmlns=\"http://www.w3.org/2000/svg\" width=\"%d\" height=\"%d\" version=\"1.1\">\n", settings.CanvasXMax, settings.CanvasYMax)
	HTML.Write(pre)
	HTML.Write([]byte(bodyString))

	// Get the longest blockchain: start with the genesis block and
	// recursively extend the chain.
	gHash, err := canvas.GetGenesisBlock()
	fmt.Println("GetGenesisBlock")
	checkError(err)
	blockchain := getLongestBlockchain(gHash, canvas)

	// Add the HTML SVG string of each operation in the blockchain.
	fmt.Println("GetShapes")
	for _, bHash := range blockchain {
		sHashes, err := canvas.GetShapes(bHash)
		checkError(err)
		for _, sHash := range sHashes {
			HTMLSVGString, err := canvas.GetSvgString(sHash)
			// Expect to see an InvalidShapeHashError when a shape was
			// deleted; the art-node cannot tell strictly by shapeHash.
			if err != nil {
				fmt.Println("Error in svg string")
				break
			}
			fmt.Println("Writing to paths.HTML")
			HTML.Write([]byte("\t\t" + HTMLSVGString + "\n"))
			pathsHTML.Write([]byte(HTMLSVGString + "\n"))
		}
	}

	// Append ending HTML tags.
	HTML.Write([]byte("\t</svg>\n</body>\n</html>\n"))
}
|
package main
import (
"fmt"
"image"
"image/color"
"image/draw"
"image/jpeg"
"os"
"path/filepath"
"neilpa.me/phace"
)
// OutlineFaces creates a new image, drawing a border around the faces.
// Dumps the resulting image in dir with the same basename as the
// original. Best way to check how things are actually working...
func OutlineFaces(s *phace.Session, p *phace.Photo, faces []*phace.Face, dir string) error {
	src, err := s.Image(p)
	if err != nil {
		return err
	}
	// Need to create a mutable version of the image.
	bounds := src.Bounds()
	dst := image.NewRGBA(bounds)
	draw.Draw(dst, bounds, src, image.ZP, draw.Src)
	border := 10
	for _, f := range faces {
		// Convert to pixel coords, size is aligned to smaller dimension.
		width, height := float64(bounds.Dx()), float64(bounds.Dy())
		// Trial and error suggests f.Size is the radius about the center
		// of the face. A box is close enough for validating.
		radius := int(f.Size * width) // TODO Round?
		if height < width {
			radius = int(f.Size * height)
		}
		center := makePoint(f.CenterX, f.CenterY, bounds, p.Orientation)
		min := image.Pt(center.X-radius, center.Y-radius)
		max := image.Pt(center.X+radius, center.Y+radius)
		top := image.Rect(min.X-border, min.Y-border, max.X+border, min.Y)
		bot := image.Rect(min.X-border, max.Y, max.X+border, max.Y+border)
		left := image.Rect(min.X-border, min.Y, min.X, max.Y)
		right := image.Rect(max.X, min.Y, max.X+border, max.Y)
		draw.Draw(dst, top, blue, image.ZP, draw.Src)
		draw.Draw(dst, bot, green, image.ZP, draw.Src)
		draw.Draw(dst, left, red, image.ZP, draw.Src)
		draw.Draw(dst, right, gray, image.ZP, draw.Src)
		drawDot(dst, f.LeftEyeX, f.LeftEyeY, bounds, p.Orientation, blue)
		drawDot(dst, f.RightEyeX, f.RightEyeY, bounds, p.Orientation, red)
		drawDot(dst, f.MouthX, f.MouthY, bounds, p.Orientation, green)
		drawDot(dst, f.CenterX, f.CenterY, bounds, p.Orientation, black)
	}
	// Dump the files on disk for inspection.
	// BUG FIX: the output file lives under dir, so dir (not a hard-coded
	// "out") must be created first or Create fails for any other dir.
	if err = os.MkdirAll(dir, 0755); err != nil {
		return err
	}
	w, err := os.Create(filepath.Join(dir, filepath.Base(p.Path)))
	if err != nil {
		return err
	}
	// BUG FIX: the file handle was never closed.
	defer w.Close()
	return jpeg.Encode(w, dst, nil)
}
// drawDot paints a small square of color c centered on the normalized
// coordinate (x, y), mapped to pixels through makePoint.
func drawDot(dst draw.Image, x, y float64, bounds image.Rectangle, orientation int, c image.Image) {
	half := 15 // TODO Scale relative to size of face?
	center := makePoint(x, y, bounds, orientation)
	box := image.Rect(center.X-half, center.Y-half, center.X+half, center.Y+half)
	draw.Draw(dst, box, c, image.ZP, draw.Src)
}
func makePoint(x, y float64, r image.Rectangle, orientation int) image.Point {
dx, dy := float64(r.Dx()), float64(r.Dy())
switch orientation {
case 1: // normal but not sure why the y axis is flipped
y = 1 - y
case 3: // upside down too
x = 1 - x
case 6: // portrait
x, y = 1-y, 1-x
case 8: // only 1 example with a face
x, y = y, x
default:
fmt.Println("unrecognized orientation:", orientation)
}
return image.Pt(int(x*dx), int(y*dy))
}
// Uniform colors used to mark the face box borders and feature dots.
var (
	red   = &image.Uniform{color.RGBA{255, 0, 0, 255}}
	green = &image.Uniform{color.RGBA{0, 255, 0, 255}}
	blue  = &image.Uniform{color.RGBA{0, 0, 255, 255}}
	gray  = &image.Uniform{color.RGBA{100, 100, 100, 255}}
	black = &image.Uniform{color.RGBA{0, 0, 0, 255}}
)
|
package cloud
// GetRegion is currently a stub and performs no work.
func GetRegion() {
}
|
package controller
import (
database "../connect"
"../models"
"github.com/gin-gonic/gin"
)
// Read opens a database connection, writes the stored values into the
// gin response, and closes the connection when the handler returns.
func Read(c *gin.Context) {
	db := database.Connect()
	// BUG FIX: register Close immediately after Connect so the
	// connection is released even if ShowValues panics (it was
	// previously deferred only after ShowValues returned).
	defer db.Close()
	models.ShowValues(db, c)
}
// Insert loads ward records from the bundled CSV, creates the ward
// table, inserts the records, and reports success as JSON.
func Insert(c *gin.Context) {
	record := models.ReadCsv("static/data/ward.csv")
	db := database.Connect()
	// BUG FIX: defer Close right after Connect so the connection is
	// released even if a later call panics.
	defer db.Close()
	models.CreateWardTable(db)
	models.InsertValue(db, record)
	c.JSON(200, gin.H{
		"message": "Create and insert success",
	})
}
|
package piscine
// IsPrintable reports whether every rune of str lies in the printable
// ASCII range [' ' (32), '~' (126)]. The empty string yields false.
func IsPrintable(str string) bool {
	if len(str) == 0 {
		return false
	}
	for _, r := range str {
		if r < ' ' || r > '~' {
			return false
		}
	}
	return true
}
|
package main
import (
"fmt"
"os"
"sort"
"github.com/gin-gonic/gin"
"github.com/yuneejang/webserver/config"
"github.com/yuneejang/webserver/utils"
"gopkg.in/urfave/cli.v1"
)
// main wires up the CLI application (metadata, commands, flags and the
// config-loading Before hook) and runs it against os.Args.
func main() {
	app := cli.NewApp()
	app.Copyright = "Copyright 2020-2020 The go-symverse Authors"
	app.Version = "0.0.0"
	app.Name = "Web Server Test"
	app.Usage = "See README"
	app.Commands = []cli.Command{
		utils.InitCommand,
	}
	sort.Sort(cli.CommandsByName(app.Commands))
	app.Flags = []cli.Flag{
		utils.ConfigFilePath,
		// DevelopFlag,
		utils.LogFlag,
		utils.LogPathFlag,
		utils.EnableAPI,
	}
	app.Before = func(ctx *cli.Context) error {
		cfg, err := utils.MakeConfig(ctx)
		if err != nil {
			// BUG FIX: previously the error was only printed and
			// setConfig still ran with a nil config, panicking on
			// conf.Nodes[0]. Abort the app instead.
			fmt.Println(err)
			return err
		}
		setConfig(ctx, cfg)
		return nil
	}
	app.Action = func(ctx *cli.Context) error {
		run(ctx)
		return nil
	}
	err := app.Run(os.Args)
	if err != nil {
		fmt.Println(err)
	}
}
// run builds the gin router, installs the application routes and
// serves on :8131, blocking until the server stops.
func run(ctx *cli.Context) {
	// Step 1. Create a gin router with the Logger and Recovery
	// middleware already attached (gin.New() would start bare).
	router := gin.Default()

	// Step 2. Set up the routes.
	router = SetupRouter(router)

	// Step 3. Run the server.
	// BUG FIX: the error returned by Run was silently dropped.
	if err := router.Run(":8131"); err != nil {
		fmt.Println(err)
	}

	// For fine-grained http configuration, use http.Server directly:
	// s := &http.Server{
	//	Addr:           ":8080",
	//	Handler:        router,
	//	ReadTimeout:    10 * time.Second,
	//	WriteTimeout:   10 * time.Second,
	//	MaxHeaderBytes: 1 << 20,
	// }
	// s.ListenAndServe()
}
// setConfig publishes the HTTP attach address of the first configured
// node into the config package's global state.
func setConfig(ctx *cli.Context, conf *config.Config) {
	node := conf.Nodes[0]
	config.HttpAttach = fmt.Sprintf("http://%s:%s", node.Host, node.HttpPort)
}
|
package configfile
import (
"encoding/json"
"encoding/xml"
"fmt"
"os"
"strings"
"github.com/stewelarend/config"
"gopkg.in/yaml.v2"
)
func Add(filename string) error {
f, err := os.Open(filename)
if err != nil {
return fmt.Errorf("cannot open file(%s): %v", filename, err)
}
defer f.Close()
if strings.HasSuffix(filename, ".json") {
var data map[string]interface{}
if err := json.NewDecoder(f).Decode(&data); err != nil {
return fmt.Errorf("cannot read JSON object from file(%s): %v", filename, err)
}
config.AddSource(config.NewValues(filename, data))
return nil
}
if strings.HasSuffix(filename, ".xml") {
var data map[string]interface{}
if err := xml.NewDecoder(f).Decode(&data); err != nil {
return fmt.Errorf("cannot read XML object from file(%s): %v", filename, err)
}
config.AddSource(config.NewValues(filename, data))
return nil
}
if strings.HasSuffix(filename, ".yaml") {
var data map[string]interface{}
if err := yaml.NewDecoder(f).Decode(&data); err != nil {
return fmt.Errorf("cannot read YAML object from file(%s): %v", filename, err)
}
config.AddSource(config.NewValues(filename, data))
return nil
}
return fmt.Errorf("unknown suffix in filename(%s) expecting json|xml|yaml", filename)
}
|
package schedule
import (
"fmt"
"gopkg.in/tomb.v2"
"gopkg.in/mgo.v2"
"strconv"
"strings"
"sync"
"time"
"TskSch/msgQ"
)
// Schedule describes one scheduled task. L is the raw schedule line
// split on ':' by Push (repeat:week:day:hour:minute:second:command).
type Schedule struct {
	L       string          // raw colon-separated schedule specification
	Id      int             // task identifier
	Name    string          // task name
	W       *sync.WaitGroup // signalled via Done when Push returns
	Session *mgo.Session    // mongo session used for result records
	Host    string          // redis host for the task queue
	Port    string          // redis port for the task queue
	T       tomb.Tomb       // carries the mutex used around queue pushes
}
// Result is the mongo document recording the execution state and
// output of one dispatched command (inserted by put2resDB).
type Result struct {
	Task_id   string //command ID
	Task_name string
	Cmd       string
	Executed  bool   //Executed staus
	TOE       string //Time Of Execution
	TTE       string //Time Taken to Execute
	Pid       int    // process id of client
	Exec_Stat bool   //Execution Status
	output    string
	err       string
}
// Push parses Sch.L ("repeat:week:day:hour:minute:second:command") and
// dispatches the command to the message queue on the resulting
// schedule. With R == 0 it fires at a fixed wall-clock time (optionally
// only on a given weekday); otherwise it fires on a repeating interval.
// The loops below never exit, so Sch.W.Done() is effectively
// unreachable — NOTE(review): confirm this is intended.
func (Sch *Schedule) Push() error {
	schedule := strings.Split(Sch.L, ":")
	R, _ := strconv.Atoi(schedule[0])      // => For every interval or for only at particular time
	Week, _ := strconv.Atoi(schedule[1])   // => Week NAME
	Day, _ := strconv.Atoi(schedule[2])    // => For Every day : day = 1 or Foe Every 2nd day : day = 2
	Hour, _ := strconv.Atoi(schedule[3])   // => 24 Hr Format
	Minute, _ := strconv.Atoi(schedule[4]) // => Minutes
	Second, _ := strconv.Atoi(schedule[5]) // => Seconds
	Cmd := schedule[6]                     // => Command
	if R == 0 {
		// Fixed-time mode. Week == -1 means "any weekday".
		if Week != -1 {
			if int(time.Now().Weekday()) == Week {
				// Already the right weekday: fire weekly from today.
				ticker := updateTicker(Hour, Minute, Second, Day, 7)
				for {
					<-ticker.C
					func() {
						// Serialize queue pushes through the tomb's mutex.
						Sch.T.M.Lock()
						put2msgQ(Sch.Host, Sch.Port, Cmd, Sch.Session, Sch.Id, Sch.Name)
						Sch.T.M.Unlock()
						fmt.Println("TASK : ", Sch.Id, "GOT EXECUTED")
					}()
					ticker = updateTicker(Hour, Minute, Second, Day, 7)
				}
			} else {
				// Wait for the number of days until the target weekday,
				// then fall into the weekly cadence.
				var ticker *time.Ticker
				if int(time.Now().Weekday()) > Week {
					ticker = updateTicker(Hour, Minute, Second, 1, 7-int(time.Now().Weekday())+Week)
				} else {
					ticker = updateTicker(Hour, Minute, Second, 1, Week-int(time.Now().Weekday()))
				}
				for {
					<-ticker.C
					func() {
						Sch.T.M.Lock()
						put2msgQ(Sch.Host, Sch.Port, Cmd, Sch.Session, Sch.Id, Sch.Name)
						Sch.T.M.Unlock()
						fmt.Println("TASK : ", Sch.Id, "GOT EXECUTED")
					}()
					ticker = updateTicker(Hour, Minute, Second, Day, 7)
				}
			}
		} else {
			// No weekday constraint: fire every Day days at H:M:S.
			ticker := updateTicker(Hour, Minute, Second, Day, 1)
			for {
				<-ticker.C
				func() {
					Sch.T.M.Lock()
					put2msgQ(Sch.Host, Sch.Port, Cmd, Sch.Session, Sch.Id, Sch.Name)
					Sch.T.M.Unlock()
					fmt.Println("TASK : ", Sch.Id, "GOT EXECUTED")
				}()
				ticker = updateTicker(Hour, Minute, Second, Day, 1)
			}
		}
	} else {
		// Interval mode. NOTE(review): the interval is computed as
		// Hour*60 + Minute*60 + Second seconds — presumably it was
		// meant to be Hour*3600 + Minute*60 + Second; confirm.
		for _ = range time.Tick(time.Second * time.Duration(Hour*60+Minute*60+Second*1)) {
			if Week == -1 {
				func() {
					Sch.T.M.Lock()
					put2msgQ(Sch.Host, Sch.Port, Cmd, Sch.Session, Sch.Id, Sch.Name)
					Sch.T.M.Unlock()
					fmt.Println("TASK : ", Sch.Id, "GOT EXECUTED")
				}()
			} else {
				// Only fire on the requested weekday.
				if int(time.Now().Weekday()) == Week {
					func() {
						Sch.T.M.Lock()
						put2msgQ(Sch.Host, Sch.Port, Cmd, Sch.Session, Sch.Id, Sch.Name)
						Sch.T.M.Unlock()
						fmt.Println("TASK : ", Sch.Id, "GOT EXECUTED")
					}()
				}
			}
		}
	}
	Sch.W.Done()
	return nil
}
// put2msgQ pushes cmd_id onto the redis-backed "task" list and, on
// success, records a pending result document for it via put2resDB.
func put2msgQ(host string, port string, Cmd string, session *mgo.Session, cmd_id int, name string) {
	// Initialise the redis connection used as the message queue.
	conn := msgQ.RedisInit(host, port)
	defer conn.Close()

	// Push the command id onto the queue.
	if _, err := conn.Do("LPUSH", "task", cmd_id); err != nil {
		fmt.Println("CAN'T PUSH IT TO msgQ", err)
		return
	}
	put2resDB(session, cmd_id, name, Cmd)
}
// put2resDB inserts an initial (not-yet-executed) Result record for
// the scheduled command into the TskSch/Result collection.
func put2resDB(session *mgo.Session, cmd_id int, name string, Cmd string) {
	session.SetMode(mgo.Monotonic, true)
	col := session.DB("TskSch").C("Result")
	doc := &Result{strconv.Itoa(cmd_id), name, Cmd, false, "", "", 0, false, "", ""}
	if err := col.Insert(doc); err != nil {
		fmt.Println("NOT ABLE TO INSERT TO resultDB", err)
	}
}
// updateTicker returns a ticker that fires at the next occurrence of
// Hour:Minute:Second local time, pushed forward Week*Day days when
// that moment has already passed today.
func updateTicker(Hour int, Minute int, Second int, Day int, Week int) *time.Ticker {
	now := time.Now()
	next := time.Date(now.Year(), now.Month(), now.Day(), Hour, Minute, Second, 0, time.Local)
	if !next.After(now) {
		next = next.Add(time.Duration(Week*Day*24) * time.Hour)
	}
	return time.NewTicker(time.Until(next))
}
|
package tmp
// DatabaseTmp is the text/template source for a generated "database"
// package. It expects .ModuleName and .DBS (map of db name -> driver
// kind) plus the helper funcs isOnePostgres, isOneMongo and title, and
// emits connection plumbing plus per-database store accessors.
const DatabaseTmp = `package database{{$module := .ModuleName}}
import (
"fmt"
{{if isOnePostgres}}
"database/sql"
_ "github.com/lib/pq"{{end}}
{{printf "\"%v/helper\"" $module}}
{{range $i,$k := .DBS}}
{{printf "\"%v/store/%v_store\"" $module $i }}{{end}}
{{if isOneMongo}}
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"{{end}}
)
const ( {{range $i,$k := .DBS}}
{{printf "_%v = \"%v\"" $i $i}}{{end}}
)
type DB interface { {{range $i,$k := .DBS}}{{if eq $k "` + string(Mongodb) + `"}}
{{printf "%v() *mongo.Database" (title $i)}}{{else if eq $k "` + string(Postgres) + `"}}
{{printf "%v() *sql.DB" (title $i)}}{{end}}
{{printf "%v_store() %v_store.Store" (title $i) $i}}{{end}}
Close()
}
type DBForHandler interface { {{range $i,$k := .DBS}}
{{printf "%v_store() %v_store.Store" (title $i) $i}}{{end}}
}
type db struct { {{range $i,$k := .DBS}}
{{printf "_%v *d" $i}}{{end}}
}
type d struct {
store interface{}
conn interface{}
dbName string
}
func InitDB(conf *helper.Config) (DB DB, err error) {
db := &db{}
DB = db
var conn interface{} {{range $i,$k := .DBS}}
if v, ok := conf.DBS[{{printf "_%v" $i}}]; ok {
conn, err = {{if eq $k "` + string(Mongodb) + `"}}connMongo(v){{else if eq $k "` + string(Postgres) + `"}}connPostgres(v){{end}}
if err != nil {
return nil, fmt.Errorf("db not initializing: %v", err)
}
db.{{printf "_%v" $i}} = &d{
store: {{print $i}}_store.InitStore({{if eq $k "` + string(Mongodb) + `"}}conn.(*mongo.Client).Database(v.Name){{else if eq $k "` + string(Postgres) + `"}}conn.(*sql.DB){{end}}),
dbName: v.Name,
conn: conn,
}
helper.Log.Servicef("db %q initializing", {{printf "_%v" $i}})
}{{end}}
helper.Log.Service("db initializing")
return
}
func (d *db) Close() { {{range $i,$k := .DBS}}{{if eq $k "` + string(Mongodb) + `"}}
{{printf "d._%v.conn.(*mongo.Client).Disconnect(helper.Ctx)" $i}}{{else if eq $k "` + string(Postgres) + `"}}
{{printf "d._%v.conn.(*sql.DB).Close()" $i}}{{end}}
{{printf "helper.Log.Servicef(%v, _%v)" "\"db %q stoped\"" $i}}{{end}}
}
{{range $i,$k := .DBS}}{{if eq $k "` + string(Mongodb) + `"}}
{{printf "func (d *db) %v() *mongo.Database { return d._%v.conn.(*mongo.Client).Database(d._%v.dbName)" (title $i) $i $i}}}{{else if eq $k "` + string(Postgres) + `"}}
{{printf "func (d *db) %v() *sql.DB { return d._%v.conn.(*sql.DB)}" (title $i) $i}}{{end}}
{{printf "func (d *db) %v_store() %v_store.Store { return d._%v.store.(%v_store.Store)}" (title $i) $i $i $i}}{{end}}
{{if isOnePostgres}}
func connPostgres(v *helper.DbConfig) (conn *sql.DB, err error) {
conn, err = sql.Open("postgres", fmt.Sprintf("user=%v password=%v host=%v port=%v dbname=%v sslmode=disable", v.User, v.Password, v.Host, v.Port, v.Name))
if err != nil {
return conn, fmt.Errorf("db not connected: %v", err)
}
if err = conn.PingContext(helper.Ctx); err != nil {
return conn, fmt.Errorf("db not pinged: %v", err)
}
return
}{{end}}
{{if isOneMongo}}
func connMongo(v *helper.DbConfig) (conn *mongo.Client, err error) {
opt := options.Client().ApplyURI(fmt.Sprintf("mongodb://%v:%v", v.Host, v.Port)).SetAuth(options.Credential{AuthMechanism: "SCRAM-SHA-256", Username: v.User, Password: v.Password})
conn, err = mongo.Connect(helper.Ctx, opt)
if err != nil {
return conn, fmt.Errorf("db not connected: %v", err)
}
if err = conn.Ping(helper.Ctx, nil); err != nil {
return conn, fmt.Errorf("db not pinged: %v", err)
}
return
}{{end}}`
|
package pathtree
import (
"log"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func init() {
log.SetFlags(log.LstdFlags | log.Lshortfile)
}
// TestNewPathTree checks that New always produces a tree rooted at
// "/", regardless of the starting path argument.
func TestNewPathTree(t *testing.T) {
	tree := New()
	require.NotNil(t, tree)
	assert.Equal(t, "/", tree.Root())

	// A non-root starting path still reports "/" as the root.
	tree = New("/usr/local/bin")
	assert.NotNil(t, tree)
	assert.Equal(t, "/", tree.Root())
}
// TestAdd checks that adding a well-formed absolute path succeeds.
func TestAdd(t *testing.T) {
	tree := New()
	require.NotNil(t, tree)
	assert.NoError(t, tree.Add("/usr/local/bin/bash"))
}
|
package fuse
import (
"log"
)
// Keep the log import referenced even when no logging calls are live.
var _ = log.Println

// DefaultRawFuseFileSystem is a no-op base implementation of the raw
// filesystem interface: most operations answer ENOSYS ("not
// implemented"), a few answer OK with zero values. Embed it and
// override only the operations you support.

func (me *DefaultRawFuseFileSystem) Init(h *InHeader, input *InitIn) (*InitOut, Status) {
	return new(InitOut), OK
}
func (me *DefaultRawFuseFileSystem) Destroy(h *InHeader, input *InitIn) {
}
func (me *DefaultRawFuseFileSystem) Lookup(h *InHeader, name string) (out *EntryOut, code Status) {
	return nil, ENOSYS
}
func (me *DefaultRawFuseFileSystem) Forget(h *InHeader, input *ForgetIn) {
}
func (me *DefaultRawFuseFileSystem) GetAttr(header *InHeader, input *GetAttrIn) (out *AttrOut, code Status) {
	return nil, ENOSYS
}

// Open reports OK (not ENOSYS) with a nil file — NOTE(review): confirm
// this asymmetry with the other stubs is intentional.
func (me *DefaultRawFuseFileSystem) Open(header *InHeader, input *OpenIn) (flags uint32, fuseFile RawFuseFile, status Status) {
	return 0, nil, OK
}
func (me *DefaultRawFuseFileSystem) SetAttr(header *InHeader, input *SetAttrIn) (out *AttrOut, code Status) {
	return nil, ENOSYS
}
func (me *DefaultRawFuseFileSystem) Readlink(header *InHeader) (out []byte, code Status) {
	return nil, ENOSYS
}

// Mknod returns a non-nil EntryOut alongside ENOSYS, unlike its
// siblings — NOTE(review): confirm callers ignore out on error.
func (me *DefaultRawFuseFileSystem) Mknod(header *InHeader, input *MknodIn, name string) (out *EntryOut, code Status) {
	return new(EntryOut), ENOSYS
}
func (me *DefaultRawFuseFileSystem) Mkdir(header *InHeader, input *MkdirIn, name string) (out *EntryOut, code Status) {
	return nil, ENOSYS
}
func (me *DefaultRawFuseFileSystem) Unlink(header *InHeader, name string) (code Status) {
	return ENOSYS
}
func (me *DefaultRawFuseFileSystem) Rmdir(header *InHeader, name string) (code Status) {
	return ENOSYS
}
func (me *DefaultRawFuseFileSystem) Symlink(header *InHeader, pointedTo string, linkName string) (out *EntryOut, code Status) {
	return nil, ENOSYS
}
func (me *DefaultRawFuseFileSystem) Rename(header *InHeader, input *RenameIn, oldName string, newName string) (code Status) {
	return ENOSYS
}
func (me *DefaultRawFuseFileSystem) Link(header *InHeader, input *LinkIn, name string) (out *EntryOut, code Status) {
	return nil, ENOSYS
}
func (me *DefaultRawFuseFileSystem) SetXAttr(header *InHeader, input *SetXAttrIn) Status {
	return ENOSYS
}
func (me *DefaultRawFuseFileSystem) GetXAttr(header *InHeader, attr string) (data []byte, code Status) {
	return nil, ENOSYS
}
func (me *DefaultRawFuseFileSystem) Access(header *InHeader, input *AccessIn) (code Status) {
	return ENOSYS
}
func (me *DefaultRawFuseFileSystem) Create(header *InHeader, input *CreateIn, name string) (flags uint32, fuseFile RawFuseFile, out *EntryOut, code Status) {
	return 0, nil, nil, ENOSYS
}
func (me *DefaultRawFuseFileSystem) Bmap(header *InHeader, input *BmapIn) (out *BmapOut, code Status) {
	return nil, ENOSYS
}
func (me *DefaultRawFuseFileSystem) Ioctl(header *InHeader, input *IoctlIn) (out *IoctlOut, code Status) {
	return nil, ENOSYS
}
func (me *DefaultRawFuseFileSystem) Poll(header *InHeader, input *PollIn) (out *PollOut, code Status) {
	return nil, ENOSYS
}
func (me *DefaultRawFuseFileSystem) OpenDir(header *InHeader, input *OpenIn) (flags uint32, fuseFile RawFuseDir, status Status) {
	return 0, nil, ENOSYS
}
func (me *DefaultRawFuseFileSystem) Release(header *InHeader, f RawFuseFile) {
}
func (me *DefaultRawFuseFileSystem) ReleaseDir(header *InHeader, f RawFuseDir) {
}
////////////////////////////////////////////////////////////////
// DefaultRawFuseFile

// DefaultRawFuseFile is a no-op base file implementation: every
// operation reports ENOSYS or does nothing.

func (me *DefaultRawFuseFile) Read(*ReadIn, *BufferPool) ([]byte, Status) {
	return []byte(""), ENOSYS
}
func (me *DefaultRawFuseFile) Write(*WriteIn, []byte) (uint32, Status) {
	return 0, ENOSYS
}
func (me *DefaultRawFuseFile) Flush() Status {
	return ENOSYS
}
func (me *DefaultRawFuseFile) Release() {
}
func (me *DefaultRawFuseFile) Fsync(*FsyncIn) (code Status) {
	return ENOSYS
}
////////////////////////////////////////////////////////////////
//

// DefaultRawFuseDir is a no-op base directory implementation.

func (me *DefaultRawFuseDir) ReadDir(input *ReadIn) (*DirEntryList, Status) {
	return nil, ENOSYS
}
func (me *DefaultRawFuseDir) ReleaseDir() {
}
func (me *DefaultRawFuseDir) FsyncDir(input *FsyncIn) (code Status) {
	return ENOSYS
}
////////////////////////////////////////////////////////////////
// DefaultPathFilesystem

// DefaultPathFilesystem is a no-op base implementation of the
// path-based filesystem interface. All operations report ENOSYS
// except Mount (OK) and the empty Unmount.

func (me *DefaultPathFilesystem) GetAttr(name string) (*Attr, Status) {
	return nil, ENOSYS
}
func (me *DefaultPathFilesystem) GetXAttr(name string, attr string) ([]byte, Status) {
	return nil, ENOSYS
}
func (me *DefaultPathFilesystem) Readlink(name string) (string, Status) {
	return "", ENOSYS
}
func (me *DefaultPathFilesystem) Mknod(name string, mode uint32, dev uint32) Status {
	return ENOSYS
}
func (me *DefaultPathFilesystem) Mkdir(name string, mode uint32) Status {
	return ENOSYS
}
func (me *DefaultPathFilesystem) Unlink(name string) (code Status) {
	return ENOSYS
}
func (me *DefaultPathFilesystem) Rmdir(name string) (code Status) {
	return ENOSYS
}
func (me *DefaultPathFilesystem) Symlink(value string, linkName string) (code Status) {
	return ENOSYS
}
func (me *DefaultPathFilesystem) Rename(oldName string, newName string) (code Status) {
	return ENOSYS
}
func (me *DefaultPathFilesystem) Link(oldName string, newName string) (code Status) {
	return ENOSYS
}
func (me *DefaultPathFilesystem) Chmod(name string, mode uint32) (code Status) {
	return ENOSYS
}
func (me *DefaultPathFilesystem) Chown(name string, uid uint32, gid uint32) (code Status) {
	return ENOSYS
}
func (me *DefaultPathFilesystem) Truncate(name string, offset uint64) (code Status) {
	return ENOSYS
}
func (me *DefaultPathFilesystem) Open(name string, flags uint32) (file RawFuseFile, code Status) {
	return nil, ENOSYS
}
func (me *DefaultPathFilesystem) OpenDir(name string) (stream chan DirEntry, status Status) {
	return nil, ENOSYS
}

// Mount accepts any connector unconditionally.
func (me *DefaultPathFilesystem) Mount(conn *PathFileSystemConnector) Status {
	return OK
}
func (me *DefaultPathFilesystem) Unmount() {
}
func (me *DefaultPathFilesystem) Access(name string, mode uint32) (code Status) {
	return ENOSYS
}
func (me *DefaultPathFilesystem) Create(name string, flags uint32, mode uint32) (file RawFuseFile, code Status) {
	return nil, ENOSYS
}
func (me *DefaultPathFilesystem) Utimens(name string, AtimeNs uint64, CtimeNs uint64) (code Status) {
	return ENOSYS
}
|
// Licensed to SolID under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. SolID licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package jwt
import (
"crypto"
"encoding/base64"
"fmt"
josejwt "github.com/square/go-jose/v3/jwt"
)
// tokenWrapper adapts a parsed go-jose JWT, exposing accessors over
// its first header (the accessors assume at most one header entry).
type tokenWrapper struct {
	token *josejwt.JSONWebToken
}
// Type returns the "typ" extra header of the wrapped token.
func (tw *tokenWrapper) Type() (string, error) {
	if len(tw.token.Headers) == 0 {
		// BUG FIX: the previous message ("unable to retrieve embededded
		// jwk from header") was copy-pasted from another accessor and
		// misleading — this accessor reads the token type.
		return "", fmt.Errorf("unable to retrieve token type from header")
	}
	if typ, ok := tw.token.Headers[0].ExtraHeaders["typ"]; ok {
		return fmt.Sprintf("%v", typ), nil
	}
	return "", fmt.Errorf("unable to retrieve token type")
}
// KeyID returns the "kid" header of the wrapped token.
func (tw *tokenWrapper) KeyID() (string, error) {
	if len(tw.token.Headers) == 0 {
		return "", fmt.Errorf("unable to retrieve kid claim from header")
	}
	hdr := tw.token.Headers[0]
	return hdr.KeyID, nil
}
// PublicKey returns the JWK embedded in the token header, if any.
func (tw *tokenWrapper) PublicKey() (interface{}, error) {
	if len(tw.token.Headers) == 0 {
		// BUG FIX: return a nil interface (not an empty string) on the
		// error path, and fix the "embededded" typo in the message.
		return nil, fmt.Errorf("unable to retrieve embedded jwk from header")
	}
	return tw.token.Headers[0].JSONWebKey, nil
}
// PublicKeyThumbPrint computes the RFC 7638 SHA-256 thumbprint of the
// JWK embedded in the token header, base64url-encoded without padding.
func (tw *tokenWrapper) PublicKeyThumbPrint() (string, error) {
	if len(tw.token.Headers) == 0 {
		return "", fmt.Errorf("unable to retrieve embedded jwk from header")
	}
	jwk := tw.token.Headers[0].JSONWebKey
	if jwk == nil {
		// BUG FIX: a header without an embedded jwk previously caused a
		// nil pointer dereference in Thumbprint.
		return "", fmt.Errorf("unable to retrieve embedded jwk from header")
	}
	// Generate thumbprint.
	h, err := jwk.Thumbprint(crypto.SHA256)
	if err != nil {
		return "", fmt.Errorf("unable to generate embedded jwk thumbprint: %w", err)
	}
	// No error.
	return base64.RawURLEncoding.EncodeToString(h), nil
}
// Algorithm returns the "alg" header of the wrapped token.
func (tw *tokenWrapper) Algorithm() (string, error) {
	if len(tw.token.Headers) == 0 {
		return "", fmt.Errorf("unable to retrieve `alg` claim from header")
	}
	hdr := tw.token.Headers[0]
	return hdr.Algorithm, nil
}
// Claims verifies the token signature with publicKey and unmarshals
// its payload into claims, delegating to go-jose.
func (tw *tokenWrapper) Claims(publicKey interface{}, claims interface{}) error {
	return tw.token.Claims(publicKey, claims)
}
|
package login
import (
"github.com/gin-gonic/gin"
"github.com/charlesfan/go-api/service/rsi"
"github.com/charlesfan/go-api/utils/log"
)
// CheckEmail returns gin middleware that binds the request body to an
// EmailLoginBody. On bind failure it replies 401 and aborts the chain;
// on success it stores the parsed body under the "info" context key.
func CheckEmail() gin.HandlerFunc {
	return func(c *gin.Context) {
		//Implement
		var body rsi.EmailLoginBody
		if err := c.Bind(&body); err != nil {
			log.Error(err)
			log.Error("Email or Password does not exist")
			c.JSON(401, map[string]string{"error": "Email or Password does not exist"})
			c.Abort()
			return
		}
		log.Info("Here is CheckEmail Middleware Function: PASS")
		c.Set("info", body)
		c.Next()
	}
}
|
package main
import (
"net/http"
"web"
"log"
"time"
"spider/controller"
"mycache"
"task"
"util"
"strconv"
"spider/entity"
"fmt"
)
/**
@Todo check whether the Beijing/Shanghai/Guangzhou caches are created
more than once
*/
// main starts the crawler pipeline: a goroutine that dedupes and
// dispatches crawl tasks, a notifier, an hourly GC pass, and an HTTP
// query endpoint on :8080.
func main() {
	// Create the task (deduplication) cache.
	taksCache := mycache.GetCache("task")
	// Start the dispatch service.
	go func() {
		for url := range task.Task {
			// NOTE(review): mycache.Get's second result is named err but
			// compared to true — presumably it means "already present";
			// confirm against the mycache package.
			_, err := mycache.Get(taksCache, url)
			if err == true {
				continue
			}
			mycache.Put(taksCache, url, 1)
			// base64-decode the url before crawling.
			url = util.Base642URL(url)
			go service(url)
		}
	}()
	go notify()
	// Run gc once every hour.
	go func() {
		for {
			gc()
			time.Sleep(1 * time.Hour)
		}
	}()
	// Register the query endpoint.
	http.HandleFunc("/list", web.ReadList)
	err := http.ListenAndServe(":8080", nil)
	if err != nil {
		log.Fatal(err.Error())
	}
}
/**
爬虫服务
*/
func service(url string) {
conf := "2"
width, _ := strconv.Atoi(conf)
if width == 0 {
width = 1
}
for {
controller.Service(url)
time.Sleep(10000 * time.Millisecond)
}
}
// notify runs forever, and every 2 hours diffs each user's last-seen
// counters against the per-location job lists; users with unseen items
// are flagged for a mail notification (sending currently disabled).
func notify() {
	for {
		log.Print("check notify")
		userCache := mycache.GetCache("user")
		locationCache := mycache.GetCache("location")
		for k, v := range *userCache {
			// Per-user map: location key -> number of items already seen.
			userInfo := v.(map[string]int)
			res := make([]entity.JobInfo, 0)
			f := false
			for lk, lv := range *locationCache {
				locationInfo := lv.(*entity.JobInfoList)
				if userInfo[lk] < len(*locationInfo) {
					f = true
					// Items the user has not seen yet.
					list := (*locationInfo)[userInfo[lk]:]
					log.Printf("New Information location : %s of user %s, total count:%d", lk, k, len(list))
					// old + len(list) == len(*locationInfo): marks every
					// current item as seen.
					userInfo[lk] = len(list) + userInfo[lk]
					for _, item := range list {
						res = append(res, item)
					}
				}
			}
			if f {
				log.Println("send mail")
				//mail.SendMail(res)
			}
		}
		time.Sleep(2 * time.Hour)
	}
}
// gc removes entries older than 3 days (by their Date field) from
// every per-location job list, in place.
func gc() {
	locationCache := mycache.GetCache("location")
	for _, v := range *locationCache {
		list := v.(*entity.JobInfoList)
		fmt.Printf("before delete elements :%d", len(*list))
		// In-place removal: rcount counts deletions so far, so
		// j = i - rcount is the current index of original element i.
		for i, rcount, rlen := 0, 0, len(*list); i < rlen; i++ {
			j := i - rcount
			date, err := time.Parse("2006-01-02", (*list)[j].Date)
			if err == nil && date.AddDate(0, 0, 3).Before(time.Now()) {
				(*list) = append((*list)[:j], (*list)[j+1:]...)
				rcount++
			}
		}
		fmt.Printf("after delete elements :%d", len(*list))
	}
}
|
/*
Copyright 2021 CodeNotary, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sql
// Catalog is the in-memory registry of databases, indexed both by
// numeric id and by name.
type Catalog struct {
	dbsByID   map[uint64]*Database
	dbsByName map[string]*Database
}
// Database groups tables under one name, indexed by id and by name.
type Database struct {
	id           uint64
	name         string
	tablesByID   map[uint64]*Table
	tablesByName map[string]*Table
}
// Table holds a table's columns (indexed by id and by name), its
// primary-key column, and the set of indexed column ids.
type Table struct {
	db         *Database
	id         uint64
	name       string
	colsByID   map[uint64]*Column
	colsByName map[string]*Column
	pk         *Column
	indexes    map[uint64]struct{}
}
// Column describes one table column: its id, name, SQL type and
// nullability.
type Column struct {
	table   *Table
	id      uint64
	colName string
	colType SQLValueType
	notNull bool
}
// newCatalog builds an empty catalog with initialized lookup maps.
func newCatalog() *Catalog {
	c := &Catalog{}
	c.dbsByID = map[uint64]*Database{}
	c.dbsByName = map[string]*Database{}
	return c
}
// ExistDatabase reports whether a database named db is registered.
func (c *Catalog) ExistDatabase(db string) bool {
	if _, ok := c.dbsByName[db]; ok {
		return true
	}
	return false
}
// newDatabase registers a database under name, assigning it the next
// sequential id. Fails with ErrDatabaseAlreadyExists on a name clash.
func (c *Catalog) newDatabase(name string) (*Database, error) {
	if c.ExistDatabase(name) {
		return nil, ErrDatabaseAlreadyExists
	}

	db := &Database{
		id:           uint64(len(c.dbsByID) + 1),
		name:         name,
		tablesByID:   map[uint64]*Table{},
		tablesByName: map[string]*Table{},
	}

	c.dbsByID[db.id] = db
	c.dbsByName[db.name] = db
	return db, nil
}
// Databases returns all registered databases in unspecified order.
func (c *Catalog) Databases() []*Database {
	dbs := make([]*Database, 0, len(c.dbsByID))
	for _, db := range c.dbsByID {
		dbs = append(dbs, db)
	}
	return dbs
}
// GetDatabaseByName looks up a database by name.
func (c *Catalog) GetDatabaseByName(name string) (*Database, error) {
	if db, ok := c.dbsByName[name]; ok {
		return db, nil
	}
	return nil, ErrDatabaseDoesNotExist
}
// GetDatabaseByID looks up a database by its numeric id.
func (c *Catalog) GetDatabaseByID(id uint64) (*Database, error) {
	if db, ok := c.dbsByID[id]; ok {
		return db, nil
	}
	return nil, ErrDatabaseDoesNotExist
}
// ID returns the database's numeric identifier.
func (db *Database) ID() uint64 {
	return db.id
}

// Name returns the database's name.
func (db *Database) Name() string {
	return db.name
}

// ExistTable reports whether a table named table exists in db.
func (db *Database) ExistTable(table string) bool {
	_, exists := db.tablesByName[table]
	return exists
}
// GetTableByName resolves dbName, then looks up tableName within it.
func (c *Catalog) GetTableByName(dbName, tableName string) (*Table, error) {
	db, lookupErr := c.GetDatabaseByName(dbName)
	if lookupErr != nil {
		return nil, lookupErr
	}
	return db.GetTableByName(tableName)
}
// GetTables returns all tables of db in unspecified order.
func (db *Database) GetTables() []*Table {
	// CONSISTENCY FIX: size the result from tablesByID — the map being
	// iterated — rather than tablesByName (equal in practice, but the
	// mismatch invited bugs if the two maps ever diverged).
	ts := make([]*Table, 0, len(db.tablesByID))
	for _, t := range db.tablesByID {
		ts = append(ts, t)
	}
	return ts
}
// GetTableByName looks up a table of db by name.
func (db *Database) GetTableByName(name string) (*Table, error) {
	if table, ok := db.tablesByName[name]; ok {
		return table, nil
	}
	return nil, ErrTableDoesNotExist
}
// GetTableByID looks up a table of db by its numeric id.
func (db *Database) GetTableByID(id uint64) (*Table, error) {
	if table, ok := db.tablesByID[id]; ok {
		return table, nil
	}
	return nil, ErrTableDoesNotExist
}
// ID returns the table's numeric identifier.
func (t *Table) ID() uint64 {
	return t.id
}

// Database returns the database this table belongs to.
func (t *Table) Database() *Database {
	return t.db
}

// ColsByID returns the column map keyed by column id.
func (t *Table) ColsByID() map[uint64]*Column {
	return t.colsByID
}

// ColsByName returns the column map keyed by column name.
func (t *Table) ColsByName() map[string]*Column {
	return t.colsByName
}

// Name returns the table's name.
func (t *Table) Name() string {
	return t.name
}

// PrimaryKey returns the table's primary-key column.
func (t *Table) PrimaryKey() *Column {
	return t.pk
}
// IsIndexed reports whether column colName participates in an index.
func (t *Table) IsIndexed(colName string) (bool, error) {
	col, ok := t.colsByName[colName]
	if !ok {
		return false, ErrColumnDoesNotExist
	}
	_, ok = t.indexes[col.id]
	return ok, nil
}
// GetColumnByName looks up a column of t by name.
func (t *Table) GetColumnByName(name string) (*Column, error) {
	if col, ok := t.colsByName[name]; ok {
		return col, nil
	}
	return nil, ErrColumnDoesNotExist
}
// GetColumnByID looks up a column of t by its numeric id.
func (t *Table) GetColumnByID(id uint64) (*Column, error) {
	if col, ok := t.colsByID[id]; ok {
		return col, nil
	}
	return nil, ErrColumnDoesNotExist
}
// newTable creates and registers a table in db from colsSpec, with pk
// naming the primary-key column.
// Errors: ErrIllegalArguments on empty name/specs/pk,
// ErrTableAlreadyExists on a name clash, ErrDuplicatedColumn on a
// repeated column name, ErrInvalidPK when pk matches no column.
func (db *Database) newTable(name string, colsSpec []*ColSpec, pk string) (*Table, error) {
	if len(name) == 0 || len(colsSpec) == 0 || len(pk) == 0 {
		return nil, ErrIllegalArguments
	}
	if db.ExistTable(name) {
		return nil, ErrTableAlreadyExists
	}

	table := &Table{
		id:   uint64(len(db.tablesByID) + 1),
		db:   db,
		name: name,
		// Pre-size the column maps: each spec yields exactly one column.
		colsByID:   make(map[uint64]*Column, len(colsSpec)),
		colsByName: make(map[string]*Column, len(colsSpec)),
		indexes:    make(map[uint64]struct{}),
	}

	for _, cs := range colsSpec {
		if _, exists := table.colsByName[cs.colName]; exists {
			return nil, ErrDuplicatedColumn
		}

		col := &Column{
			id:      uint64(len(table.colsByID) + 1),
			table:   table,
			colName: cs.colName,
			colType: cs.colType,
			// The primary-key column is implicitly NOT NULL.
			notNull: cs.notNull || cs.colName == pk,
		}

		table.colsByID[col.id] = col
		table.colsByName[col.colName] = col

		if pk == col.colName {
			table.pk = col
		}
	}

	if table.pk == nil {
		return nil, ErrInvalidPK
	}

	db.tablesByID[table.id] = table
	db.tablesByName[table.name] = table

	return table, nil
}
// ID returns the column's numeric identifier.
func (c *Column) ID() uint64 {
	return c.id
}

// Name returns the column's name.
func (c *Column) Name() string {
	return c.colName
}

// Type returns the column's SQL value type.
func (c *Column) Type() SQLValueType {
	return c.colType
}

// IsNullable reports whether the column accepts NULL values.
func (c *Column) IsNullable() bool {
	return !c.notNull
}
|
package silverfish
import (
"crypto/sha512"
"encoding/hex"
"math/rand"
"strings"
)
const dictionary string = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
// SHA512Str export
func SHA512Str(src, hashSalt *string) *string {
salted := strings.Join([]string{*src, *hashSalt}, "")
h := sha512.New()
h.Write([]byte(salted))
s := hex.EncodeToString(h.Sum(nil))
return &s
}
// RandomStr export
func RandomStr(length int) *string {
output := ""
index := 0
for i := 0; i < length; i++ {
index = rand.Intn(len(dictionary))
output += dictionary[index : index+1]
}
return &output
}
|
// strStr returns the index of the first occurrence of needle in haystack,
// or -1 if needle is not a substring of haystack. An empty needle matches
// at index 0 (matching strings.Index semantics).
//
// The original scanned every window position even after a mismatch (no early
// break), carried a dead bounds check (i+j can never exceed len-1 given the
// loop bounds), and allocated two strings per byte comparison. This version
// compares each candidate window with a direct substring equality, which the
// compiler performs without allocation.
func strStr(haystack string, needle string) int {
	m := len(needle)
	if m == 0 {
		return 0
	}
	for i := 0; i+m <= len(haystack); i++ {
		if haystack[i:i+m] == needle {
			return i
		}
	}
	return -1
}
|
package subscription
import (
"github.com/dennor/go-paddle/events/types"
"github.com/dennor/phpserialize"
)
// UpdatedAlertName is the alert_name value Paddle sends for this event.
const UpdatedAlertName = "subscription_updated"

// Updated refer to https://paddle.com/docs/subscriptions-event-reference/#subscription_updated
//
// Updated is the payload of Paddle's subscription_updated webhook. Numeric
// fields carry the `,string` JSON option because Paddle serializes them as
// strings. PSignature is excluded from PHP serialization via php:"-" since
// the signature covers the remaining fields (see Serialize/Signature).
type Updated struct {
	AlertID             int                     `json:"alert_id,string"`
	AlertName           string                  `json:"alert_name"`
	CancelURL           string                  `json:"cancel_url"`
	CheckoutID          string                  `json:"checkout_id"`
	Currency            string                  `json:"currency,omitempty"`
	CustomData          string                  `json:"custom_data"`
	Email               string                  `json:"email"`
	EventTime           *types.Datetime         `json:"event_time,string"`
	LinkedSubscriptions string                  `json:"linked_subscriptions"`
	MarketingConsent    *types.MarketingConsent `json:"marketing_consent,string"`
	NewPrice            *types.CurrencyValue    `json:"new_price,string"`
	NewQuantity         int                     `json:"new_quantity,string"`
	NewUnitPrice        *types.CurrencyValue    `json:"new_unit_price,string"`
	NextBillDate        *types.Date             `json:"next_bill_date,string"`
	OldNextBillDate     *types.Date             `json:"old_next_bill_date,string"`
	OldPrice            *types.CurrencyValue    `json:"old_price,string"`
	OldQuantity         int                     `json:"old_quantity,string"`
	OldStatus           string                  `json:"old_status"`
	OldSubscriptionPlanID int                   `json:"old_subscription_plan_id,string"`
	OldUnitPrice        *types.CurrencyValue    `json:"old_unit_price,string"`
	Passthrough         string                  `json:"passthrough"`
	Status              string                  `json:"status"`
	SubscriptionID      int                     `json:"subscription_id,string"`
	SubscriptionPlanID  int                     `json:"subscription_plan_id,string"`
	UpdateURL           string                  `json:"update_url"`
	UserID              int                     `json:"user_id,string,omitempty"`
	PSignature          string                  `json:"p_signature" php:"-"`
}
// Serialize PHP-serializes the alert (the representation Paddle signs);
// PSignature itself is skipped via its php:"-" tag.
func (s *Updated) Serialize() ([]byte, error) {
	return phpserialize.Marshal(s)
}

// Signature returns the raw p_signature field as bytes; any decoding
// (presumably base64) is left to the verifying caller — confirm against
// the signature-verification code.
func (s *Updated) Signature() ([]byte, error) {
	return []byte(s.PSignature), nil
}
|
package metrics
import (
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"plugins"
"regexp"
"strings"
"time"
)
// TCP Network stats
//
// gobs of this code lifted from: https://github.com/grahamking/latency/
//
// DESCRIPTION
// This plugin attempts to determine network latency.
// interface is up and we cannot ping.
//
// OUTPUT
// Graphite plain-text format (name value timestamp\n)
//
// PLATFORMS
// Linux
// TCP_STATS_NAME is the name this plugin registers under and returns from Init.
const TCP_STATS_NAME = "tcp_metrics"

// TcpStats measures TCP connect ("SYN ping") latency to a remote host and
// reports it in Graphite plain-text format. Configured via flags in Init.
type TcpStats struct {
	flags            *flag.FlagSet // plugin-local flag set ("tcp-metrics")
	networkInterface string        // interface checked for up/down before pinging (-test-interface, defaults to listenInterface)
	listenInterface  string        // interface whose address we ping from (-i, required)
	remoteAddress    string        // host to ping (-host, required)
	localAddress     string        // resolved IP of listenInterface, set per Gather run
	networkPort      int           // TCP port to SYN (-port, default 22)
	timeout          float64       // per-attempt timeout in seconds (-timeout)
	workingTimeout   time.Duration // timeout converted to a Duration in Init
	retryCount       int           // max ping attempts per Gather (-retry-count)
	hostNiceName     string        // remoteAddress with non-alphanumerics mangled to "_" for metric names
}
// receiveErrorType wraps an error reported by the latency probe itself;
// Gather treats it as non-retryable and stops the attempt loop.
type receiveErrorType struct {
	err string
}

// Error implements the error interface.
func (re receiveErrorType) Error() string {
	return re.err
}
// init registers this plugin with the plugin registry at package load time.
func init() {
	plugins.Register("tcp_metrics", new(TcpStats))
}
// Init parses the plugin's flags from config.Args, validates the required
// -i and -host options, and precomputes the per-attempt timeout and a
// Graphite-safe host name. It returns the plugin name together with the
// first error encountered (nil on success).
func (tcp *TcpStats) Init(config plugins.PluginConfig) (string, error) {
	tcp.flags = flag.NewFlagSet("tcp-metrics", flag.ContinueOnError)
	tcp.flags.StringVar(&tcp.networkInterface, "test-interface", "", "The Network to test before pinging, defaults to the listen interface")
	tcp.flags.StringVar(&tcp.listenInterface, "i", "", "The network interface to listen on")
	tcp.flags.StringVar(&tcp.remoteAddress, "host", "", "The Network Address to ping")
	tcp.flags.IntVar(&tcp.networkPort, "port", 22, "The Port to SYN (Ping)")
	tcp.flags.Float64Var(&tcp.timeout, "timeout", 10, "Number of seconds to wait for a response")
	tcp.flags.IntVar(&tcp.retryCount, "retry-count", 3, "The number of times to retry before failing")

	var err error
	if len(config.Args) > 1 {
		if err = tcp.flags.Parse(config.Args[1:]); err != nil {
			return TCP_STATS_NAME, err
		}
	}

	if tcp.listenInterface == "" {
		return TCP_STATS_NAME, fmt.Errorf("You need to specify an Interface! e.g.: -i eth0")
	}
	if tcp.networkInterface == "" {
		// Fall back to probing the same interface we listen on.
		tcp.networkInterface = tcp.listenInterface
	}
	if tcp.remoteAddress == "" {
		return TCP_STATS_NAME, fmt.Errorf("You need to specify a host to ping! e.g.: -host 10.0.0.1")
	}

	// Convert the float seconds into a time.Duration via a millisecond string.
	tcp.workingTimeout, err = time.ParseDuration(fmt.Sprintf("%0.0fms", tcp.timeout*1000))
	if err != nil {
		log.Println(err)
	}

	// Graphite metric path components may only contain alphanumerics.
	sanitizer := regexp.MustCompile("[^0-9a-zA-Z]")
	tcp.hostNiceName = sanitizer.ReplaceAllString(tcp.remoteAddress, "_")

	if os.Getenv("DEBUG") != "" {
		log.Println("Remote Host: ", tcp.remoteAddress)
		log.Println("Listen Interface:", tcp.listenInterface)
		log.Println("Test interface: ", tcp.networkInterface)
		log.Println("Port: ", tcp.networkPort)
		log.Println("Retry count: ", tcp.retryCount)
		log.Printf("Ping Timeout: %s", tcp.workingTimeout.String())
	}

	return TCP_STATS_NAME, err
}
// Gather runs the TCP ping check and appends Graphite plain-text samples to r:
//
//	tcp.latency.<host>.ms  — latency in ms; note this is cumulative, so it
//	                         includes one full timeout per failed attempt
//	                         preceding the success
//	tcp.try-count.<host>   — the attempt number that succeeded
//
// It returns an error only for hard misconfiguration (test interface missing
// or down); transient conditions (no IP on the listen interface, all pings
// failing) return nil with no samples so the check keeps running.
func (tcp *TcpStats) Gather(r *plugins.Result) error {
	// measure TCP/IP response
	stat, err := os.Stat("/sys/class/net/" + tcp.networkInterface)
	if nil != err {
		return fmt.Errorf("Interface %s does not exist.", tcp.networkInterface)
	}
	if !stat.IsDir() {
		return fmt.Errorf("Interface %s does not exist.", tcp.networkInterface)
	}
	// is the network interface up?
	state, err := ioutil.ReadFile("/sys/class/net/" + tcp.networkInterface + "/operstate")
	if nil != err {
		return fmt.Errorf("Unable to determine if interface is up.")
	}
	// cannot ping when the network is down
	// (only the first two bytes of operstate are compared against "up")
	if "up" != string(state[0:2]) {
		return fmt.Errorf("Network Interface %s is down", tcp.networkInterface)
	}
	iface, err := interfaceAddress(tcp.listenInterface)
	if err != nil {
		log.Print(err)
		// we do not return the error, because that will cause the check to be stopped.
		// we return nil and no stats instead while we wait for the interface to get an
		// ip address again. (e.g. happens when network manager disables interface)
		return nil
	} else {
		// keep only the IP, dropping any "/prefix" CIDR suffix
		tcp.localAddress = strings.Split(iface.String(), "/")[0]
	}
	// does the remoteAddress look like an IP address?
	remoteIp, err := getRemoteAddress(tcp.remoteAddress)
	if err != nil {
		return err
	}
	var counter int
	var totalLatency time.Duration
	if "" != tcp.localAddress {
	TryLoop:
		for counter < tcp.retryCount {
			counter++
			latency, errPing := tcp.ping(tcp.localAddress, remoteIp, uint16(tcp.networkPort))
			if errPing == nil {
				// Success: report cumulative latency and the attempt count.
				totalLatency += latency
				r.Add(fmt.Sprintf("tcp.latency.%s.ms %0.2f", tcp.hostNiceName, float32(totalLatency)/float32(time.Millisecond)))
				r.Add(fmt.Sprintf("tcp.try-count.%s %d", tcp.hostNiceName, counter))
				break
			}
			switch errPing.(type) {
			case receiveErrorType:
				// Probe-level failure: give up immediately, no retries.
				//log.Println(errPing)
				break TryLoop
			case error:
				// Timeout (or other error): charge the full timeout and retry.
				totalLatency += tcp.workingTimeout
				log.Printf("Failed TCP Ping check %d...", counter)
			}
		}
	}
	return nil
}
// GetStatus returns additional status text for the plugin; this plugin has none.
func (tcp *TcpStats) GetStatus() string {
	return ""
}

// ShowUsage prints the plugin's flag defaults.
func (tcp *TcpStats) ShowUsage() {
	tcp.flags.PrintDefaults()
}
// ping performs one TCP latency probe from localAddr to remoteAddr:port,
// bounded by tcp.workingTimeout. It returns the measured duration, a
// receiveErrorType when the probe itself fails (treated as non-retryable by
// Gather), or a generic error on timeout.
//
// Fix: the channels are buffered (capacity 1) so that whichever sender loses
// the select race — the prober goroutine or the time.AfterFunc callback —
// can complete its send and exit. With the original unbuffered channels the
// losing sender blocked forever, leaking a goroutine per call.
func (tcp *TcpStats) ping(localAddr, remoteAddr string, port uint16) (time.Duration, error) {
	receiveDuration := make(chan time.Duration, 1)
	receiveError := make(chan error, 1)
	timeoutChannel := make(chan bool, 1)

	// limit ourselves to the configured timeout
	time.AfterFunc(tcp.workingTimeout, func() { timeoutChannel <- true })

	go func() {
		t, err := latency(localAddr, remoteAddr, port)
		if err != nil {
			receiveError <- err
			return
		}
		receiveDuration <- t
	}()

	select {
	case d := <-receiveDuration:
		return d, nil
	case e := <-receiveError:
		return 0, receiveErrorType{err: e.Error()}
	case <-timeoutChannel:
		return time.Duration(0), fmt.Errorf("Failed to TCP ping remote host")
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.