text stringlengths 11 4.05M |
|---|
package services
import "github.com/cloudfoundry-incubator/notifications/models"
// PreferenceUpdaterInterface describes a type that can apply a slice of
// notification preferences for a user against a database connection.
type PreferenceUpdaterInterface interface {
	Execute(models.ConnectionInterface, []models.Preference, string) error
}
// PreferenceUpdater applies notification preferences by writing to and
// deleting from the unsubscribes repository.
type PreferenceUpdater struct {
	repo models.UnsubscribesRepoInterface // storage for unsubscribe records
}
// NewPreferenceUpdater builds a PreferenceUpdater backed by the given
// unsubscribes repository.
func NewPreferenceUpdater(repo models.UnsubscribesRepoInterface) PreferenceUpdater {
	return PreferenceUpdater{repo: repo}
}
// Execute reconciles each preference with the unsubscribes table: a
// preference with Email disabled is recorded as an unsubscribe, while an
// enabled one removes any existing unsubscribe row. The first repository
// error aborts the loop and is returned.
func (updater PreferenceUpdater) Execute(conn models.ConnectionInterface, preferences []models.Preference, userID string) error {
	for _, pref := range preferences {
		unsub := models.Unsubscribe{
			ClientID: pref.ClientID,
			KindID:   pref.KindID,
			UserID:   userID,
		}

		var err error
		if pref.Email {
			_, err = updater.repo.Destroy(conn, unsub)
		} else {
			_, err = updater.repo.Upsert(conn, unsub)
		}
		if err != nil {
			return err
		}
	}
	return nil
}
|
package sqrtandcube
import (
"fmt"
"math"
)
// digitPowerSum returns the sum of each decimal digit of number raised to
// the given power. Both number and power must be positive; otherwise -1
// is returned to signal an error.
func digitPowerSum(number int, power int) int {
	if number <= 0 || power <= 0 {
		return -1
	}
	sum := 0
	for number > 0 {
		sum += intPow(number%10, power)
		number /= 10
	}
	return sum
}

// intPow computes base**exp (exp >= 0) with integer arithmetic, avoiding
// the float64 conversion and potential rounding of math.Pow.
func intPow(base, exp int) int {
	result := 1
	for ; exp > 0; exp-- {
		result *= base
	}
	return result
}
// sqrt delivers the digit-power sum of number on out (callers pass
// power=2 for the digit-square sum).
func sqrt(number int, power int, out chan int) {
	out <- digitPowerSum(number, power)
}
// cube delivers the digit-power sum of number on out (callers pass
// power=3 for the digit-cube sum).
func cube(number int, power int, out chan int) {
	out <- digitPowerSum(number, power)
}
// SqrtAndCube concurrently computes the digit-square and digit-cube sums
// of a fixed sample number and prints both values plus their total.
func SqrtAndCube() {
	const number = 589
	squareCh := make(chan int)
	cubeCh := make(chan int)
	go sqrt(number, 2, squareCh)
	go cube(number, 3, cubeCh)
	squares, cubes := <-squareCh, <-cubeCh
	fmt.Println("sqrt is ", squares)
	fmt.Println("cubes are ", cubes)
	fmt.Println("Final output sqrt+cube of digits ", number, " is ", (squares + cubes))
}
// digit streams the decimal digits of number, least significant first,
// on digch and closes the channel when no digits remain.
func digit(number int, digch chan int) {
	fmt.Println("Entered to sqrtandcube->digit()")
	for ; number != 0; number /= 10 {
		d := number % 10
		fmt.Println("digit ", d)
		digch <- d
	}
	close(digch)
}
// calcPower sums each digit of number raised to power, consuming digits
// streamed by the digit goroutine, and delivers the total on chnl.
func calcPower(number int, power int, chnl chan int) {
	fmt.Println("Entered to calcPower()")
	digch := make(chan int)
	go digit(number, digch)
	sum := 0
	for d := range digch {
		sum += int(math.Pow(float64(d), float64(power)))
		fmt.Println("power sum", sum)
	}
	chnl <- sum
}
// RunCalcPower concurrently computes the digit-square and digit-cube
// sums of a fixed number via calcPower and prints their total.
func RunCalcPower() {
	number := 589
	squareCh := make(chan int)
	cubeCh := make(chan int)
	go calcPower(number, 3, cubeCh)
	go calcPower(number, 2, squareCh)
	cubes, squares := <-cubeCh, <-squareCh
	fmt.Println("Final output ", (squares + cubes))
}
// RunDigit streams the digits of a sample number and prints each one as
// it arrives.
func RunDigit() {
	ch := make(chan int)
	go digit(123456789, ch)
	for d := range ch {
		fmt.Println("digit", d)
	}
}
|
package raft
import (
"bytes"
"encoding/gob"
"encoding/json"
"fmt"
zmq "github.com/pebbe/zmq4"
"io/ioutil"
"log"
"math/rand"
"os"
"sort"
"strconv"
"time"
)
//type appendEntriesResponse struct{}
//Server interface declares functions that will be used to provide APIs to communicate with server.
// Server is the API of a raft node: identity and peer discovery, term and
// vote bookkeeping, log indices, message channels, and the role routines
// (follower/candidate/leader) driven by loop().
type Server interface {
	ServId() int              //Returns this server's id.
	PeersIds() []int          //Returns the ids of the other servers in the cluster.
	GetTerm() uint64          //Returns current term for the shared log for this raft server.
	GetLeader() int           //Returns leader id for the current term.
	GetVotedFor() int         //Returns ID of candidate for which raft server has voted for.
	GetCommitIndex() uint64   //Returns the current commit index.
	GetLastApplied() uint64   //Returns the LastApplied index.
	Start()                   //Runs the state-machine loop (blocks).
	Outbox() chan *Envelope   //Channel for outgoing envelopes.
	Inbox() chan *Envelope    //Channel for incoming envelopes.
	No_Of_Peers() int         //Returns the cluster size.
	ClientServerComm(clientPortAddr string) //Serves the client-facing port.
	RetLsn() Lsn              //Returns the current log sequence number.
	GetState() int            //Returns current role: FOLLOWER, CANDIDATE or LEADER.
	resetElectTimeout()       // resets election timeout
	loop()                    //Dispatches to the handler for the current role.
	follower()                //Go routine which is run when server enters into follower state at boot up and also later in its life
	candidate()               //Campaigns for leadership.
	leader()                  //Replicates the log and sends heartbeats.
	GetPrevLogIndex() uint64
	GetPrevLogTerm() uint64
	SetTerm(uint64)
	SetVotedFor(int)
	requestForVoteToPeers()
	handleRequestVote(env *Envelope) bool
	sendHeartBeats(ni *nextIndex, timeout time.Duration) (int, bool)
	handleAppendEntries(env *Envelope) (*appendEntriesResponse, bool)
	ApplyCommandToSM()
}
// Start runs the server's role state machine; it blocks for the life of
// the server.
func (ServerVar *Raft) Start() {
	ServerVar.loop()
}

// SetTerm records term as the server's current term.
func (ServerVar *Raft) SetTerm(term uint64) {
	ServerVar.Term = term
}

// GetPrevLogIndex returns the stored PrevLogIndex.
func (ServerVar *Raft) GetPrevLogIndex() uint64 {
	return ServerVar.PrevLogIndex
}

// GetPrevLogTerm returns the stored PrevLogTerm.
func (ServerVar *Raft) GetPrevLogTerm() uint64 {
	return ServerVar.PrevLogTerm
}

// GetLastApplied returns the stored LastApplied index.
func (ServerVar *Raft) GetLastApplied() uint64 {
	return ServerVar.LastApplied
}

// GetState returns the server's current role (FOLLOWER, CANDIDATE or
// LEADER).
func (ServerVar *Raft) GetState() int {
	return ServerVar.State
}

// GetCommitIndex returns the server's commit index.
func (ServerVar *Raft) GetCommitIndex() uint64 {
	return ServerVar.CommitIndex
}

// GetTerm returns the server's current term.
func (ServerVar *Raft) GetTerm() uint64 {
	return ServerVar.Term
}

// GetLeader returns the id of the leader for the current term.
func (ServerVar *Raft) GetLeader() int {
	return ServerVar.LeaderId
}

// GetVotedFor returns the id of the candidate this server voted for.
func (ServerVar *Raft) GetVotedFor() int {
	return ServerVar.VotedFor
}

// SetVotedFor records vote as the candidate this server voted for.
func (ServerVar *Raft) SetVotedFor(vote int) {
	ServerVar.VotedFor = vote
}

// RetLsn returns the server's current log sequence number.
func (ServerVar *Raft) RetLsn() Lsn {
	return Lsn(ServerVar.LsnVar)
}

// No_Of_Peers returns the size of the cluster this server belongs to.
func (ServerVar *Raft) No_Of_Peers() int {
	return ServerVar.ClusterSize
}

// Outbox returns the channel used to send envelopes to peers.
func (ServerVar *Raft) Outbox() chan *Envelope {
	return ServerVar.Out
}

// Inbox returns the channel on which envelopes from peers arrive.
func (ServerVar *Raft) Inbox() chan *Envelope {
	return ServerVar.In
}

// ServId returns this server's id.
func (ServerVar *Raft) ServId() int {
	return ServerVar.Pid
}

// PeersIds returns the ids of all other servers in the cluster.
func (ServerVar *Raft) PeersIds() []int {
	return ServerVar.Peers
}
// loop drives the role state machine: it repeatedly dispatches to the
// handler for the server's current state (each handler returns when the
// state changes) and exits on an unrecognized state.
func (ServerVar *Raft) loop() {
	for {
		switch ServerVar.GetState() { // begin life as a follower
		case FOLLOWER:
			ServerVar.follower()
		case CANDIDATE:
			ServerVar.candidate()
		case LEADER:
			ServerVar.leader()
		default:
			return
		}
	}
}
// catchUpLog sends peer id one APPENDENTRIES envelope carrying the log
// entries after that peer's recorded prev index, then waits up to
// 2*timeout for an APPENDENTRIESRESPONSE and advances (set) or rewinds
// (decrement) the peer's next-index bookkeeping based on the reply.
// Returns errorDeposed when the reply carries a newer term,
// errorappendEntriesRejected when the peer refuses the entries,
// errorTimeout on no reply, or an out-of-sync error from nextIndex.
func (ServerVar *Raft) catchUpLog(ni *nextIndex, id int, timeout time.Duration) error {
	currentTerm := ServerVar.Term
	prevLogIndex := ni.prevLogIndex(uint64(id)) //.........
	// Up to 10 entries following the peer's last known index, plus the
	// term of the entry at that index.
	resultentries, prevLogTerm := ServerVar.SLog.entriesAfter(prevLogIndex, 10)
	//commitIndex := ServerVar.SLog.getCommitIndex()
	var envVar Envelope
	envVar.Pid = id
	envVar.MessageId = APPENDENTRIES
	envVar.SenderId = ServerVar.ServId()
	envVar.Leaderid = ServerVar.LeaderId
	envVar.LastLogIndex = prevLogIndex
	envVar.LastLogTerm = prevLogTerm
	envVar.CommitIndex = ServerVar.SLog.commitIndex
	// fmt.Println("APPENDENTRIES sending lsn = ",envVar.LastLsn,"for server = ",id)
	envVar.Message = &appendEntries{TermIndex: currentTerm, Entries: resultentries}
	if debug {
		fmt.Println("Server ", ServerVar.ServId(), "->", id, " Prev log= ", prevLogIndex, "Prev Term = ", prevLogTerm)
	}
	ServerVar.Outbox() <- &envVar
	select {
	//case <-replication:
	case env := <-(ServerVar.Inbox()):
		switch env.MessageId {
		case APPENDENTRIESRESPONSE:
			// NOTE(review): id is rebound to the responder here, which may
			// not be the peer this call targeted — confirm this is intended.
			id = env.SenderId
			prevLogIndex = ni.prevLogIndex(uint64(id)) //.........
			resultentries, prevLogTerm = ServerVar.SLog.entriesAfter(prevLogIndex, 10)
			// fmt.Println("APPENDENTRIESRESPONSE received lsn = ",LastLsn,"for server = ",env.SenderId)
			if debug {
				fmt.Println("Received APPENDENTRIESRESPONSE at RIGHT place")
			}
			resp := env.Message.(appendEntriesResponse)
			if resp.Term > currentTerm {
				// fmt.Println("Term not matched hence returing from APPENDENTRIES RESPONSE")
				return errorDeposed
			}
			if !resp.Success {
				// Peer rejected the entries: step its next index back one so
				// the following round retransmits from an earlier point.
				newPrevLogIndex, err := ni.decrement(uint64(id), prevLogIndex)
				if debug {
					fmt.Println("No SUCC: new index for ", id, "is", newPrevLogIndex, "where prev log index was ", prevLogIndex)
				}
				if err != nil {
					return err
				}
				if debug {
					fmt.Println("flush to %v: rejected")
				}
				return errorappendEntriesRejected
			}
			if len(resultentries) > 0 {
				// Peer accepted: record the last replicated entry's lsn as
				// its new prev index.
				newPrevLogIndex, err := ni.set(uint64(id), uint64(resultentries[len(resultentries)-1].Lsn()), prevLogIndex)
				if debug {
					fmt.Println("SET : new prev index for ", id, "is", newPrevLogIndex, "Term = ", resp.Term)
				}
				if err != nil {
					return err
				}
				return nil
			} /*else {
				fmt.Println("NOT SET : new prev index for id =", id,"sender = ",env.SenderId,"Term = ",resp.Term)
			}*/
			return nil
		}
	case <-time.After(2 * timeout):
		return errorTimeout
	}
	return nil
}
// handleAppendEntries applies an incoming APPENDENTRIES envelope and
// returns the response to send back plus a flag telling the caller to
// downgrade to follower. Stale terms are rejected; newer terms are
// adopted (clearing the vote); the local log is truncated to match the
// leader's prev index/term, the carried entries are appended, and the
// commit index is advanced toward the leader's.
func (ServerVar *Raft) handleAppendEntries(env *Envelope) (*appendEntriesResponse, bool) {
	if env == nil {
		fmt.Println("Eureka..............")
	}
	resp := env.Message.(appendEntries)
	if debug {
		fmt.Println("handleAppendEntries() : Fo server ", ServerVar.ServId(), " Term : ", ServerVar.Term, " env Term = ", resp.TermIndex, "Commit index ", env.CommitIndex, " ServerVar.SLog.commitIndex ", ServerVar.SLog.commitIndex)
	}
	// A sender from an older term is rejected outright.
	if resp.TermIndex < ServerVar.Term {
		// fmt.Println("Giving false here.....1")
		return &appendEntriesResponse{
			Term:    ServerVar.Term,
			Success: false,
			reason:  fmt.Sprintf("Term is less"),
		}, false
	}
	//success := true
	downGrade := false
	// A newer term forces us to adopt it and clear our vote.
	if resp.TermIndex > ServerVar.Term {
		ServerVar.Term = resp.TermIndex
		ServerVar.VotedFor = NOVOTE
		downGrade = true
	}
	// A candidate that hears from another legitimate sender for this (or a
	// newer) term also steps down.
	if ServerVar.State == CANDIDATE && env.SenderId != ServerVar.LeaderId && resp.TermIndex >= ServerVar.Term {
		ServerVar.Term = resp.TermIndex
		ServerVar.VotedFor = NOVOTE
		downGrade = true
	}
	ServerVar.resetElectTimeout()
	// fmt.Println("Server ",ServerVar.ServId(),"Discard ",env.LastLogIndex,env.LastLogTerm," - log ",ServerVar.SLog.lastIndex(),ServerVar.SLog.lastTerm())
	// Drop any local entries that conflict with the leader's prev position.
	if err := ServerVar.SLog.discardEntries(env.LastLogIndex, env.LastLogTerm); err != nil {
		// fmt.Println("Giving false here.....2", err)
		return &appendEntriesResponse{
			Term:    ServerVar.Term,
			Success: false,
			reason:  fmt.Sprintf("while ensuring last log entry had index=%d term=%d: error: %s", env.LastLogIndex, env.LastLogTerm, err)}, downGrade
	}
	//resp := env.Message.(appendEntries)
	for i, entry := range resp.Entries {
		// fmt.Println("Appending entry for server ",ServerVar.ServId())
		if err := ServerVar.SLog.appendEntry(entry); err != nil {
			// fmt.Println("Giving false here.....3")
			return &appendEntriesResponse{
				Term:    ServerVar.Term,
				Success: false,
				reason: fmt.Sprintf(
					"AppendEntry %d/%d failed: %s",
					i+1,
					len(resp.Entries),
					err,
				),
			}, downGrade
		}
		if debug {
			fmt.Println("handle() Server ", ServerVar.ServId(), " appendEntry")
		}
	}
	// Advance our commit index up to the leader's reported commit index.
	if env.CommitIndex > 0 && env.CommitIndex > ServerVar.SLog.commitIndex {
		if err := ServerVar.SLog.commitTill(env.CommitIndex); err != nil {
			// fmt.Println("Giving false here.....4")
			return &appendEntriesResponse{
				Term:    ServerVar.Term,
				Success: false,
				reason:  fmt.Sprintf("CommitTo failed"),
			}, downGrade
		}
	}
	return &appendEntriesResponse{
		Term:    ServerVar.Term,
		Success: true,
	}, downGrade
}
// newNextIndex builds a nextIndex table holding defaultNextIndex for
// every peer of this server.
func (ServerVar *Raft) newNextIndex(defaultNextIndex uint64) *nextIndex {
	ni := &nextIndex{m: map[uint64]uint64{}}
	for _, peer := range ServerVar.PeersIds() {
		//fmt.Println("INITIALIZEDn ", id, " to ", ni.m[uint64(id)])
		ni.m[uint64(peer)] = defaultNextIndex
	}
	return ni
}
// prevLogIndex returns the recorded previous log index for the peer with
// the given id, panicking if the peer is unknown. The map is consulted
// once under the read lock (the original looked it up twice).
func (ni *nextIndex) prevLogIndex(id uint64) uint64 {
	ni.RLock()
	defer ni.RUnlock()
	idx, ok := ni.m[id]
	if !ok {
		panic(fmt.Sprintf("peer %d not found", id))
	}
	return idx
}
// decrement lowers the stored next index for peer id by one (never below
// zero). prev must match the currently stored value; otherwise the
// current value is returned together with errorOutOfSync.
func (ni *nextIndex) decrement(id uint64, prev uint64) (uint64, error) {
	ni.Lock()
	defer ni.Unlock()
	current, ok := ni.m[id]
	if !ok {
		panic(fmt.Sprintf("peer %d not found", id))
	}
	if current != prev {
		return current, errorOutOfSync
	}
	if current > 0 {
		ni.m[id]--
		if debug {
			fmt.Println("decremented val ", ni.m[id])
		}
	}
	return ni.m[id], nil
}
// set stores index as the next index for peer id, provided the currently
// stored value still equals prev; otherwise it returns errorOutOfSync
// with the stored value.
func (ni *nextIndex) set(id, index, prev uint64) (uint64, error) {
	ni.Lock()
	defer ni.Unlock()
	stored, ok := ni.m[id]
	if !ok {
		panic(fmt.Sprintf("server %d not found", id))
	}
	if stored != prev {
		return stored, errorOutOfSync
	}
	ni.m[id] = index
	if debug {
		fmt.Println("set val ", ni.m[id])
	}
	return index, nil
}
// sendHeartBeats runs catchUpLog concurrently against every peer and
// waits for all of them, returning how many replications succeeded and
// whether any peer reported a newer term (meaning this leader must step
// down). The original spawned a second goroutine plus an error channel
// per peer that added no behavior; each worker now calls catchUpLog
// directly.
func (ServerVar *Raft) sendHeartBeats(ni *nextIndex, timeout time.Duration) (int, bool) {
	type tuple struct {
		id  uint64
		err error
	}
	responses := make(chan tuple, len(ServerVar.PeersIds()))
	for _, id1 := range ServerVar.PeersIds() {
		go func(id int) {
			responses <- tuple{uint64(id), ServerVar.catchUpLog(ni, id, timeout)}
		}(id1)
	}
	successes, downGrade := 0, false
	for i := 0; i < cap(responses); i++ {
		switch t := <-responses; t.err {
		case nil:
			successes++
		case errorDeposed:
			downGrade = true
		default:
			// Timeouts, rejections and out-of-sync errors simply don't
			// count as successes.
		}
	}
	return successes, downGrade
}
// leader runs while this server believes itself to be the leader: it
// replicates its log to peers on every heartbeat tick (and after each
// new client command), advances the commit index once a quorum holds the
// entries, and returns (stepping down to follower) when a newer term is
// observed.
func (ServerVar *Raft) leader() {
	//replicate := make(chan struct{})
	replicate := make(chan struct{})
	hbeat := time.NewTicker(heartBeatInterval())
	defer hbeat.Stop()
	// Forward every heartbeat tick onto the replicate channel.
	go func() {
		for _ = range hbeat.C {
			replicate <- struct{}{}
		}
	}()
	nIndex := ServerVar.newNextIndex(uint64(ServerVar.SLog.lastIndex()))
	//go ServerVar.sendHeartBeats(nIndex)
	for {
		select {
		case t := <-ServerVar.Outchan:
			cmd := t.(*CommandTuple)
			// Append the command to our (leader) log
			if debug {
				fmt.Println("got command, appending", ServerVar.Term)
			}
			currentTerm := ServerVar.Term
			comma := new(bytes.Buffer)
			encCommand := gob.NewEncoder(comma)
			encCommand.Encode(cmd)
			entry := &LogEntryStruct{
				Logsn:     Lsn(ServerVar.SLog.lastIndex() + 1),
				TermIndex: currentTerm,
				DataArray: cmd.Com, //comma.Bytes(),
				Commit:    cmd.ComResponse,
			}
			if err := ServerVar.SLog.appendEntry(entry); err != nil {
				panic(err)
				// NOTE(review): this continue is unreachable after panic;
				// go vet flags it.
				continue
			}
			if debug {
				fmt.Printf(" Leader after append, commitIndex=%d lastIndex=%d lastTerm=%d", ServerVar.SLog.getCommitIndex(), ServerVar.SLog.lastIndex(), ServerVar.SLog.lastTerm())
			}
			// Trigger an immediate replication round for the new entry.
			go func() {
				// fmt.Println("sending replicate")
				replicate <- struct{}{}
			}()
		case <-replicate:
			// fmt.Println("HBT")
			successes, downGrade := ServerVar.sendHeartBeats(nIndex, 2*heartBeatInterval())
			if downGrade {
				//As leader downgrade will result in having unknown leader and going into follower state
				ServerVar.LeaderId = UNKNOWN
				ServerVar.State = FOLLOWER
				return
			}
			// fmt.Println("Successes ---> ", successes)
			if successes >= Quorum-1 {
				// Collect our current index plus every peer's recorded
				// index and take the Quorum-th as the candidate commit point.
				var indices []uint64
				indices = append(indices, ServerVar.SLog.currentIndex())
				for _, i := range nIndex.m {
					indices = append(indices, i)
				}
				sort.Sort(uint64Slice(indices))
				commitIndex := indices[Quorum-1]
				committedIndex := ServerVar.SLog.commitIndex
				peersBestIndex := commitIndex
				ourLastIndex := ServerVar.SLog.lastIndex()
				ourCommitIndex := ServerVar.SLog.getCommitIndex()
				// Peers ahead of our own log means we are stale: step down.
				if peersBestIndex > ourLastIndex {
					ServerVar.LeaderId = UNKNOWN
					ServerVar.VotedFor = NOVOTE
					ServerVar.State = FOLLOWER
					return
				}
				if commitIndex > committedIndex {
					// leader needs to do a sync before committing log entries
					if err := ServerVar.SLog.commitTill(peersBestIndex); err != nil {
						continue
					}
					if ServerVar.SLog.getCommitIndex() > ourCommitIndex {
						go func() { replicate <- struct{}{} }()
					}
				}
			}
		case env := <-(ServerVar.Inbox()):
			switch env.MessageId {
			case REQUESTVOTE:
				if debug {
					fmt.Println("Received request vote for candidate....")
				}
				downgrade := ServerVar.handleRequestVote(env)
				if downgrade {
					//As leader downgrade will result in having unknown leader and going into follower state
					ServerVar.LeaderId = UNKNOWN
					ServerVar.State = FOLLOWER
					return
				}
			case VOTERESPONSE:
			case APPENDENTRIES:
				if debug {
					fmt.Println("Received APPENDENTRIES for ", ServerVar.ServId())
				}
				resp, down := ServerVar.handleAppendEntries(env)
				var envVar Envelope
				envVar.Pid = env.Leaderid
				envVar.MessageId = APPENDENTRIESRESPONSE
				envVar.SenderId = ServerVar.ServId()
				envVar.Leaderid = ServerVar.LeaderId
				envVar.LastLogIndex = env.LastLogIndex
				envVar.LastLogTerm = env.LastLogTerm
				envVar.CommitIndex = env.CommitIndex
				//envVar.LastLsn = env.LastLsn
				envVar.Message = resp
				//fmt.Println("Before sending ",ServerVar.ServId())
				ServerVar.Outbox() <- &envVar
				//fmt.Println("After sending ",ServerVar.ServId())
				//TODO: handle and count sucesses
				if down {
					ServerVar.LeaderId = env.Leaderid
					ServerVar.State = FOLLOWER
					return
				}
			}
		}
	}
}
// heartBeatInterval returns the leader's heartbeat period: one sixth of
// the minimum election timeout, in milliseconds.
func heartBeatInterval() time.Duration {
	return time.Duration(MinElectTo/6) * time.Millisecond
}
// candidate runs while this server is campaigning: it broadcasts a vote
// request, tallies VOTERESPONSEs until quorum (becoming leader), steps
// down to follower on a newer term or a valid APPENDENTRIES, and on
// election timeout bumps the term and returns so loop() restarts the
// campaign.
func (ServerVar *Raft) candidate() {
	ServerVar.requestForVoteToPeers()
	if debug {
		fmt.Println("CANDIDATE ID = ", ServerVar.ServId())
	}
	for {
		select {
		case <-ServerVar.ElectTicker:
			// Election timed out without a winner: new term, new campaign.
			ServerVar.resetElectTimeout()
			ServerVar.Term++
			ServerVar.VotedFor = NOVOTE
			if debug {
				fmt.Println("TIMEOUT for CANDIDATE ID = ", ServerVar.ServId(), "New Term = ", ServerVar.Term)
			}
			return
		case env := <-(ServerVar.Inbox()):
			//les := env.Message.(LogEntryStruct)
			if debug {
				fmt.Println("CANDIDATE : Received Message is %v for %d ", env.MessageId, ServerVar.ServId())
			}
			switch env.MessageId {
			case REQUESTVOTE:
				if debug {
					fmt.Println("Received request vote for candidate....")
				}
				downgrade := ServerVar.handleRequestVote(env)
				if downgrade {
					//As a candidate downgrade will result in having unknown leader and going into follower state
					ServerVar.LeaderId = UNKNOWN
					ServerVar.State = FOLLOWER
					return
				}
			case VOTERESPONSE:
				les := env.Message.(LogEntryStruct)
				if les.TermIndex > ServerVar.Term {
					ServerVar.LeaderId = UNKNOWN
					ServerVar.State = FOLLOWER
					ServerVar.VotedFor = NOVOTE
					if debug {
						fmt.Println("Message term is greater for candidate = ", ServerVar.ServId(), " becoming follower")
					}
					return
				}
				if les.TermIndex < ServerVar.Term {
					break
				}
				// Record the vote, then count ballots (own vote included as 1).
				voteLock.Lock()
				voteMap[env.SenderId] = true
				voteLock.Unlock()
				vcount := 1
				for i := range ServerVar.PeersIds() {
					if voteMap[ServerVar.Peers[i]] == true {
						vcount++
					}
				}
				if debug {
					fmt.Println(" Candidate Server id = ", ServerVar.ServId(), " vcount = ", vcount, " Quorum = ", Quorum)
				}
				if vcount >= (Quorum) {
					ServerVar.LeaderId = ServerVar.ServId()
					ServerVar.State = LEADER
					ServerVar.VotedFor = NOVOTE
					if debug {
						fmt.Println(" New Leader Server id = ", ServerVar.ServId())
					}
					return
				}
			case APPENDENTRIES:
				if debug {
					fmt.Println("Received APPENDENTRIES for ", ServerVar.ServId())
				}
				resp, down := ServerVar.handleAppendEntries(env)
				var envVar Envelope
				envVar.Pid = env.Leaderid
				envVar.MessageId = APPENDENTRIESRESPONSE
				envVar.SenderId = ServerVar.ServId()
				envVar.Leaderid = ServerVar.LeaderId
				envVar.LastLogIndex = env.LastLogIndex
				envVar.LastLogTerm = env.LastLogTerm
				envVar.CommitIndex = env.CommitIndex
				//envVar.LastLsn = env.LastLsn
				envVar.Message = resp
				ServerVar.Outbox() <- &envVar
				if down {
					ServerVar.LeaderId = env.Leaderid
					ServerVar.State = FOLLOWER
					return
				}
			}
		}
	}
}
// requestForVoteToPeers broadcasts a REQUESTVOTE envelope carrying this
// server's term and last log position, recording a vote for itself in
// the process.
func (ServerVar *Raft) requestForVoteToPeers() {
	//(*ServerVar).LsnVar = (*ServerVar).LsnVar + 1
	ballot := LogEntryStruct{
		Logsn:     Lsn(ServerVar.LsnVar),
		DataArray: nil,
		TermIndex: ServerVar.GetTerm(),
		Commit:    nil,
	}
	envVar := Envelope{
		Pid:          BROADCAST,
		MessageId:    REQUESTVOTE,
		SenderId:     ServerVar.ServId(),
		LastLogIndex: ServerVar.GetPrevLogIndex(),
		LastLogTerm:  ServerVar.GetPrevLogTerm(),
		Message:      ballot,
	}
	//TODO: Whats below line??
	//MsgAckMap[les.Lsn()] = 1
	ServerVar.VotedFor = ServerVar.ServId()
	ServerVar.Outbox() <- &envVar
}
// follower runs while this server is a follower: it answers vote
// requests and append-entries, adopts the first sender as leader when
// none is known, and promotes itself to candidate when the election
// timer fires without leader contact.
func (ServerVar *Raft) follower() {
	if debug {
		fmt.Println(" Follower ID = ", ServerVar.ServId())
	}
	ServerVar.resetElectTimeout()
	for {
		select {
		case <-ServerVar.ElectTicker:
			// No leader contact within the timeout: start an election.
			ServerVar.Term++
			ServerVar.VotedFor = NOVOTE
			ServerVar.LeaderId = UNKNOWN
			ServerVar.resetElectTimeout()
			ServerVar.State = CANDIDATE
			if debug {
				fmt.Println("TIMEOUT for Follower ID = ", ServerVar.ServId(), " Now Candidate")
			}
			return
		case env := <-(ServerVar.Inbox()):
			//les := LogEntryStruct(env.Message)
			switch env.MessageId {
			case REQUESTVOTE:
				downgrade := ServerVar.handleRequestVote(env)
				if downgrade {
					//As a follower downgrade will result in having unknown leader
					ServerVar.LeaderId = UNKNOWN
				}
			case VOTERESPONSE:
				//TODO:
			case APPENDENTRIES:
				//fmt.Println("Received APPENDENTRIES for ", ServerVar.ServId())
				if ServerVar.LeaderId == UNKNOWN {
					ServerVar.LeaderId = env.SenderId
				}
				resp, down := ServerVar.handleAppendEntries(env)
				//fmt.Println("FOLLOWER: B4 Sending returned from handle ",ServerVar.ServId())
				var envVar Envelope
				envVar.Pid = env.Leaderid
				envVar.MessageId = APPENDENTRIESRESPONSE
				envVar.SenderId = ServerVar.ServId()
				envVar.Leaderid = ServerVar.LeaderId
				envVar.LastLogIndex = env.LastLogIndex
				envVar.LastLogTerm = env.LastLogTerm
				envVar.CommitIndex = env.CommitIndex
				//envVar.LastLsn = env.LastLsn
				envVar.Message = resp
				ServerVar.Outbox() <- &envVar
				//fmt.Println("FOLLOWER: After Sending ",ServerVar.ServId())
				//TODO: handle and count sucesses
				if down {
					ServerVar.LeaderId = env.Leaderid
					ServerVar.State = FOLLOWER
					return
				}
			}
		}
	}
}
// handleRequestVote decides whether to grant a vote to the candidate in
// req. It returns true when this server must downgrade to follower
// (the request carried a newer term). A vote is granted — and a
// VOTERESPONSE envelope sent — only when the term is current, this
// server has not already voted for someone else, and the candidate's
// log position is at least as advanced as ours.
func (serverVar *Raft) handleRequestVote(req *Envelope) bool {
	resp := req.Message.(LogEntryStruct)
	// Stale-term candidates are ignored outright.
	if resp.TermIndex < serverVar.Term {
		return false
	}
	downgrade := false
	if resp.TermIndex > serverVar.Term {
		if debug {
			fmt.Println("RequestVote from newer term (%d): my term %d", resp.TermIndex, serverVar.Term)
		}
		serverVar.Term = resp.TermIndex
		serverVar.VotedFor = NOVOTE
		serverVar.LeaderId = UNKNOWN
		downgrade = true
	}
	// A sitting leader keeps leading unless forced to downgrade above.
	if serverVar.GetState() == LEADER && !downgrade {
		return false
	}
	// Already committed to a different candidate this term.
	if serverVar.VotedFor != NOVOTE && serverVar.VotedFor != req.SenderId {
		return downgrade
	}
	// Refuse candidates whose log position is behind ours.
	if serverVar.PrevLogIndex > req.LastLogIndex || serverVar.PrevLogTerm > req.LastLogTerm {
		return downgrade
	}
	var lesn LogEntryStruct
	//(*serverVar).LsnVar = (*serverVar).LsnVar + 1
	lesn.Logsn = Lsn((*serverVar).LsnVar)
	lesn.DataArray = nil
	lesn.TermIndex = serverVar.GetTerm()
	lesn.Commit = nil
	var envVar Envelope
	envVar.Pid = req.SenderId
	envVar.MessageId = VOTERESPONSE
	envVar.SenderId = serverVar.ServId()
	envVar.LastLogIndex = serverVar.GetPrevLogIndex()
	envVar.LastLogTerm = serverVar.GetPrevLogTerm()
	envVar.Message = lesn
	//TODO: Whats below line??
	//MsgAckMap[les.Lsn()] = 1
	if debug {
		fmt.Println("Sending vote for Candidate=", req.SenderId, " Term = ", lesn.TermIndex, " follower = ", envVar.SenderId)
	}
	serverVar.VotedFor = req.SenderId
	serverVar.resetElectTimeout()
	serverVar.Outbox() <- &envVar
	return downgrade
}
//FireAServer() starts a server with forking methods to listen at a port for intra cluster communication and for client-server communication.
// It reads clusterConfig.json, builds the Raft value in FOLLOWER state,
// connects a zmq PUSH socket to every peer, registers the gob types that
// travel inside Envelopes, and forks the SendMail/GetMail/client
// goroutines before returning the server.
func FireAServer(myid int) Server {
	fileName := "clusterConfig.json"
	var obj ClusterConfig
	file, e := ioutil.ReadFile(fileName)
	if e != nil {
		panic("File error: " + e.Error())
	}
	// NOTE(review): Unmarshal's error is ignored; a malformed config file
	// silently yields a zero-valued ClusterConfig.
	json.Unmarshal(file, &obj)
	logfile := os.Getenv("GOPATH") + "/log/log_" + strconv.Itoa(myid)
	tLog := createNewLog(logfile)
	serverVar := &Raft{
		Pid:           UNKNOWN,
		Peers:         make([]int, len(obj.Servers)-1),
		Path:          "",
		Term:          INITIALIZATION,
		VotedFor:      NOVOTE,
		VotedTerm:     UNKNOWN,
		LeaderId:      UNKNOWN,
		CommitIndex:   0,
		ElectTicker:   nil,
		PrevLogIndex:  0,
		PrevLogTerm:   0,
		MatchIndex:    make(map[int]uint64),
		NextIndex:     make(map[int]uint64),
		LastApplied:   0,
		State:         FOLLOWER, //Initialized server as FOLLOWER
		In:            make(chan *Envelope),
		Out:           make(chan *Envelope),
		Address:       map[int]string{},
		ClientSockets: make(map[int]*zmq.Socket),
		LsnVar:        0,
		LogSockets:    make(map[int]*zmq.Socket),
		Inchan:        make(chan *LogEntryStruct),
		Outchan:       make(chan interface{}),
		ClusterSize:   len(obj.Servers) - 1,
		GotConsensus:  make(chan bool),
		SLog:          tLog,
		Inprocess:     false,
	}
	count := 0
	LeaderID = UNKNOWN
	Quorum = UNKNOWN
	//fmt.Println("======= Initating Server : ", myid, "==========")
	var clientPortAddr string
	// Split config entries into self (Pid + client port) and peers, and
	// record every server's log address.
	for i := range obj.Servers {
		if obj.Servers[i].Id == strconv.Itoa(myid) {
			serverVar.Pid, _ = strconv.Atoi(obj.Servers[i].Id)
			clientPortAddr = obj.Servers[i].HostName + ":" + obj.Servers[i].ClientPort
		} else {
			serverVar.Peers[count], _ = strconv.Atoi(obj.Servers[i].Id)
			//fmt.Println("Peers -> ", serverVar.Peers[count])
			count++
		}
		//Assigning first ID as leader
		if LeaderID == -1 {
			LeaderID, _ = strconv.Atoi(obj.Servers[i].Id)
		}
		//fmt.Println("Server PID = ", serverVar.Pid)
		serverVar.Path = obj.Path.Path
		servid, _ := strconv.Atoi(obj.Servers[i].Id)
		serverVar.Address[servid] = obj.Servers[i].HostName + ":" + obj.Servers[i].LogPort
		//fmt.Println("Server Address is ",serverVar.Address[servid],"servid = ",servid)
	}
	// Register the concrete types carried inside gob-encoded Envelopes.
	gob.Register(LogEntryStruct{})
	gob.Register(appendEntries{})
	gob.Register(appendEntriesResponse{})
	gob.Register(Command{})
	no_servers, _ := strconv.Atoi(obj.Count.Count)
	Quorum = int((no_servers-1)/2 + 1.0)
	for i := range serverVar.PeersIds() {
		serverVar.LogSockets[serverVar.Peers[i]], _ = zmq.NewSocket(zmq.PUSH)
		serverVar.LogSockets[serverVar.Peers[i]].SetSndtimeo(time.Millisecond * 30)
		err := serverVar.LogSockets[serverVar.Peers[i]].Connect("tcp://" + serverVar.Address[serverVar.Peers[i]])
		//fmt.Println("Log Port : ", serverVar.LogSockets[serverVar.Peers[i]])
		if err != nil {
			panic("Connect error " + err.Error())
		}
		//initialize matchIndex
		serverVar.MatchIndex[serverVar.Peers[i]] = 0
		serverVar.NextIndex[serverVar.Peers[i]] = serverVar.GetLastApplied() + 1
		voteMap[serverVar.Peers[i]] = false
		//fmt.Println("serverVar.NextIndex[serverVar.Peers[i]] = ", serverVar.NextIndex[serverVar.Peers[i]])
	}
	// Fork methods for communication within cluster
	//fmt.Println("Leader = ", LeaderID)
	serverVar.SLog.ApplyFunc = func(e *LogEntryStruct) {
		//fmt.Println("ApplyFunc() --> ", e.Logsn)
		serverVar.Inchan <- e
	}
	go SendMail(serverVar)
	go GetMail(serverVar)
	//Open port for client-server communication
	go serverVar.ClientServerComm(clientPortAddr)
	return serverVar
}
// SendMail drains the server's outbox forever, gob-encoding each
// envelope and pushing it over the zmq socket of its destination. A
// BROADCAST envelope is encoded and sent once per peer; in both paths
// the envelope's Pid is rewritten to the sender's id before encoding.
func SendMail(serverVar *Raft) {
	var network bytes.Buffer
	encode := func(env *Envelope) {
		network.Reset()
		if err := gob.NewEncoder(&network).Encode(env); err != nil {
			panic("gob error: " + err.Error())
		}
	}
	for {
		envelope := <-(serverVar.Outbox())
		if envelope.Pid == BROADCAST {
			envelope.Pid = serverVar.ServId()
			for i := range serverVar.PeersIds() {
				encode(envelope)
				serverVar.LogSockets[serverVar.Peers[i]].Send(network.String(), 0)
			}
		} else {
			dest := envelope.Pid
			envelope.Pid = serverVar.ServId()
			encode(envelope)
			serverVar.LogSockets[dest].Send(network.String(), 0)
		}
	}
}
// GetMail binds a PULL socket on this server's log address and forwards
// every successfully decoded Envelope to the server's inbox. The
// original ignored Recv errors with an empty if-block and then decoded
// the stale/empty message; such receives are now logged and skipped.
func GetMail(ServerVar *Raft) {
	input, err := zmq.NewSocket(zmq.PULL)
	if err != nil {
		panic("Socket: " + err.Error())
	}
	err = input.Bind("tcp://" + ServerVar.Address[ServerVar.ServId()])
	if err != nil {
		panic("Socket: " + err.Error())
	}
	for {
		msg, err := input.Recv(0)
		if err != nil {
			// Best-effort transport: report and retry rather than decode
			// garbage or crash.
			log.Println("recv:", err)
			continue
		}
		dec := gob.NewDecoder(bytes.NewBufferString(msg))
		env := new(Envelope)
		if err := dec.Decode(env); err != nil {
			log.Fatal("decode:", err)
		}
		(ServerVar.Inbox()) <- env
	}
}
//resetElectTo() resets the election ticker once it reaches timeout
func (ServerVar *Raft) resetElectTimeout() {
	// Replace ElectTicker with a fresh one-shot timer channel using a
	// newly randomized timeout; the previous timer is left to the GC.
	ServerVar.ElectTicker = time.NewTimer(electTimeout()).C
}
// electTimeout returns a randomized election timeout drawn uniformly
// from [MinElectTo, MaxElectTo) milliseconds.
func electTimeout() time.Duration {
	jitter := rand.Intn(int(MaxElectTo - MinElectTo))
	return time.Duration(int(MinElectTo)+jitter) * time.Millisecond
}
|
package main
import (
"fmt"
"time"
)
// main demonstrates an unbuffered channel: a producer goroutine sends
// three integers, sleeping one second after each send, while the main
// goroutine receives and prints them.
func main() {
	ch := make(chan int)
	go func() {
		for n := 0; n < 3; n++ {
			fmt.Printf("Sending value %d to channel\n", n)
			ch <- n
			time.Sleep(time.Second)
		}
	}()
	for n := 0; n < 3; n++ {
		fmt.Printf("Received value %d from channel\n", <-ch)
	}
}
|
package artifacts
import (
"context"
"fmt"
"io/ioutil"
"net/http"
"os"
"strings"
log "github.com/sirupsen/logrus"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/argoproj/argo/persist/sqldb"
wfv1 "github.com/argoproj/argo/pkg/apis/workflow/v1alpha1"
"github.com/argoproj/argo/server/auth"
"github.com/argoproj/argo/util/instanceid"
artifact "github.com/argoproj/argo/workflow/artifacts"
"github.com/argoproj/argo/workflow/hydrator"
)
// ArtifactServer serves workflow artifacts over HTTP, authenticating
// requests via the gatekeeper and loading workflows either live by name
// or from the archive by uid.
type ArtifactServer struct {
	gatekeeper        auth.Gatekeeper        // authenticates incoming requests
	hydrator          hydrator.Interface     // restores offloaded workflow status
	wfArchive         sqldb.WorkflowArchive  // archived-workflow lookup by uid
	instanceIDService instanceid.Service     // validates controller-instance ownership
}
// NewArtifactServer wires up an ArtifactServer from its dependencies.
func NewArtifactServer(authN auth.Gatekeeper, hydrator hydrator.Interface, wfArchive sqldb.WorkflowArchive, instanceIDService instanceid.Service) *ArtifactServer {
	return &ArtifactServer{
		gatekeeper:        authN,
		hydrator:          hydrator,
		wfArchive:         wfArchive,
		instanceIDService: instanceIDService,
	}
}
// GetArtifact authenticates the request, loads and validates the
// workflow named in the URL path, fetches the requested node artifact
// and writes it back as a .tgz attachment. Uses the named status
// constant instead of the magic 401.
func (a *ArtifactServer) GetArtifact(w http.ResponseWriter, r *http.Request) {
	ctx, err := a.gateKeeping(r)
	if err != nil {
		w.WriteHeader(http.StatusUnauthorized)
		_, _ = w.Write([]byte(err.Error()))
		return
	}
	// Expected path shape: /{prefix}/{namespace}/{workflowName}/{nodeId}/{artifactName}
	path := strings.SplitN(r.URL.Path, "/", 6)
	namespace := path[2]
	workflowName := path[3]
	nodeId := path[4]
	artifactName := path[5]
	log.WithFields(log.Fields{"namespace": namespace, "workflowName": workflowName, "nodeId": nodeId, "artifactName": artifactName}).Info("Download artifact")
	wf, err := a.getWorkflowAndValidate(ctx, namespace, workflowName)
	if err != nil {
		a.serverInternalError(err, w)
		return
	}
	data, err := a.getArtifact(ctx, wf, nodeId, artifactName)
	if err != nil {
		a.serverInternalError(err, w)
		return
	}
	w.Header().Add("Content-Disposition", fmt.Sprintf(`filename="%s.tgz"`, artifactName))
	a.ok(w, data)
}
// GetArtifactByUID authenticates the request, loads the archived
// workflow referenced by uid (enforcing RBAC), fetches the requested
// node artifact and writes it back as a .tgz attachment. Uses the named
// status constant instead of the magic 401.
func (a *ArtifactServer) GetArtifactByUID(w http.ResponseWriter, r *http.Request) {
	ctx, err := a.gateKeeping(r)
	if err != nil {
		w.WriteHeader(http.StatusUnauthorized)
		_, _ = w.Write([]byte(err.Error()))
		return
	}
	// Expected path shape: /{prefix}/{uid}/{nodeId}/{artifactName}
	path := strings.SplitN(r.URL.Path, "/", 6)
	uid := path[2]
	nodeId := path[3]
	artifactName := path[4]
	log.WithFields(log.Fields{"uid": uid, "nodeId": nodeId, "artifactName": artifactName}).Info("Download artifact")
	wf, err := a.getWorkflowByUID(ctx, uid)
	if err != nil {
		a.serverInternalError(err, w)
		return
	}
	data, err := a.getArtifact(ctx, wf, nodeId, artifactName)
	if err != nil {
		a.serverInternalError(err, w)
		return
	}
	w.Header().Add("Content-Disposition", fmt.Sprintf(`filename="%s.tgz"`, artifactName))
	a.ok(w, data)
}
// gateKeeping extracts the caller's token — Authorization header first,
// then the "authorization" cookie — and asks the gatekeeper to produce
// an authenticated context from it. A missing cookie is not an error;
// the token is simply left empty.
func (a *ArtifactServer) gateKeeping(r *http.Request) (context.Context, error) {
	token := r.Header.Get("Authorization")
	if token == "" {
		cookie, err := r.Cookie("authorization")
		switch {
		case err == nil:
			token = cookie.Value
		case err != http.ErrNoCookie:
			return nil, err
		}
	}
	ctx := metadata.NewIncomingContext(r.Context(), metadata.MD{"authorization": []string{token}})
	return a.gatekeeper.Context(ctx)
}
// ok writes data to the client with a 200 status.
func (a *ArtifactServer) ok(w http.ResponseWriter, data []byte) {
	w.WriteHeader(200)
	_, err := w.Write(data)
	if err != nil {
		// NOTE(review): the 200 header has already been written, so the
		// WriteHeader(500) inside serverInternalError is superfluous here
		// — the client has already seen 200.
		a.serverInternalError(err, w)
	}
}
// serverInternalError reports err to the client as a 500 response whose
// body is the error text. Uses the named status constant instead of the
// magic 500.
func (a *ArtifactServer) serverInternalError(err error, w http.ResponseWriter) {
	w.WriteHeader(http.StatusInternalServerError)
	_, _ = w.Write([]byte(err.Error()))
}
// getArtifact downloads the named output artifact of node nodeId into a
// temporary file via the artifact driver and returns its contents. The
// original leaked the TempFile descriptor and indexed Status.Nodes
// without checking the node exists; both are fixed here.
func (a *ArtifactServer) getArtifact(ctx context.Context, wf *wfv1.Workflow, nodeId, artifactName string) ([]byte, error) {
	kubeClient := auth.GetKubeClient(ctx)
	node, ok := wf.Status.Nodes[nodeId]
	if !ok {
		return nil, fmt.Errorf("artifact not found")
	}
	art := node.Outputs.GetArtifactByName(artifactName)
	if art == nil {
		return nil, fmt.Errorf("artifact not found")
	}
	driver, err := artifact.NewDriver(art, resources{kubeClient, wf.Namespace})
	if err != nil {
		return nil, err
	}
	tmp, err := ioutil.TempFile(".", "artifact")
	if err != nil {
		return nil, err
	}
	path := tmp.Name()
	defer func() { _ = os.Remove(path) }()
	// The driver writes to the path itself, so close our handle right
	// away instead of leaking it.
	if err := tmp.Close(); err != nil {
		return nil, err
	}
	err = driver.Load(art, path)
	if err != nil {
		return nil, err
	}
	file, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	log.WithFields(log.Fields{"size": len(file)}).Debug("Artifact file size")
	return file, nil
}
// getWorkflowAndValidate fetches the named workflow, confirms it belongs
// to this controller instance, and hydrates any offloaded node status
// before returning it.
func (a *ArtifactServer) getWorkflowAndValidate(ctx context.Context, namespace string, workflowName string) (*wfv1.Workflow, error) {
	wfClient := auth.GetWfClient(ctx)
	wf, err := wfClient.ArgoprojV1alpha1().Workflows(namespace).Get(workflowName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	if err := a.instanceIDService.Validate(wf); err != nil {
		return nil, err
	}
	if err := a.hydrator.Hydrate(wf); err != nil {
		return nil, err
	}
	return wf, nil
}
// getWorkflowByUID loads an archived workflow by uid and enforces that
// the caller is allowed to "get" workflows of that name and namespace,
// returning a gRPC PermissionDenied error otherwise.
func (a *ArtifactServer) getWorkflowByUID(ctx context.Context, uid string) (*wfv1.Workflow, error) {
	wf, err := a.wfArchive.GetWorkflow(uid)
	if err != nil {
		return nil, err
	}
	allowed, err := auth.CanI(ctx, "get", "workflows", wf.Namespace, wf.Name)
	if err != nil {
		return nil, err
	}
	if !allowed {
		return nil, status.Error(codes.PermissionDenied, "permission denied")
	}
	return wf, nil
}
|
package main
import (
"fmt"
cli "gopkg.in/urfave/cli.v2"
"github.com/johnwyles/vrddt-reboot/pkg/config"
"github.com/johnwyles/vrddt-reboot/pkg/reddit"
)
// GetRedditVideoInfo is the command to get simply the data about a Reddit
// video from a Reddit URL
func GetRedditVideoInfo(cfg *config.Config) *cli.Command {
	// NOTE(review): cfg is accepted for signature consistency but is not
	// referenced by this command definition.
	return &cli.Command{
		Action:    getRedditVideoInfo,
		ArgsUsage: " ",
		Before:    beforeRedditVideoInfo,
		Flags: []cli.Flag{
			&cli.StringFlag{
				Aliases: []string{"r"},
				EnvVars: []string{"VRDDT_CLI_GET_REDDIT_VIDEO_INFO_REDDIT_URL"},
				Name:    "reddit-url",
				Usage:   "Specifies the Reddit URL to pull the video from",
				Value:   "",
			},
		},
		Name:  "get-reddit-video-info",
		Usage: "Get video information from a Reddit URL",
	}
}
// beforeRedditVideoInfo will validate that we have set a Reddit URL
func beforeRedditVideoInfo(cliContext *cli.Context) (err error) {
	if cliContext.IsSet("reddit-url") {
		return nil
	}
	// No URL given: show the command help and report the problem.
	cli.ShowCommandHelp(cliContext, cliContext.Command.Name)
	return fmt.Errorf("A Reddit URL was not given")
}
// getRedditVideoInfo will operate locally without any of the services to
// return useful information about the Reddit video: the final URL, title,
// and audio/video URLs are logged and echoed to stdout.
func getRedditVideoInfo(cliContext *cli.Context) (err error) {
	getLogger("getRedditVideoInfo").Info().Msg("getRedditVideoInfo()")
	// Setup a new Reddit video with all the video information.
	// The previous code checked err immediately after this call, but err had
	// never been assigned at that point (NewVideo returns no error), so the
	// dead check has been removed.
	redditVideo := reddit.NewVideo()
	redditVideo.URL = cliContext.String("reddit-url")
	getLogger("getRedditVideoInfo").Info().Msgf("Finding final URL for Reddit URL: %s", redditVideo.URL)
	// We shouldn't need this if all the entries to the queue are done
	if err = redditVideo.SetFinalURL(); err != nil {
		return
	}
	getLogger("getRedditVideoInfo").Info().Msgf("Final URL for Reddit URL: %s", redditVideo.URL)
	// Set the AudioURL, VideoURL, and Title
	if err = redditVideo.SetMetadata(); err != nil {
		return
	}
	getLogger("getRedditVideoInfo").Info().Msgf("Metadata set for Reddit URL: %s", redditVideo.URL)
	// Log and echo each piece of information about the video.
	for _, msg := range []string{
		fmt.Sprintf("Reddit URL: %s", redditVideo.URL),
		fmt.Sprintf("Title: %s", redditVideo.Title),
		fmt.Sprintf("Audio URL: %s", redditVideo.AudioURL),
		fmt.Sprintf("Video URL: %s", redditVideo.VideoURL),
	} {
		getLogger("getRedditVideoInfo").Info().Msg(msg)
		fmt.Printf("%s\n", msg)
	}
	return
}
|
package main
import (
"fmt"
"gopkg.in/urfave/cli.v1"
"os"
)
// main wires up the file-generator CLI with "generate" and "delete"
// commands and reports any error from running the app.
func main() {
	app := cli.NewApp()
	app.Name = "file-generator"
	app.Usage = "File Generation Application"
	app.Commands = []cli.Command{
		{
			Name: "generate", ShortName: "g",
			Usage: "Generate files",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "fileName, fn",
					Usage: "Specifies the name of the files. The default name is 'test'",
					Value: "test",
				},
				cli.StringFlag{
					Name:  "filePath, fp",
					Usage: "Specifies the path to files. The default path is 'files'",
					Value: "files",
				},
				cli.Int64Flag{
					Name:  "fileSize, fs",
					Usage: "Specifies the size of files. Default size 10kb",
					Value: 1e4,
				},
				cli.Int64Flag{
					// Fixed: the name previously contained a stray space
					// ("numberFiles , nf").
					Name:  "numberFiles, nf",
					Usage: "Sets the number of files to generate. Default amount 10",
					Value: 10,
				},
			},
			Action: func(c *cli.Context) error {
				var (
					fileName    = c.String("fileName")
					filePath    = c.String("filePath")
					fileSize    = c.Int("fileSize")
					numberFiles = c.Int("numberFiles")
				)
				// 0755 instead of os.ModeDir: ModeDir alone creates a
				// directory with no permission bits. MkdirAll also makes
				// reruns idempotent when the directory already exists.
				if err := os.MkdirAll(filePath, 0755); err != nil {
					return err
				}
				for i := 1; i <= numberFiles; i++ {
					if err := writeZeroFile(fmt.Sprintf("%v/%v%v", filePath, fileName, i), fileSize); err != nil {
						return err
					}
				}
				return nil
			},
		},
		{
			Name: "delete", ShortName: "d",
			Usage: "Delete files",
			Flags: []cli.Flag{
				cli.StringFlag{
					Name:  "deleteDirectory, dir",
					Usage: "Delete all files in a directory. The default directory is 'files'",
					Value: "files",
				},
			},
			Action: func(c *cli.Context) error {
				fileDirectory := c.String("deleteDirectory")
				return os.RemoveAll(fileDirectory)
			},
		},
	}
	// Report CLI errors instead of silently discarding them.
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}

// writeZeroFile creates path and fills it with size zero bytes, closing
// the file as soon as it is written. Previously the files were closed via
// defer inside the loop, which kept every file open until the command
// returned, and the Write error was ignored.
func writeZeroFile(path string, size int) error {
	file, err := os.Create(path)
	if err != nil {
		return err
	}
	data := make([]byte, size)
	_, err = file.Write(data)
	if cerr := file.Close(); err == nil {
		err = cerr
	}
	return err
}
|
/*
Copyright Digital Asset Holdings, LLC 2016 All Rights Reserved.
Copyright IBM Corp. 2017 All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package errors
import (
"fmt"
"strings"
"testing"
"github.com/hyperledger/fabric/common/flogging"
)
// TestError verifies that a plain Error records no callstack.
func TestError(t *testing.T) {
	err := Error("UNK", "404", "An unknown error occurred.")
	if stack := err.GetStack(); stack != "" {
		t.Fatalf("No error stack should have been recorded.")
	}
}
// TestErrorWithArg tests creating an error with a message argument
func TestErrorWithArg(t *testing.T) {
	err := Error("UNK", "405", "An error occurred: %s", "arg1")
	if stack := err.GetStack(); stack != "" {
		t.Fatalf("No error stack should have been recorded.")
	}
}
// TestErrorWithCallstack verifies that ErrorWithCallstack records a stack.
func TestErrorWithCallstack(t *testing.T) {
	err := ErrorWithCallstack("UNK", "404", "An unknown error occurred.")
	if stack := err.GetStack(); stack == "" {
		t.Fatalf("No error stack was recorded.")
	}
}
// TestErrorWithCallstack_wrapped checks that both an inner and an outer
// ErrorWithCallstack record a stack.
func TestErrorWithCallstack_wrapped(t *testing.T) {
	inner := ErrorWithCallstack("UNK", "404", "An unknown error occurred.")
	if inner.GetStack() == "" {
		t.Fatalf("No error stack was recorded.")
	}
	outer := ErrorWithCallstack("CHA", "404", "An unknown error occurred.").WrapError(inner)
	if outer.GetStack() == "" {
		t.Fatalf("No error stack was recorded.")
	}
}
// TestErrorWithCallstackAndArg tests creating an error with a callstack and
// message argument
func TestErrorWithCallstackAndArg(t *testing.T) {
	err := ErrorWithCallstack("UNK", "405", "An error occurred: %s", "arg1")
	if stack := err.GetStack(); stack == "" {
		t.Fatalf("No error stack was recorded.")
	}
}
// TestErrorWithCallstackAndArg_wrappedNoCallstack checks that wrapping a
// stackless Error in an ErrorWithCallstack records a stack only on the
// outer error.
func TestErrorWithCallstackAndArg_wrappedNoCallstack(t *testing.T) {
	inner := Error("UNK", "405", "An error occurred: %s", "arg1")
	if inner.GetStack() != "" {
		t.Fatalf("No error stack should have been recorded.")
	}
	outer := ErrorWithCallstack("CHA", "404", "An error occurred: %s", "anotherarg1").WrapError(inner)
	if outer.GetStack() == "" {
		t.Fatalf("No error stack was recorded.")
	}
}
// TestError_wrappedWithCallstackAndArg tests that wrapping a
// CallStackError in a plain Error records a stack only on the inner error.
func TestError_wrappedWithCallstackAndArg(t *testing.T) {
	e := ErrorWithCallstack("UNK", "405", "An error occurred: %s", "arg1")
	s := e.GetStack()
	if s == "" {
		// e was created with a callstack, so a stack must be present.
		// (The previous message asserted the opposite of this condition.)
		t.Fatalf("No error stack was recorded.")
	}
	e2 := Error("CHA", "404", "An error occurred: %s", "anotherarg1").WrapError(e)
	s2 := e2.GetStack()
	if s2 != "" {
		// e2 is a plain Error, so no stack should have been captured.
		// (The previous message asserted the opposite of this condition.)
		t.Fatalf("No error stack should have been recorded.")
	}
}
// TestErrorWithCallstackMessage tests the output for a logging error where
// and an invalid log level has been provided and the stack trace should be
// displayed with the error message
func TestErrorWithCallstackMessage(t *testing.T) {
	// debug level on the 'error' module appends the callstack to Error()
	flogging.SetModuleLevel("error", "debug")
	err := ErrorWithCallstack("UNK", "405", "An unknown error occurred.")
	if err.GetStack() == "" {
		t.Fatalf("No error stack was recorded.")
	}
	// this fragment of the stack trace is platform independent
	if !strings.Contains(err.Error(), "github.com/hyperledger/fabric/common/errors.TestErrorWithCallstackMessage") {
		t.Fatalf("Error message does not have stack trace appended.")
	}
}
// TestErrorWithCallstackMessage_wrapped checks that both a wrapped and a
// wrapping ErrorWithCallstack carry a stack trace in their messages when
// the 'error' module logs at debug level.
func TestErrorWithCallstackMessage_wrapped(t *testing.T) {
	// debug level on the 'error' module appends the callstack to Error()
	flogging.SetModuleLevel("error", "debug")
	inner := ErrorWithCallstack("UNK", "405", "An error occurred: %s", "arg1")
	if inner.GetStack() == "" {
		t.Fatalf("No error stack was recorded.")
	}
	// this fragment of the stack trace is platform independent
	if !strings.Contains(inner.Error(), "github.com/hyperledger/fabric/common/errors.TestErrorWithCallstackMessage_wrapped") {
		t.Fatalf("Error message does not have stack trace appended.")
	}
	outer := ErrorWithCallstack("CHA", "405", "A chaincode error occurred: %s", "ccarg1").WrapError(inner)
	if outer.GetStack() == "" {
		t.Fatalf("No error stack was recorded.")
	}
	// this fragment of the stack trace is platform independent
	if !strings.Contains(outer.Error(), "github.com/hyperledger/fabric/common/errors.TestErrorWithCallstackMessage_wrapped") {
		t.Fatalf("Error message does not have stack trace appended.")
	}
}
// TestIsValidComponentOrReasonCode exercises the component and reason
// code validators with valid and invalid inputs.
func TestIsValidComponentOrReasonCode(t *testing.T) {
	for _, component := range []string{"LGR", "CHA", "PER", "xyz", "aBc"} {
		if !isValidComponentOrReasonCode(component, componentPattern) {
			t.FailNow()
		}
	}
	for _, reason := range []string{"404", "500", "999"} {
		if !isValidComponentOrReasonCode(reason, reasonPattern) {
			t.FailNow()
		}
	}
	for _, component := range []string{"LEDG", "CH", "P3R", "123", ""} {
		if isValidComponentOrReasonCode(component, componentPattern) {
			t.FailNow()
		}
	}
	for _, reason := range []string{"4045", "E12", "ZZZ", "1", ""} {
		if isValidComponentOrReasonCode(reason, reasonPattern) {
			t.FailNow()
		}
	}
}
// ExampleError demonstrates the formatting of a plain Error.
func ExampleError() {
	// at any level other than debug no callstack is appended to the message
	flogging.SetModuleLevel("error", "warning")
	e := Error("UNK", "404", "An unknown error occurred.")
	if e == nil {
		return
	}
	fmt.Printf("%s\n", e.Error())
	fmt.Printf("%s\n", e.GetErrorCode())
	fmt.Printf("%s\n", e.GetComponentCode())
	fmt.Printf("%s\n", e.GetReasonCode())
	fmt.Printf("%s\n", e.Message())
	// Output:
	// UNK:404 - An unknown error occurred.
	// UNK:404
	// UNK
	// 404
	// UNK:404 - An unknown error occurred.
}
// ExampleErrorWithCallstack demonstrates the formatting of an error
// created with a callstack (not shown at non-debug log levels).
func ExampleErrorWithCallstack() {
	// at any level other than debug no callstack is appended to the message
	flogging.SetModuleLevel("error", "warning")
	e := ErrorWithCallstack("UNK", "404", "An unknown error occurred.")
	if e == nil {
		return
	}
	fmt.Printf("%s\n", e.Error())
	fmt.Printf("%s\n", e.GetErrorCode())
	fmt.Printf("%s\n", e.GetComponentCode())
	fmt.Printf("%s\n", e.GetReasonCode())
	fmt.Printf("%s\n", e.Message())
	// Output:
	// UNK:404 - An unknown error occurred.
	// UNK:404
	// UNK
	// 404
	// UNK:404 - An unknown error occurred.
}
// Example_utilityErrorWithArg tests the output for a sample error with a message
// argument
func Example_utilityErrorWithArg() {
	// at any level other than debug no callstack is appended to the message
	flogging.SetModuleLevel("error", "warning")
	e := ErrorWithCallstack("UNK", "405", "An error occurred: %s", "arg1")
	if e == nil {
		return
	}
	fmt.Printf("%s\n", e.Error())
	fmt.Printf("%s\n", e.GetErrorCode())
	fmt.Printf("%s\n", e.GetComponentCode())
	fmt.Printf("%s\n", e.GetReasonCode())
	fmt.Printf("%s\n", e.Message())
	// Output:
	// UNK:405 - An error occurred: arg1
	// UNK:405
	// UNK
	// 405
	// UNK:405 - An error occurred: arg1
}
// Example_wrappedUtilityErrorWithArg tests the output for a CallStackError
// with message argument that is wrapped into another error.
func Example_wrappedUtilityErrorWithArg() {
	// at any level other than debug no callstack is appended to the message
	flogging.SetModuleLevel("error", "warning")
	inner := ErrorWithCallstack("UNK", "405", "An error occurred: %s", "arg1")
	e := ErrorWithCallstack("CHA", "500", "Utility error occurred: %s", "ccarg1").WrapError(inner)
	if e == nil {
		return
	}
	fmt.Printf("%s\n", e.Error())
	fmt.Printf("%s\n", e.GetErrorCode())
	fmt.Printf("%s\n", e.GetComponentCode())
	fmt.Printf("%s\n", e.GetReasonCode())
	fmt.Printf("%s\n", e.Message())
	// Output:
	// CHA:500 - Utility error occurred: ccarg1
	// Caused by: UNK:405 - An error occurred: arg1
	// CHA:500
	// CHA
	// 500
	// CHA:500 - Utility error occurred: ccarg1
	// Caused by: UNK:405 - An error occurred: arg1
}
// Example_wrappedStandardError tests the output for a standard error
// with message argument that is wrapped into a CallStackError.
func Example_wrappedStandardError() {
	// at any level other than debug no callstack is appended to the message
	flogging.SetModuleLevel("error", "warning")
	inner := fmt.Errorf("grpc timed out: %s", "failed to connect to server")
	e := ErrorWithCallstack("CHA", "500", "Error sending message: %s", "invoke").WrapError(inner)
	if e == nil {
		return
	}
	fmt.Printf("%s\n", e.Error())
	fmt.Printf("%s\n", e.GetErrorCode())
	fmt.Printf("%s\n", e.GetComponentCode())
	fmt.Printf("%s\n", e.GetReasonCode())
	fmt.Printf("%s\n", e.Message())
	// Output:
	// CHA:500 - Error sending message: invoke
	// Caused by: grpc timed out: failed to connect to server
	// CHA:500
	// CHA
	// 500
	// CHA:500 - Error sending message: invoke
	// Caused by: grpc timed out: failed to connect to server
}
// Example_wrappedStandardError2 tests the output for CallStackError wrapped
// into a standard error with message argument that is wrapped into a
// CallStackError.
func Example_wrappedStandardError2() {
	// at any level other than debug no callstack is appended to the message
	flogging.SetModuleLevel("error", "warning")
	innermost := ErrorWithCallstack("CON", "500", "failed to connect to server")
	middle := fmt.Errorf("grpc timed out: %s", innermost)
	e := ErrorWithCallstack("CHA", "500", "Error sending message: %s", "invoke").WrapError(middle)
	if e == nil {
		return
	}
	fmt.Printf("%s\n", e.Error())
	fmt.Printf("%s\n", e.GetErrorCode())
	fmt.Printf("%s\n", e.GetComponentCode())
	fmt.Printf("%s\n", e.GetReasonCode())
	fmt.Printf("%s\n", e.Message())
	// Output:
	// CHA:500 - Error sending message: invoke
	// Caused by: grpc timed out: CON:500 - failed to connect to server
	// CHA:500
	// CHA
	// 500
	// CHA:500 - Error sending message: invoke
	// Caused by: grpc timed out: CON:500 - failed to connect to server
}
// Example_loggingInvalidLevel tests the output for a logging error where
// and an invalid log level has been provided
func Example_loggingInvalidLevel() {
	// at any level other than debug no callstack is appended to the message
	flogging.SetModuleLevel("error", "warning")
	e := ErrorWithCallstack("LOG", "400", "Invalid log level provided - %s", "invalid")
	if e == nil {
		return
	}
	fmt.Printf("%s\n", e.Error())
	fmt.Printf("%s\n", e.GetErrorCode())
	fmt.Printf("%s\n", e.GetComponentCode())
	fmt.Printf("%s\n", e.GetReasonCode())
	fmt.Printf("%s\n", e.Message())
	// Output:
	// LOG:400 - Invalid log level provided - invalid
	// LOG:400
	// LOG
	// 400
	// LOG:400 - Invalid log level provided - invalid
}
|
package field
import "github.com/graphql-go/graphql"
// args declares the arguments accepted by the Hello field: an optional
// string "name" that defaults to "world".
var args = graphql.FieldConfigArgument{
	"name": &graphql.ArgumentConfig{
		Type:         graphql.String,
		DefaultValue: "world",
		Description:  "Name to say hello",
	},
}
// sayHello resolves the Hello field by echoing back the "name" argument.
func sayHello(p graphql.ResolveParams) (interface{}, error) {
	name := p.Args["name"]
	return name, nil
}
// Hello says hello to somebody.
// The field resolves via sayHello, which returns the "name" argument
// (default "world") declared in args.
var Hello = graphql.Field{
	Type:        graphql.String,
	Args:        args,
	Resolve:     sayHello,
	Description: "Say hello to somebody.",
}
|
package main
import (
"fmt"
"github.com/d2r2/go-dht"
"io/ioutil"
"log"
)
// DHT11State holds one temperature/humidity reading.
// NOTE(review): ReadDHT11 below uses local variables and never populates
// this struct — confirm whether the type is still needed.
type DHT11State struct {
	temperature float32 // presumably degrees Celsius ("*C" in ReadDHT11's log format) — confirm
	humidity    float32 // presumably relative humidity in percent — confirm
}
// ReadDHT11 reads temperature and humidity from a DHT11 sensor on GPIO
// pin 4 (retrying up to 10 times), logs the reading, and persists it via
// storeInFile. On read failure it logs and returns without storing.
func ReadDHT11() {
	// Read DHT11 sensor data from pin 4, retrying 10 times in case of failure.
	temperature, humidity, retried, err :=
		dht.ReadDHTxxWithRetry(dht.DHT11, 4, true, 10)
	if err != nil {
		// Bail out: the previous code fell through and logged/stored
		// zero values from the failed read.
		log.Println("Encountered error reading dht11", err)
		return
	}
	// Print temperature and humidity
	log.Printf("Read DHT11: Temperature = %v*C, Humidity = %v%% (retried %d times)\n",
		temperature, humidity, retried)
	s := fmt.Sprintf("Temperature = %v*C, Humidity = %v%%", temperature, humidity)
	storeInFile([]byte(s))
}
// storeInFile writes the latest sensor reading to /tmp/dht so other
// processes can read it. Failures are logged (with the underlying error,
// previously discarded) but are not fatal.
func storeInFile(bytes []byte) {
	if err := ioutil.WriteFile("/tmp/dht", bytes, 0644); err != nil {
		log.Println("Could not store new data to file:", err)
	}
}
|
package rpc_service
// rpc_chat is an empty receiver type; presumably chat RPC methods are
// registered on it elsewhere in the package — confirm with callers.
type rpc_chat int
|
package toml_test
import (
"bytes"
"io/fs"
"os"
"path/filepath"
"sort"
"strings"
"testing"
"time"
"github.com/BurntSushi/toml"
tomltest "github.com/BurntSushi/toml/internal/toml-test"
)
// BenchmarkDecode measures decoding of every embedded "valid" test
// document, grouped by directory, plus one large real-world document.
func BenchmarkDecode(b *testing.B) {
	groups := make(map[string][]string)
	fs.WalkDir(tomltest.EmbeddedTests(), ".", func(path string, de fs.DirEntry, err error) error {
		if !strings.HasPrefix(path, "valid/") || !strings.HasSuffix(path, ".toml") {
			return nil
		}
		doc, _ := fs.ReadFile(tomltest.EmbeddedTests(), path)
		name := filepath.Dir(path[6:])
		if name == "." {
			name = "top"
		}
		groups[name] = append(groups[name], string(doc))
		return nil
	})
	type test struct {
		group string
		toml  []string
	}
	tests := make([]test, 0, len(groups))
	for g, docs := range groups {
		tests = append(tests, test{group: g, toml: docs})
	}
	sort.Slice(tests, func(i, j int) bool { return tests[i].group < tests[j].group })
	b.ResetTimer()
	for _, tt := range tests {
		b.Run(tt.group, func(b *testing.B) {
			b.ResetTimer()
			for n := 0; n < b.N; n++ {
				for _, doc := range tt.toml {
					var val map[string]interface{}
					toml.Decode(doc, &val)
				}
			}
		})
	}
	b.Run("large-doc", func(b *testing.B) {
		raw, err := os.ReadFile("testdata/ja-JP.toml")
		if err != nil {
			b.Fatal(err)
		}
		doc := string(raw)
		b.ResetTimer()
		for n := 0; n < b.N; n++ {
			var val map[string]interface{}
			toml.Decode(doc, &val)
		}
	})
}
// BenchmarkEncode measures re-encoding of every embedded "valid" test
// document that round-trips through the decoder, grouped by directory.
func BenchmarkEncode(b *testing.B) {
	files := make(map[string][]map[string]interface{})
	fs.WalkDir(tomltest.EmbeddedTests(), ".", func(path string, de fs.DirEntry, err error) error {
		if !strings.HasPrefix(path, "valid/") || !strings.HasSuffix(path, ".toml") {
			return nil
		}
		// Skip documents that require the "next" version of TOML.
		switch path {
		case "valid/string/escape-esc.toml", "valid/datetime/no-seconds.toml",
			"valid/string/hex-escape.toml", "valid/inline-table/newline.toml",
			"valid/key/unicode.toml":
			return nil
		}
		raw, _ := fs.ReadFile(tomltest.EmbeddedTests(), path)
		group := filepath.Dir(path[6:])
		if group == "." {
			group = "top"
		}
		var dec map[string]interface{}
		if _, err := toml.Decode(string(raw), &dec); err != nil {
			b.Fatalf("decode %q: %s", path, err)
		}
		if err := toml.NewEncoder(new(bytes.Buffer)).Encode(dec); err != nil {
			b.Logf("encode failed for %q (skipping): %s", path, err)
			return nil
		}
		files[group] = append(files[group], dec)
		return nil
	})
	type test struct {
		group string
		data  []map[string]interface{}
	}
	tests := make([]test, 0, len(files))
	for g, docs := range files {
		tests = append(tests, test{group: g, data: docs})
	}
	sort.Slice(tests, func(i, j int) bool { return tests[i].group < tests[j].group })
	b.ResetTimer()
	for _, tt := range tests {
		b.Run(tt.group, func(b *testing.B) {
			buf := new(bytes.Buffer)
			buf.Grow(1024 * 64)
			b.ResetTimer()
			for n := 0; n < b.N; n++ {
				for _, doc := range tt.data {
					toml.NewEncoder(buf).Encode(doc)
				}
			}
		})
	}
}
// BenchmarkExample measures decode and encode of the example document
// using the typed example struct.
func BenchmarkExample(b *testing.B) {
	raw, err := os.ReadFile("_example/example.toml")
	if err != nil {
		b.Fatal(err)
	}
	doc := string(raw)
	var decoded example
	if _, err := toml.Decode(doc, &decoded); err != nil {
		b.Fatal(err)
	}
	buf := new(bytes.Buffer)
	if err := toml.NewEncoder(buf).Encode(decoded); err != nil {
		b.Fatal(err)
	}
	b.ResetTimer()
	b.Run("decode", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			var c example
			toml.Decode(doc, &c)
		}
	})
	b.Run("encode", func(b *testing.B) {
		for n := 0; n < b.N; n++ {
			buf.Reset()
			toml.NewEncoder(buf).Encode(decoded)
		}
	})
}
// Copy from _example/example.go
type (
example struct {
Title string
Integers []int
Times []fmtTime
Duration []duration
Distros []distro
Servers map[string]server
Characters map[string][]struct {
Name string
Rank string
}
}
server struct {
IP string
Hostname string
Enabled bool
}
distro struct {
Name string
Packages string
}
duration struct{ time.Duration }
fmtTime struct{ time.Time }
)
func (d *duration) UnmarshalText(text []byte) (err error) {
d.Duration, err = time.ParseDuration(string(text))
return err
}
func (t fmtTime) String() string {
f := "2006-01-02 15:04:05.999999999"
if t.Time.Hour() == 0 {
f = "2006-01-02"
}
if t.Time.Year() == 0 {
f = "15:04:05.999999999"
}
if t.Time.Location() == time.UTC {
f += " UTC"
} else {
f += " -0700"
}
return t.Time.Format(`"` + f + `"`)
}
|
package main
import "fmt"
// person groups a name and an age into a single record.
type person struct {
	name string
	age  int
}

// newPerson constructs a person with the given name and a default age of
// 42. Returning &p is safe: a local whose address escapes survives the
// function call (it is moved to the heap).
func newPerson(name string) *person {
	p := person{name: name, age: 42}
	return &p
}
func main() {
fmt.Println(person{"Bob", 20}) //syntax for a new struct
fmt.Println(person{name: "Alice", age: 30}) // you also can name the fields while initilazing a struct
fmt.Println(person{name: "Fred"}) // ommited fields will bpe zero valued
fmt.Println(&person{name: "Ann", age: 40}) // & prefix yields a pointer to struct
fmt.Println(newPerson("Jon")) // It’s idiomatic to encapsulate new struct creation in constructor functions
s := person{name: "Sean", age: 50} // Access struct fields with a dot.
fmt.Println(s.name)
sp := &s // You can also use dots with struct pointers - the pointers are automatically dereferenced.
fmt.Println(sp.age)
sp.age = 51 // Structs are mutable.
fmt.Println(s.age)
} |
package main
import (
"fmt"
"sync"
"time"
)
// workerW simulates one second of work and signals the supplied WaitGroup
// when finished. The WaitGroup must be passed by pointer so every worker
// shares the same counter.
func workerW(id int, wg *sync.WaitGroup) {
	// Deferring Done guarantees the counter is decremented even if the
	// body panics, so wg.Wait() cannot deadlock.
	defer wg.Done()
	fmt.Printf("Worker %d starting\n", id)
	time.Sleep(time.Second)
	fmt.Printf("Worker %d done\n", id)
}
// main launches five workers concurrently and blocks until all of them
// have signalled completion via the shared WaitGroup.
func main() {
	var wg sync.WaitGroup
	for id := 1; id <= 5; id++ {
		wg.Add(1) // register one outstanding worker before launching it
		go workerW(id, &wg)
	}
	wg.Wait() // block until every worker has called Done
}
|
package demofile_test
import (
"testing"
demofile "github.com/MobalyticsGG/csgo-demofile"
)
// TestDemofileOpen verifies that a known-good demo file can be opened and
// fully parsed.
func TestDemofileOpen(t *testing.T) {
	dem, err := demofile.NewDemofile("testdata/demos/cache_9-21_mm.dem", true)
	if err != nil {
		// Fatal, not Error: continuing would call Start on an invalid
		// (possibly nil) demofile after a failed open.
		t.Fatal(err)
	}
	if err := dem.Start(); err != nil {
		t.Fatal(err)
	}
}
// BenchmarkDemofileOpen1 measures opening and parsing a demo file,
// reporting allocations.
func BenchmarkDemofileOpen1(b *testing.B) {
	b.ReportAllocs()
	for n := 0; n < b.N; n++ {
		dem, err := demofile.NewDemofile("testdata/demos/cache_9-21_mm.dem", false)
		if err != nil {
			// Fatal, not Error: continuing would call Start on an invalid
			// (possibly nil) demofile after a failed open.
			b.Fatal(err)
		}
		if err := dem.Start(); err != nil {
			b.Fatal(err)
		}
	}
}
|
package hot100
// Key idea: a sorted array invites a two-pointer scan.
// Reference: https://leetcode-cn.com/problems/remove-duplicates-from-sorted-array-ii/solution/gong-shui-san-xie-guan-yu-shan-chu-you-x-glnq/
//
// removeDuplicates2 compacts a sorted slice in place so each value appears
// at most twice, returning the new logical length. The write index w
// trails the read cursor; while w < k the first k elements are always
// kept, and afterwards v is kept only when it differs from nums[w-k]
// (i.e. v does not already occur k times in the kept prefix).
func removeDuplicates2(nums []int) int {
	const k = 2
	w := 0
	for _, v := range nums {
		if w < k || nums[w-k] != v {
			nums[w] = v
			w++
		}
	}
	return w
}
|
package openinstrument
import (
"code.google.com/p/goprotobuf/proto"
openinstrument_proto "code.google.com/p/open-instrument/proto"
"code.google.com/p/open-instrument/variable"
"errors"
"fmt"
"os"
"sort"
"time"
)
// NewVariableFromString parses a textual variable name into a Variable.
// Thin convenience wrapper around variable.NewFromString.
func NewVariableFromString(textvar string) *variable.Variable {
	return variable.NewFromString(textvar)
}
// NewVariableFromProto builds a Variable from its protobuf form.
// Thin convenience wrapper around variable.NewFromProto.
func NewVariableFromProto(p *openinstrument_proto.StreamVariable) *variable.Variable {
	return variable.NewFromProto(p)
}
// Timer measures wall-clock duration from construction to Stop and can
// optionally record the elapsed time (and a message) on a LogMessage.
type Timer struct {
	t          *openinstrument_proto.LogMessage // destination for the timing; may be nil
	start_time time.Time                        // set by NewTimer
	message    string                           // copied onto t.Message by Stop if non-empty
}
// NewTimer starts a timer. The message and LogMessage (t may be nil) are
// recorded when Stop is called.
func NewTimer(message string, t *openinstrument_proto.LogMessage) *Timer {
	timer := &Timer{
		t:          t,
		message:    message,
		start_time: time.Now(),
	}
	return timer
}
// Stop ends the timer and returns the elapsed time in milliseconds.
// If a LogMessage was supplied, the elapsed milliseconds (and the message,
// when non-empty) are recorded on it.
func (this *Timer) Stop() uint64 {
	// Compute the millisecond value once rather than duplicating the division.
	ms := uint64(time.Since(this.start_time).Nanoseconds() / 1000000)
	if this.t != nil {
		this.t.Timestamp = proto.Uint64(ms)
		if this.message != "" {
			this.t.Message = &this.message
		}
	}
	return ms
}
// Semaphore is a counting semaphore backed by a buffered bool channel;
// the channel capacity bounds the number of simultaneously held resources.
type Semaphore chan bool

// P acquires n resources, blocking while the channel buffer is full.
func (s Semaphore) P(n int) {
	for ; n > 0; n-- {
		s <- true
	}
}

// V releases n resources by draining n values from the channel.
func (s Semaphore) V(n int) {
	for ; n > 0; n-- {
		<-s
	}
}

// Lock acquires a single resource, allowing the semaphore to act as a mutex.
func (s Semaphore) Lock() {
	s.P(1)
}

// Unlock releases the single resource taken by Lock.
func (s Semaphore) Unlock() {
	s.V(1)
}

/* signal-wait */

// Signal releases one resource.
// NOTE(review): Signal maps to V and Wait to P — the reverse of classical
// semaphore naming. Confirm the intended semantics with callers.
func (s Semaphore) Signal() {
	s.V(1)
}

// Wait acquires n resources.
func (s Semaphore) Wait(n int) {
	s.P(n)
}
// ValueStreamWriter returns a channel that appends values to the supplied ValueStream, performing run-length-encoding.
// No effort is made to ensure that the ValueStream contains sorted Values
//
// A goroutine consumes the returned channel until it is closed by the
// caller; the caller owns closing the channel.
func ValueStreamWriter(stream *openinstrument_proto.ValueStream) chan *openinstrument_proto.Value {
	c := make(chan *openinstrument_proto.Value)
	go func() {
		for value := range c {
			if len(stream.Value) > 0 {
				last := stream.Value[len(stream.Value)-1]
				// Run-length-encoding: when the incoming value equals the
				// last stored value, extend the last value's end timestamp
				// instead of appending a new entry.
				// NOTE(review): when both string values are empty this
				// comparison falls through to GetDoubleValue(), so equal
				// (including zero) doubles always merge — confirm intended.
				if (last.GetStringValue() != "" && last.GetStringValue() == value.GetStringValue()) ||
					(last.GetDoubleValue() == value.GetDoubleValue()) {
					if value.GetEndTimestamp() > 0 {
						last.EndTimestamp = value.EndTimestamp
					} else {
						// No explicit end: treat the value's own timestamp
						// as the end of the run.
						last.EndTimestamp = value.Timestamp
					}
				} else {
					stream.Value = append(stream.Value, value)
				}
			} else {
				// First value: nothing to merge with.
				stream.Value = append(stream.Value, value)
			}
		}
	}()
	return c
}
// ValueStreamReader returns a channel producing Values from the supplied ValueStream.
// The channel is closed once every value has been sent.
func ValueStreamReader(stream *openinstrument_proto.ValueStream) chan *openinstrument_proto.Value {
	out := make(chan *openinstrument_proto.Value)
	go func() {
		defer close(out)
		for _, v := range stream.Value {
			out <- v
		}
	}()
	return out
}
// MergeValueStreams merges multiple ValueStreams, returning a channel
// producing Values ordered by timestamp. Each input stream is assumed to
// be timestamp-ordered; the channel is closed when all streams are drained.
func MergeValueStreams(streams []*openinstrument_proto.ValueStream) chan *openinstrument_proto.Value {
	c := make(chan *openinstrument_proto.Value)
	n := len(streams)
	go func() {
		indexes := make([]int, n)
		for {
			var min_timestamp uint64
			min_index := -1
			var min_value *openinstrument_proto.Value
			for i := 0; i < n; i++ {
				if indexes[i] >= len(streams[i].Value) {
					continue
				}
				v := streams[i].Value[indexes[i]]
				if min_index < 0 || v.GetTimestamp() < min_timestamp {
					min_timestamp = v.GetTimestamp()
					min_index = i
					min_value = v
				}
			}
			if min_index < 0 {
				// Every stream is exhausted.
				break
			}
			// Advance only the stream whose value is being emitted.
			// The previous code incremented the index of every stream that
			// was momentarily the minimum during the scan, silently
			// skipping those values.
			indexes[min_index]++
			c <- min_value
		}
		close(c)
	}()
	return c
}
// MergeStreamsBy groups streams by variable name and, when `by` is
// non-empty, additionally by the value of the `by` label, sending each
// non-empty group to the returned channel. Streams lacking the `by` label
// are omitted from labelled groups. The channel is closed after all
// groups have been sent.
func MergeStreamsBy(streams []*openinstrument_proto.ValueStream, by string) chan []*openinstrument_proto.ValueStream {
	c := make(chan []*openinstrument_proto.ValueStream)
	go func() {
		// First pass: collect the distinct variable names and the distinct
		// values of the `by` label across all streams.
		unique_vars := make(map[string]bool)
		unique_labels := make(map[string]bool)
		for _, stream := range streams {
			v := variable.NewFromProto(stream.Variable)
			unique_vars[v.Variable] = true
			label_value, ok := v.Labels[by]
			if !ok {
				// Streams without the label contribute an "" bucket.
				unique_labels[""] = true
			} else {
				unique_labels[label_value] = true
			}
		}
		// Second pass: emit one slice of streams per variable (and, when
		// grouping by label, per label value).
		for varname := range unique_vars {
			v := variable.NewFromString(varname)
			if by == "" {
				// No label grouping: one group per variable name.
				output := make([]*openinstrument_proto.ValueStream, 0)
				for _, stream := range streams {
					testvar := variable.NewFromProto(stream.Variable)
					if testvar.Variable != v.Variable {
						continue
					}
					output = append(output, stream)
				}
				if len(output) > 0 {
					c <- output
				}
			} else {
				for labelvalue := range unique_labels {
					output := make([]*openinstrument_proto.ValueStream, 0)
					for _, stream := range streams {
						testvar := variable.NewFromProto(stream.Variable)
						if testvar.Variable != v.Variable {
							continue
						}
						value, ok := testvar.Labels[by]
						if !ok {
							// Stream has no such label: excluded from
							// labelled groups.
							continue
						}
						if value != labelvalue {
							continue
						}
						output = append(output, stream)
					}
					if len(output) > 0 {
						c <- output
					}
				}
			}
		}
		close(c)
	}()
	return c
}
// valueStreamChannelList maintains an ordered list of Value channels.
// NOTE(review): the input field is never referenced in this file —
// confirm whether it is still needed.
type valueStreamChannelList struct {
	input    chan *openinstrument_proto.Value
	channels []chan *openinstrument_proto.Value
}
// Add appends a channel to the end of the list.
func (this *valueStreamChannelList) Add(c chan *openinstrument_proto.Value) {
	this.channels = append(this.channels, c)
}
// Last returns the most recently added channel.
// Panics if the list is empty (the constructor always seeds one channel).
func (this *valueStreamChannelList) Last() chan *openinstrument_proto.Value {
	return this.channels[len(this.channels)-1]
}
// ValueStreamChannelList creates a channel list seeded with the given
// initial channel.
func ValueStreamChannelList(initial chan *openinstrument_proto.Value) *valueStreamChannelList {
	list := &valueStreamChannelList{
		channels: []chan *openinstrument_proto.Value{initial},
	}
	return list
}
func Readdirnames(directory string) ([]string, error) {
dir, err := os.Open(directory)
if err != nil {
return nil, errors.New(fmt.Sprintf("Can't open %s for readdir: %s", directory, err))
}
defer dir.Close()
names, err := dir.Readdirnames(0)
if err != nil {
return nil, errors.New(fmt.Sprintf("Can't read file names in %s: %s", directory, err))
}
sort.Strings(names)
return names, nil
}
|
// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package plugin
import (
"fmt"
"net"
"os"
"github.com/aws/amazon-vpc-cni-plugins/network/eni"
"github.com/aws/amazon-vpc-cni-plugins/network/imds"
"github.com/aws/amazon-vpc-cni-plugins/network/netns"
"github.com/aws/amazon-vpc-cni-plugins/network/vpc"
"github.com/aws/amazon-vpc-cni-plugins/plugins/vpc-branch-eni/config"
log "github.com/cihub/seelog"
cniSkel "github.com/containernetworking/cni/pkg/skel"
cniTypes "github.com/containernetworking/cni/pkg/types"
cniTypesCurrent "github.com/containernetworking/cni/pkg/types/100"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
)
const (
// Name templates used for objects created by this plugin.
branchLinkNameFormat = "%s.%d"
bridgeNameFormat = "tapbr%d"
)
// Add is the internal implementation of CNI ADD command.
//
// It finds the trunk ENI, creates (or re-attaches) a branch ENI for the
// requested VLAN, moves the branch into the target network namespace and
// wires up the container-facing interface (VLAN, TAP or MACVTAP).
// IP addresses, routes and DNS are configured by VPC DHCP servers.
func (plugin *Plugin) Add(args *cniSkel.CmdArgs) error {
	// Parse network configuration.
	netConfig, err := config.New(args)
	if err != nil {
		log.Errorf("Failed to parse netconfig from args: %v.", err)
		return err
	}
	log.Infof("Executing ADD with netconfig: %+v.", netConfig)
	// Find the network namespace.
	log.Infof("Searching for netns %s.", args.Netns)
	ns, err := netns.GetNetNS(args.Netns)
	if err != nil {
		log.Errorf("Failed to find netns %s: %v.", args.Netns, err)
		return err
	}
	// Create the trunk ENI.
	trunk, err := eni.NewTrunk(netConfig.TrunkName, netConfig.TrunkMACAddress, eni.TrunkIsolationModeVLAN)
	if err != nil {
		log.Errorf("Failed to find trunk interface %s: %v.", netConfig.TrunkName, err)
		return err
	}
	// Bring up the trunk ENI.
	err = trunk.SetOpState(true)
	if err != nil {
		log.Errorf("Failed to bring up trunk interface %s: %v", netConfig.TrunkName, err)
		return err
	}
	// Create the branch ENI.
	branchName := fmt.Sprintf(branchLinkNameFormat, trunk.GetLinkName(), netConfig.BranchVlanID)
	branch, err := eni.NewBranch(trunk, branchName, netConfig.BranchMACAddress, netConfig.BranchVlanID)
	if err != nil {
		log.Errorf("Failed to create branch interface %s: %v.", branchName, err)
		return err
	}
	// Create a link for the branch ENI.
	log.Infof("Creating branch link %s.", branchName)
	overrideMAC := netConfig.InterfaceType == config.IfTypeVLAN
	err = branch.AttachToLink(overrideMAC)
	if err != nil {
		if os.IsExist(err) {
			// If the branch link already exists, it may have been created in a previous invocation
			// of this plugin. Look for it in the target network namespace and reset it.
			err = ns.Run(func() error {
				err := branch.ENI.AttachToLink()
				if err != nil {
					return err
				}
				// Remove any stale IP addresses left over from the previous
				// invocation; a missing address is not an error.
				for _, ipAddr := range netConfig.IPAddresses {
					err = branch.DeleteIPAddress(&ipAddr)
					if os.IsNotExist(err) {
						err = nil
					} else if err != nil {
						log.Errorf("Failed to reset branch link: %v", err)
					}
				}
				return err
			})
		}
		if err != nil {
			log.Errorf("Failed to attach branch interface %s: %v.", branchName, err)
			return err
		}
	} else {
		// Move branch ENI to the network namespace.
		log.Infof("Moving branch link %s to netns %s.", branch, args.Netns)
		err = branch.SetNetNS(ns)
		if err != nil {
			log.Errorf("Failed to move branch link: %v.", err)
			return err
		}
	}
	// Complete the remaining setup in target network namespace.
	err = ns.Run(func() error {
		var err error
		// Create the container-facing link based on the requested interface type.
		switch netConfig.InterfaceType {
		case config.IfTypeVLAN:
			// Container is running in a network namespace on this host.
			err = plugin.createVLANLink(branch, args.IfName, netConfig.IPAddresses, netConfig.GatewayIPAddresses)
		case config.IfTypeTAP:
			// Container is running in a VM.
			// Connect the branch ENI to a TAP link in the target network namespace.
			bridgeName := fmt.Sprintf(bridgeNameFormat, netConfig.BranchVlanID)
			err = plugin.createTAPLink(branch, bridgeName, args.IfName, netConfig.Tap)
		case config.IfTypeMACVTAP:
			// Container is running in a VM.
			// Connect the branch ENI to a MACVTAP link in the target network namespace.
			err = plugin.createMACVTAPLink(args.IfName, branch.GetLinkIndex())
		}
		// Fail before any further setup if link creation failed. Previously
		// execution fell through, and a subsequent successful step (e.g. the
		// IMDS blackhole route below) would overwrite err with nil and mask
		// the failure.
		if err != nil {
			return err
		}
		// Add a blackhole route for IMDS endpoint if required.
		if netConfig.BlockIMDS {
			err = imds.BlockInstanceMetadataEndpoint()
			if err != nil {
				return err
			}
		}
		// Set branch link operational state up. VLAN interfaces were already brought up above.
		if netConfig.InterfaceType != config.IfTypeVLAN {
			log.Infof("Setting branch link state up.")
			err = branch.SetOpState(true)
			if err != nil {
				log.Errorf("Failed to set branch link %v state: %v.", branch, err)
				return err
			}
		}
		return nil
	})
	if err != nil {
		log.Errorf("Failed to setup the link: %v.", err)
		return err
	}
	// Generate CNI result.
	// IP addresses, routes and DNS are configured by VPC DHCP servers.
	result := &cniTypesCurrent.Result{
		Interfaces: []*cniTypesCurrent.Interface{
			{
				Name:    args.IfName,
				Mac:     netConfig.BranchMACAddress.String(),
				Sandbox: args.Netns,
			},
		},
	}
	log.Infof("Writing CNI result to stdout: %+v", result)
	return cniTypes.PrintResult(result, netConfig.CNIVersion)
}
// Del is the internal implementation of CNI DEL command.
// CNI DEL command can be called by the orchestrator agent multiple times for the same interface,
// and thus must be best-effort and idempotent.
//
// Inside the target network namespace it deletes, in order: the TAP link
// (TAP/MACVTAP modes), the branch VLAN link, and the TAP bridge (TAP mode).
// Every deletion failure is logged and ignored so repeated calls stay
// idempotent; only a netconfig parse failure returns a non-nil error.
func (plugin *Plugin) Del(args *cniSkel.CmdArgs) error {
	// Parse network configuration.
	netConfig, err := config.New(args)
	if err != nil {
		log.Errorf("Failed to parse netconfig from args: %v.", err)
		return err
	}
	log.Infof("Executing DEL with netconfig: %+v.", netConfig)
	// Derive names from CNI network config.
	var branchName string
	if netConfig.InterfaceType == config.IfTypeVLAN {
		// In VLAN mode the branch link itself carries the requested name.
		branchName = args.IfName
	} else {
		// Find the trunk link name if not known.
		if netConfig.TrunkName == "" {
			// Look the trunk up by MAC address.
			trunk, err := eni.NewTrunk("", netConfig.TrunkMACAddress, eni.TrunkIsolationModeVLAN)
			if err != nil {
				// Log and ignore the failure.
				log.Errorf("Failed to find trunk with MAC address %v: %v.", netConfig.TrunkMACAddress, err)
				return nil
			}
			netConfig.TrunkName = trunk.GetLinkName()
		}
		// Branch links are named after the trunk name and the branch VLAN ID.
		branchName = fmt.Sprintf(branchLinkNameFormat, netConfig.TrunkName, netConfig.BranchVlanID)
	}
	tapBridgeName := fmt.Sprintf(bridgeNameFormat, netConfig.BranchVlanID)
	tapLinkName := args.IfName
	// Search for the target network namespace.
	netns, err := netns.GetNetNS(args.Netns)
	if err == nil {
		// In target network namespace...
		err = netns.Run(func() error {
			if netConfig.InterfaceType == config.IfTypeMACVTAP ||
				netConfig.InterfaceType == config.IfTypeTAP {
				// Delete the tap link. Only name is needed to address the link.
				la := netlink.NewLinkAttrs()
				la.Name = tapLinkName
				tapLink := &netlink.Tuntap{LinkAttrs: la}
				log.Infof("Deleting tap link: %v.", tapLinkName)
				err = netlink.LinkDel(tapLink)
				if err != nil {
					// Best-effort: log and continue with remaining cleanup.
					log.Errorf("Failed to delete tap link: %v.", err)
				}
			}
			// Delete the branch link.
			la := netlink.NewLinkAttrs()
			la.Name = branchName
			branchLink := &netlink.Vlan{LinkAttrs: la}
			log.Infof("Deleting branch link: %v.", branchName)
			err = netlink.LinkDel(branchLink)
			if err != nil {
				// Best-effort: log and continue with remaining cleanup.
				log.Errorf("Failed to delete branch link: %v.", err)
			}
			if netConfig.InterfaceType == config.IfTypeTAP {
				// Delete the tap bridge.
				la = netlink.NewLinkAttrs()
				la.Name = tapBridgeName
				tapBridge := &netlink.Bridge{LinkAttrs: la}
				log.Infof("Deleting tap bridge: %v.", tapBridgeName)
				err = netlink.LinkDel(tapBridge)
				if err != nil {
					// Best-effort: log and ignore.
					log.Errorf("Failed to delete tap bridge: %v.", err)
				}
			}
			return nil
		})
		if err != nil {
			log.Errorf("Failed to set netns %s, ignoring: %v.", args.Netns, err)
		}
	} else {
		// Log and ignore the failure. DEL can be called multiple times and thus must be idempotent.
		log.Errorf("Failed to find netns %s, ignoring: %v.", args.Netns, err)
	}
	return nil
}
// createVLANLink creates a VLAN link in the target network namespace.
// It renames the branch link to linkName (when they differ), brings the link
// up, assigns the given IP addresses, and installs one default route per
// gateway address. The first failure aborts and is returned to the caller.
func (plugin *Plugin) createVLANLink(
	branch *eni.Branch,
	linkName string,
	ipAddresses []net.IPNet,
	gatewayIPAddresses []net.IP) error {
	// Rename the branch link to the requested interface name.
	if branch.GetLinkName() != linkName {
		log.Infof("Renaming branch link %v to %s.", branch, linkName)
		err := branch.SetLinkName(linkName)
		if err != nil {
			log.Errorf("Failed to rename branch link %v: %v.", branch, err)
			return err
		}
	}
	// Set branch link operational state up.
	err := branch.SetOpState(true)
	if err != nil {
		log.Errorf("Failed to set branch link %v state: %v.", branch, err)
		return err
	}
	// Set branch IP addresses if specified.
	for _, ipAddress := range ipAddresses {
		// Assign the IP address.
		// NOTE(review): &ipAddress takes the address of the loop variable
		// (shared across iterations before Go 1.22). Safe only if
		// AddIPAddress does not retain the pointer — confirm.
		log.Infof("Assigning IP address %v to branch link.", ipAddress)
		err = branch.AddIPAddress(&ipAddress)
		if err != nil {
			log.Errorf("Failed to assign IP address to branch link %v: %v.", branch, err)
			return err
		}
	}
	// Set default gateways if specified.
	for _, gatewayIPAddress := range gatewayIPAddresses {
		// Add default route via branch link.
		route := &netlink.Route{
			Gw:        gatewayIPAddress,
			LinkIndex: branch.GetLinkIndex(),
		}
		log.Infof("Adding default IP route %+v.", route)
		err = netlink.RouteAdd(route)
		if err != nil {
			log.Errorf("Failed to add IP route %+v via branch %v: %v.", route, branch, err)
			return err
		}
	}
	return nil
}
// createTAPLink creates a TAP link in the target network namespace.
// It builds a bridge, rewrites the branch ENI's MAC to the bridge's MAC,
// attaches the branch link to the bridge, creates a multi-queue TAP device on
// the same bridge, sets ownership of the TAP fds, and brings everything up.
func (plugin *Plugin) createTAPLink(
	branch *eni.Branch,
	bridgeName string,
	tapLinkName string,
	tapCfg *config.TAPConfig) error {
	// Create the bridge link.
	la := netlink.NewLinkAttrs()
	la.Name = bridgeName
	la.MTU = vpc.JumboFrameMTU
	bridge := &netlink.Bridge{LinkAttrs: la}
	log.Infof("Creating bridge link %+v.", bridge)
	err := netlink.LinkAdd(bridge)
	if err != nil {
		log.Errorf("Failed to create bridge link: %v", err)
		return err
	}
	// Set bridge link MTU.
	err = netlink.LinkSetMTU(bridge, vpc.JumboFrameMTU)
	if err != nil {
		log.Errorf("Failed to set bridge link MTU: %v", err)
		return err
	}
	// In TAP mode, the branch ENI's MAC address is used exclusively by the consumer of the TAP
	// interface (e.g. a VM), so it shouldn't be assigned to the branch link itself. However, this
	// can happen if the branch link is being reused between successive invocations of the plugin.
	// Overriding the branch link's MAC address with that of the bridge prevents that.
	bridgeLink, err := netlink.LinkByIndex(bridge.Index)
	if err != nil {
		log.Errorf("Failed to find bridge link: %v", err)
		return err
	}
	err = branch.SetMACAddress(bridgeLink.Attrs().HardwareAddr)
	if err != nil {
		log.Errorf("Failed to set branch link MAC address: %v.", err)
		return err
	}
	// Set bridge link operational state up.
	err = netlink.LinkSetUp(bridge)
	if err != nil {
		log.Errorf("Failed to set bridge link state: %v", err)
		return err
	}
	// Connect branch link to the bridge.
	// A Dummy link struct is used only as a by-name handle for LinkSetMaster;
	// the actual link type on the kernel side is unchanged.
	la = netlink.NewLinkAttrs()
	la.Name = branch.GetLinkName()
	branchLink := &netlink.Dummy{LinkAttrs: la}
	err = netlink.LinkSetMaster(branchLink, bridge)
	if err != nil {
		log.Errorf("Failed to set branch link master: %v", err)
		return err
	}
	// Create the TAP link.
	// Parse headers added by virtio_net implementation.
	la = netlink.NewLinkAttrs()
	la.Name = tapLinkName
	la.MasterIndex = bridge.Index
	la.MTU = vpc.JumboFrameMTU
	tapLink := &netlink.Tuntap{
		LinkAttrs: la,
		Mode:      netlink.TUNTAP_MODE_TAP,
		Flags:     netlink.TUNTAP_VNET_HDR,
		Queues:    tapCfg.Queues,
	}
	if tapCfg.Queues == 1 {
		// Single-queue TAP devices are marked one-queue explicitly.
		tapLink.Flags |= netlink.TUNTAP_ONE_QUEUE
	}
	log.Infof("Creating TAP link %+v.", tapLink)
	err = netlink.LinkAdd(tapLink)
	if err != nil {
		log.Errorf("Failed to add TAP link: %v", err)
		return err
	}
	// Set TAP link MTU.
	err = netlink.LinkSetMTU(tapLink, vpc.JumboFrameMTU)
	if err != nil {
		log.Errorf("Failed to set TAP link MTU: %v", err)
		return err
	}
	// Set TAP link ownership.
	// The consumer (e.g. a VM process running as tapCfg.Uid/Gid) must be able
	// to open the TAP device, so ownership is set on each queue fd.
	log.Infof("Setting TAP link owner to UID %d and GID %d.", tapCfg.Uid, tapCfg.Gid)
	for _, tapFd := range tapLink.Fds {
		fd := int(tapFd.Fd())
		err = unix.IoctlSetInt(fd, unix.TUNSETOWNER, tapCfg.Uid)
		if err != nil {
			log.Errorf("Failed to set TAP link UID: %v", err)
			return err
		}
		err = unix.IoctlSetInt(fd, unix.TUNSETGROUP, tapCfg.Gid)
		if err != nil {
			log.Errorf("Failed to set TAP link GID: %v", err)
			return err
		}
		// Close our copy of the fd; the device persists in the kernel.
		tapFd.Close()
	}
	// Set TAP link operational state up.
	err = netlink.LinkSetUp(tapLink)
	if err != nil {
		log.Errorf("Failed to set TAP link state: %v", err)
		return err
	}
	return nil
}
// createMACVTAPLink creates a MACVTAP link in the target network namespace.
// The link is attached to the parent link identified by parentIndex in
// passthru mode and then brought up.
func (plugin *Plugin) createMACVTAPLink(linkName string, parentIndex int) error {
	// Build the MACVTAP link attached to the parent link.
	attrs := netlink.NewLinkAttrs()
	attrs.Name = linkName
	attrs.ParentIndex = parentIndex
	link := &netlink.Macvtap{
		Macvlan: netlink.Macvlan{
			LinkAttrs: attrs,
			Mode:      netlink.MACVLAN_MODE_PASSTHRU,
		},
	}

	log.Infof("Creating MACVTAP link %+v.", link)
	if err := netlink.LinkAdd(link); err != nil {
		log.Errorf("Failed to add MACVTAP link: %v.", err)
		return err
	}

	// Bring the new link operationally up.
	if err := netlink.LinkSetUp(link); err != nil {
		log.Errorf("Failed to set MACVTAP link state: %v.", err)
		return err
	}

	return nil
}
|
package kafkasource
import (
"fmt"
"log"
"os"
"github.com/lfmexi/tcpgateway/events"
"github.com/confluentinc/confluent-kafka-go/kafka"
)
// kafkaEventsouce is the Kafka-backed implementation of events.EventSource.
type kafkaEventsouce struct {
	// createConsumer builds a new Kafka consumer for a given key.
	createConsumer CreateKafkaConsumer
	// producer is the shared producer used by Publish.
	producer KafkaProducer
	// consumersControlChan carries the key of a consumer that should stop.
	consumersControlChan chan string
}
// CreateKafkaEventSource creates an EventSource for kafka
func CreateKafkaEventSource(createConsumer CreateKafkaConsumer, producer KafkaProducer) events.EventSource {
	// Use named fields so the wiring stays obvious if fields are reordered.
	source := &kafkaEventsouce{
		createConsumer:       createConsumer,
		producer:             producer,
		consumersControlChan: make(chan string),
	}
	return source
}
// Publish sends data under the given key to the destination Kafka topic and
// blocks until the broker's delivery report arrives.
//
// It returns an error if the message cannot be enqueued by the producer or if
// the delivery report carries a partition error.
func (es *kafkaEventsouce) Publish(destination string, key string, data []byte) error {
	deliveryChannel := make(chan kafka.Event)
	defer func() {
		close(deliveryChannel)
	}()
	kafkaMessage := &kafka.Message{
		TopicPartition: kafka.TopicPartition{
			Topic:     &destination,
			Partition: kafka.PartitionAny,
		},
		Key:   []byte(key),
		Value: data,
	}
	// BUG FIX: if Produce fails, no delivery report is ever emitted on
	// deliveryChannel, so the receive below would block forever. Check the
	// enqueue error before waiting.
	if err := es.producer.Produce(kafkaMessage, deliveryChannel); err != nil {
		return err
	}
	// Wait for the delivery report and surface any per-partition error.
	e := <-deliveryChannel
	m := e.(*kafka.Message)
	if m.TopicPartition.Error != nil {
		return m.TopicPartition.Error
	}
	return nil
}
// Consume subscribes a new consumer to the topic named key and returns a
// buffered channel of events. A dedicated goroutine pumps consumer events
// until a matching key arrives on consumersControlChan (see Stop), then it
// closes the consumer and the event channel.
func (es *kafkaEventsouce) Consume(key string) (<-chan events.Event, error) {
	consumer, err := es.createConsumer(key)
	if err != nil {
		return nil, err
	}
	// NOTE(review): the error from SubscribeTopics is ignored; a failed
	// subscription still returns a (silent) event channel — confirm intended.
	consumer.SubscribeTopics([]string{key}, nil)
	eventChannel := make(chan events.Event, 10)
	go func() {
		// stopChannel is never written to before being closed below, so the
		// loop only ever exits via consumersControlChan.
		stopChannel := make(chan bool)
	loop:
		for {
			select {
			case ev := <-consumer.Events():
				switch e := ev.(type) {
				case kafka.AssignedPartitions:
					// Rebalance: take ownership of the assigned partitions.
					fmt.Fprintf(os.Stderr, "%% %v\n", e)
					consumer.Assign(e.Partitions)
				case kafka.RevokedPartitions:
					// Rebalance: release previously owned partitions.
					fmt.Fprintf(os.Stderr, "%% %v\n", e)
					consumer.Unassign()
				case *kafka.Message:
					// Forward the raw payload to the subscriber.
					eventChannel <- &kafkaEvent{e.Value}
				case kafka.Error:
					fmt.Fprintf(os.Stderr, "%% Error: %v\n", e)
				}
			case consumerKey := <-es.consumersControlChan:
				// Stop was called for some consumer; this goroutine exits.
				log.Printf("Removing consumer for %s", consumerKey)
				break loop
			case <-stopChannel:
				break loop
			}
		}
		consumer.Close()
		close(stopChannel)
		close(eventChannel)
	}()
	return eventChannel, nil
}
// Stop requests termination of a consumer goroutine started by Consume.
// NOTE(review): the send is on an unbuffered channel, so this blocks until
// some consumer loop receives it — and would block forever if no consumer is
// running. Confirm callers only Stop live consumers.
func (es *kafkaEventsouce) Stop(key string) error {
	es.consumersControlChan <- key
	return nil
}
// kafkaEvent wraps the raw payload of a consumed Kafka message and
// implements events.Event.
type kafkaEvent struct {
	data []byte
}

// Data returns the raw message payload.
func (k kafkaEvent) Data() []byte {
	return k.data
}
|
package interfaces
import "server/src/dto"
// UserRepositoryProvider abstracts persistence for users and the entities
// associated with a user (articles, exercises, achievements, preferences).
type UserRepositoryProvider interface {
	// Create persists a new user.
	Create(user *dto.User) error
	// Lookups by various unique attributes.
	GetById(id string) (*dto.User, error)
	GetByLogin(login string) (*dto.User, error)
	GetByEmail(email string) (*dto.User, error)
	// GetByLoginAndHashedPassword is the credential check used at login;
	// the password is expected to be pre-hashed by the caller.
	GetByLoginAndHashedPassword(login string, hashedPassword string) (*dto.User, error)
	// Update overwrites an existing user record.
	Update(user *dto.User) error
	// RemoveById deletes the user with the given id.
	RemoveById(id string) error
	// Per-user associated collections.
	GetUserArticles(userId string) ([]*dto.Article, error)
	GetUserFocusingExercises(userId string) ([]*dto.FocusingExercise, error)
	GetUserMeditationExercises(userId string) ([]*dto.MeditationExercise, error)
	GetUserAchievements(userId string) ([]*dto.Achievement, error)
	// Preferences are stored as plain preference-name strings.
	GetUserPreferences(userId string) ([]string, error)
	AddUserPreference(userId string, preferenceName string) error
}
|
package main
import (
"context"
"time"
"fmt"
)
// main starts three watcher goroutines that stop automatically when the
// context's 5-second deadline expires.
func main() {
	// Deadline 5 seconds from now; the watchers observe it via ctx.Done().
	t := time.Now().Add(5 * time.Second)
	// BUG FIX: the cancel func returned by WithDeadline was discarded with _,
	// which go vet flags and which leaks the context's timer until expiry.
	// Keep it and defer it.
	ctx, cancel := context.WithDeadline(context.Background(), t)
	defer cancel()
	// The context is passed by value here, not by address.
	go watch(ctx, "【监控1】")
	go watch(ctx, "【监控2】")
	go watch(ctx, "【监控3】")
	time.Sleep(100 * time.Second)
	fmt.Println("可以了,通知监控停止")
	// To verify the watchers stopped: if no more watcher output appears, they exited.
	time.Sleep(2 * time.Second)
}
func watch(ctx context.Context, name string) {
//内存地址是不一样的
//fmt.Println(&ctx);
for {
select {
case <-ctx.Done():
fmt.Println(name,"监控退出,停止了...")
return
default:
fmt.Println(name,"goroutine监控中...")
time.Sleep(2 * time.Second)
}
}
}
//func watch2(ctx context.Context, name string) {
// for {
// select {
// case <-ctx.Done():
// fmt.Println(name, "watch2监控退出,停止了...")
// return
// default:
// fmt.Println(name, "watch2goroutine监控中...")
// time.Sleep(2 * time.Second)
// }
// }
//}
|
package txpool
import (
"fmt"
"github.com/HNB-ECO/HNB-Blockchain/HNB/common"
"github.com/HNB-ECO/HNB-Blockchain/HNB/logging"
"github.com/HNB-ECO/HNB-Blockchain/HNB/p2pNetwork"
"github.com/pkg/errors"
//"github.com/HNB-ECO/HNB-Blockchain/HNB/p2pNetwork/message/reqMsg"
//"encoding/json"
"encoding/json"
"github.com/HNB-ECO/HNB-Blockchain/HNB/p2pNetwork/message/reqMsg"
)
var TXPoolLog logging.LogModule
var TxPoolIns *TXPoolServer
const (
LOGTABLE_TXPOOL string = "txpool"
//HGS string = "hgs"
//HNB string = "hnb"
)
// TXPoolServer exposes transaction-pool operations to the rest of the node
// by wrapping a single TxPool instance.
type TXPoolServer struct {
	txpool *TxPool
}
// NewTXPoolServer builds a TXPoolServer around a TxPool created with the
// default configuration. It also initializes the package-level logger and
// publishes the instance through the TxPoolIns global.
func NewTXPoolServer() *TXPoolServer {
	TXPoolLog = logging.GetLogIns()
	server := &TXPoolServer{
		txpool: NewTxPool(DefaultTxPoolConfig),
	}
	TxPoolIns = server
	return server
}
// RecvTx handles an incoming serialized transaction.
//
// msgSender == 0 marks a locally originated transaction: it is gossiped to
// the p2p network and added to the pool as local. Any other sender id marks
// it as received from a peer and it is added as remote.
func (tps *TXPoolServer) RecvTx(msg []byte, msgSender uint64) error {
	if msg == nil {
		// fmt.Sprintf with no format verbs was redundant (staticcheck S1039);
		// build the error directly.
		err := errors.New("msg is nil")
		TXPoolLog.Warning(LOGTABLE_TXPOOL, err.Error())
		return err
	}
	tx := common.Transaction{}
	if err := json.Unmarshal(msg, &tx); err != nil {
		TXPoolLog.Warning(LOGTABLE_TXPOOL, err.Error())
		return err
	}
	var err error
	if msgSender == 0 { // local: broadcast first, then pool as local
		m := reqMsg.NewTxMsg(msg)
		p2pNetwork.Xmit(m, false)
		err = tps.txpool.AddLocal(&tx)
	} else { // remote: received from a peer
		err = tps.txpool.AddRemote(&tx)
	}
	if err != nil {
		return err
	}
	infoStr := fmt.Sprintf("recv tx %v", string(msg))
	TXPoolLog.Debugf(LOGTABLE_TXPOOL, infoStr)
	return nil
}
// GetTxsFromTXPool returns up to count pending transactions from the pool,
// flattening the per-address pending map into a single slice.
func (tps *TXPoolServer) GetTxsFromTXPool(chainId string, count int) ([]*common.Transaction, error) {
	pending, err := tps.txpool.Pending()
	if err != nil {
		return nil, err
	}
	var selected []*common.Transaction
	for _, perAddress := range pending {
		for _, tx := range perAddress {
			selected = append(selected, tx)
			// Stop as soon as the requested number has been collected.
			if len(selected) == count {
				return selected, nil
			}
		}
	}
	return selected, nil
}
// Start registers RecvTx as the p2p network's transaction-notification callback.
func (tps *TXPoolServer) Start() {
	p2pNetwork.RegisterTxNotify(tps.RecvTx)
}
// DelTxs removes the given transactions from the pool. The batch is handed to
// the pool's worker over recvBlkTxs, and the call blocks on the res channel
// until the worker signals the deletion is complete.
func (tps *TXPoolServer) DelTxs(chainId string, txs []*common.Transaction) {
	resChan := make(chan struct{})
	dbt := &DelBlkTxs{}
	dbt.res = resChan
	dbt.txs = txs
	tps.txpool.recvBlkTxs <- dbt
	// Wait for the worker's completion signal.
	<-dbt.res
	TXPoolLog.Infof(LOGTABLE_TXPOOL, "del txs ok")
}
// IsTxsLenZero reports whether the pool currently has no pending transactions.
// NOTE(review): the error from Pending() is discarded, so a failing pool also
// reports "empty" — confirm that is acceptable for callers.
func (tps *TXPoolServer) IsTxsLenZero(chainId string) bool {
	pending, _ := tps.txpool.Pending()
	return len(pending) == 0
}
// NotifyTx returns the pool's notification channel (see TxPool.GetNotify).
func (tps *TXPoolServer) NotifyTx() chan struct{} {
	return tps.txpool.GetNotify()
}
// GetPendingNonce returns the nonce recorded for address in the pool's state.
func (tps *TXPoolServer) GetPendingNonce(address common.Address) uint64 {
	return tps.txpool.State().GetNonce(address)
}
// GetContent returns the pool's two address-indexed transaction maps
// (presumably pending and queued — confirm against TxPool.Content).
func (tps *TXPoolServer) GetContent() (map[common.Address]common.Transactions, map[common.Address]common.Transactions) {
	return tps.txpool.Content()
}
|
package main
import (
"encoding/json"
"time"
"github.com/tidusant/chadmin-repo/models"
"gopkg.in/mgo.v2/bson"
)
// Template is the MongoDB document describing a site template, including its
// assets (content, pages, css, script, images, fonts) and review status.
type Template struct {
	ID bson.ObjectId `bson:"_id,omitempty"`
	Code string `bson:"code"`
	UserID string `bson:"userid"`
	Status int `bson:"status"` //-2: delete, -1: reject, 1: approved and publish, 2: pending, 3: approved but not publish
	Title string `bson:"title"`
	Description string `bson:"description"`
	Viewed int `bson:"viewed"`
	// Presumably IDs of users/sites that installed or activated this
	// template — confirm against writer code.
	InstalledIDs []string `bson:"installedid"`
	ActiveIDs []string `bson:"activedid"`
	Configs string `bson:"configs"`
	Avatar string `bson:"avatar"`
	Created time.Time `bson:"created"`
	Modified time.Time `bson:"modified"`
	Content string `bson:"content"`
	Pages string `bson:"pages"`
	CSS string `bson:"css"`
	Script string `bson:"script"`
	Images string `bson:"images"`
	Fonts string `bson:"fonts"`
	Screenshot string `bson:"screenshot"`
	Langs string `bson:"langs"`
}
// RequestResult is the generic JSON envelope returned to API callers; Data
// is kept raw so callers can decode it into the appropriate type.
type RequestResult struct {
	Status string `json:"status"`
	Error string `json:"error"`
	Message string `json:"message"`
	Data json.RawMessage `json:"data"`
}
// ViewData aggregates everything needed to render a template page:
// per-page raw data, resolved URLs/paths, and template resources/configs.
type ViewData struct {
	PageName string
	Siteurl string
	Data map[string]json.RawMessage
	TemplatePath string
	Templateurl string
	Imageurl string
	Pages map[string]string
	Resources map[string]string
	Configs map[string]string
}
// NewsCat is a news category as stored in MongoDB.
type NewsCat struct {
	Code string `bson:"code"`
	Slug string `bson:"slug"`
	Title string `bson:"title"`
	Description string `bson:"description"`
	Content string `bson:"content"`
}
// News is a news article as stored in MongoDB, linked to its category by CatId.
// (The previous comment said "NewsLang", which did not match this type.)
type News struct {
	CatId string `bson:"catid"`
	Title string `bson:"title"`
	Slug string `bson:"slug"`
	Content string `bson:"content"`
	Description string `bson:"description"`
	Avatar string `bson:"avatar"`
	Viewed int `bson:"viewed"`
}
// ProdCat is a product category as stored in MongoDB.
// NOTE(review): the Content field's bson tag is "conent" — looks like a typo
// for "content", but existing documents may already use that key; confirm
// against the data before changing it.
type ProdCat struct {
	Code string `bson:"code"`
	Slug string `bson:"slug"`
	Title string `bson:"name"`
	Description string `bson:"description"`
	Content string `bson:"conent"`
}
// Product is a product document plus untagged view-side fields (MaxPrice,
// MinPrice, CatTitle, CatSlug, Properties, NumInCart) that are not mapped to
// explicit bson keys here.
type Product struct {
	CatId string `bson:"catid"`
	Name string `bson:"name"`
	Slug string `bson:"slug"`
	MaxPrice int
	MinPrice int
	BasePrice int `bson:"baseprice"`
	DiscountPrice int `bson:"discountprice"`
	PercentDiscount int `bson:"percentdiscount"`
	Description string `bson:"description"`
	Content string `bson:"content"`
	Avatar string `bson:"avatar"`
	Images []string `bson:"images"`
	Viewed int `bson:"viewed"`
	CatTitle string
	CatSlug string
	Properties []models.ProductProperty
	NumInCart int
}
|
package service
import (
"context"
"fmt"
"io"
"net/http"
"net/url"
"github.com/go-ocf/cloud/cloud2cloud-connector/events"
oapiStore "github.com/go-ocf/cloud/cloud2cloud-connector/store"
"github.com/go-ocf/cloud/cloud2cloud-gateway/store"
"github.com/go-ocf/kit/codec/json"
kitNetGrpc "github.com/go-ocf/kit/net/grpc"
"github.com/gofrs/uuid"
pbAS "github.com/go-ocf/cloud/authorization/pb"
cqrsRA "github.com/go-ocf/cloud/resource-aggregate/cqrs"
pbRA "github.com/go-ocf/cloud/resource-aggregate/pb"
"github.com/gorilla/mux"
)
// IsAuthorized verifies that the user owning the access token carried by r is
// allowed to access the device identified by deviceID. It streams the user's
// devices from the authorization service and succeeds only if deviceID is
// among them.
func (rh *RequestHandler) IsAuthorized(ctx context.Context, r *http.Request, deviceID string) error {
	token, err := getAccessToken(r)
	if err != nil {
		// FIX: message previously said "cannot authorized: cannot get users
		// devices" — wrong grammar and wrong step (this is the token fetch).
		return fmt.Errorf("cannot authorize: cannot get access token: %w", err)
	}
	getUserDevicesClient, err := rh.asClient.GetUserDevices(kitNetGrpc.CtxWithToken(ctx, token), &pbAS.GetUserDevicesRequest{
		DeviceIdsFilter: []string{deviceID},
	})
	if err != nil {
		return fmt.Errorf("cannot authorize: cannot get users devices: %w", err)
	}
	defer getUserDevicesClient.CloseSend()
	for {
		userDevice, err := getUserDevicesClient.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("cannot authorize: cannot get users devices: %w", err)
		}
		if userDevice.DeviceId == deviceID {
			// The device belongs to the user: authorized.
			return nil
		}
	}
	return fmt.Errorf("cannot authorize: access denied")
}
// SubscriptionResponse is the JSON body returned after a subscription is created.
type SubscriptionResponse struct {
	SubscriptionID string `json:"subscriptionId"`
}
// makeSubscription decodes and validates a subscription request body and
// assembles a store.Subscription of the given type for userID. Requested
// event types are filtered against validEventTypes; at least one must remain.
// It returns the subscription, an HTTP status code and an error.
func (rh *RequestHandler) makeSubscription(w http.ResponseWriter, r *http.Request, typ oapiStore.Type, userID string, validEventTypes []events.EventType) (store.Subscription, int, error) {
	var res store.Subscription
	var req events.SubscriptionRequest
	err := json.ReadFrom(r.Body, &req)
	if err != nil {
		return res, http.StatusBadRequest, fmt.Errorf("cannot decode request body: %w", err)
	}
	_, err = url.Parse(req.URL)
	if err != nil {
		return res, http.StatusBadRequest, fmt.Errorf("invalid eventsurl(%v)", err)
	}
	// Keep only the requested event types that are valid for this endpoint.
	eventTypes := make([]events.EventType, 0, 10)
	for _, t := range req.EventTypes {
		ev := events.EventType(t)
		for _, v := range validEventTypes {
			if ev == v {
				eventTypes = append(eventTypes, ev)
			}
		}
	}
	if len(eventTypes) == 0 {
		// FIX: the previous message formatted err here, but err is always nil
		// on this path; report the offending input instead.
		return res, http.StatusBadRequest, fmt.Errorf("invalid eventtypes(%v)", req.EventTypes)
	}
	res.ID = uuid.Must(uuid.NewV4()).String()
	res.EventTypes = eventTypes
	res.URL = req.URL
	res.CorrelationID = r.Header.Get(events.CorrelationIDKey)
	res.ContentType = r.Header.Get(events.ContentTypeKey)
	res.UserID = userID
	res.SigningSecret = req.SigningSecret
	res.Type = typ
	return res, http.StatusOK, nil
}
// subscribeToResource creates a subscription for resource-changed events on a
// single device resource. It authorizes the caller, builds and stores the
// subscription, registers the device in the resource projection, and emits an
// initial ResourceChanged event per projected model. On any failure after the
// subscription is saved, both the subscription and the projection registration
// are rolled back. Returns the HTTP status code and an error.
func (rh *RequestHandler) subscribeToResource(w http.ResponseWriter, r *http.Request) (int, error) {
	routeVars := mux.Vars(r)
	deviceID := routeVars[deviceIDKey]
	href := routeVars[resourceLinkHrefKey]
	err := rh.IsAuthorized(r.Context(), r, deviceID)
	if err != nil {
		return http.StatusUnauthorized, err
	}
	_, userID, err := parseAuth(r.Header.Get("Authorization"))
	if err != nil {
		return http.StatusBadRequest, fmt.Errorf("cannot parse authorization header: %w", err)
	}
	s, code, err := rh.makeSubscription(w, r, oapiStore.Type_Resource, userID, []events.EventType{events.EventType_ResourceChanged})
	if err != nil {
		return code, err
	}
	// Track the device in the projection so its resources are materialized.
	_, err = rh.resourceProjection.Register(r.Context(), deviceID)
	if err != nil {
		return http.StatusBadRequest, fmt.Errorf("cannot register to resource projection: %w", err)
	}
	resourceID := cqrsRA.MakeResourceId(deviceID, href)
	models := rh.resourceProjection.Models(deviceID, resourceID)
	if len(models) == 0 {
		// Resource not yet materialized; force a projection update.
		err = rh.resourceProjection.ForceUpdate(r.Context(), deviceID, resourceID)
		if err != nil {
			rh.resourceProjection.Unregister(deviceID)
			return http.StatusBadRequest, fmt.Errorf("cannot load resource: %w", err)
		}
	}
	s.DeviceID = deviceID
	s.Href = href
	err = rh.store.SaveSubscription(r.Context(), s)
	if err != nil {
		return http.StatusBadRequest, fmt.Errorf("cannot save subscription: %w", err)
	}
	// Emit the initial ResourceChanged event for each projected model.
	models = rh.resourceProjection.Models(deviceID, resourceID)
	for _, m := range models {
		resourceCtx := m.(*resourceCtx).Clone()
		if resourceCtx.content.GetStatus() != pbRA.Status_OK && resourceCtx.content.GetStatus() != pbRA.Status_UNKNOWN {
			rh.store.PopSubscription(r.Context(), s.ID)
			rh.resourceProjection.Unregister(deviceID)
			// NOTE(review): err is nil on this path, so %w wraps nothing —
			// the message likely meant to report the resource status.
			return statusToHttpStatus(resourceCtx.content.GetStatus()), fmt.Errorf("cannot prepare content to emit first event: %w", err)
		}
		rep, err := unmarshalContent(resourceCtx.content.GetContent())
		if err != nil {
			rh.store.PopSubscription(r.Context(), s.ID)
			rh.resourceProjection.Unregister(deviceID)
			return http.StatusBadRequest, fmt.Errorf("cannot prepare content to emit first event: %w", err)
		}
		_, err = emitEvent(r.Context(), events.EventType_ResourceChanged, s, rh.store.IncrementSubscriptionSequenceNumber, rep)
		if err != nil {
			rh.store.PopSubscription(r.Context(), s.ID)
			rh.resourceProjection.Unregister(deviceID)
			return http.StatusBadRequest, fmt.Errorf("cannot emit event: %w", err)
		}
	}
	err = jsonResponseWriterEncoder(w, SubscriptionResponse{
		SubscriptionID: s.ID,
	}, http.StatusCreated)
	if err != nil {
		rh.store.PopSubscription(r.Context(), s.ID)
		rh.resourceProjection.Unregister(deviceID)
		return http.StatusBadRequest, fmt.Errorf("cannot write response: %w", err)
	}
	return http.StatusOK, nil
}
// SubscribeToResource is the HTTP handler wrapper around subscribeToResource;
// on failure it logs the error and writes an error response with the
// returned status code.
func (rh *RequestHandler) SubscribeToResource(w http.ResponseWriter, r *http.Request) {
	statusCode, err := rh.subscribeToResource(w, r)
	if err != nil {
		logAndWriteErrorResponse(fmt.Errorf("cannot subscribe to resource: %w", err), statusCode, w)
	}
}
|
// Package activitystream provides an interface to implement an activitystream.
// Further it contains a default implementation using Redis.
//
// Definition ActivityStream
// An ActivityStream is a list of Activities sorted by time of insertion (LIFO)
//
// By this definition an ActivityStream is a list, not a set. Therefore elements that are inserted multiple times
// will also appear multiple times in the stream.
package activitystream
import (
"github.com/garyburd/redigo/redis"
)
// DefaultMaxStreamSize is the number of elements a stream can store by default.
// This number can be adjusted on the ActivityStream using its method SetMaxStreamSize.
const DefaultMaxStreamSize = 50

// Direction represents the direction a pagination token is going
type Direction bool

const (
	// After Direction indicates in pagination that next page comes After a certain element
	After Direction = true
	// Before Direction indicates in pagination that next page comes Before a certain element
	// (the previous comment wrongly said "After" here).
	Before Direction = false
)

// ErrEmpty signals an empty/missing result; it aliases redis.ErrNil from the
// default Redis-backed implementation.
var ErrEmpty = redis.ErrNil

// ActivityStream interface defines functionality to implement an activity stream. An activity can be stored and added
// to a stream. A stream is always sorted with the newest (last insterted) element on top.
type ActivityStream interface {
	// Init initializes the ActivityStream, arguments are defined by specific implementation
	Init(args ...string)
	// SetMaxStreamSize will set the maximum number of elements of a stream to the specified number.
	// A negative number means there is no limit, the streams will keep growing.
	// Important: Decreasing this number will
	// 1. not affect existing streams unless a new element is added.
	// 2. by adding a new element to an existing stream, the stream will be cut down to the new maximum
	SetMaxStreamSize(maxStreamSize int)
	// Get returns a single Activity by its ID
	Get(id string) (activity Activity, err error)
	// BulkGet returns an array of Activity by their IDs
	BulkGet(id ...string) ([]Activity, error)
	// Store stores a single Activity in the database
	// This method is idempotent since the Activity is identified by its ID.
	Store(activity Activity) error
	// GetStream returns an array of Activity belonging to a certain stream. First element is last inserted.
	// The stream is identified by its ID.
	// Pagination is provided as follow:
	// limit the size of the page
	// pivotID the last received ID, this element will not be included in the result
	// direction the direction from pivotID, the page starts either After the pivot or Before the pivot
	GetStream(streamId string, limit int, pivotID int, direction Direction) ([]Activity, error)
	// AddToStreams adds a certain activity to one or more streams. The streams are identified by their IDs
	// Important: This will also write the activity to database, a call to the method 'Store' would be duplicate
	AddToStreams(activity Activity, streamIds ...string) []error
}
|
package main
func combine(n int, k int) [][]int {
res := [][]int{}
cur := []int{}
combination(n, k, 1, cur, &res)
return res
}
func combination(n int, k int, start int, cur []int, res *[][]int) {
if k == len(cur) {
temp := make([]int, len(cur))
copy(temp, cur)
*res = append(*res, temp)
return
}
for i := start; i <= n-(k-len(cur))+1; i++ { //这里i的取值上限这么定义是因为:每次开始的位置不能重复,即每个数字不能重复使用
cur = append(cur, i)
combination(n, k, i+1, cur, res) //i+1,因为不能包括自身,对比problem39
cur = cur[:len(cur)-1]
}
return
}
|
package script
import "reflect"
// GoTypeOf returns the reflect.Type of the Go value that corresponds to the
// given script Value (as defined by GoValueOf). For unmapped values GoValueOf
// yields nil, and reflect.TypeOf(nil) returns a nil Type.
func GoTypeOf(value Value) reflect.Type {
	return reflect.TypeOf(GoValueOf(value))
}
// GoValueOf maps a script Value to the zero value of its Go counterpart:
// Int -> int(0), String -> "", Bool -> false. Any other value maps to nil.
func GoValueOf(value Value) interface{} {
	switch value.(type) {
	case Int:
		return 0
	case String:
		return ""
	case Bool:
		return false
	}
	return nil
}
|
package router
import "context"
// Handler serves a transport-level gRPC request and returns the (possibly
// updated) context, the encoded response, and an error.
type Handler interface {
	ServeGRPC(ctx context.Context, request interface{}) (context.Context, interface{}, error)
}
// grpcHandler wires an Endpoint to its transport decode/encode functions.
type grpcHandler struct {
	endpoint Endpoint
	decode DecodeGrpcRequestFunc
	encode EncodeGrpcResponseFunc
}
// NewGrpcHandler builds a gRPC handler from an endpoint and its codec funcs.
// NOTE(review): an exported constructor returning the unexported *grpcHandler
// is flagged by golint; consider returning the Handler interface instead
// (changing it now would alter the signature for existing callers).
func NewGrpcHandler(endpoint Endpoint, decode DecodeGrpcRequestFunc, encode EncodeGrpcResponseFunc) *grpcHandler {
	return &grpcHandler{
		endpoint: endpoint,
		decode: decode,
		encode: encode,
	}
}
// ServeGRPC runs the decode -> endpoint -> encode pipeline for one request.
// Any stage error aborts the pipeline and is returned with a nil response;
// the context is always passed back to the caller.
func (g *grpcHandler) ServeGRPC(ctx context.Context, req interface{}) (context.Context, interface{}, error) {
	decoded, err := g.decode(ctx, req)
	if err != nil {
		return ctx, nil, err
	}

	result, err := g.endpoint(ctx, decoded)
	if err != nil {
		return ctx, nil, err
	}

	encoded, err := g.encode(ctx, result)
	if err != nil {
		return ctx, nil, err
	}

	return ctx, encoded, nil
}
// DecodeGrpcRequestFunc converts a transport-level gRPC request into the
// domain request passed to the endpoint.
type DecodeGrpcRequestFunc func(context.Context, interface{}) (request interface{}, err error)

// EncodeGrpcResponseFunc converts the endpoint's domain response into the
// transport-level gRPC response.
type EncodeGrpcResponseFunc func(context.Context, interface{}) (response interface{}, err error)
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Package controller contains the implementation of the Controller CRD reconcile function
package controller
import (
"context"
"reflect"
"strconv"
"strings"
"time"
"github.com/google/uuid"
rocketmqv1alpha1 "github.com/apache/rocketmq-operator/pkg/apis/rocketmq/v1alpha1"
cons "github.com/apache/rocketmq-operator/pkg/constants"
"github.com/apache/rocketmq-operator/pkg/share"
"github.com/apache/rocketmq-operator/pkg/tool"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
var log = logf.Log.WithName("dledger_controller")
// SetupWithManager creates a new Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func SetupWithManager(mgr manager.Manager) error {
	return add(mgr, newReconciler(mgr))
}
// newReconciler returns a new reconcile.Reconciler backed by the manager's
// cached client and scheme.
func newReconciler(mgr manager.Manager) reconcile.Reconciler {
	return &ReconcileController{client: mgr.GetClient(), scheme: mgr.GetScheme()}
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler.
// It watches the primary Controller resource and the Pods owned by it, so
// changes to either enqueue a reconcile request for the owning Controller.
func add(mgr manager.Manager, r reconcile.Reconciler) error {
	// Create a new controller
	c, err := controller.New("dledger-controller", mgr, controller.Options{Reconciler: r})
	if err != nil {
		return err
	}
	// Watch for changes to primary resource Controller
	err = c.Watch(&source.Kind{Type: &rocketmqv1alpha1.Controller{}}, &handler.EnqueueRequestForObject{})
	if err != nil {
		return err
	}
	// TODO(user): Modify this to be the types you create that are owned by the primary resource
	// Watch for changes to secondary resource Pods and requeue the owner Controller
	err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{
		IsController: true,
		OwnerType: &rocketmqv1alpha1.Controller{},
	})
	if err != nil {
		return err
	}
	return nil
}
//+kubebuilder:rbac:groups=rocketmq.apache.org,resources=controllers,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=rocketmq.apache.org,resources=controllers/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=rocketmq.apache.org,resources=controllers/finalizers,verbs=update
//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=pods/exec,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="apps",resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
// ReconcileController reconciles a Controller object. It is created by
// newReconciler and driven by the watches registered in add.
type ReconcileController struct {
	// This client, initialized using mgr.Client() above, is a split client
	// that reads objects from the cache and writes to the apiserver
	client client.Client
	// scheme is used to set owner references on the objects this controller creates.
	scheme *runtime.Scheme
}
// Reconcile reads that state of the cluster for a Controller object and makes changes based on the state read
// and what is in the Controller.Spec: it ensures the headless service, the
// StatefulSet, and the client-facing service exist, and keeps Status.Size and
// Status.Nodes in sync with the observed pods.
// Note:
// The Controller will requeue the Request to be processed again if the returned error is non-nil or
// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
// NOTE(review): the ctx parameter is unused; every API call below uses context.TODO().
func (r *ReconcileController) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
	reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name)
	reqLogger.Info("Reconciling Controller.")
	// Fetch the Controller instance
	controller := &rocketmqv1alpha1.Controller{}
	err := r.client.Get(context.TODO(), request.NamespacedName, controller)
	if err != nil {
		if errors.IsNotFound(err) {
			// Request object not found, could have been deleted after reconcile request.
			// Owned objects are automatically garbage collected. For additional cleanup logic use finalizers.
			// Return and don't requeue
			reqLogger.Info("Controller resource not found. Ignoring since object must be deleted.")
			return reconcile.Result{}, nil
		}
		// Error reading the object - requeue the request.
		reqLogger.Error(err, "Failed to get Controller.")
		return reconcile.Result{RequeueAfter: time.Duration(cons.RequeueIntervalInSecond) * time.Second}, err
	}
	// Ensure the headless svc exists (gives each StatefulSet pod a stable DNS name);
	// create it if missing.
	headlessSvc := &corev1.Service{}
	err = r.client.Get(context.TODO(), types.NamespacedName{Name: tool.BuildHeadlessSvcResourceName(request.Name), Namespace: request.Namespace}, headlessSvc)
	if err != nil {
		if errors.IsNotFound(err) {
			// create;
			consoleSvc := r.generateHeadlessSvc(controller)
			err = r.client.Create(context.TODO(), consoleSvc)
			if err != nil {
				reqLogger.Error(err, "Failed to create controller headless svc")
				return reconcile.Result{}, err
			} else {
				reqLogger.Info("Successfully create controller headless svc")
			}
		} else {
			return reconcile.Result{}, err
		}
	}
	sts := r.getControllerStatefulSet(controller)
	// Check if the statefulSet already exists, if not create a new one
	found := &appsv1.StatefulSet{}
	err = r.client.Get(context.TODO(), types.NamespacedName{Name: sts.Name, Namespace: sts.Namespace}, found)
	if err != nil && errors.IsNotFound(err) {
		reqLogger.Info("Creating a new Controller StatefulSet.", "StatefulSet.Namespace", sts.Namespace, "StatefulSet.Name", sts.Name)
		err = r.client.Create(context.TODO(), sts)
		if err != nil {
			// NOTE(review): the create error is only logged; reconciliation
			// continues and relies on the unconditional requeue below to retry.
			reqLogger.Error(err, "Failed to create new Controller StatefulSet", "StatefulSet.Namespace", sts.Namespace, "StatefulSet.Name", sts.Name)
		}
	} else if err != nil {
		reqLogger.Error(err, "Failed to list Controller StatefulSet.")
	}
	// List the pods for this controller's statefulSet
	podList := &corev1.PodList{}
	labelSelector := labels.SelectorFromSet(labelsForController(controller.Name))
	listOps := &client.ListOptions{
		Namespace:     controller.Namespace,
		LabelSelector: labelSelector,
	}
	err = r.client.List(context.TODO(), podList, listOps)
	if err != nil {
		reqLogger.Error(err, "Failed to list pods.", "Controller.Namespace", controller.Namespace, "Controller.Name", controller.Name)
		return reconcile.Result{}, err
	}
	podNames := getPodNames(podList.Items)
	log.Info("controller.Status.Nodes length = " + strconv.Itoa(len(controller.Status.Nodes)))
	log.Info("podNames length = " + strconv.Itoa(len(podNames)))
	// Ensure every pod is in running phase. Pods that are not running are only
	// logged; the requeue at the end of this function retries later.
	for _, pod := range podList.Items {
		if !reflect.DeepEqual(pod.Status.Phase, corev1.PodRunning) {
			log.Info("pod " + pod.Name + " phase is " + string(pod.Status.Phase) + ", wait for a moment...")
		}
	}
	// Update status.Size if needed
	if controller.Spec.Size != controller.Status.Size {
		log.Info("controller.Status.Size = " + strconv.Itoa(controller.Status.Size))
		log.Info("controller.Spec.Size = " + strconv.Itoa(controller.Spec.Size))
		controller.Status.Size = controller.Spec.Size
		err = r.client.Status().Update(context.TODO(), controller)
		if err != nil {
			reqLogger.Error(err, "Failed to update Controller Size status.")
		}
	}
	// Update status.Nodes if needed
	if !reflect.DeepEqual(podNames, controller.Status.Nodes) {
		controller.Status.Nodes = podNames
		err = r.client.Status().Update(context.TODO(), controller)
		if err != nil {
			reqLogger.Error(err, "Failed to update Controller Nodes status.")
		}
	}
	// Ensure the client-facing svc exists; create it if missing.
	controllerSvc := &corev1.Service{}
	controllerSvcName := tool.BuildSvcResourceName(request.Name)
	err = r.client.Get(context.TODO(), types.NamespacedName{Name: controllerSvcName, Namespace: request.Namespace}, controllerSvc)
	if err != nil {
		if errors.IsNotFound(err) {
			// create;
			svcToCreate := r.generateSvc(controller)
			err = r.client.Create(context.TODO(), svcToCreate)
			if err != nil {
				reqLogger.Error(err, "Failed to create controller svc")
				return reconcile.Result{}, err
			} else {
				reqLogger.Info("Successfully create controller svc")
			}
		} else {
			return reconcile.Result{}, err
		}
	}
	// Publish the access point for other components (port 9878 matches the
	// service/container port used throughout this file).
	share.ControllerAccessPoint = controllerSvcName + ":9878"
	// Always requeue so drift is re-checked periodically.
	return reconcile.Result{Requeue: true, RequeueAfter: time.Duration(cons.RequeueIntervalInSecond) * time.Second}, nil
}
// getControllerStatefulSet returns a controller StatefulSet object built from the
// Controller spec: one container mounting the first VolumeClaimTemplate for both
// logs and store, fronted by the headless service.
// NOTE(review): indexes Spec.VolumeClaimTemplates[0] unconditionally — panics if
// the CR declares no volume claim templates; confirm the CRD guarantees at least one.
func (r *ReconcileController) getControllerStatefulSet(controller *rocketmqv1alpha1.Controller) *appsv1.StatefulSet {
	ls := labelsForController(controller.Name)
	// After CustomResourceDefinition version upgraded from v1beta1 to v1
	// `controller.spec.VolumeClaimTemplates.metadata` declared in yaml will not be stored by kubernetes.
	// Here is a temporary repair method: to generate a random name
	// (this mutates the in-memory CR object, not the stored one).
	if strings.EqualFold(controller.Spec.VolumeClaimTemplates[0].Name, "") {
		controller.Spec.VolumeClaimTemplates[0].Name = uuid.New().String()
	}
	var replica = int32(controller.Spec.Size)
	dep := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:      controller.Name,
			Namespace: controller.Namespace,
		},
		Spec: appsv1.StatefulSetSpec{
			// The headless service provides stable per-pod DNS names used by
			// the DLedger peers string (see getENV).
			ServiceName: tool.BuildHeadlessSvcResourceName(controller.Name),
			Replicas:    &replica,
			Selector: &metav1.LabelSelector{
				MatchLabels: ls,
			},
			UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
				Type: appsv1.RollingUpdateStatefulSetStrategyType,
			},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: ls,
				},
				Spec: corev1.PodSpec{
					ServiceAccountName: controller.Spec.ServiceAccountName,
					Affinity:           controller.Spec.Affinity,
					Tolerations:        controller.Spec.Tolerations,
					NodeSelector:       controller.Spec.NodeSelector,
					PriorityClassName:  controller.Spec.PriorityClassName,
					ImagePullSecrets:   controller.Spec.ImagePullSecrets,
					Containers: []corev1.Container{{
						Resources:       controller.Spec.Resources,
						Image:           controller.Spec.ControllerImage,
						Name:            cons.ControllerContainerName,
						SecurityContext: getContainerSecurityContext(controller),
						ImagePullPolicy: controller.Spec.ImagePullPolicy,
						Env:             getENV(controller),
						// Logs and store share the same volume, separated by subPath.
						VolumeMounts: []corev1.VolumeMount{{
							MountPath: cons.LogMountPath,
							Name:      controller.Spec.VolumeClaimTemplates[0].Name,
							SubPath:   cons.LogSubPathName,
						}, {
							MountPath: cons.StoreMountPath,
							Name:      controller.Spec.VolumeClaimTemplates[0].Name,
							SubPath:   cons.StoreSubPathName,
						}},
						// Command: []string{"sh", "mqcontroller"},
					}},
					Volumes:         getVolumes(controller),
					SecurityContext: getPodSecurityContext(controller),
				},
			},
			VolumeClaimTemplates: getVolumeClaimTemplates(controller),
		},
	}
	// Set Controller instance as the owner and controller so the StatefulSet is
	// garbage-collected with the CR. NOTE(review): the returned error is ignored.
	controllerutil.SetControllerReference(controller, dep, r.scheme)
	return dep
}
// getENV builds the container environment: the pod name (via the downward API),
// the DLedger group name, the semicolon-separated peer list addressed through the
// headless service, and the store path, followed by any user-supplied env vars.
func getENV(controller *rocketmqv1alpha1.Controller) []corev1.EnvVar {
	size := int(controller.Spec.Size)
	peers := make([]string, 0, size)
	for i := 0; i < size; i++ {
		idx := strconv.Itoa(i)
		peers = append(peers, controller.Name+idx+"-"+controller.Name+"-"+idx+"."+tool.BuildHeadlessSvcResourceName(controller.Name)+":9878")
	}
	controllerDLegerPeersStr := strings.Join(peers, ";")
	log.Info("controllerDLegerPeersStr=" + controllerDLegerPeersStr)
	envs := []corev1.EnvVar{{
		Name:      "MY_POD_NAME",
		ValueFrom: &corev1.EnvVarSource{FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.name"}},
	}, {
		Name:  cons.EnvControllerDLegerGroup,
		Value: "ControllerGroup-" + controller.Name,
	}, {
		Name:  cons.EnvControllerDLegerPeers,
		Value: controllerDLegerPeersStr,
	}, {
		Name:  cons.EnvControllerStorePath,
		Value: cons.StoreMountPath,
	}}
	return append(envs, controller.Spec.Env...)
}
// getVolumeClaimTemplates returns the PVC templates for the StatefulSet.
// Only the StorageClass mode uses persistent volume claims; EmptyDir, HostPath,
// and any unknown mode get none.
func getVolumeClaimTemplates(controller *rocketmqv1alpha1.Controller) []corev1.PersistentVolumeClaim {
	if controller.Spec.StorageMode == cons.StorageModeStorageClass {
		return controller.Spec.VolumeClaimTemplates
	}
	return nil
}
// getPodSecurityContext returns a pointer to a copy of the spec's pod security
// context, or to an empty one when the spec leaves it unset. Copying keeps the
// returned object independent of the CR.
func getPodSecurityContext(controller *rocketmqv1alpha1.Controller) *corev1.PodSecurityContext {
	if sc := controller.Spec.PodSecurityContext; sc != nil {
		copied := *sc
		return &copied
	}
	return &corev1.PodSecurityContext{}
}
// getContainerSecurityContext returns a pointer to a copy of the spec's container
// security context, or to an empty one when the spec leaves it unset.
func getContainerSecurityContext(controller *rocketmqv1alpha1.Controller) *corev1.SecurityContext {
	if sc := controller.Spec.ContainerSecurityContext; sc != nil {
		copied := *sc
		return &copied
	}
	return &corev1.SecurityContext{}
}
// getVolumes returns the pod volumes backing the first VolumeClaimTemplate name.
// StorageClass mode uses PVC templates instead (see getVolumeClaimTemplates), so
// it needs no pod volume; EmptyDir mode gets an emptyDir; HostPath and any
// unknown mode fall back to a hostPath volume at Spec.HostPath.
func getVolumes(controller *rocketmqv1alpha1.Controller) []corev1.Volume {
	if controller.Spec.StorageMode == cons.StorageModeStorageClass {
		return nil
	}
	source := corev1.VolumeSource{
		HostPath: &corev1.HostPathVolumeSource{
			Path: controller.Spec.HostPath,
		},
	}
	if controller.Spec.StorageMode == cons.StorageModeEmptyDir {
		source = corev1.VolumeSource{
			EmptyDir: &corev1.EmptyDirVolumeSource{},
		}
	}
	return []corev1.Volume{{
		Name:         controller.Spec.VolumeClaimTemplates[0].Name,
		VolumeSource: source,
	}}
}
// labelsForController returns the labels for selecting the resources
// belonging to the given controller CR name.
func labelsForController(name string) map[string]string {
	labels := make(map[string]string, 2)
	labels["app"] = "controller"
	labels["controller_cr"] = name
	return labels
}
// getPodNames returns the names of the given pods. It deliberately returns nil
// for an empty input so reflect.DeepEqual comparisons against an unset
// Status.Nodes field behave as before.
func getPodNames(pods []corev1.Pod) []string {
	var names []string
	for i := range pods {
		names = append(names, pods[i].Name)
	}
	return names
}
// generateHeadlessSvc builds the headless service (ClusterIP: None) that gives
// each StatefulSet pod a stable DNS name; the DLedger peers string in getENV
// resolves through it. Not-ready pods are published so peers can find each other
// during startup.
func (r *ReconcileController) generateHeadlessSvc(cr *rocketmqv1alpha1.Controller) *corev1.Service {
	controllerSvc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: cr.Namespace,
			Name:      tool.BuildHeadlessSvcResourceName(cr.Name),
			// Legacy annotation form of PublishNotReadyAddresses, kept for
			// older endpoint controllers.
			Annotations: map[string]string{"service.alpha.kubernetes.io/tolerate-unready-endpoints": "true"},
			Labels:      cr.Labels,
			//Finalizers: []string{metav1.FinalizerOrphanDependents},
		},
		Spec: corev1.ServiceSpec{
			ClusterIP:                "None",
			PublishNotReadyAddresses: true,
			Selector:                 labelsForController(cr.Name),
			Ports: []corev1.ServicePort{
				{
					Name:       "controller",
					Port:       9878,
					Protocol:   corev1.ProtocolTCP,
					TargetPort: intstr.FromInt(9878),
				},
			},
		},
	}
	// Owner reference ties the service's lifetime to the CR.
	// NOTE(review): the returned error is ignored, matching generateSvc.
	controllerutil.SetControllerReference(cr, controllerSvc, r.scheme)
	return controllerSvc
}
// generateSvc builds the client-facing service for the controller on port 9878;
// its name becomes share.ControllerAccessPoint in Reconcile.
func (r *ReconcileController) generateSvc(cr *rocketmqv1alpha1.Controller) *corev1.Service {
	controllerSvc := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:  cr.Namespace,
			Name:       tool.BuildSvcResourceName(cr.Name),
			Labels:     labelsForController(cr.Name),
			Finalizers: []string{metav1.FinalizerOrphanDependents},
		},
		Spec: corev1.ServiceSpec{
			Selector: labelsForController(cr.Name),
			Ports: []corev1.ServicePort{
				{
					Name:       "controller",
					Port:       9878,
					Protocol:   corev1.ProtocolTCP,
					TargetPort: intstr.FromInt(9878),
				},
			},
		},
	}
	// Owner reference ties the service's lifetime to the CR.
	// NOTE(review): the returned error is ignored, matching generateHeadlessSvc.
	controllerutil.SetControllerReference(cr, controllerSvc, r.scheme)
	return controllerSvc
}
|
package command
import (
	"context"
	"errors"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"time"

	"github.com/opsgenie/opsgenie-go-sdk-v2/alert"
	gcli "github.com/urfave/cli"
)
// NewAlertClient builds an Opsgenie alert client from the CLI configuration.
// A construction failure is logged and returned as an error.
func NewAlertClient(c *gcli.Context) (*alert.Client, error) {
	client, err := alert.NewClient(getConfigurations(c))
	if err != nil {
		msg := "Can not create the alert client. " + err.Error()
		printMessage(ERROR, msg)
		return nil, errors.New(msg)
	}
	printMessage(DEBUG, "Alert Client created.")
	return client, nil
}
// CreateAlertAction creates an alert at Opsgenie from the CLI flags
// (message, responders, alias, actions, source, tags, description, entity,
// priority, note, and -D details) and prints the request id.
func CreateAlertAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.CreateAlertRequest{}
	if val, success := getVal("message", c); success {
		req.Message = val
	}
	// Responders of every supported type are collected into one list.
	responders := generateResponders(c, alert.TeamResponder, "teams")
	responders = append(responders, generateResponders(c, alert.UserResponder, "users")...)
	responders = append(responders, generateResponders(c, alert.EscalationResponder, "escalations")...)
	responders = append(responders, generateResponders(c, alert.ScheduleResponder, "schedules")...)
	req.Responders = responders
	if val, success := getVal("alias", c); success {
		req.Alias = val
	}
	if val, success := getVal("actions", c); success {
		req.Actions = strings.Split(val, ",")
	}
	if val, success := getVal("source", c); success {
		req.Source = val
	}
	if val, success := getVal("tags", c); success {
		req.Tags = strings.Split(val, ",")
	}
	if val, success := getVal("description", c); success {
		req.Description = val
	}
	if val, success := getVal("entity", c); success {
		req.Entity = val
	}
	if val, success := getVal("priority", c); success {
		req.Priority = alert.Priority(val)
	}
	req.User = grabUsername(c)
	if val, success := getVal("note", c); success {
		req.Note = val
	}
	if c.IsSet("D") {
		req.Details = extractDetailsFromCommand(c)
	}
	printMessage(DEBUG, "Create alert request prepared from flags, sending request to Opsgenie...")
	// Use an explicit context instead of nil: passing a nil Context violates
	// the context package contract.
	resp, err := cli.Create(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG, "Alert will be created.")
	printMessage(INFO, resp.RequestId)
}
// generateResponders parses the comma-separated flag named by parameter into a
// list of responders of the given type, or nil when the flag is absent. Name and
// Username are both set so the same value works for team- and user-style responders.
func generateResponders(c *gcli.Context, responderType alert.ResponderType, parameter string) []alert.Responder {
	val, success := getVal(parameter, c)
	if !success {
		return nil
	}
	names := strings.Split(val, ",")
	responders := make([]alert.Responder, 0, len(names))
	for _, name := range names {
		responders = append(responders, alert.Responder{
			Name:     name,
			Username: name,
			Type:     responderType,
		})
	}
	return responders
}
// extractDetailsFromCommand parses repeated -D key=value flags into a map.
// Values containing further '=' characters are preserved verbatim. A malformed
// or empty entry prints the command help and terminates the process.
func extractDetailsFromCommand(c *gcli.Context) map[string]string {
	details := make(map[string]string)
	for _, prop := range c.StringSlice("D") {
		if isEmpty("D", prop, c) || !strings.Contains(prop, "=") {
			printMessage(ERROR, "Dynamic parameters should have the value of the form a=b, but got: "+prop+"\n")
			gcli.ShowCommandHelp(c, c.Command.Name)
			os.Exit(1)
		}
		parts := strings.Split(prop, "=")
		details[parts[0]] = strings.Join(parts[1:], "=")
	}
	return details
}
// GetAlertAction retrieves specified alert details from Opsgenie and prints
// them as JSON (default, optionally pretty) or YAML per --output-format.
func GetAlertAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.GetAlertRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	printMessage(DEBUG, "Get alert request prepared from flags, sending request to Opsgenie...")
	// Use an explicit context instead of nil per the context package contract.
	resp, err := cli.Get(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	outputFormat := strings.ToLower(c.String("output-format"))
	printMessage(DEBUG, "Got Alert successfully, and will print as "+outputFormat)
	switch outputFormat {
	case "yaml":
		output, err := resultToYAML(resp)
		if err != nil {
			printMessage(ERROR, err.Error())
			os.Exit(1)
		}
		printMessage(INFO, output)
	default:
		isPretty := c.IsSet("pretty")
		output, err := resultToJSON(resp, isPretty)
		if err != nil {
			printMessage(ERROR, err.Error())
			os.Exit(1)
		}
		printMessage(INFO, output)
	}
}
// AttachFileAction attaches a file to an alert at Opsgenie, identified by the
// id/identifierType flags, and prints the result message.
func AttachFileAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.CreateAlertAttachmentRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	if val, success := getVal("filePath", c); success {
		req.FilePath = val
	}
	if val, success := getVal("fileName", c); success {
		req.FileName = val
	}
	if val, success := getVal("indexFile", c); success {
		req.IndexFile = val
	}
	req.User = grabUsername(c)
	printMessage(DEBUG, "Attach request prepared from flags, sending request to Opsgenie..")
	// Use an explicit context instead of nil per the context package contract.
	response, err := cli.CreateAlertAttachments(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG, "File attached to alert successfully.")
	printMessage(INFO, "Result : "+response.Result+"\n")
}
// GetAttachmentAction retrieves a download link to specified alert attachment
// and prints it.
func GetAttachmentAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.GetAttachmentRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	if val, success := getVal("attachmentId", c); success {
		req.AttachmentId = val
	}
	printMessage(DEBUG, "Get alert attachment request prepared from flags, sending request to Opsgenie..")
	// Use an explicit context instead of nil per the context package contract.
	resp, err := cli.GetAlertAttachment(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG, "Got Alert Attachment successfully, and will print download link.")
	printMessage(INFO, "Download Link: "+resp.Url)
}
// DownloadAttachmentAction downloads the attachment specified with attachmentId
// for the given alert into destinationPath (or the working directory).
// Fixes: uses filepath.Join instead of string concatenation for the target path,
// checks the HTTP status before writing, and only creates the local file once
// the download request succeeded (so failures don't leave an empty file behind).
func DownloadAttachmentAction(c *gcli.Context) {
	var destinationPath string
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.GetAttachmentRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	if val, success := getVal("attachmentId", c); success {
		req.AttachmentId = val
	}
	if val, success := getVal("destinationPath", c); success {
		destinationPath = val
	}
	printMessage(DEBUG, "Download alert attachment request prepared from flags, sending request to Opsgenie..")
	// Use an explicit context instead of nil per the context package contract.
	resp, err := cli.GetAlertAttachment(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	fileName := resp.Name
	downloadLink := resp.Url
	response, err := http.Get(downloadLink)
	if err != nil {
		printMessage(ERROR, "Error while downloading "+fileName+"-"+err.Error())
		return
	}
	defer response.Body.Close()
	if response.StatusCode != http.StatusOK {
		// Don't write an error page into the attachment file.
		printMessage(ERROR, "Error while downloading "+fileName+" - unexpected status "+response.Status)
		return
	}
	target := fileName
	if destinationPath != "" {
		target = filepath.Join(destinationPath, fileName)
	}
	output, err := os.Create(target)
	if err != nil {
		printMessage(ERROR, "Error while creating "+fileName+"-"+err.Error())
		return
	}
	defer output.Close()
	_, err = io.Copy(output, response.Body)
	if err != nil {
		printMessage(ERROR, "Error while downloading "+fileName+" - "+err.Error())
		return
	}
}
// ListAlertAttachmentsAction returns a list of attachment meta information for
// the specified alert, printed as JSON (default) or YAML per --output-format.
func ListAlertAttachmentsAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.ListAttachmentsRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	printMessage(DEBUG, "List alert attachments request prepared from flags, sending request to Opsgenie..")
	// Use an explicit context instead of nil per the context package contract.
	resp, err := cli.ListAlertsAttachments(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	outputFormat := strings.ToLower(c.String("output-format"))
	printMessage(DEBUG, "List Alert Attachment successfully, and will print as "+outputFormat)
	switch outputFormat {
	case "yaml":
		output, err := resultToYAML(resp.Attachment)
		if err != nil {
			printMessage(ERROR, err.Error())
			os.Exit(1)
		}
		printMessage(INFO, output)
	default:
		isPretty := c.IsSet("pretty")
		output, err := resultToJSON(resp.Attachment, isPretty)
		if err != nil {
			printMessage(ERROR, err.Error())
			os.Exit(1)
		}
		printMessage(INFO, output)
	}
}
// DeleteAlertAttachmentAction deletes the specified alert attachment from an
// alert and prints the request id and result.
func DeleteAlertAttachmentAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.DeleteAttachmentRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	if val, success := getVal("attachmentId", c); success {
		req.AttachmentId = val
	}
	printMessage(DEBUG, "Delete alert attachment request prepared from flags, sending request to OpsGenie..")
	// Use an explicit context instead of nil per the context package contract.
	resp, err := cli.DeleteAlertAttachment(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG, "Alert attachment will be deleted. RequestID: "+resp.RequestId)
	printMessage(INFO, "RequestID: "+resp.RequestId)
	printMessage(INFO, "Result: "+resp.Result)
}
// AcknowledgeAction acknowledges an alert at Opsgenie and prints the request id.
func AcknowledgeAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.AcknowledgeAlertRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	req.User = grabUsername(c)
	if val, success := getVal("source", c); success {
		req.Source = val
	}
	if val, success := getVal("note", c); success {
		req.Note = val
	}
	printMessage(DEBUG, "Acknowledge alert request prepared from flags, sending request to Opsgenie..")
	// Use an explicit context instead of nil per the context package contract.
	resp, err := cli.Acknowledge(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG, "Acknowledge request will be processed. RequestID "+resp.RequestId)
	printMessage(INFO, "RequestID: "+resp.RequestId)
}
// AssignOwnerAction assigns the specified user as the owner of the alert at
// Opsgenie and prints the request id.
func AssignOwnerAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.AssignRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	if val, success := getVal("owner", c); success {
		req.Owner = alert.User{Username: val}
	}
	req.User = grabUsername(c)
	if val, success := getVal("source", c); success {
		req.Source = val
	}
	if val, success := getVal("note", c); success {
		req.Note = val
	}
	printMessage(DEBUG, "Assign ownership request prepared from flags, sending request to Opsgenie..")
	// Use an explicit context instead of nil per the context package contract.
	resp, err := cli.AssignAlert(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG, "Ownership assignment request will be processed. RequestID: "+resp.RequestId)
	printMessage(INFO, "RequestID: "+resp.RequestId)
}
// AddTeamAction adds a team to an alert at Opsgenie and prints the request id.
func AddTeamAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.AddTeamRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	if val, success := getVal("team", c); success {
		req.Team = alert.Team{Name: val}
	}
	req.User = grabUsername(c)
	if val, success := getVal("source", c); success {
		req.Source = val
	}
	if val, success := getVal("note", c); success {
		req.Note = val
	}
	printMessage(DEBUG, "Add team request prepared from flags, sending request to Opsgenie..")
	// Use an explicit context instead of nil per the context package contract.
	resp, err := cli.AddTeam(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG, "Add team request will be processed. RequestID: "+resp.RequestId)
	printMessage(INFO, "RequestID: "+resp.RequestId)
}
// AddResponderAction adds a responder (of the type given by the "type" flag) to
// an alert at Opsgenie and prints the request id. The responder is only set
// when both the "type" and "responder" flags are present.
func AddResponderAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.AddResponderRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	if valType, success := getVal("type", c); success {
		if val, success := getVal("responder", c); success {
			// Name and Username are both set so the same value works for
			// team- and user-style responders.
			req.Responder = alert.Responder{
				Type:     alert.ResponderType(valType),
				Name:     val,
				Username: val,
			}
		}
	}
	req.User = grabUsername(c)
	if val, success := getVal("source", c); success {
		req.Source = val
	}
	if val, success := getVal("note", c); success {
		req.Note = val
	}
	printMessage(DEBUG, "Add responder request prepared from flags, sending request to Opsgenie..")
	// Use an explicit context instead of nil per the context package contract.
	resp, err := cli.AddResponder(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG, "Add responder request will be processed. RequestID: "+resp.RequestId)
	printMessage(INFO, "RequestID: "+resp.RequestId)
}
// AddTagsAction adds comma-separated tags to an alert at Opsgenie and prints
// the request id.
func AddTagsAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.AddTagsRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	if val, success := getVal("tags", c); success {
		req.Tags = strings.Split(val, ",")
	}
	req.User = grabUsername(c)
	if val, success := getVal("source", c); success {
		req.Source = val
	}
	if val, success := getVal("note", c); success {
		req.Note = val
	}
	printMessage(DEBUG, "Add tag request prepared from flags, sending request to Opsgenie..")
	// Use an explicit context instead of nil per the context package contract.
	resp, err := cli.AddTags(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG, "Add tags request will be processed. RequestID: "+resp.RequestId)
	printMessage(INFO, "RequestID: "+resp.RequestId)
}
// AddNoteAction adds a note to an alert at Opsgenie and prints the request id.
func AddNoteAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.AddNoteRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	req.User = grabUsername(c)
	if val, success := getVal("source", c); success {
		req.Source = val
	}
	if val, success := getVal("note", c); success {
		req.Note = val
	}
	printMessage(DEBUG, "Add note request prepared from flags, sending request to Opsgenie..")
	// Use an explicit context instead of nil per the context package contract.
	resp, err := cli.AddNote(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG, "Add note request will be processed. RequestID: "+resp.RequestId)
	printMessage(INFO, "RequestID: "+resp.RequestId)
}
// ExecuteActionAction executes a custom action on an alert at Opsgenie and
// prints the request id.
func ExecuteActionAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.ExecuteCustomActionAlertRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	if action, success := getVal("action", c); success {
		req.Action = action
	}
	req.User = grabUsername(c)
	if val, success := getVal("source", c); success {
		req.Source = val
	}
	if val, success := getVal("note", c); success {
		req.Note = val
	}
	printMessage(DEBUG, "Execute action request prepared from flags, sending request to Opsgenie..")
	// Use an explicit context instead of nil per the context package contract.
	resp, err := cli.ExecuteCustomAction(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG, "Execute custom action request will be processed. RequestID: "+resp.RequestId)
	printMessage(INFO, "RequestID: "+resp.RequestId)
}
// CloseAlertAction closes an alert at Opsgenie and prints the request id.
func CloseAlertAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.CloseAlertRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	req.User = grabUsername(c)
	if val, success := getVal("source", c); success {
		req.Source = val
	}
	if val, success := getVal("note", c); success {
		req.Note = val
	}
	printMessage(DEBUG, "Close alert request prepared from flags, sending request to Opsgenie..")
	// Use an explicit context instead of nil per the context package contract.
	resp, err := cli.Close(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG, "Alert will be closed. RequestID: "+resp.RequestId)
	printMessage(INFO, "RequestID: "+resp.RequestId)
}
// DeleteAlertAction deletes an alert at Opsgenie and prints the request id.
func DeleteAlertAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.DeleteAlertRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	if val, success := getVal("source", c); success {
		req.Source = val
	}
	printMessage(DEBUG, "Delete alert request prepared from flags, sending request to Opsgenie..")
	// Use an explicit context instead of nil per the context package contract.
	resp, err := cli.Delete(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG, "Alert will be deleted. RequestID: "+resp.RequestId)
	printMessage(INFO, "RequestID: "+resp.RequestId)
}
// ListAlertsAction retrieves alert details from Opsgenie using the filter flags
// (see generateListAlertRequest) and prints them as JSON (default) or YAML.
func ListAlertsAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := generateListAlertRequest(c)
	printMessage(DEBUG, "List alerts request prepared from flags, sending request to Opsgenie..")
	// Use an explicit context instead of nil per the context package contract.
	resp, err := cli.List(context.TODO(), &req)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	outputFormat := strings.ToLower(c.String("output-format"))
	printMessage(DEBUG, "Got Alerts successfully, and will print as "+outputFormat)
	switch outputFormat {
	case "yaml":
		output, err := resultToYAML(resp.Alerts)
		if err != nil {
			printMessage(ERROR, err.Error())
			os.Exit(1)
		}
		printMessage(INFO, output)
	default:
		isPretty := c.IsSet("pretty")
		output, err := resultToJSON(resp.Alerts, isPretty)
		if err != nil {
			printMessage(ERROR, err.Error())
			os.Exit(1)
		}
		printMessage(INFO, output)
	}
}
// generateListAlertRequest builds an alert.ListAlertRequest from the CLI flags
// (limit, sort, order, searchIdentifier[Type], offset, query). When no explicit
// query is given, legacy filter flags are folded into one (see
// generateQueryUsingOldStyleParams).
// Fix: unparsable numeric flags now print an error before exiting with code 2
// instead of terminating silently.
func generateListAlertRequest(c *gcli.Context) alert.ListAlertRequest {
	req := alert.ListAlertRequest{}
	if val, success := getVal("limit", c); success {
		limit, err := strconv.ParseUint(val, 10, 64)
		if err != nil {
			printMessage(ERROR, "limit must be a non-negative integer, got: "+val)
			os.Exit(2)
		}
		req.Limit = int(limit)
	}
	if val, success := getVal("sort", c); success {
		req.Sort = alert.SortField(val)
	}
	if val, success := getVal("order", c); success {
		req.Order = alert.Order(val)
	}
	if val, success := getVal("searchIdentifier", c); success {
		req.SearchIdentifier = val
	}
	if val, success := getVal("searchIdentifierType", c); success {
		// Anything other than "name" falls back to id.
		if alert.SearchIdentifierType(val) == alert.NAME {
			req.SearchIdentifierType = alert.NAME
		} else {
			req.SearchIdentifierType = alert.ID
		}
	}
	if val, success := getVal("offset", c); success {
		offset, err := strconv.Atoi(val)
		if err != nil {
			printMessage(ERROR, "offset must be an integer, got: "+val)
			os.Exit(2)
		}
		req.Offset = offset
	}
	if val, success := getVal("query", c); success {
		req.Query = val
	} else {
		generateQueryUsingOldStyleParams(c, &req)
	}
	return req
}
// generateQueryUsingOldStyleParams folds the legacy filter flags (createdAfter/
// createdBefore, updatedAfter/updatedBefore, status, teams, tags/tagsOperator)
// into a single Opsgenie query string on req, AND-ing all parts together.
// Fix: unparsable timestamp flags now print an error before exiting with code 2
// instead of terminating silently.
func generateQueryUsingOldStyleParams(c *gcli.Context, req *alert.ListAlertRequest) {
	var queries []string
	if val, success := getVal("createdAfter", c); success {
		createdAfter, err := strconv.ParseUint(val, 10, 64)
		if err != nil {
			printMessage(ERROR, "createdAfter must be a non-negative integer, got: "+val)
			os.Exit(2)
		}
		queries = append(queries, "createdAt > "+strconv.FormatUint(createdAfter, 10))
	}
	if val, success := getVal("createdBefore", c); success {
		createdBefore, err := strconv.ParseUint(val, 10, 64)
		if err != nil {
			printMessage(ERROR, "createdBefore must be a non-negative integer, got: "+val)
			os.Exit(2)
		}
		queries = append(queries, "createdAt < "+strconv.FormatUint(createdBefore, 10))
	}
	if val, success := getVal("updatedAfter", c); success {
		updatedAfter, err := strconv.ParseUint(val, 10, 64)
		if err != nil {
			printMessage(ERROR, "updatedAfter must be a non-negative integer, got: "+val)
			os.Exit(2)
		}
		queries = append(queries, "updatedAt > "+strconv.FormatUint(updatedAfter, 10))
	}
	if val, success := getVal("updatedBefore", c); success {
		updatedBefore, err := strconv.ParseUint(val, 10, 64)
		if err != nil {
			printMessage(ERROR, "updatedBefore must be a non-negative integer, got: "+val)
			os.Exit(2)
		}
		queries = append(queries, "updatedAt < "+strconv.FormatUint(updatedBefore, 10))
	}
	if val, success := getVal("status", c); success {
		queries = append(queries, "status: "+val)
	}
	if val, success := getVal("teams", c); success {
		for _, teamName := range strings.Split(val, ",") {
			queries = append(queries, "teams: "+teamName)
		}
	}
	if val, success := getVal("tags", c); success {
		operator := "AND"
		if op, ok := getVal("tagsOperator", c); ok {
			operator = op
		}
		// Tags are grouped so the chosen operator only applies within the group.
		tags := strings.Split(val, ",")
		queries = append(queries, "tag: ("+strings.Join(tags, " "+operator+" ")+")")
	}
	if len(queries) != 0 {
		req.Query = strings.Join(queries, " AND ")
	}
}
// CountAlertsAction retrieves the number of alerts from Opsgenie that
// match the list-request flags and prints the count.
func CountAlertsAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := generateListAlertRequest(c)
	printMessage(DEBUG,"Count alerts request prepared from flags, sending request to Opsgenie..")
	resp, err := cli.List(nil, &req)
	if err != nil {
		printMessage(ERROR,err.Error())
		os.Exit(1)
	}
	// Bug fix: string(len(...)) interprets the int as a rune and would
	// print a Unicode character instead of the decimal count.
	printMessage(INFO, strconv.Itoa(len(resp.Alerts)))
}
// ListAlertNotesAction retrieves specified alert notes from Opsgenie.
func ListAlertNotesAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.ListAlertNotesRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	if val, success := getVal("limit", c); success {
		limit, err := strconv.ParseUint(val, 10, 64)
		if err != nil {
			os.Exit(2)
		}
		req.Limit = uint32(limit)
	}
	if val, success := getVal("order", c); success {
		req.Order = alert.Order(val)
	}
	if val, success := getVal("direction", c); success {
		req.Direction = alert.RequestDirection(val)
	}
	if val, success := getVal("offset", c); success {
		req.Offset = val
	}
	printMessage(DEBUG,"List alert notes request prepared from flags, sending request to Opsgenie..")
	resp, err := cli.ListAlertNotes(nil, &req)
	if err != nil {
		printMessage(ERROR,err.Error())
		os.Exit(1)
	}
	outputFormat := strings.ToLower(c.String("output-format"))
	printMessage(DEBUG,"Alert notes listed successfully, and will print as " + outputFormat)
	switch outputFormat {
	case "yaml":
		// NOTE(review): result is read from resp.AlertLog — confirm this is
		// the notes payload field in the SDK's ListAlertNotesResult.
		output, err := resultToYAML(resp.AlertLog)
		if err != nil {
			printMessage(ERROR,err.Error())
			os.Exit(1)
		}
		printMessage(INFO, output)
	default:
		isPretty := c.IsSet("pretty")
		output, err := resultToJSON(resp.AlertLog, isPretty)
		if err != nil {
			printMessage(ERROR,err.Error())
			os.Exit(1)
		}
		printMessage(INFO, output)
	}
}
// ListAlertLogsAction retrieves specified alert logs from Opsgenie and
// prints them as JSON (default) or YAML per the output-format flag.
func ListAlertLogsAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.ListAlertLogsRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	if val, success := getVal("limit", c); success {
		limit, err := strconv.ParseUint(val, 10, 64)
		if err != nil {
			os.Exit(2)
		}
		req.Limit = uint32(limit)
	}
	if val, success := getVal("order", c); success {
		req.Order = alert.Order(val)
	}
	if val, success := getVal("direction", c); success {
		req.Direction = alert.RequestDirection(val)
	}
	if val, success := getVal("offset", c); success {
		req.Offset = val
	}
	// Bug fix: these log messages previously said "alert notes"
	// (copy-paste from ListAlertNotesAction).
	printMessage(DEBUG,"List alert logs request prepared from flags, sending request to Opsgenie..")
	resp, err := cli.ListAlertLogs(nil, &req)
	if err != nil {
		printMessage(ERROR,err.Error())
		os.Exit(1)
	}
	outputFormat := strings.ToLower(c.String("output-format"))
	printMessage(DEBUG,"Alert logs listed successfully, and will print as " + outputFormat)
	switch outputFormat {
	case "yaml":
		output, err := resultToYAML(resp.AlertLog)
		if err != nil {
			printMessage(ERROR,err.Error())
			os.Exit(1)
		}
		printMessage(INFO, output)
	default:
		isPretty := c.IsSet("pretty")
		output, err := resultToJSON(resp.AlertLog, isPretty)
		if err != nil {
			printMessage(ERROR,err.Error())
			os.Exit(1)
		}
		printMessage(INFO, output)
	}
}
// ListAlertRecipientsAction retrieves specified alert recipients from Opsgenie.
func ListAlertRecipientsAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.ListAlertRecipientRequest{}
	// Alert may be addressed by id, tiny id or alias; see grabIdentifierType.
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	printMessage(DEBUG,"List alert recipients request prepared from flags, sending request to Opsgenie..")
	resp, err := cli.ListAlertRecipients(nil, &req)
	if err != nil {
		printMessage(ERROR,err.Error())
		os.Exit(1)
	}
	outputFormat := strings.ToLower(c.String("output-format"))
	printMessage(DEBUG,"Alert recipients listed successfully, and will print as " + outputFormat)
	switch outputFormat {
	case "yaml":
		output, err := resultToYAML(resp.AlertRecipients)
		if err != nil {
			printMessage(ERROR,err.Error())
			os.Exit(1)
		}
		printMessage(INFO, output)
	default:
		isPretty := c.IsSet("pretty")
		output, err := resultToJSON(resp.AlertRecipients, isPretty)
		if err != nil {
			printMessage(ERROR,err.Error())
			os.Exit(1)
		}
		printMessage(INFO, output)
	}
}
// UnAcknowledgeAction unAcknowledges an alert at Opsgenie.
// The call is asynchronous on the server side; only a request ID is printed.
func UnAcknowledgeAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.UnacknowledgeAlertRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	req.User = grabUsername(c)
	if val, success := getVal("source", c); success {
		req.Source = val
	}
	if val, success := getVal("note", c); success {
		req.Note = val
	}
	printMessage(DEBUG,"UnAcknowledge alert request prepared from flags, sending request to Opsgenie..")
	resp, err := cli.Unacknowledge(nil, &req)
	if err != nil {
		printMessage(ERROR,err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG,"Alert will be unAcknowledged. RequestID: " + resp.RequestId)
	printMessage(INFO, "RequestID: " + resp.RequestId)
}
// SnoozeAction snoozes an alert at Opsgenie until the RFC3339 "endDate"
// flag value; the call is asynchronous and only a request ID is printed.
func SnoozeAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.SnoozeAlertRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	req.User = grabUsername(c)
	if val, success := getVal("source", c); success {
		req.Source = val
	}
	if val, success := getVal("note", c); success {
		req.Note = val
	}
	if val, success := getVal("endDate", c); success {
		// Snooze end must be RFC3339, e.g. 2024-01-02T15:04:05Z.
		endTime, err := time.Parse(time.RFC3339, val)
		if err != nil {
			printMessage(ERROR,err.Error())
			os.Exit(1)
		}
		req.EndTime = endTime
	}
	printMessage(DEBUG,"Snooze request prepared from flags, sending request to Opsgenie..")
	resp, err := cli.Snooze(nil, &req)
	if err != nil {
		printMessage(ERROR,err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG,"will be snoozed. RequestID: " + resp.RequestId)
	printMessage(INFO,"RequestID: " + resp.RequestId)
}
// RemoveTagsAction removes tags from an alert at Opsgenie.
// The "tags" flag is passed through verbatim (comma-separated list).
func RemoveTagsAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.RemoveTagsRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	if val, success := getVal("tags", c); success {
		req.Tags = val
	}
	req.User = grabUsername(c)
	if val, success := getVal("source", c); success {
		req.Source = val
	}
	if val, success := getVal("note", c); success {
		req.Note = val
	}
	printMessage(DEBUG,"Remove tags request prepared from flags, sending request to Opsgenie..")
	resp, err := cli.RemoveTags(nil, &req)
	if err != nil {
		printMessage(ERROR,err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG,"Tags will be removed. RequestID: " + resp.RequestId)
	printMessage(INFO, "RequestID: " + resp.RequestId)
}
// AddDetailsAction adds key/value details to an alert at Opsgenie.
// Details are collected from repeated -D flags via extractDetailsFromCommand.
func AddDetailsAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.AddDetailsRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	req.User = grabUsername(c)
	if val, success := getVal("source", c); success {
		req.Source = val
	}
	if val, success := getVal("note", c); success {
		req.Note = val
	}
	if c.IsSet("D") {
		req.Details = extractDetailsFromCommand(c)
	}
	printMessage(DEBUG,"Add details request prepared from flags, sending request to Opsgenie..")
	resp, err := cli.AddDetails(nil, &req)
	if err != nil {
		printMessage(ERROR,err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG,"Details will be added. RequestID: " + resp.RequestId)
	printMessage(INFO,"RequestID: " + resp.RequestId)
}
// RemoveDetailsAction removes details (by key) from an alert at Opsgenie.
func RemoveDetailsAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.RemoveDetailsRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	// "keys" is passed through verbatim (comma-separated key list).
	if val, success := getVal("keys", c); success {
		req.Keys = val
	}
	req.User = grabUsername(c)
	if val, success := getVal("source", c); success {
		req.Source = val
	}
	if val, success := getVal("note", c); success {
		req.Note = val
	}
	printMessage(DEBUG,"Remove details request prepared from flags, sending request to Opsgenie..")
	resp, err := cli.RemoveDetails(nil, &req)
	if err != nil {
		printMessage(ERROR,err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG,"Details will be removed. RequestID: " + resp.RequestId)
	printMessage(INFO,"RequestID: " + resp.RequestId)
}
// EscalateToNextAction processes the next available rule in the specified
// escalation. The escalation may be addressed by id or by name.
func EscalateToNextAction(c *gcli.Context) {
	cli, err := NewAlertClient(c)
	if err != nil {
		os.Exit(1)
	}
	req := alert.EscalateToNextRequest{}
	if val, success := getVal("id", c); success {
		req.IdentifierValue = val
	}
	req.IdentifierType = grabIdentifierType(c)
	if val, success := getVal("escalationId", c); success {
		req.Escalation.ID = val
	}
	if val, success := getVal("escalationName", c); success {
		req.Escalation.Name = val
	}
	req.User = grabUsername(c)
	if val, success := getVal("source", c); success {
		req.Source = val
	}
	if val, success := getVal("note", c); success {
		req.Note = val
	}
	printMessage(DEBUG,"Escalate to next request prepared from flags, sending request to Opsgenie..")
	resp, err := cli.EscalateToNext(nil, &req)
	if err != nil {
		printMessage(ERROR,err.Error())
		os.Exit(1)
	}
	printMessage(DEBUG,"Escalated to next request will be processed. RequestID: " + resp.RequestId)
	printMessage(INFO,"RequestID: " + resp.RequestId)
}
// grabIdentifierType maps the "identifier" flag to the SDK's alert
// identifier constant, defaulting to the alert ID.
func grabIdentifierType(c *gcli.Context) alert.AlertIdentifier {
	val, ok := getVal("identifier", c)
	if !ok {
		return alert.ALERTID
	}
	switch val {
	case "tiny":
		return alert.TINYID
	case "alias":
		return alert.ALIAS
	default:
		return alert.ALERTID
	}
}
|
package ohdear
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"time"
)
type (
	// Sleeper abstracts time.Sleep so rate-limit waits can be faked in tests.
	Sleeper interface {
		Sleep(time.Duration)
	}
	// StdLibSleeper is the production Sleeper backed by time.Sleep.
	StdLibSleeper struct{}
)
// Sleep blocks the current goroutine for the given duration via time.Sleep.
func (s StdLibSleeper) Sleep(seconds time.Duration) {
	time.Sleep(seconds)
}
// Client is the Oh Dear API client. It carries the base URL, bearer
// token and sub-service accessors, plus rate-limit bookkeeping.
type Client struct {
	BaseURL       *url.URL
	UserAgent     string
	APIToken      string
	httpClient    *http.Client
	RateLimitOver time.Time // When rate-limiting ends
	SiteService  *SiteService
	CheckService *CheckService
	TeamService  *TeamService
	// Embedded Sleeper lets tests replace the real wait during backoff.
	Sleeper
}
// NewClient builds a Client for the given base URL and API token.
// A nil httpClient falls back to http.DefaultClient.
func NewClient(baseURL string, apiToken string, httpClient *http.Client) (*Client, error) {
	if httpClient == nil {
		httpClient = http.DefaultClient
	}
	u, err := url.Parse(baseURL)
	if err != nil {
		return nil, fmt.Errorf("Invalid base URL provided to SDK, error: %v", err)
	}
	c := &Client{
		APIToken:   apiToken,
		BaseURL:    u,
		httpClient: httpClient,
	}
	// Wire up sub-services, all sharing this client.
	c.SiteService = &SiteService{client: c}
	c.CheckService = &CheckService{client: c}
	c.TeamService = &TeamService{client: c}
	c.Sleeper = StdLibSleeper{}
	return c, nil
}
// NewRequest builds an API request relative to the client's base URL.
// A non-nil body is JSON-encoded; auth and content headers are set.
func (c *Client) NewRequest(method, path string, body interface{}) (*http.Request, error) {
	rel := &url.URL{Path: path}
	u := c.BaseURL.ResolveReference(rel)
	var buf io.ReadWriter
	if body != nil {
		buf = new(bytes.Buffer)
		err := json.NewEncoder(buf).Encode(body)
		if err != nil {
			return nil, err
		}
	}
	req, err := http.NewRequest(method, u.String(), buf)
	if err != nil {
		return nil, err
	}
	// Only advertise a JSON body when one is actually attached (the old
	// code also set Content-Type unconditionally a second time below).
	if body != nil {
		req.Header.Set("Content-Type", "application/json")
	}
	req.Header.Set("Accept", "application/json")
	req.Header.Set("Authorization", "Bearer "+c.APIToken)
	// Bug fix: the canonical header name is "User-Agent"; the previous
	// "UserAgent" key is not recognized by servers.
	req.Header.Set("User-Agent", c.UserAgent)
	return req, nil
}
// timeLeftToWait reports how long until the current rate-limit window ends.
func (c *Client) timeLeftToWait() time.Duration {
	return time.Until(c.RateLimitOver)
}
// do executes req, transparently waiting out 429 rate limits and
// retrying. On success the body is decoded into v (if non-nil); on a
// >=300 status an APIError is decoded and returned.
func (c *Client) do(req *http.Request, v interface{}) (*http.Response, error) {
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode == 429 {
		secLeft, err := strconv.Atoi(resp.Header.Get("X-RateLimit-Reset"))
		if err != nil {
			resp.Body.Close()
			return resp, fmt.Errorf("Error while parsing backoff header: %v", err)
		}
		// Drain and close so the transport can reuse the connection
		// before we retry (the old code never closed bodies).
		io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
		durSeconds := time.Duration(secLeft) * time.Second
		c.RateLimitOver = time.Now().Add(durSeconds)
		timeLeft := c.timeLeftToWait()
		fmt.Printf("[WARN] Rate limiting in effect, retrying in %s sec...", timeLeft)
		c.Sleeper.Sleep(timeLeft)
		// NOTE(review): a request with a body cannot be safely replayed
		// once its Body reader is consumed — confirm callers only retry
		// idempotent requests here.
		return c.do(req, v)
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		// Bug fix: decoding into a nil *APIError always fails with
		// json.InvalidUnmarshalError; allocate the value first.
		apiErr := &APIError{}
		if err := json.NewDecoder(resp.Body).Decode(apiErr); err != nil {
			return resp, fmt.Errorf("API Error: %s", resp.Status)
		}
		return resp, apiErr
	}
	if v != nil {
		err = json.NewDecoder(resp.Body).Decode(v)
	}
	return resp, err
}
|
package main
// Remove main_wasm.go to update it in case of vugu upgrade.
//go:generate rm -f main_wasm.go
//go:generate gobin -m -run github.com/vugu/vugu/cmd/vugugen -skip-go-mod
|
package main
import "fmt"
// main demonstrates findMaxAverage on a small fixed example
// (expected output: 2, the best window being [1 1 3 3]).
func main() {
	fmt.Println(findMaxAverage([]int{
		0, 1, 1, 3, 3,
	}, 4))
}
// findMaxAverage returns the maximum average of any contiguous
// subarray of length k, computed with a sliding-window sum so the
// whole scan is O(n). Assumes len(nums) >= k and k > 0.
func findMaxAverage(nums []int, k int) float64 {
	windowSum := 0
	for _, v := range nums[:k] {
		windowSum += v
	}
	best := windowSum
	for right := k; right < len(nums); right++ {
		// Slide the window one step: admit nums[right], evict nums[right-k].
		windowSum += nums[right] - nums[right-k]
		if windowSum > best {
			best = windowSum
		}
	}
	return float64(best) / float64(k)
}
|
package models
import (
"github.com/astaxie/beego/logs"
"github.com/astaxie/beego/orm"
)
// MvList is the ORM model for one movie row in the mv_list table.
type MvList struct {
	Id                int
	Url               string
	ImgSrc            string
	Description       string
	DescriptionPoster string
	OriginSrc         string
	Star              string
	Title             string
	PageViews         int
	Director          string
	Label             string
	CategoryId        int
	ImgCover          string
}
// TableName tells beego ORM which table backs MvList.
func (mvList *MvList) TableName() string {
	return "mv_list"
}
// GetMvListById loads a single movie by primary key. On a miss the
// error is logged and returned together with the partially-filled model.
func GetMvListById(id int) (*MvList, error) {
	o := orm.NewOrm()
	mvList := &MvList{Id: id}
	err := o.QueryTable("mv_list").Filter("id", id).One(mvList)
	if err != nil {
		logs.Error("无此电影 id", id)
	}
	return mvList, err
}
// GetMvListByCategory returns up to size movies of a category plus a
// page count.
// NOTE(review): num here is the number of rows returned (capped at
// size), not the total row count, so int(num)/size+1 looks like an
// unreliable page-count formula — confirm intent.
func GetMvListByCategory(category int, size int) (*[]*MvList, int, error) {
	o := orm.NewOrm()
	mvList := &[]*MvList{}
	num, err := o.QueryTable("mv_list").Filter("category_id", category).Limit(size).All(mvList)
	if err != nil {
		logs.Error("无此电影 id", category)
	}
	return mvList, (int(num)/size + 1), err
}
// GetMvListByCategoryWithPage returns one page (1-based) of movies in a
// category; offset is derived via getPosWithPage.
// NOTE(review): same questionable page-count formula as GetMvListByCategory.
func GetMvListByCategoryWithPage(category int, size int, page int) (*[]*MvList, int, error) {
	o := orm.NewOrm()
	mvList := &[]*MvList{}
	num, err := o.QueryTable("mv_list").Filter("category_id", category).Limit(size, getPosWithPage(page, size)).All(mvList)
	if err != nil {
		logs.Error("无此电影 id", category)
	}
	return mvList, (int(num)/size + 1), err
}
// GetMvListByTitleWithPage returns one page of movies whose title
// contains the search string (case-insensitive).
func GetMvListByTitleWithPage(title string, size int, page int) (*[]*MvList, int, error) {
	o := orm.NewOrm()
	mvList := &[]*MvList{}
	num, err := o.QueryTable("mv_list").Filter("title__icontains", title).Limit(size, getPosWithPage(page, size)).All(mvList)
	if err != nil {
		logs.Error("无此电影 id", title)
	}
	return mvList, (int(num)/size + 1), err
}
// getPosWithPage converts a 1-based page number and page size into a
// zero-based row offset for LIMIT/OFFSET queries.
func getPosWithPage(page int, size int) int {
	zeroBasedPage := page - 1
	return zeroBasedPage * size
}
// GetMvList returns movies, optionally windowed by the conf map.
// NOTE(review): presence is tested on conf["limit"] but the values read
// are conf["num"] and conf["offset"] — confirm the expected keys; a
// caller passing only "limit" would query with nil num/offset.
func GetMvList(conf map[string]interface{}) (*[]*MvList, error) {
	o := orm.NewOrm()
	mvList := &[]*MvList{}
	if _, ok := conf["limit"]; ok {
		_, err := o.QueryTable("mv_list").Limit(conf["num"], conf["offset"]).All(mvList)
		if err != nil {
			logs.Error("电影列表获取出错")
		}
		return mvList, err
	} else {
		_, err := o.QueryTable("mv_list").All(mvList)
		if err != nil {
			logs.Error("电影列表获取出错")
		}
		return mvList, err
	}
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/confluentinc/confluent-kafka-go/kafka"
"github.com/rcrowley/go-metrics"
log "github.com/sirupsen/logrus"
"os"
"os/signal"
"strings"
"sync"
"syscall"
)
// Producer implements a High-level Apache Kafka Producer instance ZE 2018
// This allows Mocking producers w/o actual contact to kafka broker for testing purposes
// WrappedProducer mirrors the subset of *kafka.Producer methods this
// output uses, so a mock can stand in during dry runs and tests.
type WrappedProducer interface {
	String() string
	Produce(msg *kafka.Message, deliveryChan chan kafka.Event) error
	Events() chan kafka.Event
	ProduceChannel() chan *kafka.Message
	Len() int
	Flush(timeoutMs int) int
	Close()
	GetMetadata(topic *string, allTopics bool, timeoutMs int) (*kafka.Metadata, error)
	QueryWatermarkOffsets(topic string, partition int32, timeoutMs int) (low, high int64, err error)
	OffsetsForTimes(times []kafka.TopicPartition, timeoutMs int) (offsets []kafka.TopicPartition, err error)
}
// KafkaOutput fans messages out to one producer per configured broker.
// The embedded RWMutex guards the fields during Initialize/Statistics.
type KafkaOutput struct {
	brokers           []string
	topicSuffix       string
	topic             string
	producers         []WrappedProducer
	droppedEventCount int64
	eventSentCount    int64
	EventSent      metrics.Meter
	EventSentBytes metrics.Meter
	DroppedEvent   metrics.Meter
	sync.RWMutex
}
// KafkaStatistics is the JSON snapshot returned by Statistics().
type KafkaStatistics struct {
	DroppedEventCount int64 `json:"dropped_event_count"`
	EventSentCount    int64 `json:"event_sent_count"`
}
// Initialize reads the package-level config, registers metrics, builds a
// librdkafka config map per the selected security protocol, and creates
// one producer per broker (mocked when DryRun is set). The argument is
// unused. Panics if a producer cannot be created.
func (o *KafkaOutput) Initialize(unused string) error {
	o.Lock()
	defer o.Unlock()
	o.brokers = strings.Split(*(config.KafkaBrokers), ",")
	o.topicSuffix = config.KafkaTopicSuffix
	o.topic = config.KafkaTopic
	o.producers = make([]WrappedProducer, len(o.brokers))
	o.EventSent = metrics.NewRegisteredMeter("output.kafka.events_sent", metrics.DefaultRegistry)
	o.DroppedEvent = metrics.NewRegisteredMeter("output.kafka.events_dropped", metrics.DefaultRegistry)
	o.EventSentBytes = metrics.NewRegisteredMeter("output.kafka.data_sent", metrics.DefaultRegistry)
	var kafkaConfig kafka.ConfigMap = nil
	//PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL
	// Explicit producer properties, when present, win over everything below.
	if config.KafkaProducerProps == nil {
		switch config.KafkaProtocol {
		case "PLAINTEXT":
			kafkaConfig = kafka.ConfigMap{"bootstrap.servers": *config.KafkaBrokers}
		case "SASL":
			kafkaConfig = kafka.ConfigMap{"bootstrap.servers": *config.KafkaBrokers,
				"security.protocol": config.KafkaProtocol,
				"sasl.mechanism":    config.KafkaMechanism,
				"sasl.username":     config.KafkaUsername,
				"sasl.password":     config.KafkaPassword}
		case "SSL":
			kafkaConfig = kafka.ConfigMap{"bootstrap.servers": *config.KafkaBrokers,
				"security.protocol": config.KafkaProtocol}
			// NOTE(review): librdkafka property names are dot-separated;
			// the underscore keys "ssl_certificate_location",
			// "ssl_key_location" and the mixed-case "ssl_Key_password"
			// below look wrong next to "ssl.enabled.protocols" — confirm.
			if config.KafkaSSLCertificateLocation != nil {
				kafkaConfig["ssl_certificate_location"] = config.KafkaSSLCertificateLocation
			}
			if config.KafkaSSLKeyLocation != nil && config.KafkaSSLKeyPassword != nil {
				kafkaConfig["ssl_key_location"] = config.KafkaSSLKeyLocation
				kafkaConfig["ssl_Key_password"] = config.KafkaSSLKeyPassword
			}
			if config.KafkaSSLEnabledProtocols != nil {
				kafkaConfig["ssl.enabled.protocols"] = config.KafkaSSLEnabledProtocols
			}
			if config.KafkaSSLCALocation != nil {
				kafkaConfig["ssl.ca.location"] = config.KafkaSSLCALocation
			}
		default:
			kafkaConfig = kafka.ConfigMap{"bootstrap.servers": *config.KafkaBrokers}
		}
		if config.KafkaCompressionType != nil {
			kafkaConfig["compression.type"] = *config.KafkaCompressionType
		}
	} else {
		kafkaConfig = config.KafkaProducerProps
	}
	for index, _ := range o.brokers {
		if config.DryRun {
			p, err := NewMockedKafkaProducer(".")
			if err != nil {
				panic(err)
			}
			o.producers[index] = p
		} else {
			p, err := kafka.NewProducer(&kafkaConfig)
			if err != nil {
				panic(err)
			}
			o.producers[index] = p
		}
	}
	return nil
}
// Go starts one worker goroutine per producer, each draining messages
// and producing them to Kafka, plus one goroutine that forwards
// delivery errors to errorChan and handles shutdown signals. Returns
// immediately; workers run until SIGTERM/SIGINT.
func (o *KafkaOutput) Go(messages <-chan string, errorChan chan<- error) error {
	joinEventsChan := make(chan (kafka.Event), 100000)
	sigs := make(chan os.Signal, 1)
	stopProdChans := make([]chan struct{}, len(o.producers))
	signal.Notify(sigs, syscall.SIGHUP)
	signal.Notify(sigs, syscall.SIGTERM)
	signal.Notify(sigs, syscall.SIGINT)
	defer signal.Stop(sigs)
	for workernum, producer := range o.producers {
		stopProdChans[workernum] = make(chan struct{}, 1)
		go func(workernum int32, producer WrappedProducer, stopProdChan <-chan struct{}) {
			defer producer.Close()
			partition := kafka.PartitionAny
			shouldStop := false
			// NOTE(review): this is true whenever at least one producer
			// exists, so PartitionAny is effectively never used —
			// possibly meant "> 1"; confirm.
			if (len(o.producers)) > 0 {
				partition = workernum
			}
			for {
				select {
				case message := <-messages:
					var topic string = config.KafkaTopic
					if topic == "" {
						// Derive the topic from the message's "type" field.
						// NOTE(review): the Unmarshal error is discarded.
						var parsedMsg map[string]interface{}
						json.Unmarshal([]byte(message), &parsedMsg)
						topicRaw := parsedMsg["type"]
						if topicString, ok := topicRaw.(string); ok {
							topicString = strings.Replace(topicString, "ingress.event.", "", -1)
							topicString += o.topicSuffix
							topic = topicString
						} else {
							log.Info("ERROR: Topic was not a string")
						}
					}
					partition := kafka.TopicPartition{Topic: &topic, Partition: partition}
					output(message, o.producers[workernum], partition)
					o.EventSent.Mark(1)
					o.EventSentBytes.Mark(int64(len(message)))
				case <-stopProdChan:
					shouldStop = true
				case e := <-producer.Events():
					joinEventsChan <- e
				// NOTE(review): the default branch makes this loop spin
				// (busy-wait) whenever all channels are quiet.
				default:
					if shouldStop {
						return
					}
				}
			}
		}(int32(workernum), producer, stopProdChans[workernum])
	}
	go func() {
		for {
			select {
			case e := <-joinEventsChan:
				m := e.(*kafka.Message)
				if m.TopicPartition.Error != nil {
					o.DroppedEvent.Mark(1)
					errorChan <- m.TopicPartition.Error
				}
			case sig := <-sigs:
				switch sig {
				case syscall.SIGTERM, syscall.SIGINT:
					for _, stopChan := range stopProdChans {
						stopChan <- struct{}{}
					}
					return
				default:
					log.Debugf("Signal was %s", sig)
				}
			}
		}
	}()
	return nil
}
// Statistics returns a snapshot of the drop/sent counters.
// NOTE(review): these int64 fields are never incremented anywhere in
// this file (the meters are used instead), so this always reports zeros.
func (o *KafkaOutput) Statistics() interface{} {
	o.RLock()
	defer o.RUnlock()
	return KafkaStatistics{DroppedEventCount: o.droppedEventCount, EventSentCount: o.eventSentCount}
}
// String describes the output by its broker list (for logging).
func (o *KafkaOutput) String() string {
	o.RLock()
	defer o.RUnlock()
	return fmt.Sprintf("Brokers %s", o.brokers)
}
// Key returns a stable identifier for this output based on its brokers.
func (o *KafkaOutput) Key() string {
	o.RLock()
	defer o.RUnlock()
	return fmt.Sprintf("brokers:%s", o.brokers)
}
// output produces one message to the given partition, flushing and
// retrying until Produce accepts it.
// NOTE(review): this retries forever with no backoff if the producer
// keeps rejecting (e.g. full queue with broker down) — confirm that is
// acceptable.
func output(m string, producer WrappedProducer, partition kafka.TopicPartition) {
	kafkamsg := &kafka.Message{
		TopicPartition: partition,
		Value:          []byte(m),
	}
	var err error = producer.Produce(kafkamsg, nil)
	for err != nil {
		log.Errorf("ERROR PRODUCING TO KAFKA %v", err)
		producer.Flush(1)
		err = producer.Produce(kafkamsg, nil)
	}
}
|
package main
import "fmt"
import _ "thorium-go/process"
// main prints a fixed greeting; the blank thorium-go/process import at
// the top of the file is retained purely for its side effects.
func main() {
	const greeting = "hello world"
	fmt.Println(greeting)
}
|
package greetingspackage
import "fmt"
// PrintGreetings writes a fixed greeting to stdout. The upper-case
// first letter exports it from greetingspackage.
func PrintGreetings() {
	// Bug fix: the message previously misspelled "printing" as "priting".
	fmt.Println("I'm printing a message from the PrintGreetings() function!")
}
// printGreetingsUnexported writes a fixed greeting to stdout. Its
// lower-case first letter keeps it private to greetingspackage.
func printGreetingsUnexported() {
	const msg = "I'm printing a message from the printGreetingsUnexported() function!"
	fmt.Println(msg)
}
|
package infra
import (
"log"
"time"
"github.com/caarlos0/env/v6"
)
// Config aggregates all service configuration loaded from the environment.
type Config struct {
	WebConfig
	DbConfig
	OutboxHeartbeat time.Duration `env:"OUTBOX_HEARTBEAT" envDefault:"5s"`
}
// WebConfig holds HTTP server settings.
// Note: the default includes the leading colon (":8080"), i.e. it is a
// listen address rather than a bare port number.
type WebConfig struct {
	Port string `env:"PORT" envDefault:":8080"`
}
// DbConfig holds PostgreSQL connection settings.
type DbConfig struct {
	DbName     string `env:"DB_NAME" envDefault:"postgres"`
	DbHost     string `env:"DB_HOST" envDefault:"localhost"`
	DbPort     int    `env:"DB_PORT" envDefault:"5432"`
	DbUser     string `env:"DB_USER" envDefault:"postgres"`
	DbPassword string `env:"DB_PASSWORD" envDefault:"secret"`
}
// LoadEnvVars parses configuration from environment variables, falling
// back to the struct-tag defaults. It terminates the process on a
// parse error.
func LoadEnvVars() Config {
	cfg := Config{}
	// The previous version re-checked err != nil inside a branch that
	// already guaranteed it; a single guard is sufficient.
	if err := env.Parse(&cfg); err != nil {
		log.Fatalf("[ERROR] %+v", err)
	}
	return cfg
}
|
package v1
import (
"blog/app/models"
"blog/app/repositories"
"blog/app/web/responses/admin"
"blog/database"
"github.com/kataras/iris/v12"
"github.com/mlogclub/simple"
)
// TagController handles admin CRUD endpoints for blog tags.
type TagController struct {
	Ctx           iris.Context
	TagRepository *repositories.TagRepository
	TagResponse   admin.TagResponse
}
// NewTagController builds a TagController wired to the shared database.
func NewTagController() *TagController {
	return &TagController{
		TagRepository: repositories.NewTagRepository(
			database.DB())}
}
/**
 * Create a new tag (original comment: 新增标签列表).
 * Reads name/description from the form; name is required.
 */
func (c TagController) PostCreate() *simple.JsonResult {
	var (
		name        = c.Ctx.FormValue("name")
		description = c.Ctx.FormValue("description")
	)
	if name == "" {
		return simple.JsonErrorMsg("参数不能为空")
	}
	tag, err := c.TagRepository.Create(&models.Tag{
		Name:        name,
		Description: description,
		Status:      0,
	})
	if err != nil {
		return simple.JsonErrorMsg(err.Error())
	}
	return simple.JsonData(
		c.TagResponse.Tag(tag))
}
/**
 * List tags with paging (original comment: 获取标签列表).
 * Defaults: page=1, limit=10.
 */
func (c TagController) GetList() *simple.JsonResult {
	var page = &simple.Paging{
		Page:  simple.FormValueIntDefault(c.Ctx, "page", 1),
		Limit: simple.FormValueIntDefault(c.Ctx, "limit", 10),
		Total: 0,
	}
	list := c.TagRepository.TagList(page)
	return simple.JsonData(
		simple.PageResult{
			Page:    page,
			Results: c.TagResponse.Tags(list),
		})
}
/**
 * Update an existing tag by id (original comment: 编辑标签).
 */
func (c TagController) PostUpdate() *simple.JsonResult {
	id, err := simple.FormValueInt(c.Ctx, "id")
	if err != nil {
		return simple.JsonErrorMsg(err.Error())
	}
	data := repositories.UpdateData{
		"name":        c.Ctx.FormValue("name"),
		"description": c.Ctx.FormValue("description"),
	}
	tag, err := c.TagRepository.UpdateById(id, data)
	if err != nil {
		return simple.JsonErrorMsg(err.Error())
	}
	return simple.JsonData(
		c.TagResponse.Tag(tag))
}
/**
 * Delete a tag by id (original comment: 删除标签列表).
 */
func (c TagController) PostDel() *simple.JsonResult {
	id, err := simple.FormValueInt(c.Ctx, "id")
	if err != nil {
		return simple.JsonErrorMsg(err.Error())
	}
	err = c.TagRepository.DelById(id)
	if err != nil {
		return simple.JsonErrorMsg(err.Error())
	}
	return simple.JsonSuccess()
}
|
package psql
import (
"testing"
)
// TestContractDaoFindById checks that FindById(1) succeeds against the
// package-level test database connection.
func TestContractDaoFindById(t *testing.T) {
	dao := ContractDao(db)
	_, err := dao.FindById(1)
	if err != nil {
		t.Error(err)
	}
}
// TestContractDaoFindDetails checks that FindDetails(1) succeeds against
// the package-level test database connection.
func TestContractDaoFindDetails(t *testing.T) {
	dao := ContractDao(db)
	_, err := dao.FindDetails(1)
	if err != nil {
		t.Error(err)
	}
}
|
package main
import (
"flag"
"fmt"
"log"
"os"
"runtime"
"sync"
)
// Overwrite flag
var _OW bool
// Mute flag
var _MUTE bool
// handleError terminates the program via log.Fatalln when err is
// non-nil; a nil err is a no-op.
func handleError(err error) {
	if err == nil {
		return
	}
	log.Fatalln(err)
}
// consoleOut prints message to stdout unless the global mute flag
// (_MUTE) is set.
func consoleOut(message string) {
	if _MUTE {
		return
	}
	fmt.Println(message)
}
// findDumpJobs walks path recursively (skipping .DS_Store and .git),
// enqueues every NCM file onto jobsCh, and bumps the WaitGroup once per
// job before sending so main can wait for all of them to finish.
func findDumpJobs(path string, jobsCh chan string, exit *sync.WaitGroup) {
	filter := func(f os.FileInfo) bool {
		if f.Name() == ".DS_Store" {
			return false
		}
		if f.Name() == ".git" {
			return false
		}
		return true
	}
	files := readDir(path, filter)
	nextDirs := make([]string, 0)
	for _, file := range files {
		if isNCM(file.Name()) {
			// Add before sending so exit.Wait() cannot pass while this
			// job is still queued.
			exit.Add(1)
			jobsCh <- (path + "/" + file.Name())
		} else if file.IsDir() {
			nextDirs = append(nextDirs, path+"/"+file.Name())
		}
	}
	// Recurse into subdirectories after the current level is queued.
	for _, dirPath := range nextDirs {
		findDumpJobs(dirPath, jobsCh, exit)
	}
}
// processJobs is a worker loop: it dumps each queued NCM file (honoring
// the overwrite flag) and marks the job done. It exits when jobsCh is
// closed and drained.
func processJobs(jobsCh chan string, exit *sync.WaitGroup) {
	for path := range jobsCh {
		dump(path, _OW)
		exit.Done()
	}
}
// TODO: improve error handle
// main parses flags, starts one worker per CPU, enqueues all NCM files
// under -p, then closes the channel and waits for every job to finish.
// Note: -m (mute) defaults to true, so output is suppressed by default.
func main() {
	var rootPath string
	flag.BoolVar(&_MUTE, "m", true, "是否静音执行")
	flag.BoolVar(&_OW, "o", false, "是否覆盖已经存在的结果文件")
	flag.StringVar(&rootPath, "p", "", "NCM文件所在目录")
	flag.Parse()
	var exit sync.WaitGroup
	concurrency := runtime.NumCPU()
	jobsCh := make(chan string, concurrency)
	for i := 0; i < concurrency; i++ {
		go processJobs(jobsCh, &exit)
	}
	// All Add calls happen inside findDumpJobs before the close below,
	// so Wait observes every queued job.
	findDumpJobs(rootPath, jobsCh, &exit)
	close(jobsCh)
	exit.Wait()
}
|
package main
/*
#ctype Stmt *
*/
// stmtHandle is an opaque handle value (maps to the C `Stmt *` type per
// the #ctype directive above).
type stmtHandle uintptr
/*
#cmethod Open
#cmethod Close
*/
// dbIf pairs a statement handle with a database name; Open/Close
// wrappers are generated per the #cmethod directives above.
type dbIf struct {
	handle stmtHandle // underlying C statement handle
	dbName string     // database name this interface is bound to
}
/*
Enum type for operand
#ctype operKind
enum operKind: int32_t {
Get = 0,
Put = 1,
Delete = 2
};
*/
// OperKind enumerates database operation types; values mirror the C
// operKind enum declared in the directive comment above.
type OperKind int32

const (
	Get = OperKind(0)
	Put = OperKind(1)
	Delete = OperKind(2)
)
/*
*/
// dbOper is one database operation: its kind plus the key and (for Put)
// the value payload.
type dbOper struct {
	kind  OperKind
	key   string
	value []byte
}
/*
#cmethod Do
*/
// dbBatch groups operations; a Do wrapper is generated per the
// #cmethod directive above.
type dbBatch struct {
	operations []dbOper
}
|
package services
import (
"github.com/spf13/viper"
jwt "github.com/dgrijalva/jwt-go"
)
// UserJWT is the claims payload embedded in issued tokens: the user's
// id and unique key plus the standard JWT claims.
type UserJWT struct {
	ID          uint   `json:"id"`
	UniqUserKey string `json:"uniq_user_key"`
	jwt.StandardClaims
}
// CryptJWT signs a HS256 token carrying the user's id and unique key,
// using the jwt.secret_key from configuration.
// NOTE(review): no expiry claim is set, so issued tokens never expire —
// confirm this is intended.
func CryptJWT(id uint, uKey string) (string, error) {
	token := jwt.NewWithClaims(jwt.GetSigningMethod("HS256"), &UserJWT{
		ID:          id,
		UniqUserKey: uKey,
	})
	tokenString, err := token.SignedString([]byte(viper.GetString("jwt.secret_key")))
	if err != nil {
		return "", err
	}
	return tokenString, nil
}
func DecryptJWT(tokenString string) (*UserJWT, bool) {
user := &UserJWT{}
token, _ := jwt.ParseWithClaims(tokenString, user, func(token *jwt.Token) (interface{}, error) {
return []byte(viper.GetString("jwt.secret_key")), nil
})
return user, token.Valid
} |
// @Description mysql
// @Author jiangyang
// @Created 2020/10/30 3:44 下午
// Example Config:
// mysql:
// user: root
// password: 123456
// host: 127.0.0.1
// port: 3306
// dbname: demo
// max_idle_conn: 10
// max_open_conn: 100
// debug: true
package mysql
import (
"fmt"
"gorm.io/gorm/logger"
"sync"
"time"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"gorm.io/driver/mysql"
"gorm.io/gorm"
)
var (
	// shared gorm connection, set once by Init
	db *gorm.DB
	// guards one-time establishment of the connection
	once sync.Once
)
// Config holds the MySQL connection settings.
type Config struct {
	User        string `json:"user" yaml:"user"`                                                 // user name
	Password    string `json:"password" yaml:"password"`                                         // password
	Host        string `json:"host" yaml:"host"`                                                 // host address
	Port        int    `json:"port" yaml:"port"`                                                 // port number
	Dbname      string `json:"dbname" yaml:"dbname"`                                             // database name
	MaxIdleConn int    `json:"max_idle_conn" yaml:"max_idle_conn" mapstructure:"max_idle_conn"`  // max idle connections
	MaxOpenConn int    `json:"max_open_conn" yaml:"max_open_conn" mapstructure:"max_open_conn" ` // max open connections
	Debug       bool   `json:"debug" yaml:"debug"`                                               // enable Debug (logs database operations)
}
// Init establishes the shared gorm connection exactly once (guarded by
// sync.Once) and configures the underlying pool. It terminates the
// process via logrus.Fatalf if the connection cannot be made.
func Init(mysqlConfig Config) {
	once.Do(func() {
		dsn := fmt.Sprintf(
			"%s:%s@tcp(%s:%d)/%s?charset=utf8mb4&parseTime=true&loc=Local",
			mysqlConfig.User,
			mysqlConfig.Password,
			mysqlConfig.Host,
			mysqlConfig.Port,
			mysqlConfig.Dbname,
		)
		conn, err := gorm.Open(mysql.Open(dsn), &gorm.Config{
			// Route gorm logs through logrus; queries slower than 1s are
			// reported as slow.
			Logger: logger.New(
				logrus.StandardLogger(),
				logger.Config{
					SlowThreshold: time.Second,
				},
			),
		})
		if err != nil {
			logrus.Fatalf("mysql connect failed: %v", err)
		}
		sqlDB, err := conn.DB()
		if err != nil {
			logrus.Fatalf("mysql connPool failed: %v", err)
		}
		sqlDB.SetMaxIdleConns(mysqlConfig.MaxIdleConn)
		sqlDB.SetMaxOpenConns(mysqlConfig.MaxOpenConn)
		sqlDB.SetConnMaxLifetime(time.Hour)
		db = conn
		logrus.Info("mysql connect successfully")
	})
}
// Conn returns the shared gorm connection (nil until Init has run).
func Conn() *gorm.DB {
	return db
}
// Close shuts down the underlying connection pool if one was opened.
// NOTE(review): the "closed" log line is emitted even when db was nil.
func Close() error {
	if db != nil {
		sqlDB, err := db.DB()
		if err != nil {
			return errors.WithStack(err)
		}
		if err := sqlDB.Close(); err != nil {
			return errors.WithStack(err)
		}
	}
	logrus.Info("mysql connect closed")
	return nil
}
|
// test-multi-var project main.go
package main
import (
"fmt"
)
// myfunc prints each of its integer arguments on its own line.
func myfunc(args ...int) {
	for i := 0; i < len(args); i++ {
		fmt.Println(args[i])
	}
}
// rawPrint prints each argument of any type on its own line.
func rawPrint(rawList ...interface{}) {
	for i := 0; i < len(rawList); i++ {
		fmt.Println(rawList[i])
	}
}
// print forwards its arguments unchanged to rawPrint.
// Note: this shadows the predeclared built-in print within this package.
func print(slist ...interface{}) {
	rawPrint(slist...)
}
// main exercises the variadic helpers with int and mixed-type argument
// lists. (The "\n" inside Println yields a deliberate blank line.)
func main() {
	fmt.Println("Multi vars! \n")
	myfunc(1)
	myfunc(1, 2, 3, 4)
	myfunc(6, 5, 4)
	print(1, 2, 3)
	print(8, 9, 10)
	print(5, 6, "testjames")
	rawPrint(4, 5, "test2")
}
|
package exasol
import (
"fmt"
"regexp"
"strconv"
"strings"
)
// DSNConfig is a builder for Exasol connection strings; optional
// booleans are pointers so "unset" can be distinguished from false.
type DSNConfig struct {
	host          string
	port          int
	user          string
	password      string
	autocommit    *bool
	encryption    *bool
	compression   *bool
	clientName    string
	clientVersion string
	fetchSize     int
	useTLS        *bool
}
// NewConfig starts a DSN builder with the given credentials and the
// defaults localhost:8563.
func NewConfig(user, password string) *DSNConfig {
	return &DSNConfig{
		host:     "localhost",
		port:     8563,
		user:     user,
		password: password,
	}
}
// Compression sets whether protocol compression is enabled; returns c for chaining.
func (c *DSNConfig) Compression(enabled bool) *DSNConfig {
	c.compression = &enabled
	return c
}
// Encryption sets whether connection encryption is enabled; returns c for chaining.
func (c *DSNConfig) Encryption(enabled bool) *DSNConfig {
	c.encryption = &enabled
	return c
}
// Autocommit sets whether autocommit is enabled; returns c for chaining.
func (c *DSNConfig) Autocommit(enabled bool) *DSNConfig {
	c.autocommit = &enabled
	return c
}
// UseTLS sets whether TLS is used; returns c for chaining.
func (c *DSNConfig) UseTLS(enabled bool) *DSNConfig {
	c.useTLS = &enabled
	return c
}
// FetchSize sets the result-set fetch size; returns c for chaining.
func (c *DSNConfig) FetchSize(size int) *DSNConfig {
	c.fetchSize = size
	return c
}
// ClientName sets the client name reported to the server; returns c for chaining.
func (c *DSNConfig) ClientName(name string) *DSNConfig {
	c.clientName = name
	return c
}
// ClientVersion sets the client version reported to the server; returns c for chaining.
func (c *DSNConfig) ClientVersion(version string) *DSNConfig {
	c.clientVersion = version
	return c
}
// Host sets the server host; returns c for chaining.
func (c *DSNConfig) Host(host string) *DSNConfig {
	c.host = host
	return c
}
// Port sets the server port; returns c for chaining.
func (c *DSNConfig) Port(port int) *DSNConfig {
	c.port = port
	return c
}
// String renders the builder as an "exa:host:port;key=value;..." DSN,
// emitting only the options that were explicitly set and trimming the
// trailing semicolon.
// NOTE(review): user/password are not escaped here, while parseDSN
// unescapes them — a password containing ';' would break round-tripping.
func (c *DSNConfig) String() string {
	var sb strings.Builder
	sb.WriteString(fmt.Sprintf("exa:%s:%d;user=%s;password=%s;", c.host, c.port, c.user, c.password))
	if c.autocommit != nil {
		sb.WriteString(fmt.Sprintf("autocommit=%d;", boolToInt(*c.autocommit)))
	}
	if c.compression != nil {
		sb.WriteString(fmt.Sprintf("compression=%d;", boolToInt(*c.compression)))
	}
	if c.encryption != nil {
		sb.WriteString(fmt.Sprintf("encryption=%d;", boolToInt(*c.encryption)))
	}
	if c.useTLS != nil {
		sb.WriteString(fmt.Sprintf("usetls=%d;", boolToInt(*c.useTLS)))
	}
	if c.fetchSize != 0 {
		sb.WriteString(fmt.Sprintf("fetchsize=%d;", c.fetchSize))
	}
	if c.clientName != "" {
		sb.WriteString(fmt.Sprintf("clientname=%s;", c.clientName))
	}
	if c.clientVersion != "" {
		sb.WriteString(fmt.Sprintf("clientversion=%s;", c.clientVersion))
	}
	return strings.TrimRight(sb.String(), ";")
}
// parseDSN parses an Exasol DSN ("exa:<host>:<port>[;<key>=<value>...]")
// into a config, applying driver defaults for absent parameters.
// The else-after-return was flattened per Go idiom; behavior is unchanged.
func parseDSN(dsn string) (*config, error) {
	if !strings.HasPrefix(dsn, "exa:") {
		return nil, fmt.Errorf("invalid connection string, must start with 'exa:'")
	}
	splitDsn := splitIntoConnectionStringAndParameters(dsn)
	host, port, err := extractHostAndPort(splitDsn[0])
	if err != nil {
		return nil, err
	}
	// No ";" section at all means no parameters: use pure defaults.
	if len(splitDsn) < 2 {
		return getDefaultConfig(host, port), nil
	}
	return getConfigWithParameters(host, port, splitDsn[1])
}
// splitIntoConnectionStringAndParameters strips the "exa:" scheme and
// splits the remainder into at most two parts: "<host>:<port>" and the
// raw parameter string (absent when the DSN has no ';').
//
// FIX: strings.TrimPrefix replaces strings.Replace(dsn, "exa:", "", 1),
// which would have removed an "exa:" occurring anywhere in the string,
// not just the leading scheme.
func splitIntoConnectionStringAndParameters(dsn string) []string {
	cleanDsn := strings.TrimPrefix(dsn, "exa:")
	return strings.SplitN(cleanDsn, ";", 2)
}
// extractHostAndPort splits "<host>:<port>" and converts the port to an
// int, returning an error for a malformed string or non-numeric port.
// NOTE(review): a plain ":" split cannot handle IPv6 literals —
// presumably only hostnames/IPv4 are expected here; confirm.
func extractHostAndPort(connectionString string) (string, int, error) {
	parts := strings.Split(connectionString, ":")
	if len(parts) != 2 {
		return "", 0, fmt.Errorf("invalid host or port, expected format: <host>:<port>")
	}
	portNumber, convErr := strconv.Atoi(parts[1])
	if convErr != nil {
		return "", 0, fmt.Errorf("invalid `port` value, numeric port expected")
	}
	return parts[0], portNumber, nil
}
// getDefaultConfig returns the driver defaults for the given host/port:
// API version 2, autocommit/encryption/TLS on, compression off, client
// name "Go client", an empty Params map, and a fetch size of 128*1024.
func getDefaultConfig(host string, port int) *config {
	return &config{
		Host:        host,
		Port:        port,
		ApiVersion:  2,
		Autocommit:  true,
		Encryption:  true,
		Compression: false,
		UseTLS:      true,
		ClientName:  "Go client",
		Params:      map[string]string{},
		FetchSize:   128 * 1024,
	}
}
// getConfigWithParameters starts from the defaults for host/port and
// applies each <key>=<value> parameter on top. Unknown keys are stored
// (unescaped) in config.Params. It fails on a parameter without '=' and
// on non-numeric fetchsize/resultsetmaxrows values.
func getConfigWithParameters(host string, port int, parametersString string) (*config, error) {
	config := getDefaultConfig(host, port)
	parameters := extractParameters(parametersString)
	for _, parameter := range parameters {
		// extractParameters keeps the ';' separator on each piece.
		parameter = strings.TrimRight(parameter, ";")
		keyValuePair := strings.SplitN(parameter, "=", 2)
		if len(keyValuePair) != 2 {
			return nil, fmt.Errorf("invalid parameter %s, expected format <parameter>=<value>", parameter)
		}
		key := keyValuePair[0]
		value := keyValuePair[1]
		switch key {
		case "password":
			// user/password may contain backslash-escaped separators (`\;`).
			config.Password = unescape(value, ";")
		case "user":
			config.User = unescape(value, ";")
		case "autocommit":
			// Boolean flags: "1" means true, any other value means false.
			config.Autocommit = value == "1"
		case "encryption":
			config.Encryption = value == "1"
		case "usetls":
			config.UseTLS = value == "1"
		case "compression":
			config.Compression = value == "1"
		case "clientname":
			config.ClientName = value
		case "clientversion":
			config.ClientVersion = value
		case "schema":
			config.Schema = value
		case "fetchsize":
			value, err := strconv.Atoi(value)
			if err != nil {
				return nil, fmt.Errorf("invalid `fetchsize` value, numeric expected")
			}
			config.FetchSize = value
		case "resultsetmaxrows":
			value, err := strconv.Atoi(value)
			if err != nil {
				return nil, fmt.Errorf("invalid `resultsetmaxrows` value, numeric expected")
			}
			config.ResultSetMaxRows = value
		default:
			config.Params[key] = unescape(value, ";")
		}
	}
	return config, nil
}
// parameterSeparator matches a word character followed by ';' — i.e. a
// separator that is NOT preceded by a backslash escape (`\;`).
// Hoisted to package level so the regexp is compiled once instead of on
// every extractParameters call.
var parameterSeparator = regexp.MustCompile(`[\w];`)

// extractParameters splits the parameter string after every unescaped
// ';' separator; each returned piece keeps its trailing ';'.
func extractParameters(parametersString string) []string {
	return splitAfter(parametersString, parameterSeparator)
}
// unescape replaces every backslash-escaped `\<char>` sequence in s with
// the literal char.
func unescape(s, char string) string {
	escaped := `\` + char
	return strings.ReplaceAll(s, escaped, char)
}
func splitAfter(s string, re *regexp.Regexp) []string {
var (
r []string
p int
)
is := re.FindAllStringIndex(s, -1)
if is == nil {
return append(r, s)
}
for _, i := range is {
r = append(r, s[p:i[1]])
p = i[1]
}
return append(r, s[p:])
}
|
package game
import (
"errors"
"github.com/golang/glog"
"github.com/noxue/utils/fsm"
"qipai/dao"
"qipai/utils"
"sync"
"zero"
)
// Game states for the per-room state machine.
const (
	ReadyState        = iota + 1 // waiting for players to get ready
	SelectBankerState            // bidding for the banker role
	SetScoreState                // placing bets
	ShowCardState                // revealing cards
	CompareCardState             // comparing hands
	GameOverState                // game finished
	GameDeletedState             // game has been deleted
)
// Actions that drive transitions of the game state machine.
const (
	StartAction       = iota + 1
	SetTimesAction    // bid for banker
	SetScoreAction    // place a bet; once all bets are in, hands are scored automatically
	ShowCardAction    // reveal cards
	CompareCardAction // compare hands
	GameOverAction    // finish the game
)
// gamesType is the registry of all running games, keyed by room id.
type gamesType struct {
	Games map[uint]*Game
	lock  sync.Mutex // guards Games
}

// Game holds one room's state machine and player bookkeeping.
type Game struct {
	lock         sync.Mutex // serializes all FSM actions on this game
	Fsm          *fsm.FSM
	RoomId       uint
	AutoPlayers  map[uint]bool // whether a player is on auto-play (trusteeship)
	OnlinePlayer map[uint]bool // whether a player is online
}
// Games is the global game registry.
var Games *gamesType

// init creates the empty global registry before any package user runs.
func init() {
	Games = &gamesType{
		Games: map[uint]*Game{},
	}
}
// NewGame creates and registers the game for roomId, wiring up the
// handler for every game state. It fails if the room already has a game.
//
// FIX: the duplicate-room check now runs before the game is constructed,
// so the error path returns a nil game instead of a constructed-but-
// unregistered one (a (T, error) return should not carry a partially
// useful T alongside a non-nil error).
func (me *gamesType) NewGame(roomId uint) (game *Game, err error) {
	me.lock.Lock()
	defer me.lock.Unlock()
	if _, ok := me.Games[roomId]; ok {
		err = errors.New("该房间已创建游戏")
		return
	}
	game = &Game{
		Fsm:    fsm.New(StartAction),
		RoomId: roomId,
	}
	// Register the handler invoked in each state.
	game.Fsm.AddState(ReadyState, StateReady)
	game.Fsm.AddState(SelectBankerState, StateSelectBanker)
	game.Fsm.AddState(SetScoreState, StateSetScore)
	game.Fsm.AddState(ShowCardState, StateShowCard)
	game.Fsm.AddState(CompareCardState, StateCompareCard)
	game.Fsm.AddState(GameOverState, StateGameOver)
	// Track the game in the global registry.
	me.Games[roomId] = game
	return
}
// NotifyClubPlayers broadcasts a message to every online member of the
// club (tea house) that owns the given room — e.g. "this room started"
// or "this room was disbanded".
// If msg is nil, a bare message carrying only clubId/roomId is sent;
// otherwise msg is sent with clubId/roomId attached.
func NotifyClubPlayers(msgId int32, roomId uint, msg *utils.Message) {
	// NOTE(review): the lookup error is ignored; on failure room is the
	// zero value, ClubId is 0 and the function silently returns —
	// confirm this best-effort behavior is intended.
	room, _ := dao.Room.Get(roomId)
	if room.ClubId == 0 {
		// Room does not belong to a club; nobody to notify.
		return
	}
	ClubPlayers.Do(room.ClubId, func(s *zero.Session) {
		if msg == nil {
			utils.Msg("").AddData("clubId", room.ClubId).AddData("roomId", roomId).Send(msgId, s)
		} else {
			msg.AddData("clubId", room.ClubId).AddData("roomId", roomId).Send(msgId, s)
		}
	})
}
// Get looks up the running game for roomId, returning an error when the
// room has no game in progress.
func (me *gamesType) Get(roomId uint) (*Game, error) {
	me.lock.Lock()
	defer me.lock.Unlock()
	if g, ok := me.Games[roomId]; ok {
		return g, nil
	}
	return nil, errors.New("该房间未开始游戏")
}
// GameOver tears down the game registered for roomId: it notifies all
// online members of the owning club that the game ended, then removes
// the game from the registry. It is an error if the room has no game,
// and the game stays registered if the room lookup fails.
// (Also normalizes the non-gofmt `e!=nil` spacing.)
func (me *gamesType) GameOver(roomId uint) (err error) {
	me.lock.Lock()
	defer me.lock.Unlock()
	if _, ok := me.Games[roomId]; !ok {
		err = errors.New("该房间没有创建游戏,无须结束")
		return
	}
	// Broadcast the game-over event to everyone in the owning club.
	room, e := dao.Room.Get(roomId)
	if e != nil {
		glog.Error(e)
		err = e
		return
	}
	NotifyClubPlayers(BroadcastGameOver, roomId, utils.Msg("").AddData("tableId", room.TableId))
	delete(me.Games, roomId)
	return
}
// Start fires the start action on the state machine and, if the room
// belongs to a club, notifies players currently in that club so the club
// lobby can refresh the room's status.
func (me *Game) Start() {
	me.lock.Lock()
	defer me.lock.Unlock()
	me.Fsm.Do(StartAction, me.RoomId)
	NotifyClubPlayers(ResGameStart, me.RoomId, nil)
}

// SetTimes submits player uid's banker bid; auto marks an auto-play move.
func (me *Game) SetTimes(uid uint, times int, auto bool) {
	me.lock.Lock()
	defer me.lock.Unlock()
	me.Fsm.Do(SetTimesAction, me.RoomId, uid, times, auto)
}

// SetScore submits player uid's bet; auto marks an auto-play move.
func (me *Game) SetScore(uid uint, score int, auto bool) {
	me.lock.Lock()
	defer me.lock.Unlock()
	me.Fsm.Do(SetScoreAction, me.RoomId, uid, score, auto)
}

// ShowCard advances the room to the card-reveal step.
func (me *Game) ShowCard() {
	me.lock.Lock()
	defer me.lock.Unlock()
	me.Fsm.Do(ShowCardAction, me.RoomId)
}

// CompareCard advances the room to hand comparison.
func (me *Game) CompareCard() {
	me.lock.Lock()
	defer me.lock.Unlock()
	me.Fsm.Do(CompareCardAction, me.RoomId)
}

// GameOver fires the game-over action on this room's state machine.
func (me *Game) GameOver() {
	me.lock.Lock()
	defer me.lock.Unlock()
	me.Fsm.Do(GameOverAction, me.RoomId)
}
|
package main
import (
"db_analyze_pro"
"encoding/csv"
"fmt"
"io"
"os"
"strconv"
)
//var a = 12
// const A = 12
//var b = "string"
//var c interface{}
//
//type d interface {
//
//}
//type e int
// Package-level scratch declarations kept from earlier experiments.
var arr [10]int      // fixed-size array
var slice [6]int     // NOTE(review): despite the name, this is an array, not a slice
var m map[int]string // nil until allocated with make

// KK is a small demo struct used by the commented-out experiments below.
type KK struct {
	a int
	b int
}
// StData aggregates per-case perioperative temperature statistics.
// Temperatures are stored as integers (e.g. 360, 385) — presumably
// tenths of a degree Celsius; confirm with the data source. "InOperation"
// fields cover the intra-operative phase, "PostOperation" the recovery
// phase.
type StData struct {
	caseID                        string
	sex                           int
	minTemperatureInOperation     int
	between350And360InOperation   int64 // intra-op duration with temperature in [350, 360)
	below360InOperation           int64 // intra-op duration below 360
	between375And380InOperation   int64 // intra-op duration in [375, 380)
	exceed380InOperation          int64 // intra-op duration above 380
	exceed385InOperation          int64 // intra-op duration above 385
	continueTimeInOperation       int64 // total intra-op measurement duration
	between350And360PostOperation int64 // post-op duration in [350, 360)
	below360PostOperation         int64
	exceed375TimePostOperation    int64
	between375And380PostOperation int64
	between380And385PostOperation int64
	exceed385TimePostOperation    int64
	maxTemperaturePostOperation   int
	continueTimePostOperation     int64
	hangzhanCountPostOperation    int // post-op shivering episode count
	zhanwangCountPostOperation    int // post-op delirium episode count
}
// main prints the current time as reported by the db_analyze_pro package.
func main() {
	now := db_analyze_pro.GetTimeNow()
	fmt.Println(now)
}
// main01 reads the temperature CSV and aggregates rows into one StData
// per case ID: the first non-zero sex / min / max temperature wins, and
// every duration/count column is summed across rows. Finally the case
// IDs are printed. (A large tail of dead, commented-out experiments was
// removed.)
func main01() {
	csvFile, err := os.Open("X:/Golang/GO_Study/trunk/temp_file.csv")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer csvFile.Close()
	fmt.Println("文件名称," + csvFile.Name())
	stDatas := make(map[string]*StData, 10)
	r := csv.NewReader(csvFile)
	for {
		records, err := r.Read()
		if err == io.EOF {
			fmt.Println(err)
			break
		}
		if err != nil {
			// FIX: a malformed line used to fall through with a nil
			// records slice and panic on records[0]; report and stop.
			fmt.Println(err)
			break
		}
		if len(records) < 19 {
			// Not enough columns for a full row; skip it.
			continue
		}
		stData := parseStData(records)
		if existing, ok := stDatas[stData.caseID]; ok {
			mergeStData(existing, &stData)
		} else {
			row := stData
			stDatas[stData.caseID] = &row
		}
	}
	for k := range stDatas {
		fmt.Println(k)
	}
}

// atoiOrZero converts s to an int, treating unparsable input as 0 —
// the same lenient behavior the original per-column parsing had
// (strconv.Atoi returns 0 on error, and the errors were ignored).
func atoiOrZero(s string) int {
	v, err := strconv.Atoi(s)
	if err != nil {
		return 0
	}
	return v
}

// parseStData maps one CSV record (>= 19 columns) onto an StData value.
func parseStData(records []string) StData {
	var d StData
	d.caseID = records[0]
	d.sex = atoiOrZero(records[1])
	d.minTemperatureInOperation = atoiOrZero(records[2])
	d.between350And360InOperation = int64(atoiOrZero(records[3]))
	d.below360InOperation = int64(atoiOrZero(records[4]))
	d.between375And380InOperation = int64(atoiOrZero(records[5]))
	d.exceed380InOperation = int64(atoiOrZero(records[6]))
	d.exceed385InOperation = int64(atoiOrZero(records[7]))
	d.continueTimeInOperation = int64(atoiOrZero(records[8]))
	d.between350And360PostOperation = int64(atoiOrZero(records[9]))
	d.below360PostOperation = int64(atoiOrZero(records[10]))
	d.exceed375TimePostOperation = int64(atoiOrZero(records[11]))
	d.between375And380PostOperation = int64(atoiOrZero(records[12]))
	d.between380And385PostOperation = int64(atoiOrZero(records[13]))
	d.exceed385TimePostOperation = int64(atoiOrZero(records[14]))
	d.maxTemperaturePostOperation = atoiOrZero(records[15])
	d.continueTimePostOperation = int64(atoiOrZero(records[16]))
	d.hangzhanCountPostOperation = atoiOrZero(records[17])
	d.zhanwangCountPostOperation = atoiOrZero(records[18])
	return d
}

// mergeStData folds row into the accumulated value for the same case:
// zero-valued sex/min/max fields are filled in, everything else adds up.
func mergeStData(value, row *StData) {
	if value.sex == 0 {
		value.sex = row.sex
	}
	if value.minTemperatureInOperation == 0 {
		// BUG FIX: the original assigned the minimum temperature to the
		// sex field here (value.sex = stData.minTemperatureInOperation).
		value.minTemperatureInOperation = row.minTemperatureInOperation
	}
	if value.maxTemperaturePostOperation == 0 {
		value.maxTemperaturePostOperation = row.maxTemperaturePostOperation
	}
	value.between350And360InOperation += row.between350And360InOperation
	value.below360InOperation += row.below360InOperation
	value.between375And380InOperation += row.between375And380InOperation
	value.exceed380InOperation += row.exceed380InOperation
	value.exceed385InOperation += row.exceed385InOperation
	value.continueTimeInOperation += row.continueTimeInOperation
	value.between350And360PostOperation += row.between350And360PostOperation
	value.below360PostOperation += row.below360PostOperation
	value.exceed375TimePostOperation += row.exceed375TimePostOperation
	value.between375And380PostOperation += row.between375And380PostOperation
	value.between380And385PostOperation += row.between380And385PostOperation
	value.exceed385TimePostOperation += row.exceed385TimePostOperation
	value.continueTimePostOperation += row.continueTimePostOperation
	value.hangzhanCountPostOperation += row.hangzhanCountPostOperation
	value.zhanwangCountPostOperation += row.zhanwangCountPostOperation
}
|
package gocql
import (
"context"
"github.com/gocql/gocql"
)
// SessionChecker is a health check backed by a gocql (Cassandra) session.
type SessionChecker struct {
	session *gocql.Session
}

// Check reports session health by executing a trivial query.
// FIX: the request context is now attached to the query so the probe
// honors the caller's deadline/cancellation — the original ignored ctx.
func (c *SessionChecker) Check(ctx context.Context) error {
	return c.session.Query("void").WithContext(ctx).Exec()
}

// NewSessionChecker wraps the given session in a SessionChecker.
func NewSessionChecker(session *gocql.Session) *SessionChecker {
	return &SessionChecker{session: session}
}
|
// golang中没有构造模式~所以用工厂模式
// type student struct 这里student实例首字母是小写,但是要再其他包里用这个~~ 就用到了工厂模式
package main
import (
"fmt"
"go_project/9method/factory/student"
)
func main(){
// 第一种方法,因为Student 的首字母是大写的,所以再其他包可以直接使用,但是要是小写就会有报错`~~
// 报错内容如下,意思就是没有这个结构体~~~·
// 要想正常使用,那就要用到工厂模式
/* .\main.go:13:11: cannot refer to unexported name student.student
.\main.go:13:11: undefined: student.student */
/* var stu1 student.student
stu1.Name ="matianqi"
stu1.Age = 30
stu1.Score = 95.0 */
var stu2 = student.NewStu("matianqi~",30,99.0)
fmt.Println(stu2) // &{matianqi~ 30 98} 用的是地址
fmt.Printf("name=%v\n",stu2.Name)
fmt.Printf("age=%v\n",stu2.Age)
//fmt.Printf("scoer=%v\n",stu2.Score)
fmt.Printf("score=%v\n",stu2.GetScore()) // 因为小写字母,所以单独给这个score一个方法,让他再student包中传出score
} |
package main
import (
"flag"
"fmt"
"math/rand"
"time"
_ "github.com/manishrjain/gocrud/drivers/elasticsearch"
"github.com/manishrjain/gocrud/search"
"github.com/manishrjain/gocrud/x"
)
// Command-line flags: the Elastic Search host and the result page size.
var eip = flag.String("ipaddr", "", "IP address of Elastic Search")
var num = flag.Int("num", 1, "Number of results")

// Author is the document payload indexed by this smoke test.
type Author struct {
	Id string
	Ts int
}
// main indexes a randomly-named test document in Elastic Search and then
// queries the "test" kind back, printing every hit. Requires -ipaddr.
// (The redundant trailing `return` at the end of the function was removed.)
func main() {
	rand.Seed(time.Now().UnixNano())
	flag.Parse()
	if *eip == "" {
		flag.Usage()
		return
	}
	engine := search.Get()
	engine.Init("http://" + *eip + ":9200")
	// Randomize ids so repeated runs create distinct documents.
	r := rand.Intn(100)
	uid := fmt.Sprintf("uid_%d", r)
	var au Author
	au.Id = fmt.Sprintf("mrjn-%d", r)
	au.Ts = r
	doc := x.Doc{Kind: "test", Id: uid, NanoTs: time.Now().UnixNano(), Data: au}
	if err := engine.Update(doc); err != nil {
		fmt.Printf("Error: %v\n", err)
		return
	}
	// Newest first, capped at -num results.
	q := engine.NewQuery("test").Order("-Data.Ts").Limit(*num)
	q.NewAndFilter().AddExact("Data.Id", "mrjn")
	docs, err := q.Run()
	if err != nil {
		fmt.Printf("Error: %v\n", err)
		return
	}
	for _, doc := range docs {
		fmt.Printf("Doc: %+v\n", doc)
	}
}
|
package main
import (
"fmt"
"os"
"errors"
)
// warn formats its arguments per fmt.Fprintf, writes the result to
// stderr, and returns the byte count and any write error.
func warn(format string, a ...interface{}) (n int, err error) {
	n, err = fmt.Fprintf(os.Stderr, format, a...)
	return
}
// croak reports the error on stderr without terminating the process.
func croak(e error) {
	warn("%s\n", e)
}

// die reports the error on stderr and exits with status 1.
func die(e error) {
	croak(e)
	os.Exit(1)
}
// Global options for the checksum tool — presumably populated by flag
// parsing elsewhere in the program (not visible here).
var (
	hashFunction HashValue // selected hash; zero Hash value means "auto-detect"
	cwd          string    // directory to operate in
	dryRun       bool
	silent       bool
)

// preferredHashes is the auto-detection order for checksum files,
// strongest hash first.
var preferredHashes = []string{"SHA512", "SHA1", "MD5"}
// findChecksumFile tries each preferred hash in order and returns the
// first one whose checksum file exists in the current directory, along
// with the opened file.
//
// FIXES: the loop locals no longer shadow the named return values
// (the original's `hash := ...` / `file, err := ...` made the named
// results dead weight), and the error string is lowercased per Go
// error-string convention.
func findChecksumFile() (*HashValue, *os.File, error) {
	for _, tryHash := range preferredHashes {
		candidate := &HashValue{}
		candidate.Set(tryHash)
		f, openErr := os.Open(candidate.Filename())
		if openErr == nil {
			return candidate, f, nil
		}
	}
	return nil, nil, errors.New("no known checksum files found in directory")
}
// setDirAndHashOptions changes into the configured working directory and
// opens the checksum file: either the one matching the hash chosen on
// the command line, or — when no hash was chosen (hashFunction.Hash ==
// 0) — the first preferred hash whose file exists. Any failure is fatal:
// die() exits the process.
func setDirAndHashOptions() (hash *HashValue, checksums *os.File) {
	var err error
	// NOTE(review): this `err :=` shadows the outer err declared above;
	// harmless here only because die never returns.
	if err := os.Chdir(cwd); err != nil {
		die(err)
	}
	if hashFunction.Hash == 0x0 {
		// No explicit hash requested: auto-detect from existing files.
		hash, checksums, err = findChecksumFile()
		if err != nil {
			die(err)
		}
	} else {
		hash = &hashFunction
		checksums, err = os.Open(hash.Filename())
		if err != nil {
			die(err)
		}
	}
	return
}
|
package version
import (
"github.com/Masterminds/semver"
)
// Canonical Taskfile schema versions checked by the Is* predicates.
var (
	v1  = mustVersion("1")
	v2  = mustVersion("2")
	v21 = mustVersion("2.1")
	v22 = mustVersion("2.2")
	v23 = mustVersion("2.3")
)

// IsV1 returns if is a given Taskfile version is version 1
func IsV1(v *semver.Constraints) bool { return v.Check(v1) }

// IsV2 returns if is a given Taskfile version is at least version 2
func IsV2(v *semver.Constraints) bool { return v.Check(v2) }

// IsV21 returns if is a given Taskfile version is at least version 2.1
func IsV21(v *semver.Constraints) bool { return v.Check(v21) }

// IsV22 returns if is a given Taskfile version is at least version 2.2
func IsV22(v *semver.Constraints) bool { return v.Check(v22) }

// IsV23 returns if is a given Taskfile version is at least version 2.3
func IsV23(v *semver.Constraints) bool { return v.Check(v23) }

// mustVersion parses s as a semantic version, panicking on malformed
// input — acceptable because it only runs on the constants above.
func mustVersion(s string) *semver.Version {
	version, parseErr := semver.NewVersion(s)
	if parseErr != nil {
		panic(parseErr)
	}
	return version
}
|
package main
import (
"fmt"
"log"
"net"
)
// main is a minimal UDP echo client: it sends one datagram to
// 127.0.0.1:8080, waits for a single reply, and prints it.
func main() {
	serverAddr := &net.UDPAddr{
		IP:   net.ParseIP("127.0.0.1"),
		Port: 8080,
	}
	udp, err := net.DialUDP("udp", nil, serverAddr)
	if err != nil {
		log.Fatalf("DialUDP error:%v\n", err)
	}
	defer udp.Close()
	// Send the request datagram.
	if _, err = udp.Write([]byte("Hello Server")); err != nil {
		log.Fatalf("Write error:%v\n", err)
	}
	// Wait for the server's reply.
	buf := make([]byte, 1024)
	n, addr, err := udp.ReadFromUDP(buf)
	if err != nil {
		log.Fatalf("ReadFromUDP error:%v\n", err)
	}
	fmt.Printf("[%s] >>> %s\n", addr, string(buf[:n]))
}
|
package authorize
import (
"context"
"sync/atomic"
"time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
"github.com/pomerium/pomerium/internal/log"
"github.com/pomerium/pomerium/internal/sets"
"github.com/pomerium/pomerium/pkg/grpc/databroker"
"github.com/pomerium/pomerium/pkg/grpc/session"
"github.com/pomerium/pomerium/pkg/grpc/user"
)
// Tuning knobs for the access tracker.
const (
	accessTrackerMaxSize        = 1_000            // max distinct IDs buffered between flushes
	accessTrackerDebouncePeriod = 10 * time.Second // how often buffered accesses are flushed
	accessTrackerUpdateTimeout  = 3 * time.Second  // per-record databroker update deadline
)
// A AccessTrackerProvider provides the databroker service client for tracking session access.
type AccessTrackerProvider interface {
	GetDataBrokerServiceClient() databroker.DataBrokerServiceClient
}

// A AccessTracker tracks accesses to sessions and service accounts,
// queuing IDs on channels and flushing them periodically from Run.
type AccessTracker struct {
	provider               AccessTrackerProvider
	sessionAccesses        chan string // buffered queue of accessed session IDs
	serviceAccountAccesses chan string // buffered queue of accessed service account IDs
	maxSize                int
	debouncePeriod         time.Duration
	droppedAccesses        int64 // IDs dropped because a queue was full; accessed atomically
}
// NewAccessTracker creates a new SessionAccessTracker.
// maxSize bounds both intake queues; debouncePeriod controls how often
// Run flushes buffered accesses to the databroker.
func NewAccessTracker(
	provider AccessTrackerProvider,
	maxSize int,
	debouncePeriod time.Duration,
) *AccessTracker {
	t := &AccessTracker{
		provider:       provider,
		maxSize:        maxSize,
		debouncePeriod: debouncePeriod,
	}
	t.sessionAccesses = make(chan string, maxSize)
	t.serviceAccountAccesses = make(chan string, maxSize)
	return t
}
// Run runs the access tracker.
// Run runs the access tracker: it drains the two access channels into
// size-limited sets and, every debouncePeriod, writes a fresh AccessedAt
// timestamp to the databroker for each collected ID. Run returns when
// ctx is canceled.
func (tracker *AccessTracker) Run(ctx context.Context) {
	ticker := time.NewTicker(tracker.debouncePeriod)
	defer ticker.Stop()
	// Debounce buffers: repeated accesses to the same ID within one
	// period collapse into a single databroker update.
	sessionAccesses := sets.NewSizeLimited[string](tracker.maxSize)
	serviceAccountAccesses := sets.NewSizeLimited[string](tracker.maxSize)
	runTrackSessionAccess := func(sessionID string) {
		sessionAccesses.Add(sessionID)
	}
	runTrackServiceAccountAccess := func(serviceAccountID string) {
		serviceAccountAccesses.Add(serviceAccountID)
	}
	runSubmit := func() {
		// Report (and reset) how many accesses were dropped because the
		// intake channels were full.
		if dropped := atomic.SwapInt64(&tracker.droppedAccesses, 0); dropped > 0 {
			log.Error(ctx).
				Int64("dropped", dropped).
				Msg("authorize: failed to track all session accesses")
		}
		client := tracker.provider.GetDataBrokerServiceClient()
		var err error
		// ForEach stops at the first failed update; on failure the
		// buffers are not reset below, so the IDs are retried next tick.
		sessionAccesses.ForEach(func(sessionID string) bool {
			err = tracker.updateSession(ctx, client, sessionID)
			return err == nil
		})
		if err != nil {
			log.Error(ctx).Err(err).Msg("authorize: error updating session last access timestamp")
			return
		}
		serviceAccountAccesses.ForEach(func(serviceAccountID string) bool {
			err = tracker.updateServiceAccount(ctx, client, serviceAccountID)
			return err == nil
		})
		if err != nil {
			log.Error(ctx).Err(err).Msg("authorize: error updating service account last access timestamp")
			return
		}
		// Successful flush: start the next period with empty buffers.
		sessionAccesses = sets.NewSizeLimited[string](tracker.maxSize)
		serviceAccountAccesses = sets.NewSizeLimited[string](tracker.maxSize)
	}
	for {
		select {
		case <-ctx.Done():
			return
		case id := <-tracker.sessionAccesses:
			runTrackSessionAccess(id)
		case id := <-tracker.serviceAccountAccesses:
			runTrackServiceAccountAccess(id)
		case <-ticker.C:
			runSubmit()
		}
	}
}
// TrackServiceAccountAccess tracks a service account access.
// Non-blocking: if the intake queue is full, the access is counted as
// dropped instead of stalling the caller.
func (tracker *AccessTracker) TrackServiceAccountAccess(serviceAccountID string) {
	select {
	case tracker.serviceAccountAccesses <- serviceAccountID:
	default:
		atomic.AddInt64(&tracker.droppedAccesses, 1)
	}
}

// TrackSessionAccess tracks a session access.
// Non-blocking: if the intake queue is full, the access is counted as
// dropped instead of stalling the caller.
func (tracker *AccessTracker) TrackSessionAccess(sessionID string) {
	select {
	case tracker.sessionAccesses <- sessionID:
	default:
		atomic.AddInt64(&tracker.droppedAccesses, 1)
	}
}
// updateServiceAccount stamps AccessedAt = now on the service account
// record, bounded by accessTrackerUpdateTimeout. A NotFound record is
// treated as success (nothing to update).
func (tracker *AccessTracker) updateServiceAccount(
	ctx context.Context,
	client databroker.DataBrokerServiceClient,
	serviceAccountID string,
) error {
	ctx, clearTimeout := context.WithTimeout(ctx, accessTrackerUpdateTimeout)
	defer clearTimeout()
	sa, err := user.GetServiceAccount(ctx, client, serviceAccountID)
	if status.Code(err) == codes.NotFound {
		return nil
	} else if err != nil {
		return err
	}
	sa.AccessedAt = timestamppb.Now()
	_, err = user.PutServiceAccount(ctx, client, sa)
	return err
}

// updateSession stamps AccessedAt = now on the session record, bounded
// by accessTrackerUpdateTimeout. A NotFound session is treated as
// success (nothing to update).
func (tracker *AccessTracker) updateSession(
	ctx context.Context,
	client databroker.DataBrokerServiceClient,
	sessionID string,
) error {
	ctx, clearTimeout := context.WithTimeout(ctx, accessTrackerUpdateTimeout)
	defer clearTimeout()
	s, err := session.Get(ctx, client, sessionID)
	if status.Code(err) == codes.NotFound {
		return nil
	} else if err != nil {
		return err
	}
	s.AccessedAt = timestamppb.Now()
	_, err = session.Put(ctx, client, s)
	return err
}
|
package oss
import (
"bytes"
"crypto/hmac"
"crypto/md5"
"crypto/sha1"
"encoding/base64"
"errors"
"io/ioutil"
"net/http"
"sort"
"strings"
)
// authorization carries everything needed to compute an OSS request
// signature for one HTTP request.
type authorization struct {
	req    *http.Request
	bucket string
	object string // NOTE(review): unused by the methods visible here
	secret []byte // access key secret used as the HMAC key
}
// ContentMD5 is the option for calculating and adding a Content-Md5 header for an HTTP request
func ContentMD5(req *http.Request) error {
if _, ok := req.Header["Content-Md5"]; ok {
return errors.New("Content-Md5 is already set")
}
if req.Body == nil {
return errors.New("Content-Md5 requires non-nil body")
}
buf, _ := ioutil.ReadAll(req.Body)
req.Body = ioutil.NopCloser(bytes.NewReader(buf))
sum := md5.Sum(buf)
b64 := base64.StdEncoding.EncodeToString(sum[:])
req.Header.Set("Content-Md5", b64)
return nil
}
// canonicalizedOSSHeaders builds the sorted "x-oss-*" section of the
// string to sign: one "key:value\n" line per header value, keys
// lowercased and ordered lexicographically.
func (a *authorization) canonicalizedOSSHeaders() []byte {
	var selected kvSlice
	for name, values := range a.req.Header {
		lower := strings.ToLower(name)
		if !strings.HasPrefix(lower, "x-oss-") {
			continue
		}
		for _, v := range values {
			selected = append(selected, kv{lower, v})
		}
	}
	sort.Sort(selected)
	var out bytes.Buffer
	for _, pair := range selected {
		out.WriteString(pair.key)
		out.WriteByte(':')
		out.WriteString(pair.val)
		out.WriteByte('\n')
	}
	return out.Bytes()
}
// data assembles the string-to-sign: method, Content-Md5, Content-Type
// and Date each on their own line, followed by the canonicalized OSS
// headers and the canonicalized resource.
func (a *authorization) data() []byte {
	var buf bytes.Buffer
	for _, line := range []string{
		a.req.Method,
		a.req.Header.Get("Content-Md5"),
		a.req.Header.Get("Content-Type"),
		a.req.Header.Get("Date"),
	} {
		buf.WriteString(line)
		buf.WriteByte('\n')
	}
	buf.Write(a.canonicalizedOSSHeaders())
	buf.WriteString(a.canonicalizedResource())
	return buf.Bytes()
}
// canonicalizedResource builds the "/<bucket><path>[?query]" resource
// component of the string to sign by blanking scheme/host on a copy of
// the request URL and prefixing the bucket onto the path.
func (a *authorization) canonicalizedResource() string {
	uri := *a.req.URL
	uri.Scheme = ""
	uri.Host = ""
	uri.Path = a.bucket + uri.Path
	// With scheme and host cleared, uri.String() yields the path plus
	// any query string.
	return "/" + uri.String()
}
// value returns the final base64 HMAC-SHA1 signature for this request.
func (a *authorization) value() string {
	return hmacSHA1(a.data(), a.secret)
}
// hmacSHA1 computes the HMAC-SHA1 of data keyed with secret and returns
// the digest in standard base64 encoding.
func hmacSHA1(data []byte, secret []byte) string {
	mac := hmac.New(sha1.New, secret)
	mac.Write(data)
	digest := mac.Sum(nil)
	return base64.StdEncoding.EncodeToString(digest)
}
type (
	// kv is one header name/value pair.
	kv struct {
		key string
		val string
	}
	// kvSlice implements sort.Interface, ordering pairs by key.
	kvSlice []kv
)

func (s kvSlice) Len() int           { return len(s) }
func (s kvSlice) Less(i, j int) bool { return s[i].key < s[j].key }
func (s kvSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
|
package telegram
import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"time"

	"github.com/spotify-bot/server/pkg/spotify"
	"github.com/spotify-bot/telegram/internal/config"
)
// getRecentlyPlayed returns the user's currently playing track, falling
// back to the most recently played one when nothing is playing.
func getRecentlyPlayed(userID string) (*spotify.Track, error) {
	if track, err := getCurrentlyPlayingSong(userID); err == nil {
		return track, nil
	}
	return getRecentlyPlayedSong(userID)
}
// getRecentlyPlayedSong fetches the single most recently played track
// from the user's listening history.
func getRecentlyPlayedSong(userID string) (*spotify.Track, error) {
	// Only one history item is needed.
	path := spotify.RecentlyPlayedEndpoint + "?limit=1"
	resp, err := sendRequest("GET", path, userID, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var response spotify.RecentlyPlayedResponse
	if err = json.Unmarshal(body, &response); err != nil {
		return nil, err
	}
	if len(response.Items) < 1 {
		// FIX: lowercased per Go error-string convention (was "Empty track").
		return nil, fmt.Errorf("empty track")
	}
	return &response.Items[0].Track, nil
}
// getCurrentlyPlayingSong fetches the track the user is listening to
// right now. A non-200 status (presumably including "nothing playing")
// is surfaced as a spotify.CallbackError so callers can fall back.
func getCurrentlyPlayingSong(userID string) (*spotify.Track, error) {
	path := spotify.CurrentlyPlayingEndpoint
	resp, err := sendRequest("GET", path, userID, nil)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, spotify.CallbackError{
			spotify.CurrentlyPlayingEndpoint,
			resp.StatusCode,
		}
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var response spotify.CurrentlyPlayingResponse
	if err = json.Unmarshal(body, &response); err != nil {
		return nil, err
	}
	return &response.Track, nil
}
// addSongToQueue appends the given song URI to the user's playback queue.
// FIX: removed the trailing "\n" from the error string — error strings
// should not end with a newline, and playSong's equivalent error has none.
func addSongToQueue(userID string, songURI string) error {
	resp, err := sendRequest("POST", spotify.AddToQueueEndpoint+"?uri="+songURI, userID, nil)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("request error code: %d", resp.StatusCode)
	}
	return nil
}
// playSong starts playback of the given song URI for the user.
// FIX: the JSON payload is now produced by json.Marshal instead of
// string concatenation, so a URI containing quotes or backslashes can
// no longer corrupt the request body.
func playSong(userID string, songURI string) error {
	payload, err := json.Marshal(map[string][]string{"uris": {songURI}})
	if err != nil {
		return err
	}
	resp, err := sendRequest("PUT", spotify.PlaySongEndpoint, userID, bytes.NewBuffer(payload))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("request error code: %d", resp.StatusCode)
	}
	return nil
}
// httpClient is shared by all requests. A single client reuses
// connections, and the timeout keeps a stuck API server from hanging
// bot handlers forever (the original built a timeout-less client per
// call).
var httpClient = &http.Client{Timeout: 30 * time.Second}

// sendRequest issues an HTTP request against the API server's spotify
// proxy for the given telegram user. The caller must close resp.Body.
func sendRequest(method string, path string, userID string, body io.Reader) (*http.Response, error) {
	url := config.AppConfig.APIServerAddress + "/spotify/telegram/" + userID + path
	req, err := http.NewRequest(method, url, body)
	if err != nil {
		return nil, err
	}
	return httpClient.Do(req)
}
|
package model
import (
	"fmt"
	"sort"
	"strings"
)
// ErrorMessage is the JSON error payload returned to API clients.
type ErrorMessage struct {
	Code    int    `json:"code"`
	Message string `json:"message"`
	Details string `json:"detail"`
}

// Error codes understood by the API.
const (
	ErrorCodeParameter = iota
	ErrorCodeReadBody
	ErrorCodeUnmarshalJSON
)

// ErrorCodes maps each code to its human-readable message.
var ErrorCodes = map[int]string{
	ErrorCodeParameter:     "Parameter error",
	ErrorCodeReadBody:      "Read body error",
	ErrorCodeUnmarshalJSON: "Unmarshal JSON error",
}

// NewErrorMessage builds an ErrorMessage for code, attaching err's text
// as the detail when err is non-nil.
func NewErrorMessage(code int, err error) *ErrorMessage {
	detail := ""
	if err != nil {
		detail = err.Error()
	}
	return &ErrorMessage{Code: code, Message: ErrorCodes[code], Details: detail}
}
// NewCodeErrorMarkDownDoc renders errorCodes as a markdown reference
// table, one row per code in ascending order.
//
// FIXES: ranging directly over the map produced a randomly ordered
// table on every call (Go map iteration order is unspecified), so codes
// are now sorted first; string += in a loop was replaced with a
// strings.Builder.
func NewCodeErrorMarkDownDoc(errorCodes map[int]string) string {
	codes := make([]int, 0, len(errorCodes))
	for code := range errorCodes {
		codes = append(codes, code)
	}
	sort.Ints(codes)
	var doc strings.Builder
	doc.WriteString("**Error code reference table**\n")
	doc.WriteString("| code | message |\n")
	doc.WriteString("| --- | --- |\n")
	for _, code := range codes {
		doc.WriteString(fmt.Sprintf("| %d | %s |\n", code, errorCodes[code]))
	}
	return doc.String()
}
|
package main
import (
"fmt"
"github.com/jinzhu/gorm"
_ "github.com/lib/pq"
)
// Local development database connection settings.
// NOTE(review): hard-coded credentials — acceptable for a dev sandbox,
// but move to env/config before any shared deployment.
const (
	host     = "localhost"
	port     = 5432
	user     = "postgres"
	password = "temppassword"
	dbname   = "priva_dev"
)
func main() {
// Creating the connection string.
psqlInfo := fmt.Sprintf("host=%s port=%d user=%s "+
"password=%s dbname=%s sslmode=disable",
host, port, user, password, dbname)
// Connecting to database
us, err := models.NewUserService(psqlInfo)
if err != nil {
panic(err)
}
// Defer close and destroy/rebuild database.
defer us.Close()
us.DestructiveReset
// Lookup user with id of 1.
user, err := us.ByID(1)
if err != nil {
panic(err)
}
// Print user to console.
fmt.Println(user)
} |
// This file was generated for SObject ProcessInstanceHistory, API Version v43.0 at 2018-07-30 03:47:22.162998645 -0400 EDT m=+8.505924250
package sobjects
import (
"fmt"
"strings"
)
// ProcessInstanceHistory is the generated representation of the
// Salesforce ProcessInstanceHistory SObject (API v43.0). Fields use the
// `force` tag with omitempty.
type ProcessInstanceHistory struct {
	BaseSObject
	ActorId              string  `force:",omitempty"`
	Comments             string  `force:",omitempty"`
	CreatedById          string  `force:",omitempty"`
	CreatedDate          string  `force:",omitempty"`
	ElapsedTimeInDays    float64 `force:",omitempty"`
	ElapsedTimeInHours   float64 `force:",omitempty"`
	ElapsedTimeInMinutes float64 `force:",omitempty"`
	Id                   string  `force:",omitempty"`
	IsDeleted            bool    `force:",omitempty"`
	IsPending            bool    `force:",omitempty"`
	OriginalActorId      string  `force:",omitempty"`
	ProcessInstanceId    string  `force:",omitempty"`
	ProcessNodeId        string  `force:",omitempty"`
	RemindersSent        int     `force:",omitempty"`
	StepStatus           string  `force:",omitempty"`
	SystemModstamp       string  `force:",omitempty"`
	TargetObjectId       string  `force:",omitempty"`
}
// ApiName returns the Salesforce API name of this SObject type.
func (t *ProcessInstanceHistory) ApiName() string {
	return "ProcessInstanceHistory"
}
// String renders the record as a multi-line, human-readable dump of all
// generated fields.
func (t *ProcessInstanceHistory) String() string {
	builder := strings.Builder{}
	// NOTE(review): t.Name is not declared on this struct — presumably
	// promoted from the embedded BaseSObject; confirm it exists there.
	builder.WriteString(fmt.Sprintf("ProcessInstanceHistory #%s - %s\n", t.Id, t.Name))
	builder.WriteString(fmt.Sprintf("\tActorId: %v\n", t.ActorId))
	builder.WriteString(fmt.Sprintf("\tComments: %v\n", t.Comments))
	builder.WriteString(fmt.Sprintf("\tCreatedById: %v\n", t.CreatedById))
	builder.WriteString(fmt.Sprintf("\tCreatedDate: %v\n", t.CreatedDate))
	builder.WriteString(fmt.Sprintf("\tElapsedTimeInDays: %v\n", t.ElapsedTimeInDays))
	builder.WriteString(fmt.Sprintf("\tElapsedTimeInHours: %v\n", t.ElapsedTimeInHours))
	builder.WriteString(fmt.Sprintf("\tElapsedTimeInMinutes: %v\n", t.ElapsedTimeInMinutes))
	builder.WriteString(fmt.Sprintf("\tId: %v\n", t.Id))
	builder.WriteString(fmt.Sprintf("\tIsDeleted: %v\n", t.IsDeleted))
	builder.WriteString(fmt.Sprintf("\tIsPending: %v\n", t.IsPending))
	builder.WriteString(fmt.Sprintf("\tOriginalActorId: %v\n", t.OriginalActorId))
	builder.WriteString(fmt.Sprintf("\tProcessInstanceId: %v\n", t.ProcessInstanceId))
	builder.WriteString(fmt.Sprintf("\tProcessNodeId: %v\n", t.ProcessNodeId))
	builder.WriteString(fmt.Sprintf("\tRemindersSent: %v\n", t.RemindersSent))
	builder.WriteString(fmt.Sprintf("\tStepStatus: %v\n", t.StepStatus))
	builder.WriteString(fmt.Sprintf("\tSystemModstamp: %v\n", t.SystemModstamp))
	builder.WriteString(fmt.Sprintf("\tTargetObjectId: %v\n", t.TargetObjectId))
	return builder.String()
}
// ProcessInstanceHistoryQueryResponse is the SOQL query response wrapper
// whose Records hold ProcessInstanceHistory rows.
type ProcessInstanceHistoryQueryResponse struct {
	BaseQuery
	Records []ProcessInstanceHistory `json:"Records" force:"records"`
}
|
package main
import (
"flag"
"fmt"
"github.com/justfallingup/gocore/hw03-gosearch01/pkg/crawler"
"github.com/justfallingup/gocore/hw03-gosearch01/pkg/crawler/spider"
"log"
"strings"
)
// main crawls the hard-coded site list to a fixed depth and prints every
// document whose title contains the -s search term (case-insensitive).
func main() {
	token := flag.String("s", "", "a word you're searching for")
	flag.Parse()
	urls := []string{
		"https://go.dev",
		"https://golang.org",
	}
	if *token == "" {
		// Nothing to search for.
		return
	}
	s := spider.New()
	const depth = 2
	var docs []crawler.Document
	for _, u := range urls {
		res, err := s.Scan(u, depth)
		if err != nil {
			// FIX: include the actual error — the original logged only a
			// generic "scanner error" with no cause.
			log.Println("scanner error:", err)
			continue
		}
		docs = append(docs, res...)
	}
	fmt.Println("Search results:")
	for _, d := range docs {
		if strings.Contains(strings.ToLower(d.Title), strings.ToLower(*token)) {
			fmt.Println(d.URL, d.Title)
		}
	}
}
|
package admin
import (
"fmt"
"net/http"
)
// Status is a liveness endpoint: it always answers 200 "I am alive".
func (s *Service) Status(writer http.ResponseWriter, request *http.Request) {
	writer.WriteHeader(http.StatusOK)
	// NOTE(review): stray debug print — consider removing it (and the
	// fmt import, if it then becomes unused).
	fmt.Println("test")
	_, _ = writer.Write([]byte("I am alive"))
}
|
package qzxing
/*
#cgo CPPFLAGS: -DQZXING_QML -I ${SRCDIR}/qzxing/src/zxing
#cgo darwin,amd64,!ios LDFLAGS: -L ${SRCDIR}/qzxing/src/darwin
#cgo linux,amd64 LDFLAGS: -L ${SRCDIR}/qzxing/src/linux
#cgo windows,amd64 LDFLAGS: -L ${SRCDIR}/qzxing/src/windows
#cgo ios LDFLAGS: -L ${SRCDIR}/qzxing/src/ios
#cgo android,arm LDFLAGS: -L ${SRCDIR}/qzxing/src/android
#cgo android,386 LDFLAGS: -L ${SRCDIR}/qzxing/src/android_emulator
#cgo LDFLAGS: -lQZXing
#include "qzxing.h"
*/
import "C"
import (
"github.com/therecipe/qt/core"
"github.com/therecipe/qt/qml"
)
type stub struct{ core.QObject } //needed to make QtCore available to qzxing/src/QZXing.h
func RegisterQMLTypes() {
C.QZXing_registerQMLTypes()
}
// RegisterQMLImageProvider installs QZXing's QML image provider on the
// given engine (unwrapped to its raw pointer for the C side).
func RegisterQMLImageProvider(engine qml.QQmlEngine_ITF) {
	C.QZXing_registerQMLImageProvider(engine.QQmlEngine_PTR().Pointer())
}
|
package main
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
"strings"
"time"
)
// Card is a single playing card identified by its suit and face value.
type Card struct {
	suit, value string
}
// deck is a slice of Cards with helpers for printing, (de)serialization
// to file, dealing, and shuffling.
type deck []Card
// newDeck builds the 16-card demo deck: 4 suits x 4 values, ordered
// suit-major to match the file-serialization round trip.
func newDeck() deck {
	suits := []string{"Spades", "Diamonds", "Clubs", "Hearts"}
	values := []string{"Ace", "Two", "Three", "Four"}
	d := make(deck, 0, len(suits)*len(values))
	for _, s := range suits {
		for _, v := range values {
			d = append(d, Card{suit: s, value: v})
		}
	}
	return d
}
// deal splits d into a hand of the first handsize cards and the rest.
// Both returned decks alias d's backing array.
func deal(d deck, handsize int) (deck, deck) {
	return d[:handsize], d[handsize:]
}
// print writes each card as "<index> <value> of <suit>" to stdout.
func (d deck) print() {
	for i, c := range d {
		fmt.Printf("%v %s of %s\n", i, c.value, c.suit)
	}
}
// toString serializes the deck as comma-separated "<value> of <suit>"
// entries — the exact format deckFromFile parses back.
func (d deck) toString() string {
	s := make([]string, 0, len(d))
	for _, c := range d {
		s = append(s, c.value+" of "+c.suit)
	}
	// The old []string(s) conversion was a no-op; s already is []string.
	return strings.Join(s, ",")
}
// deckToFile persists the deck to filename in toString's comma format
// with 0666 permissions; the write error (if any) is returned to the caller.
func (d deck) deckToFile(filename string) error {
	return ioutil.WriteFile(filename, []byte(d.toString()), 0666)
}
// deckFromFile loads a deck previously written by deckToFile. On a read
// error it logs and exits the process (script-style behavior, unchanged).
func deckFromFile(filename string) deck {
	content, err := ioutil.ReadFile(filename)
	if err != nil {
		//log the error and exit
		fmt.Println("Error:", err)
		os.Exit(1)
	}
	cards := deck{}
	s := strings.Split(string(content), ",")
	for _, c := range s {
		parts := strings.Split(c, " of ")
		if len(parts) != 2 {
			// Skip malformed entries (e.g. an empty file yields one empty
			// token); previously this indexed parts[1] and panicked.
			continue
		}
		// toString writes "<value> of <suit>", so parts is [value, suit].
		cards = append(cards, Card{parts[1], parts[0]})
	}
	return cards
}
// shuffle randomizes the deck in place with a Fisher-Yates shuffle,
// seeded from the current time.
func (d deck) shuffle() {
	source := rand.NewSource(time.Now().UnixNano())
	r := rand.New(source)
	// Walk from the end, swapping with a uniformly-chosen position at or
	// before i. The old r.Intn(len(d)-1) could never select the last
	// index (biased shuffle) and panicked for a one-card deck.
	for i := len(d) - 1; i > 0; i-- {
		j := r.Intn(i + 1)
		d[i], d[j] = d[j], d[i]
	}
}
|
package golden
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path/filepath"
"regexp"
"strings"
"testing"
"github.com/fatih/color"
"github.com/pmezard/go-difflib/difflib"
)
var (
// Extension that is added to the name of the input file to identify the
// matching golden file.
Extension = ".golden"
// BasePath is put in front of paths passed to any of the Dir* functions.
BasePath = "."
// ChannelSize used by Dir() is arbitrary ┐( ̄ヘ ̄)┌
ChannelSize = 32
)
// Dir returns a Case channel from a given directory.
//
// See ChannelSize for the channel size to be used.
// Any errors while walking the file system will fail and are not ignored.
func Dir(t *testing.T, path string) <-chan Case {
	path = filepath.Join(BasePath, path)
	// Buffer the channel as documented; it was previously unbuffered,
	// which made the ChannelSize setting dead configuration.
	ch := make(chan Case, ChannelSize)
	walker := func(path string, info os.FileInfo, err error) error {
		must(t, err)
		// Only regular files that are not themselves golden files become cases.
		if info.Mode().IsRegular() && !strings.HasSuffix(path, Extension) {
			ch <- NewCase(t, path)
		}
		return nil
	}
	go func() {
		must(t, filepath.Walk(path, walker))
		close(ch)
	}()
	return ch
}
// TestDir calls fn with each Case in path. Each Case is bound to a sub test
// named after the input file.
func TestDir(t *testing.T, path string, fn func(Case)) {
	for tc := range Dir(t, path) {
		tc.Test(fn)
	}
}
// DirSlice returns a Case slice from a given directory.
//
// Any errors while walking the file system will fail and are not ignored.
// It drains the Dir channel, so it blocks until the walk completes.
func DirSlice(t *testing.T, path string) []Case {
	sl := []Case{}
	for c := range Dir(t, path) {
		sl = append(sl, c)
	}
	return sl
}
// File provides read/write access to test files (either the input file
// or its matching golden file).
type File struct {
	Case *Case // The case this file belongs to.
	Path string
}
// newFile binds a path to its owning Case.
func newFile(c *Case, path string) File {
	return File{c, path}
}
// Update the file by writing b to it, logging a diff against the
// previous content when a *testing.T is attached.
func (f File) Update(b []byte) {
	if f.Case.T != nil {
		f.Case.T.Logf("updating golden file: %s", f.Path)
	}
	before := []byte{}
	if f.Exists() {
		before = f.Bytes()
	}
	if f.Case.T != nil {
		// Guard matches the Logf above; previously this Log call was
		// unconditional and dereferenced a nil T.
		f.Case.T.Log(diff(f.Case.T, before, b))
	}
	must(f.Case.T, ioutil.WriteFile(f.Path, b, 0644))
}
// Reader returns a ReadCloser.
//
// This is basically os.File: remember to call Close(), especially if you have
// many files or read them multiple times. An open failure fails the test.
func (f File) Reader() io.ReadCloser {
	fr, err := os.Open(f.Path)
	must(f.Case.T, err)
	return fr
}
// Bytes returns the content as a byte slice.
//
// It will fail when the file could not be read.
func (f File) Bytes() []byte {
	b, err := ioutil.ReadFile(f.Path)
	must(f.Case.T, err)
	return b
}
// String returns content as a string.
//
// It will fail when the file could not be read (see Bytes).
func (f File) String() string {
	return string(f.Bytes())
}
// Split the file into a string slice using separator sep.
// The separator may optionally be surrounded by one (CR)LF on either
// side, which is consumed along with it.
func (f File) Split(sep string) []string {
	pat := fmt.Sprintf("\r?\n{0,1}%s\r?\n{0,1}", regexp.QuoteMeta(sep))
	re := regexp.MustCompile(pat)
	return re.Split(f.String(), -1)
}
// Exists reports whether the file is stat-able; any stat error
// (including permission errors) counts as "does not exist".
func (f File) Exists() bool {
	_, err := os.Stat(f.Path)
	return err == nil
}
// Case provides input and expected output for a single test case.
type Case struct {
	In  File // the input file
	Out File // the matching golden file (input path + Extension)
	T   *testing.T
}
// NewCase returns a Case based on the given input file; the golden file
// path is derived by appending Extension.
func NewCase(t *testing.T, path string) Case {
	c := Case{T: t}
	c.In = newFile(&c, path)
	c.Out = newFile(&c, path+Extension)
	return c
}
// Diff the given actual string with the expected content of c.Out.
// Fails a test if contents are different, reporting a unified diff.
func (c Case) Diff(actual string) {
	exp := c.Out.Bytes()
	act := []byte(actual)
	if !bytes.Equal(exp, act) {
		must(c.T, errors.New(diff(c.T, exp, act)))
	}
}
// Test runs fn in a sub test named after the input file. The Case is
// copied so the sub test's *testing.T replaces the parent's.
func (c Case) Test(fn func(Case)) {
	c.T.Run(c.In.Path, func(t *testing.T) {
		tc := c
		tc.T = t
		fn(tc)
	})
}
// diff renders a colorized unified diff between expected and actual
// content, prefixed with byte/line deltas. Verbose test runs get more
// context lines.
func diff(t *testing.T, exp, act []byte) string {
	context := 1
	if testing.Verbose() {
		context = 3
	}
	a := difflib.SplitLines(string(exp))
	b := difflib.SplitLines(string(act))
	ud := difflib.UnifiedDiff{
		A:        a,
		B:        b,
		Context:  context,
		FromFile: "Expected",
		ToFile:   "Actual",
	}
	diff, err := difflib.GetUnifiedDiffString(ud)
	// Check the error before touching the diff text; it was previously
	// checked only after the colorizing loop below had already used it.
	must(t, err)
	lines := difflib.SplitLines(diff)
	for i, line := range lines {
		if line == "" {
			// Guard: indexing line[0] on an empty line would panic.
			continue
		}
		switch line[0] {
		case '+':
			line = color.GreenString("%s", line)
		case '-':
			line = color.RedString("%s", line)
		case '@':
			line = color.YellowString("%s", line)
		}
		lines[i] = line
	}
	return fmt.Sprintf(
		"Bytes/Lines: %+d/%+d\n%s",
		len(act)-len(exp),
		len(b)-len(a),
		strings.Join(lines, ""),
	)
}
// must call t.Error or log.Println
func must(t *testing.T, err error) {
if err == nil {
return
}
if t == nil {
log.Println(err)
}
t.Error(err)
}
|
/*
Copyright 2016 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package osd for the Ceph OSDs.
package osd
import (
cephosd "github.com/rook/rook/pkg/ceph/osd"
"k8s.io/api/core/v1"
)
// StorageSpec CRD settings: cluster-wide storage selection, with
// per-node overrides in Nodes and embedded Selection/Config defaults.
type StorageSpec struct {
	Nodes       []Node `json:"nodes,omitempty"`
	UseAllNodes bool   `json:"useAllNodes,omitempty"`
	Selection
	Config
}
// Node specific CRD settings, overriding the cluster-wide StorageSpec
// for a single named node.
type Node struct {
	Name      string                  `json:"name,omitempty"`
	Devices   []Device                `json:"devices,omitempty"`
	Resources v1.ResourceRequirements `json:"resources,omitempty"`
	Selection
	Config
}
// Device CRD settings: a storage device referenced by name.
type Device struct {
	Name string `json:"name,omitempty"`
}
// Directory CRD settings: a filesystem path to use for OSD storage.
type Directory struct {
	Path string `json:"path,omitempty"`
}
// Selection CRD settings controlling which devices/directories are consumed.
type Selection struct {
	// Whether to consume all the storage devices found on a machine
	UseAllDevices *bool `json:"useAllDevices,omitempty"`
	// A regular expression to allow more fine-grained selection of devices on nodes across the cluster
	DeviceFilter string `json:"deviceFilter,omitempty"`
	// MetadataDevice names a device dedicated to OSD metadata.
	MetadataDevice string      `json:"metadataDevice,omitempty"`
	Directories    []Directory `json:"directories,omitempty"`
}
// Config CRD settings: the Ceph store configuration and CRUSH location.
type Config struct {
	StoreConfig cephosd.StoreConfig `json:"storeConfig,omitempty"`
	Location    string              `json:"location,omitempty"`
}
|
/*
Write a function that takes an IP address and returns the domain name using PTR DNS records.
Example
get_domain("8.8.8.8") ➞ "dns.google"
get_domain("8.8.4.4") ➞ "dns.google"
Notes
You may want to import socket.
Don't cheat and just print the domain name, you need to make a real DNS request.
Return as a string.
*/
package main
import (
"fmt"
"net"
"strings"
)
// main demonstrates reverse (PTR) lookups on Google's public DNS
// anycast addresses; both are expected to print "dns.google".
func main() {
	fmt.Println(domain("8.8.8.8"))
	fmt.Println(domain("8.8.4.4"))
}
// domain resolves h via a reverse (PTR) DNS lookup and returns the first
// name with its trailing dot stripped; "" on failure.
func domain(h string) string {
	p, err := net.LookupAddr(h)
	if err != nil || len(p) == 0 {
		// The len guard protects against a successful lookup returning an
		// empty slice, which previously panicked on p[0].
		return ""
	}
	return strings.TrimSuffix(p[0], ".")
}
|
package main
import (
"fmt"
"time"
)
// main prints an incrementing counter forever, ten times a second.
// (The OMIT markers delimit the slide excerpt for go present.)
func main() {
	// START OMIT
	i := 0
	for {
		i++
		fmt.Printf("%d\n", i)
		time.Sleep(100 * time.Millisecond)
	}
	// END OMIT
}
|
package queue
import (
"context"
"encoding/base64"
"github.com/go-redis/redis"
"github.com/pkg/errors"
)
// Ensure RedisAdapter implements Queue.
var _ Queue = (*RedisAdapter)(nil)
// NewRedisAdapter creates a new RedisAdapter.
// It panics on a nil client — this is a programming error, not a
// runtime condition.
func NewRedisAdapter(c *redis.Client) *RedisAdapter {
	if c == nil {
		panic("nil queue client")
	}
	return &RedisAdapter{
		c:      c,
		unread: make(map[string][][]byte),
	}
}
// RedisAdapter for a Redis client to implement the Queue interface.
type RedisAdapter struct {
	c *redis.Client
	// unread buffers messages already popped from Redis but not yet
	// handed out by Pull, keyed by channel name.
	unread map[string][][]byte
}
// bytesToString encodes raw bytes as standard base64 text, the wire
// representation used for Redis list entries.
func bytesToString(b []byte) string {
	encoded := base64.StdEncoding.EncodeToString(b)
	return encoded
}
// stringToBytes decodes a standard-base64 string back into raw bytes,
// wrapping any decode failure with context.
func stringToBytes(s string) ([]byte, error) {
	b, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return nil, errors.Wrap(err, "unable to decode string into bytes")
	}
	return b, nil
}
// stringsToMultiBytes attempts to base64 decode all the strings in s.
//
// Invalid strings will be dropped.
//
// Only the first error encountered will be returned, if there is one.
func stringsToMultiBytes(s []string) ([][]byte, error) {
	out := make([][]byte, 0, len(s))
	var err error
	for _, v := range s {
		b, e := stringToBytes(v)
		if e != nil {
			if err == nil {
				err = errors.WithStack(e)
			}
			// Actually drop the invalid entry; the old code appended the
			// nil result anyway, contradicting the doc comment above.
			continue
		}
		out = append(out, b)
	}
	return out, err
}
// bytesToInterfaces base64-encodes each byte slice and boxes it as an
// interface{}, the argument shape redis RPush expects.
func bytesToInterfaces(b [][]byte) []interface{} {
	out := make([]interface{}, len(b))
	for i, v := range b {
		out[i] = bytesToString(v)
	}
	return out
}
// Push pushes a number of messages to a queue.
//
// This function is NOT thread-safe.
func (r *RedisAdapter) Push(ctx context.Context, channel string, data [][]byte) error {
	// TODO: Handle message trace from ctx.
	if len(data) == 0 {
		return nil
	}
	client := r.c.WithContext(ctx)
	messages := bytesToInterfaces(data)
	err := client.RPush(channel, messages...).Err()
	return errors.Wrapf(err, "error pushing to Redis list \"%s\"", channel)
}
// Pull pulls values from the queue in Redis.
//
// If there is an error, nil will be returned for the bytes. Subsequent calls
// will return the retrieved values, if there were any.
//
// NOTE(review): the claim below looks wrong — r.unread is a plain map
// mutated without synchronization, so concurrent Pull calls race.
// Confirm callers serialize access or add a mutex.
//
// This function is thread-safe.
func (r *RedisAdapter) Pull(ctx context.Context, channel string) ([]byte, error) {
	// TODO: Handle message trace from ctx.
	// getUnread pops the oldest buffered message for this channel, if any.
	getUnread := func() []byte {
		unread := r.unread[channel]
		if len(unread) > 0 {
			out := unread[0]
			r.unread[channel] = unread[1:]
			return out
		}
		return nil
	}
	if tmp := getUnread(); tmp != nil {
		return tmp, nil
	}
	client := r.c.WithContext(ctx)
	// Block forever since context cancellation can be used.
	values, err := client.BLPop(0, channel).Result()
	if err != nil {
		return nil, errors.Wrapf(err, "error reading from Redis list \"%s\"", channel)
	}
	// Ignore the channel name, which BLPop returns as the first element.
	if len(values) >= 1 {
		values = values[1:]
	}
	// Decode everything popped and stage it in the unread buffer before
	// handing out the first message.
	b, err := stringsToMultiBytes(values)
	r.unread[channel] = append(r.unread[channel], b...)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	return getUnread(), nil
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
"strings"
)
func readdata(fname string) (lines []string) {
f, err := os.Open(fname)
if err != nil {
log.Fatalf("Error opening dataset '%s': %s", fname, err)
}
defer f.Close()
scanner := bufio.NewScanner(f)
scanner.Split(bufio.ScanLines)
for scanner.Scan() {
lines = append(lines, strings.Trim(scanner.Text(), " "))
}
if err := scanner.Err(); err != nil {
fmt.Fprintln(os.Stderr, "reading standard input:", err)
}
return lines
}
func parsedata(lines []string) (cubes map[string]bool) {
cubes = make(map[string]bool)
corr := 0
for y, line := range lines {
for x, v := range line {
fmt.Printf("x:%d y:%d ? %s - %s\n", x, y, string(v), coord2String([]int{x - corr, y - corr, 0}))
if string(v) == active {
cubes[coord2String([]int{x - corr, y - corr, 0})] = true
} else {
cubes[coord2String([]int{x - corr, y - corr, 0})] = false
}
}
}
return cubes
}
|
package ilock
type IQueue interface {
	/*
		Lock acquires the queue lock.
		timeSleep: interval in milliseconds between acquisition attempts.
		timeOut: deadline in milliseconds after which acquisition gives up.
		Returns nil on success, an error on failure.
	*/
	Lock(timeSleep int, timeOut int64) error
	/*
		UnLock releases the queue lock.
		Returns nil on success, an error on failure.
	*/
	UnLock() error
	/*
		GetId returns the queue ID.
	*/
	GetId() int64
	/*
		GetKey returns the queue key.
	*/
	GetKey() string
	/*
		GetTimeOut returns the lock's time-to-live in milliseconds.
	*/
	GetTimeOut() int64
}
|
package dushengchen
/*
question:
https://leetcode.com/problems/reverse-integer/
Submission:
https://leetcode.com/submissions/detail/231980096/
*/
// reverse returns x with its decimal digits reversed, or 0 when the
// result would overflow a signed 32-bit integer (LeetCode 7 contract).
// Negative input reverses the magnitude and restores the sign.
func reverse(x int) int {
	if x == 0 {
		return 0
	}
	if x < 0 {
		if x == math.MinInt64 {
			// -x overflows for MinInt64, which previously recursed forever.
			return 0
		}
		return -reverse(-x)
	}
	max := 1<<31 - 1
	res := 0
	for x > 0 {
		res = res*10 + x%10
		x /= 10
		if res > max {
			return 0
		}
	}
	return res
}
|
package cinii
import (
"net/url"
"testing"
)
// TestSearchBooks issues a live CiNii books query for "ソフトウェア"
// (count=5) against the package-level client and verifies the result count.
// NOTE(review): depends on network access and live service data.
func TestSearchBooks(t *testing.T) {
	q := url.Values{}
	q.Set("q", "ソフトウェア")
	q.Set("type", "0")
	q.Set("sortorder", "3")
	q.Set("count", "5")
	resobj, err := client.SearchBooks(q)
	if err != nil {
		t.Error("Failed to get Response:", err)
		return
	}
	if len(resobj.Graph[0].Items) != 5 {
		t.Error("Wrong number of received articles, expected 5 but:", len(resobj.Graph[0].Items))
		return
	}
}
|
package utils
import (
"encoding/json"
"errors"
"fmt"
"github.com/shopspring/decimal"
"regexp"
"math"
"strconv"
"strings"
"time"
)
// ToString renders x as a string: bools become "1"/"0", numbers their
// decimal text, maps/slices their JSON, nil and unknown types the empty
// string.
func ToString(x interface{}) string {
	switch v := x.(type) {
	case bool:
		if v {
			return "1"
		}
		return "0"
	case int:
		return strconv.Itoa(v)
	case int32:
		return strconv.Itoa(int(v))
	case int64:
		return strconv.FormatInt(v, 10)
	case float64:
		return strconv.FormatFloat(v, 'f', -1, 64)
	case string:
		return v
	case []byte:
		return string(v)
	case map[string]interface{}:
		b, _ := json.Marshal(v)
		return string(b)
	case []map[string]interface{}:
		b, _ := json.Marshal(v)
		return string(b)
	case []string:
		b, _ := json.Marshal(v)
		return string(b)
	}
	return ""
}
// ToStringNoPoint renders x like ToString, but float64s pass through
// RemoveZero (all-zero fractions stripped), []string joins with commas,
// numeric/interface slices become JSON via ToJson, and times format as
// "2006-01-02 15:04:05". Surrounding spaces are trimmed from the result.
func ToStringNoPoint(x interface{}) string {
	var v2 string
	switch v := x.(type) {
	case bool:
		if true == x {
			v2 = "1"
		} else {
			v2 = "0"
		}
	case int:
		v2 = strconv.Itoa(v)
	case int32:
		v2 = strconv.Itoa(int(v))
	case int64:
		v2 = strconv.FormatInt(v, 10)
	case float64:
		v2 = RemoveZero(v)
	case string:
		v2 = v
	case []byte:
		v2 = string(v)
	case map[string]interface{}:
		v2b, _ := json.Marshal(v)
		v2 = string(v2b)
	case *map[string]interface{}:
		v2b, _ := json.Marshal(v)
		v2 = string(v2b)
	case []map[string]interface{}:
		v2b, _ := json.Marshal(v)
		v2 = string(v2b)
	case []string:
		v2 = strings.Join(v, ",")
	case []float64:
		tmp := []string{}
		for _, f := range v {
			tmp = append(tmp, ToStringNoPoint(f))
		}
		v2 = ToJson(tmp)
	case []interface{}:
		tmp := []string{}
		for _, v3 := range v {
			tmp = append(tmp, ToStringNoPoint(v3))
		}
		v2 = ToJson(tmp)
	case time.Time:
		v2 = v.Format("2006-01-02 15:04:05")
	case time.Duration:
		v2 = v.String()
	case nil:
		v2 = ""
	}
	return strings.Trim(v2, " ")
}
// ToFloat64 coerces common scalar and textual types to float64;
// unparsable strings, nil, and unknown types yield 0.
func ToFloat64(x interface{}) float64 {
	switch v := x.(type) {
	case bool:
		if v {
			return 1
		}
		return 0
	case int:
		return float64(v)
	case int32:
		return float64(v)
	case int64:
		return float64(v)
	case float32:
		return float64(v)
	case float64:
		return v
	case string:
		parsed, _ := strconv.ParseFloat(v, 64)
		return parsed
	case []byte:
		parsed, _ := strconv.ParseFloat(string(v), 64)
		return parsed
	}
	return 0.0
}
// ToFloat32 coerces common scalar and textual types to float32;
// unparsable strings, nil, and unknown types yield 0.
func ToFloat32(x interface{}) float32 {
	var v2 float32
	switch v := x.(type) {
	case bool:
		if v {
			v2 = 1
		} else {
			v2 = 0
		}
	case int:
		v2 = float32(v)
	case int32:
		v2 = float32(v)
	case int64:
		v2 = float32(v)
	case float32:
		v2 = v
	case float64:
		v2 = float32(v)
	case string:
		// v is already a string; the old string(v) was a redundant
		// self-conversion.
		vTmp, _ := strconv.ParseFloat(v, 64)
		v2 = float32(vTmp)
	case []byte:
		vTmp, _ := strconv.ParseFloat(string(v), 64)
		v2 = float32(vTmp)
	case nil:
		v2 = float32(0.0)
	}
	return v2
}
// ToInt coerces common scalar and textual types to int (floats
// truncate toward zero); unparsable strings, nil, and unknown types
// yield 0.
func ToInt(x interface{}) int {
	switch v := x.(type) {
	case bool:
		if v {
			return 1
		}
		return 0
	case int:
		return v
	case int32:
		return int(v)
	case int64:
		return int(v)
	case float32:
		return int(v)
	case float64:
		return int(v)
	case string:
		parsed, _ := strconv.Atoi(v)
		return parsed
	case []byte:
		parsed, _ := strconv.Atoi(string(v))
		return parsed
	}
	return 0
}
// ToInt16 coerces common scalar and textual types to int16; values
// outside the int16 range wrap silently, and unparsable strings, nil,
// and unknown types yield 0.
func ToInt16(x interface{}) int16 {
	var v2 int16
	switch v := x.(type) {
	case bool:
		if v {
			v2 = 1
		} else {
			v2 = 0
		}
	case int:
		v2 = int16(v)
	case int16:
		// Was int16(v): a redundant self-conversion.
		v2 = v
	case int32:
		v2 = int16(v)
	case int64:
		v2 = int16(v)
	case float32:
		v2 = int16(v)
	case float64:
		v2 = int16(v)
	case string:
		t, _ := strconv.Atoi(v)
		v2 = int16(t)
	case []byte:
		t, _ := strconv.Atoi(string(v))
		v2 = int16(t)
	case nil:
		v2 = int16(0)
	}
	return v2
}
// ToUint16 coerces common scalar and textual types to uint16;
// unparsable strings, nil, and unknown types yield 0.
// NOTE(review): negative and out-of-range numeric inputs wrap silently.
func ToUint16(x interface{}) uint16 {
	var v2 uint16
	switch v := x.(type) {
	case bool:
		if true == x {
			v2 = 1
		} else {
			v2 = 0
		}
	case int:
		v2 = uint16(v)
	case int16:
		v2 = uint16(v)
	case int32:
		v2 = uint16(v)
	case int64:
		v2 = uint16(v)
	case float32:
		v2 = uint16(v)
	case float64:
		v2 = uint16(v)
	case string:
		t, _ := strconv.Atoi(v)
		v2 = uint16(t)
	case []byte:
		t, _ := strconv.Atoi(string(v))
		v2 = uint16(t)
	case nil:
		v2 = uint16(0)
	}
	return v2
}
// ToInt64 coerces common scalar and textual types to int64; unparsable
// strings, nil, and unknown types yield 0.
func ToInt64(x interface{}) int64 {
	var v2 int64
	switch v := x.(type) {
	case bool:
		if v {
			v2 = int64(1)
		} else {
			v2 = 0
		}
	case int:
		v2 = int64(v)
	case int32:
		v2 = int64(v)
	case int64:
		// Was int64(v): a redundant self-conversion.
		v2 = v
	case float32:
		v2 = int64(v)
	case float64:
		v2 = int64(v)
	case []byte:
		// ParseInt with an explicit 64-bit size replaces Atoi, whose
		// range is platform-dependent (int may be 32 bits).
		v2, _ = strconv.ParseInt(string(v), 10, 64)
	case string:
		v2, _ = strconv.ParseInt(v, 10, 64)
	case nil:
		v2 = int64(0)
	}
	return v2
}
// ToInt32 coerces common scalar and textual types to int32; unparsable
// strings, nil, and unknown types yield 0. Out-of-range values wrap.
func ToInt32(x interface{}) int32 {
	var v2 int32
	switch v := x.(type) {
	case bool:
		if true == x {
			v2 = int32(1)
		} else {
			v2 = int32(0)
		}
	case int:
		v2 = int32(v)
	case int32:
		v2 = int32(v)
	case int64:
		v2 = int32(v)
	case float32:
		v2 = int32(v)
	case float64:
		v2 = int32(v)
	case string:
		vTmp, _ := strconv.Atoi(v)
		v2 = int32(vTmp)
	case []byte:
		vTmp, _ := strconv.Atoi(string(v))
		v2 = int32(vTmp)
	case nil:
		v2 = int32(0)
	}
	return v2
}
// ToBool normalizes common scalar types to a boolean: numeric 1,
// "true", and "1" are true; everything else (including nil and unknown
// types) is false.
func ToBool(x interface{}) bool {
	v2 := false
	switch v := x.(type) {
	case bool:
		v2 = v
	case int:
		v2 = v == 1
	case int32:
		v2 = v == 1
	case int64:
		v2 = v == 1
	case float32:
		v2 = v == 1
	case float64:
		v2 = v == 1
	case string:
		// The old string(v) was a redundant self-conversion, and
		// fmt.Sprint(0x01) always yields "1", duplicating the v == "1"
		// test — both removed.
		v2 = v == "true" || v == "1"
	case []byte:
		s := string(v)
		v2 = s == "true" || s == "1"
	case nil:
		v2 = false
	}
	return v2
}
// NewUUIDNoSplit returns NewUUID()'s value with the dashes stripped.
func NewUUIDNoSplit() string {
	return strings.Replace(NewUUID(), "-", "", -1)
}
// ToStringList converts a generic slice to []string. Non-string
// elements are rendered via fmt.Sprint instead of panicking (the old
// unchecked assertion v.(string) crashed on any non-string element).
// An empty input yields a nil slice, as before.
func ToStringList(r []interface{}) []string {
	var lst []string
	for _, v := range r {
		if s, ok := v.(string); ok {
			lst = append(lst, s)
		} else {
			lst = append(lst, fmt.Sprint(v))
		}
	}
	return lst
}
// ToUUid renders x as a string (float64 fixed to 2 decimals); an empty
// result is replaced with the nil UUID
// "00000000-0000-0000-0000-000000000000".
func ToUUid(x interface{}) string {
	var v2 string
	switch v := x.(type) {
	case bool:
		if true == x {
			v2 = "1"
		} else {
			v2 = "0"
		}
	case int:
		v2 = strconv.Itoa(v)
	case int32:
		v2 = strconv.Itoa(int(v))
	case int64:
		v2 = strconv.FormatInt(v, 10)
	case float64:
		v2 = strconv.FormatFloat(v, 'f', 2, 64)
	case string:
		v2 = v
	case []byte:
		v2 = string(v)
	case nil:
		v2 = ""
	}
	if v2 == "" {
		return "00000000-0000-0000-0000-000000000000"
	} else {
		return v2
	}
}
// RemoveZero strips a run of trailing zeros from num's textual form
// when the run reaches the decimal point (e.g. "3.00" -> "3"); other
// values come back unchanged. An all-stripped result collapses to "0".
func RemoveZero(num float64) string {
	numStr := ToString(num)
	lenNumStr := len(numStr)
	j := lenNumStr
	isRemove := false
	// Scan backwards over '0's; only when the '.' itself is reached do we
	// commit to truncating at j.
	for i := lenNumStr; i > 0; i-- {
		if numStr[i-1:i] == "." {
			isRemove = true
		}
		if numStr[i-1:i] == "." || numStr[i-1:i] == "0" {
			j--
		} else {
			break
		}
	}
	if len(numStr[:j]) <= 0 {
		return "0"
	}
	if isRemove {
		return numStr[:j]
	}
	return numStr
}
// DeSensitive masks a sensitive code, keeping the first 3 and last 4
// characters and replacing the middle with '*'. Codes shorter than 7
// characters are returned untouched.
func DeSensitive(code string) string {
	n := len(code)
	if n < 7 {
		return code
	}
	masked := strings.Repeat("*", n-7)
	return code[:3] + masked + code[n-4:]
}
/**
 * Round half up to n decimal places.
 * NOTE(review): for negative f the +0.5/10^n offset rounds toward
 * positive infinity rather than "away from zero" — confirm that is the
 * intended behavior before relying on it for negative values.
 */
func RoundingOff(f float64, n int) float64 {
	pow10_n := math.Pow10(n)
	return math.Trunc((f+0.5/pow10_n)*pow10_n) / pow10_n
}
/**
 * Strips spaces and leading zeros from the front of str0.
 * XXX Note: if a space occurs after real content has started, everything
 * from that space onward is discarded.
 */
func CleanSpace(str0 string) string {
	var str1 string
	// fGot flips to true at the first character that is neither '0' nor
	// ' '; until then, zeros and spaces are skipped.
	fGot := false
	for _, v := range str0 {
		v0 := string(v)
		if !fGot && (v0 == "0" || v0 == " ") {
			continue
		}
		if !fGot && v0 != "0" && v0 != " " {
			fGot = true
		}
		// Once content has started, a space terminates the scan.
		if fGot && v0 == " " {
			break
		}
		str1 += v0
	}
	return str1
}
// CleanSpaceTail trims trailing spaces from str0 (always keeping at
// least the first character for longer inputs). Inputs of length two
// keep the original quirk of dropping spaces anywhere in the pair.
func CleanSpaceTail(str0 string) string {
	if str0 == "" {
		return ""
	}
	lenStr := len(str0)
	if lenStr == 1 {
		// The old code indexed str0[1] unconditionally for lenStr <= 2,
		// panicking on one-character input.
		if str0 == " " {
			return ""
		}
		return str0
	}
	if lenStr == 2 {
		var str1 string
		if string(str0[0]) != " " {
			str1 += string(str0[0])
		}
		if string(str0[1]) != " " {
			str1 += string(str0[1])
		}
		return str1
	}
	i := 0
	for i = lenStr; i > 1; i-- {
		v0 := string(str0[i-1 : i])
		if v0 != " " {
			break
		}
	}
	return str0[:i]
}
// ToStringNoPointNCleanSpace converts var0 via ToStringNoPoint, strips
// leading zeros/spaces via CleanSpace, and falls back to defValue[0]
// when the result is empty.
func ToStringNoPointNCleanSpace(var0 interface{}, defValue ...string) string {
	var1 := CleanSpace(ToStringNoPoint(var0))
	if var1 == "" && len(defValue) > 0 {
		return defValue[0]
	}
	return var1
}
// ToStringNoPointNCleanSpaceTail converts var0 via ToStringNoPoint,
// trims trailing spaces via CleanSpaceTail, and falls back to
// defValue[0] when the result is empty.
func ToStringNoPointNCleanSpaceTail(var0 interface{}, defValue ...string) string {
	var1 := CleanSpaceTail(ToStringNoPoint(var0))
	if var1 == "" && len(defValue) > 0 {
		return defValue[0]
	}
	return var1
}
// IsJson reports whether jsonStr is syntactically valid JSON.
func IsJson(jsonStr string) bool {
	return json.Valid([]byte(jsonStr))
}
// Float64ToStringFixPoint formats v with exactly bit digits after the
// decimal point.
func Float64ToStringFixPoint(v float64, bit int) string {
	return strconv.FormatFloat(v, 'f', bit, 64)
}
// Float64ToPrice formats v as a money amount with two decimal places.
func Float64ToPrice(v float64) string {
	return Float64ToStringFixPoint(v, 2)
}
// Int64ToPrice formats v, an amount in cents, as a decimal money string
// with zero-padded cents (matching StringToPrice's %02d). The old %d
// verb produced "1.5" for 105 instead of "1.05". Negative amounts get a
// single leading sign.
func Int64ToPrice(v int64) string {
	sign := ""
	if v < 0 {
		sign = "-"
		v = -v
	}
	return fmt.Sprintf("%s%d.%02d", sign, v/100, v%100)
}
// StringToPrice treats v as an integer number of cents and formats it
// as a decimal amount with zero-padded cents (e.g. "105" -> "1.05").
func StringToPrice(v string) string {
	v2 := ToInt64(v)
	return fmt.Sprintf("%d.%02d", v2/100, v2%100)
}
// Float64Mul multiplies a and b using decimal arithmetic, avoiding
// binary floating-point rounding error in the product.
func Float64Mul(a, b float64) decimal.Decimal {
	f1 := decimal.NewFromFloat(a)
	f2 := decimal.NewFromFloat(b)
	return f1.Mul(f2)
}
// RandomIp fabricates a dotted-quad IPv4 string; the first and last
// octets start at 1 (relies on the package's Random helper for ranges).
func RandomIp() string {
	ip1 := Random(1, 255)
	ip2 := Random(0, 255)
	ip3 := Random(0, 255)
	ip4 := Random(1, 255)
	return fmt.Sprintf("%d.%d.%d.%d", ip1, ip2, ip3, ip4)
}
// Json2Map decodes JSON given as a string or []byte into a generic map;
// on decode failure it prints the error and returns nil.
func Json2Map(jsonB interface{}) map[string]interface{} {
	var raw []byte
	switch val := jsonB.(type) {
	case string:
		raw = []byte(val)
	case []byte:
		raw = val
	}
	out := map[string]interface{}{}
	if err := json.Unmarshal(raw, &out); err != nil {
		fmt.Printf("err=[%v]\n", err)
		return nil
	}
	return out
}
// Json2StrList decodes a JSON array given as a string or []byte into a
// []string. Empty input yields an empty slice; a decode failure prints
// the error and returns nil.
func Json2StrList(jsonB interface{}) []string {
	out := []string{}
	var raw []byte
	switch val := jsonB.(type) {
	case string:
		raw = []byte(val)
	case []byte:
		raw = val
	}
	if len(raw) <= 0 {
		return out
	}
	if err := json.Unmarshal(raw, &out); err != nil {
		fmt.Printf("err=[%v]\n", err)
		return nil
	}
	return out
}
// ToCapExt capitalizes the first letter of each sep-separated segment
// of a and concatenates the results ("foo_bar", "_" -> "FooBar").
func ToCapExt(a, sep string) (c string) {
	b := strings.Split(a, sep)
	for _, v := range b {
		if v == "" {
			// Guard: empty segments (leading/trailing/doubled separators)
			// previously caused an out-of-range panic on v[:1].
			continue
		}
		c = fmt.Sprintf("%s%s%s", c, strings.ToUpper(v[:1]), v[1:])
	}
	return c
}
// ToCapExtNSkip behaves like ToCapExt but, when isCapLower is set,
// lowercases the very first letter of the joined result
// ("foo_bar", "_", true -> "fooBar").
func ToCapExtNSkip(a, sep string, isCapLower bool) (c string) {
	b := strings.Split(a, sep)
	for _, v := range b {
		if v == "" {
			// Guard: empty segments previously panicked on v[:1].
			continue
		}
		c = fmt.Sprintf("%s%s%s", c, strings.ToUpper(v[:1]), v[1:])
	}
	var c2 string
	if isCapLower && c != "" {
		// The c != "" guard prevents an out-of-range panic on c[:1].
		c2 = strings.ToLower(string(c[:1])) + c[1:]
	} else {
		c2 = c
	}
	return c2
}
// ToJson marshals m to its JSON text; on failure it prints the error
// and returns the empty string.
func ToJson(m interface{}) string {
	encoded, err := json.Marshal(m)
	if err != nil {
		fmt.Printf("toJsonErr=[%v]", err)
		return ""
	}
	return string(encoded)
}
// ToJsonNotChange marshals m to JSON but restores '&' characters that
// encoding/json's HTML-safe escaping turns into \u0026.
func ToJsonNotChange(m interface{}) string {
	b, err := json.Marshal(m)
	if err != nil {
		fmt.Printf("toJsonErr=[%v]", err)
		return ""
	}
	return strings.Replace(string(b), "\\u0026", "&", -1)
}
// UnderLineToCamel converts a snake_case identifier to UpperCamelCase
// ("foo_bar" -> "FooBar"). Names without underscores also get quotes
// and backticks stripped before capitalization (original behavior).
func UnderLineToCamel(field string) string {
	if field == "" {
		// Guard: empty input previously panicked on field[0].
		return ""
	}
	reg := regexp.MustCompile("_")
	found := reg.FindAllStringIndex(field, -1)
	var extStr string
	lenFound := len(found)
	if lenFound > 0 {
		for k2, v2 := range found {
			if v2[1] >= len(field) {
				// Guard: a trailing underscore previously panicked.
				continue
			}
			if k2+1 < lenFound {
				extStr += strings.ToUpper(string(field[v2[1]:v2[1]+1])) + field[v2[1]+1:found[k2+1][0]]
			} else {
				extStr += strings.ToUpper(string(field[v2[1]:v2[1]+1])) + field[v2[1]+1:]
			}
		}
		return strings.ToUpper(string(field[0])) + field[1:found[0][0]] + extStr
	}
	field = strings.Replace(field, `"`, "", -1)
	field = strings.Replace(field, "`", "", -1)
	if field == "" {
		// Guard: input consisting only of quotes previously panicked.
		return ""
	}
	return strings.ToUpper(string(field[0])) + field[1:]
}
// UnderLineToCamel1stLower converts a snake_case identifier to
// lowerCamelCase ("foo_bar" -> "fooBar"). Names without underscores
// also get quotes and backticks stripped first (original behavior).
func UnderLineToCamel1stLower(field string) string {
	if field == "" {
		// Guard: empty input previously panicked on field[0].
		return ""
	}
	reg := regexp.MustCompile("_")
	found := reg.FindAllStringIndex(field, -1)
	var extStr string
	lenFound := len(found)
	if lenFound > 0 {
		for k2, v2 := range found {
			if v2[1] >= len(field) {
				// Guard: a trailing underscore previously panicked.
				continue
			}
			if k2+1 < lenFound {
				extStr += strings.ToUpper(string(field[v2[1]:v2[1]+1])) + field[v2[1]+1:found[k2+1][0]]
			} else {
				extStr += strings.ToUpper(string(field[v2[1]:v2[1]+1])) + field[v2[1]+1:]
			}
		}
		return strings.ToLower(string(field[0])) + field[1:found[0][0]] + extStr
	}
	field = strings.Replace(field, `"`, "", -1)
	field = strings.Replace(field, "`", "", -1)
	if field == "" {
		// Guard: input consisting only of quotes previously panicked.
		return ""
	}
	return strings.ToLower(string(field[0])) + field[1:]
}
// ForShort truncates msg to at most n bytes. Note: truncation is
// byte-based, so a multi-byte UTF-8 rune at the boundary may be split.
func ForShort(msg string, n int) string {
	if len(msg) >= n {
		return msg[:n]
	}
	return msg
}
// StringToFloat64 parses s via decimal arithmetic; parse or conversion
// failures are swallowed and yield 0.
func StringToFloat64(s string) float64 {
	fromString, _ := decimal.NewFromString(s)
	f, _ := fromString.Float64()
	return f
}
// Float64ToString renders f via decimal's canonical string form
// (no scientific notation, no trailing zeros).
func Float64ToString(f float64) string {
	return decimal.NewFromFloat(f).String()
}
// FLoatCmp compares two float32/float64 values exactly via decimal,
// returning -1/0/1 like Cmp; it returns (1, error) when either argument
// is not a float type. (The "FLoat" capitalization is kept for
// interface stability.)
func FLoatCmp(a, b interface{}) (int, error) {
	switch v := a.(type) {
	case float32:
		switch v2 := b.(type) {
		case float32:
			return decimal.NewFromFloat32(v).Cmp(decimal.NewFromFloat32(v2)), nil
		case float64:
			return decimal.NewFromFloat32(v).Cmp(decimal.NewFromFloat(v2)), nil
		}
	case float64:
		switch v2 := b.(type) {
		case float32:
			return decimal.NewFromFloat(v).Cmp(decimal.NewFromFloat32(v2)), nil
		case float64:
			return decimal.NewFromFloat(v).Cmp(decimal.NewFromFloat(v2)), nil
		}
	default:
		return 1, errors.New("not support type")
	}
	// Reached when a is a float but b is not.
	return 1, errors.New("not support type")
}
|
package enemy
// Enemy is an opponent to the player and will be fed
// into the battle manager after configuration.
// NOTE(review): fields are not yet defined; the zero value is usable as
// a placeholder only.
type Enemy struct {
}
|
package bit_map
import (
"github.com/stretchr/testify/assert"
"testing"
)
// TestByte2String smoke-logs byte2String's rendering of 0b00010101.
func TestByte2String(t *testing.T) {
	t.Log(byte2String(byte(21)))
}
// TestBitMap_Set covers Set/Exists within bounds and the out-of-range
// error path (99 exceeds Size 20).
func TestBitMap_Set(t *testing.T) {
	bm := &BitMap{
		Size: 20,
	}
	bm.Init()
	t.Logf("start: [%s]", bm)
	err := bm.Set(10)
	assert.Nil(t, err)
	err = bm.Set(13)
	assert.Nil(t, err)
	// Setting an index beyond Size must fail.
	err = bm.Set(99)
	assert.NotNil(t, err)
	exists := bm.Exists(10)
	assert.True(t, exists)
	// 11 was never set.
	exists = bm.Exists(11)
	assert.False(t, exists)
	exists = bm.Exists(13)
	assert.True(t, exists)
	t.Logf("end: [%s]", bm)
}
|
package connector
import (
"errors"
"net/http"
"net/url"
"strings"
"github.com/mayflower/docker-ls/lib/auth"
)
// tokenAuthConnector implements Connector for registries using the
// token auth scheme, with request throttling (semaphore), token
// caching, and usage statistics.
type tokenAuthConnector struct {
	cfg           Config
	httpClient    *http.Client
	authenticator auth.Authenticator
	semaphore     semaphore
	tokenCache    *tokenCache
	stat          *statistics
}
// Delete issues an authenticated DELETE request via Request.
func (r *tokenAuthConnector) Delete(url *url.URL, headers map[string]string, hint string) (*http.Response, error) {
	return r.Request("DELETE", url, headers, hint)
}
// Get issues an authenticated GET request via Request.
func (r *tokenAuthConnector) Get(url *url.URL, headers map[string]string, hint string) (*http.Response, error) {
	return r.Request("GET", url, headers, hint)
}
// Request performs an authenticated registry request. It first tries a
// cached token (looked up by hint); on a 401 it parses the
// www-authenticate challenge, authenticates, and retries — forcing a
// fresh token once if a cached auth-level token is rejected.
// Successful tokens are written back to the cache under hint.
func (r *tokenAuthConnector) Request(
	method string,
	url *url.URL,
	headers map[string]string,
	hint string,
) (response *http.Response, err error) {
	r.semaphore.Lock()
	defer r.semaphore.Unlock()
	r.stat.Request()
	var token auth.Token
	request, err := http.NewRequest(method, url.String(), strings.NewReader(""))
	if err != nil {
		return
	}
	for header, value := range headers {
		request.Header.Set(header, value)
	}
	if hint != "" {
		if token = r.tokenCache.Get(hint); token != nil {
			r.stat.CacheHitAtApiLevel()
		} else {
			r.stat.CacheMissAtApiLevel()
		}
	}
	resp, err := r.attemptRequestWithToken(request, token)
	if err != nil || resp.StatusCode != http.StatusUnauthorized {
		response = resp
		return
	}
	if token != nil {
		r.stat.CacheFailAtApiLevel()
	}
	if resp.Close {
		resp.Body.Close()
	}
	challenge, err := auth.ParseChallenge(resp.Header.Get("www-authenticate"))
	if err != nil {
		// Typo fixed: "shure" -> "sure".
		err = errors.New(err.Error() +
			" Are you sure that you are using the correct (token) auth scheme?")
		return
	}
	token, err = r.authenticator.Authenticate(challenge, false)
	if err != nil {
		return
	}
	if token != nil {
		if token.Fresh() {
			r.stat.CacheMissAtAuthLevel()
		} else {
			r.stat.CacheHitAtAuthLevel()
		}
	}
	response, err = r.attemptRequestWithToken(request, token)
	if err == nil &&
		response.StatusCode == http.StatusUnauthorized &&
		!token.Fresh() {
		r.stat.CacheFailAtAuthLevel()
		token, err = r.authenticator.Authenticate(challenge, true)
		// BUG FIX: bail out only on authentication FAILURE. The old code
		// returned when err == nil, which skipped the retry below
		// whenever forced re-authentication succeeded.
		if err != nil {
			return
		}
		response, err = r.attemptRequestWithToken(request, token)
	}
	if hint != "" && err == nil && response.StatusCode != http.StatusUnauthorized {
		r.tokenCache.Set(hint, token)
	}
	return
}
// attemptRequestWithToken executes request, adding a Bearer
// Authorization header when a token is available.
func (r *tokenAuthConnector) attemptRequestWithToken(request *http.Request, token auth.Token) (*http.Response, error) {
	if token != nil {
		request.Header.Set("Authorization", "Bearer "+token.Value())
	}
	return r.httpClient.Do(request)
}
// GetStatistics exposes the connector's request and cache counters.
func (r *tokenAuthConnector) GetStatistics() Statistics {
	return r.stat
}
// NewTokenAuthConnector wires up a Connector for cfg: a shared HTTP
// client, a concurrency-limiting semaphore, an empty token cache, and
// an authenticator bound to the configured credentials.
func NewTokenAuthConnector(cfg Config) Connector {
	connector := tokenAuthConnector{
		cfg:        cfg,
		httpClient: createHttpClient(cfg),
		semaphore:  newSemaphore(cfg.MaxConcurrentRequests()),
		tokenCache: newTokenCache(),
		stat:       new(statistics),
	}
	connector.authenticator = auth.NewAuthenticator(connector.httpClient, cfg.Credentials())
	return &connector
}
|
package PDU
import "github.com/andrewz1/gosmpp/Data"
// BindTransmitterResp is the SMPP response PDU for a transmitter bind.
type BindTransmitterResp struct {
	BindResponse
}
// NewBindTransmitterResp allocates and initializes the PDU.
func NewBindTransmitterResp() *BindTransmitterResp {
	a := &BindTransmitterResp{}
	a.Construct()
	return a
}
// Construct initializes the embedded base response and sets the
// BIND_TRANSMITTER_RESP command id; SetRealReference is deferred so it
// runs after the base Construct completes.
func (c *BindTransmitterResp) Construct() {
	defer c.SetRealReference(c)
	c.BindResponse.Construct()
	c.SetCommandId(Data.BIND_TRANSMITTER_RESP)
}
// GetInstance returns a fresh BindTransmitterResp (prototype pattern).
func (c *BindTransmitterResp) GetInstance() (IPDU, error) {
	return NewBindTransmitterResp(), nil
}
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package multicluster
import (
"context"
"fmt"
"time"
pkgmulticluster "github.com/kubevela/pkg/multicluster"
"github.com/oam-dev/cluster-gateway/pkg/apis/cluster/v1alpha1"
clustercommon "github.com/oam-dev/cluster-gateway/pkg/common"
errors2 "github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/oam-dev/kubevela/pkg/oam"
"github.com/oam-dev/kubevela/pkg/utils/common"
errors3 "github.com/oam-dev/kubevela/pkg/utils/errors"
)
const (
// ClusterLocalName specifies the local cluster
ClusterLocalName = pkgmulticluster.Local
)
var (
// ClusterGatewaySecretNamespace the namespace where cluster-gateway secret locates
ClusterGatewaySecretNamespace = "vela-system"
)
// ClusterNameInContext extract cluster name from context; the empty
// string is returned when no cluster is set.
func ClusterNameInContext(ctx context.Context) string {
	cluster, _ := pkgmulticluster.ClusterFrom(ctx)
	return cluster
}
// ContextWithClusterName create context with multi-cluster by cluster name.
func ContextWithClusterName(ctx context.Context, clusterName string) context.Context {
	return pkgmulticluster.WithCluster(ctx, clusterName)
}
// ContextInLocalCluster create context bound to the local cluster.
func ContextInLocalCluster(ctx context.Context) context.Context {
	return pkgmulticluster.WithCluster(ctx, ClusterLocalName)
}
// ResourcesWithClusterName set cluster name for resources; nil entries
// are skipped and existing cluster labels are left untouched.
func ResourcesWithClusterName(clusterName string, objs ...*unstructured.Unstructured) []*unstructured.Unstructured {
	var _objs []*unstructured.Unstructured
	for _, obj := range objs {
		if obj != nil {
			oam.SetClusterIfEmpty(obj, clusterName)
			_objs = append(_objs, obj)
		}
	}
	return _objs
}
// GetClusterGatewayService get cluster gateway backend service reference
// if service is ready, service is returned and no error is returned
// if service exists but is not ready, both service and error are returned
// if service does not exist, only error is returned
func GetClusterGatewayService(ctx context.Context, c client.Client) (*apiregistrationv1.ServiceReference, error) {
	gv := v1alpha1.SchemeGroupVersion
	apiService := &apiregistrationv1.APIService{}
	// APIService objects are named "<version>.<group>".
	apiServiceName := gv.Version + "." + gv.Group
	if err := c.Get(ctx, types.NamespacedName{Name: apiServiceName}, apiService); err != nil {
		if errors.IsNotFound(err) {
			return nil, fmt.Errorf("ClusterGateway APIService %s is not found", apiServiceName)
		}
		return nil, errors2.Wrapf(err, "failed to get ClusterGateway APIService %s", apiServiceName)
	}
	// A nil Service means the API is handled locally by the aggregator
	// instead of a dedicated backend apiserver, which is unsupported here.
	if apiService.Spec.Service == nil {
		return nil, fmt.Errorf("ClusterGateway APIService should use the service exposed by dedicated apiserver instead of being handled locally")
	}
	svc := apiService.Spec.Service
	// Scan the conditions for Available; the last matching condition wins.
	status := apiregistrationv1.ConditionUnknown
	for _, condition := range apiService.Status.Conditions {
		if condition.Type == apiregistrationv1.Available {
			status = condition.Status
		}
	}
	if status == apiregistrationv1.ConditionTrue {
		return svc, nil
	}
	// Not ready: return the service reference together with the error.
	return svc, fmt.Errorf("ClusterGateway APIService (%s/%s:%d) is not ready, current status: %s", svc.Namespace, svc.Name, svc.Port, status)
}
// WaitUntilClusterGatewayReady polls GetClusterGatewayService up to
// maxRetry times, sleeping interval between attempts, and returns the
// service reference as soon as it is ready. After the final failed attempt
// the last error is returned, wrapped with the retry count.
func WaitUntilClusterGatewayReady(ctx context.Context, c client.Client, maxRetry int, interval time.Duration) (svc *apiregistrationv1.ServiceReference, err error) {
	for attempt := 0; attempt < maxRetry; attempt++ {
		svc, err = GetClusterGatewayService(ctx, c)
		if err == nil {
			return svc, nil
		}
		klog.Infof("waiting for cluster gateway service: %v", err)
		time.Sleep(interval)
	}
	return nil, errors2.Wrapf(err, "failed to wait cluster gateway service (retry=%d)", maxRetry)
}
// Initialize prepare multicluster environment by checking cluster gateway service in clusters and hack rest config to use cluster gateway
// if cluster gateway service is not ready, it will wait up to 5 minutes
func Initialize(restConfig *rest.Config, autoUpgrade bool) (client.Client, error) {
	c, err := client.New(restConfig, client.Options{Scheme: common.Scheme})
	if err != nil {
		return nil, errors2.Wrapf(err, "unable to get client to find cluster gateway service")
	}
	// 60 retries x 5s = up to 5 minutes of waiting for the gateway service.
	svc, err := WaitUntilClusterGatewayReady(context.Background(), c, 60, 5*time.Second)
	if err != nil {
		return nil, ErrDetectClusterGateway
	}
	ClusterGatewaySecretNamespace = svc.Namespace
	if autoUpgrade {
		if err = UpgradeExistingClusterSecret(context.Background(), c); err != nil {
			// Upgrade failures do not affect the running of the current
			// version, so they are only logged.
			// (fixed typo: "while grading" -> "while upgrading")
			klog.ErrorS(err, "error encountered while upgrading existing cluster secret to the latest version")
		}
	}
	return c, nil
}
// UpgradeExistingClusterSecret upgrade outdated cluster secrets in v1.1.1 to latest
func UpgradeExistingClusterSecret(ctx context.Context, c client.Client) error {
	// Label used by v1.1.1 to mark cluster credential secrets.
	const outdatedClusterCredentialLabelKey = "cluster.core.oam.dev/cluster-credential"
	secrets := &v1.SecretList{}
	// (fixed: the original double-checked err != nil in a redundant nested if)
	if err := c.List(ctx, secrets, client.InNamespace(ClusterGatewaySecretNamespace), client.HasLabels{outdatedClusterCredentialLabelKey}); err != nil {
		return errors2.Wrapf(err, "failed to find outdated cluster secrets to do upgrade")
	}
	errs := errors3.ErrorList{}
	for _, item := range secrets.Items {
		credType := item.Labels[clustercommon.LabelKeyClusterCredentialType]
		// Only TLS secrets that have not yet been labeled with a credential
		// type need migration.
		if credType == "" && item.Type == v1.SecretTypeTLS {
			item.Labels[clustercommon.LabelKeyClusterCredentialType] = string(v1alpha1.CredentialTypeX509Certificate)
			if err := c.Update(ctx, item.DeepCopy()); err != nil {
				errs = append(errs, errors2.Wrapf(err, "failed to update outdated secret %s", item.Name))
			}
		}
	}
	if errs.HasError() {
		return errs
	}
	return nil
}
// ListExistingClusterSecrets returns all secrets in the cluster-gateway
// namespace that carry the cluster-credential-type label.
func ListExistingClusterSecrets(ctx context.Context, c client.Client) ([]v1.Secret, error) {
	list := &v1.SecretList{}
	err := c.List(ctx, list, client.InNamespace(ClusterGatewaySecretNamespace), client.HasLabels{clustercommon.LabelKeyClusterCredentialType})
	if err != nil {
		return nil, errors2.Wrapf(err, "failed to list cluster secrets")
	}
	return list.Items, nil
}
|
package ignite
import (
"fmt"
"net"
"path/filepath"
"github.com/weaveworks/footloose/pkg/config"
"github.com/weaveworks/footloose/pkg/exec"
)
const (
	// IgniteName is the name identifying the ignite backend.
	IgniteName = "ignite"
)
// This offset is incremented for each port so we avoid
// duplicate port bindings (and hopefully port collisions).
var portOffset uint16
// Create creates an ignite VM via "ignite run", wiring up CPU/memory/disk,
// kernel image, SSH public key, file copies, and port mappings from the
// machine spec.
// NOTE(review): the named result id is never assigned, so this always
// returns "" -- the original comment claimed it returns the created
// container's ID; confirm whether any caller relies on it.
func Create(name string, spec *config.Machine, pubKeyPath string) (id string, err error) {
	runArgs := []string{
		"run",
		spec.Image,
		fmt.Sprintf("--name=%s", name),
		fmt.Sprintf("--cpus=%d", spec.IgniteConfig().CPUs),
		fmt.Sprintf("--memory=%s", spec.IgniteConfig().Memory),
		fmt.Sprintf("--size=%s", spec.IgniteConfig().Disk),
		fmt.Sprintf("--kernel-image=%s", spec.IgniteConfig().Kernel),
		fmt.Sprintf("--ssh=%s", pubKeyPath),
	}
	copyFiles := spec.IgniteConfig().CopyFiles
	if copyFiles == nil {
		copyFiles = make(map[string]string)
	}
	for _, v := range setupCopyFiles(copyFiles) {
		runArgs = append(runArgs, v)
	}
	for _, mapping := range spec.PortMappings {
		if mapping.HostPort == 0 {
			// If not defined, set the host port to a random free ephemeral port
			var err error
			if mapping.HostPort, err = freePort(); err != nil {
				return "", err
			}
		} else {
			// If defined, apply an offset so all VMs won't use the same port
			mapping.HostPort += portOffset
			portOffset++
		}
		runArgs = append(runArgs, fmt.Sprintf("--ports=%d:%d", int(mapping.HostPort), mapping.ContainerPort))
	}
	_, err = exec.ExecuteCommand(execName, runArgs...)
	return "", err
}
// setupCopyFiles converts the host-path -> VM-path map into a list of
// "--copy-files=HOST:VM" flags, resolving each host path to absolute form.
func setupCopyFiles(copyFiles map[string]string) []string {
	flags := make([]string, 0, len(copyFiles))
	for hostPath, vmPath := range copyFiles {
		flags = append(flags, fmt.Sprintf("--copy-files=%s:%s", toAbs(hostPath), vmPath))
	}
	return flags
}
// toAbs resolves p to an absolute path; if resolution fails, the original
// path is returned unchanged.
func toAbs(p string) string {
	ap, err := filepath.Abs(p)
	if err != nil {
		// Abs failed; fall back to the caller's original path.
		return p
	}
	return ap
}
// IsCreated reports whether an ignite VM with the given name exists, by
// checking whether "ignite logs <name>" succeeds.
// (idiom fix: collapsed "if err != nil { return false }; return true")
func IsCreated(name string) bool {
	_, err := exec.ExecuteCommand(execName, "logs", name)
	return err == nil
}
// freePort requests a free/open ephemeral port from the kernel by binding
// to localhost:0 and reading back the assigned port.
// Heavily inspired by https://github.com/phayes/freeport/blob/master/freeport.go
func freePort() (uint16, error) {
	laddr, err := net.ResolveTCPAddr("tcp", "localhost:0")
	if err != nil {
		return 0, err
	}
	listener, err := net.ListenTCP("tcp", laddr)
	if err != nil {
		return 0, err
	}
	port := listener.Addr().(*net.TCPAddr).Port
	listener.Close()
	return uint16(port), nil
}
|
/*
Copyright 2020 Humio https://humio.com
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubernetes
import (
"context"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// ListPersistentVolumeClaims grabs the list of all persistent volume claims
// associated with an instance of HumioCluster, i.e. the claims in the
// cluster's namespace that match the given labels.
func ListPersistentVolumeClaims(ctx context.Context, c client.Client, humioClusterNamespace string, matchingLabels client.MatchingLabels) ([]corev1.PersistentVolumeClaim, error) {
	var foundPersistentVolumeClaimList corev1.PersistentVolumeClaimList
	err := c.List(ctx, &foundPersistentVolumeClaimList, client.InNamespace(humioClusterNamespace), matchingLabels)
	if err != nil {
		return nil, err
	}
	return foundPersistentVolumeClaimList.Items, nil
}
|
package client
import (
"io/ioutil"
"net/http"
"github.com/devspace-cloud/devspace/pkg/devspace/cloud/token"
"github.com/pkg/errors"
)
// TokenEndpoint is the endpoint where to get a token from
const TokenEndpoint = "/auth/token"
// GetToken returns a valid access token to the provider. A cached token is
// reused while valid; otherwise a new one is fetched from TokenEndpoint,
// validated, and persisted via saveToken.
func (c *client) GetToken() (string, error) {
	if c.accessKey == "" {
		return "", errors.New("Provider has no key specified")
	}
	// Reuse the cached token while it is still valid.
	if c.token != "" && token.IsTokenValid(c.token) {
		return c.token, nil
	}
	resp, err := http.Get(c.host + TokenEndpoint + "?key=" + c.accessKey)
	if err != nil {
		return "", errors.Wrap(err, "token request")
	}
	// Close the body so the connection can be reused (it leaked before).
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", errors.Wrap(err, "read request body")
	}
	if resp.StatusCode != http.StatusOK {
		return "", errors.Errorf("Error retrieving token: Code %v => %s. Try to relogin with 'devspace login'", resp.StatusCode, string(body))
	}
	c.token = string(body)
	if !token.IsTokenValid(c.token) {
		return "", errors.New("Received invalid token from provider")
	}
	err = c.saveToken()
	if err != nil {
		return "", errors.Wrap(err, "token save")
	}
	return c.token, nil
}
// saveToken persists the client's current token into the provider config
// entry whose name matches c.provider, then saves the config.
func (c *client) saveToken() error {
	cfg, err := c.loader.Load()
	if err != nil {
		return err
	}
	for i := range cfg.Providers {
		if cfg.Providers[i].Name != c.provider {
			continue
		}
		cfg.Providers[i].Token = c.token
		return c.loader.Save(cfg)
	}
	return errors.Errorf("Couldn't find provider %s", c.provider)
}
|
/*
Copyright (c) 2014, Thomas Lingefelt <thomasrling@gmail.com>
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
StringSet - Maintaining a unique set of strings
A StringSet is just a map[string]bool so things like len() will work on it.
Example
ss := stringset.New()
ss.Add("This", "is", "a", "test")
if ss.Has("test") {
fmt.Println("TESTING!")
}
*/
package stringset
// StringSet maintains a unique set of strings. It is just a
// map[string]bool, so built-ins such as len() work directly on it.
type StringSet map[string]bool

// FromSlice builds a StringSet containing every string in strs.
func FromSlice(strs []string) StringSet {
	ss := New()
	ss.Add(strs...)
	return ss
}

// New returns an empty StringSet.
func New() StringSet {
	return make(StringSet)
}

// Slice collects the keys in the map and returns them as a slice
// (in unspecified order).
func (ss StringSet) Slice() []string {
	// idiom fixes: drop redundant cap argument and the "k, _" range form
	slice := make([]string, len(ss))
	i := 0
	for k := range ss {
		slice[i] = k
		i++
	}
	return slice
}

// Has reports whether str is in the set.
func (ss StringSet) Has(str string) bool {
	_, ok := ss[str]
	return ok
}

// Add inserts the given strings into the set.
func (ss StringSet) Add(vs ...string) {
	for _, v := range vs {
		ss[v] = true
	}
}

// Del removes str from the set.
func (ss StringSet) Del(str string) {
	delete(ss, str)
}
|
package ctree
import (
"fmt"
"strings"
"testing"
)
// Testing structure
// mys is a minimal test node type: it wraps a string and implements
// fmt.Stringer.
type mys struct {
	s string
}

// String returns the wrapped string.
func (m mys) String() string {
	return m.s
}
// TestExample builds a small tree imperatively (NewTree/Add) and walks it
// with the printing callback p, then builds a second tree through the
// fluent BuildTree_MutableNodes builder and walks that too. It only prints;
// there are no assertions.
func TestExample(t *testing.T) {
	r := &mys{}
	ct := NewTree("T", r)
	a := mys{"a"}
	ct.Add(r, &a)
	b := mys{"b"}
	c := mys{"c"}
	ct.Add(&a, &b)
	ct.Add(&a, &c)
	ct.Add(&a, &c) // ignored as c already in the tree
	ct.Walk(p)
	fmt.Println()
	slice := &myslice{s: []int{1, 2, 3, 4}}
	BuildTree_MutableNodes("T", &mys{}).
		Add(mys{"a"}).Down().
		/**/ Add(&mys{"b"}).
		/**/ Add(mys{"c"}).
		Add(slice).
		Build().Walk(p)
}
// myslice is a second test node type wrapping an int slice; its String
// formats the slice with %v.
type myslice struct {
	s []int
}

// String formats the wrapped slice, e.g. "[1 2 3 4]".
func (m myslice) String() string {
	return fmt.Sprintf("%v", m.s)
}
// Function given to the walker for printing nodes: each node is indented by
// its depth d; *mys nodes get a "1)" prefix, everything else falls through
// to the default case with its dynamic type appended. Always returns true
// (presumably "continue walking" -- confirm against Walk's contract).
func p(d int, n INode) bool {
	s := strings.Repeat(" ", d)
	// fmt.Println( reflect.TypeOf( n ) )
	switch v := n.(type) {
	case *mys:
		fmt.Printf("1)%s%v\n", s, v.s)
	default:
		fmt.Printf("*)%s%v - %T\n", s, n, n)
	}
	return true
}
// TestSExpr checks the S-expression rendering of a small expression tree:
// root -> = -> (+ 1 2), expected to render as " (= (+ 1 2))".
func TestSExpr(t *testing.T) {
	r := &mys{"root"}
	e := &mys{"="}
	p := &mys{"+"}
	o := &mys{"1"}
	two := &mys{"2"}
	tests := []struct {
		name     string
		input    *ctree
		expected string
	}{
		{"simple", BuildTree("", r).Add(e).Down().Add(p).Add(o).Add(two).Up().Build().(*ctree), " (= (+ 1 2))"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Render starting from the root's first child (the "=" node).
			rec := tt.input.SExpr(tt.input.Children(tt.input.Root())[0])
			if tt.expected != rec {
				t.Errorf(`
Expected %s
Received %s
`, tt.expected, rec)
			}
		})
	}
}
|
package model
// UserModel represents a user record.
// NOTE: the field is named Id (not the idiomatic ID) -- kept as-is to
// preserve the existing external interface.
type UserModel struct {
	Id    int
	Email string
	Name  string
	Value int
}
package flow
import (
"encoding/json"
"testing"
)
// TestFlowJSON verifies that the Flow judged from the "code token" response
// type round-trips through JSON with the expected field values.
// (fix: the errors from JudgeByResponseType, json.Marshal and
// json.Unmarshal were silently ignored before)
func TestFlowJSON(t *testing.T) {
	str1 := "code token"
	flow, err := JudgeByResponseType(str1)
	if err != nil {
		t.Fatalf("failed to judge flow: %v", err)
	}
	b, err := json.Marshal(flow)
	if err != nil {
		t.Fatalf("failed to marshal flow: %v", err)
	}
	actual := string(b)
	expected := `{"type":"hybrid","require_access_token":true,"require_id_token":false}`
	if actual != expected {
		t.Errorf("flow JSON:\n - got: %v\n - want: %v\n", actual, expected)
	}
	var f Flow
	if err := json.Unmarshal([]byte(expected), &f); err != nil {
		t.Fatalf("failed to unmarshal flow: %v", err)
	}
	if f.Type != Hybrid {
		t.Errorf("'code token' should be hybrid: %v", f)
	}
	if !f.RequireAccessToken {
		t.Error("'code token' requires access token")
	}
	if f.RequireIdToken {
		t.Error("'code token' doesn't require id_token")
	}
}
// TestFlow exercises JudgeByResponseType: an invalid response_type must
// fail, "code token" yields the hybrid flow, and "code" yields the
// authorization code flow.
// (fix: "repsonse_type" typo and copy-pasted failure messages that referred
// to 'code token' in the 'code' branch)
func TestFlow(t *testing.T) {
	_, err := JudgeByResponseType("invalid code")
	if err == nil {
		t.Error("invalid response_type should fail")
	}
	str1 := "code token"
	flow, err := JudgeByResponseType(str1)
	if err != nil {
		t.Error("failed to judge flow")
	}
	if flow.Type != Hybrid {
		t.Errorf("'code token' should be hybrid")
	}
	if !flow.RequireAccessToken {
		t.Error("'code token' requires access token")
	}
	if flow.RequireIdToken {
		t.Error("'code token' doesn't require id_token")
	}
	str2 := "code"
	flow, err = JudgeByResponseType(str2)
	if err != nil {
		t.Error("failed to judge flow")
	}
	if flow.Type != AuthorizationCode {
		t.Errorf("'code' should be authorization code")
	}
	if flow.RequireAccessToken {
		t.Error("'code' doesn't require access token")
	}
	if flow.RequireIdToken {
		t.Error("'code' doesn't require id_token")
	}
}
|
package client
import "fmt"
func wrapError(customMsg string, originalError error) error {
return fmt.Errorf("%s : %v", customMsg, originalError)
}
|
package sqls
// DelResults is the SQL statement that deletes all result rows for one
// competition/user pair (bind parameters: competition_id, user_id).
const DelResults = `
DELETE FROM results
WHERE
competition_id = ?
AND user_id = ?
`
|
package postal_test
import (
"encoding/json"
"errors"
"github.com/cloudfoundry-incubator/notifications/cf"
"github.com/cloudfoundry-incubator/notifications/fakes"
"github.com/cloudfoundry-incubator/notifications/postal"
"github.com/pivotal-cf/uaa-sso-golang/uaa"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Tests for postal.UserRecipe ("UAA Recipe"): dispatching a notification to
// a single UAA user via fakes, plus trimming of the JSON response.
// NOTE(review): conn is declared but never assigned, so every Dispatch call
// receives a nil *fakes.DBConn -- presumably fine for these fakes; confirm.
var _ = Describe("UAA Recipe", func() {
	var uaaRecipe postal.UserRecipe
	var options postal.Options
	var tokenLoader *fakes.TokenLoader
	var userLoader *fakes.UserLoader
	var templatesLoader *fakes.TemplatesLoader
	var mailer *fakes.Mailer
	var clientID string
	var receiptsRepo *fakes.ReceiptsRepo
	var conn *fakes.DBConn
	BeforeEach(func() {
		clientID = "mister-client"
		// Fake UAA token for the client, with write scope and a far-future expiry.
		tokenHeader := map[string]interface{}{
			"alg": "FAST",
		}
		tokenClaims := map[string]interface{}{
			"client_id": "mister-client",
			"exp":       int64(3404281214),
			"scope":     []string{"notifications.write"},
		}
		tokenLoader = fakes.NewTokenLoader()
		tokenLoader.Token = fakes.BuildToken(tokenHeader, tokenClaims)
		receiptsRepo = fakes.NewReceiptsRepo()
		mailer = fakes.NewMailer()
		userLoader = fakes.NewUserLoader()
		// One known user the recipe can resolve.
		userLoader.Users["user-123"] = uaa.User{
			ID:     "user-123",
			Emails: []string{"user-123@example.com"},
		}
		templatesLoader = &fakes.TemplatesLoader{}
		uaaRecipe = postal.NewUserRecipe(tokenLoader, userLoader, templatesLoader, mailer, receiptsRepo)
	})
	Describe("Dispatch", func() {
		BeforeEach(func() {
			options = postal.Options{
				KindID:            "forgot_password",
				KindDescription:   "Password reminder",
				SourceDescription: "Login system",
				Text:              "Please reset your password by clicking on this link...",
				HTML: postal.HTML{
					BodyContent: "<p>Please reset your password by clicking on this link...</p>",
				},
			}
		})
		It("records a receipt for the user", func() {
			_, err := uaaRecipe.Dispatch(clientID, postal.UserGUID("user-123"), options, conn)
			if err != nil {
				panic(err)
			}
			Expect(receiptsRepo.CreateUserGUIDs).To(Equal([]string{"user-123"}))
			Expect(receiptsRepo.ClientID).To(Equal(clientID))
			Expect(receiptsRepo.KindID).To(Equal(options.KindID))
		})
		It("calls mailer.Deliver with the correct arguments for a user", func() {
			templates := postal.Templates{
				Subject: "default-missing-subject",
				Text:    "default-space-text",
				HTML:    "default-space-html",
			}
			templatesLoader.Templates = templates
			_, err := uaaRecipe.Dispatch(clientID, postal.UserGUID("user-123"), options, conn)
			if err != nil {
				panic(err)
			}
			user := uaa.User{
				ID:     "user-123",
				Emails: []string{"user-123@example.com"},
			}
			users := map[string]uaa.User{"user-123": user}
			Expect(templatesLoader.ContentSuffix).To(Equal("user_body"))
			Expect(mailer.DeliverArguments).To(ContainElement(conn))
			Expect(mailer.DeliverArguments).To(ContainElement(templates))
			Expect(mailer.DeliverArguments).To(ContainElement(users))
			Expect(mailer.DeliverArguments).To(ContainElement(options))
			// No org/space context for a plain user dispatch: zero values expected.
			Expect(mailer.DeliverArguments).To(ContainElement(cf.CloudControllerOrganization{}))
			Expect(mailer.DeliverArguments).To(ContainElement(cf.CloudControllerSpace{}))
			Expect(mailer.DeliverArguments).To(ContainElement(clientID))
		})
		Context("failure cases", func() {
			Context("when a token cannot be loaded", func() {
				It("returns the error", func() {
					loadError := errors.New("BOOM!")
					tokenLoader.LoadError = loadError
					_, err := uaaRecipe.Dispatch(clientID, postal.UserGUID("user-123"), options, conn)
					Expect(err).To(Equal(loadError))
				})
			})
			Context("when a user cannot be loaded", func() {
				It("returns the error", func() {
					loadError := errors.New("BOOM!")
					userLoader.LoadError = loadError
					_, err := uaaRecipe.Dispatch(clientID, postal.UserGUID("user-123"), options, conn)
					Expect(err).To(Equal(loadError))
				})
			})
			Context("when a template cannot be loaded", func() {
				It("returns a TemplateLoadError", func() {
					templatesLoader.LoadError = errors.New("BOOM!")
					_, err := uaaRecipe.Dispatch(clientID, postal.UserGUID("user-123"), options, conn)
					Expect(err).To(BeAssignableToTypeOf(postal.TemplateLoadError("")))
				})
			})
			Context("when create receipts call returns an err", func() {
				It("returns an error", func() {
					receiptsRepo.CreateReceiptsError = true
					_, err := uaaRecipe.Dispatch(clientID, postal.UserGUID("space-001"), options, conn)
					Expect(err).ToNot(BeNil())
				})
			})
		})
	})
	Describe("Trim", func() {
		Describe("TrimFields", func() {
			It("trims the specified fields from the response object", func() {
				// NOTE(review): the json.Marshal error is never checked here;
				// err is only inspected after being reassigned by
				// json.Unmarshal below.
				responses, err := json.Marshal([]postal.Response{
					{
						Status:         "delivered",
						Recipient:      "user-123",
						NotificationID: "123-456",
					},
				})
				trimmedResponses := uaaRecipe.Trim(responses)
				var result []map[string]string
				err = json.Unmarshal(trimmedResponses, &result)
				if err != nil {
					panic(err)
				}
				Expect(result).To(ContainElement(map[string]string{"status": "delivered",
					"recipient":       "user-123",
					"notification_id": "123-456",
				}))
			})
		})
	})
})
|
package main
import "fmt"
// changeFirst overwrites the first element of slice with 1000. The slice
// header is passed by value, but it points at the caller's backing array,
// so the caller observes this write.
func changeFirst(slice []int) {
	slice[0] = 1000
}
// main demonstrates that a slice passed to a function shares its backing
// array with the caller. (The original comments wrongly claimed the callee
// operated on a completely separate copy; only the slice header is copied.)
func main() {
	var x []int = []int{3, 4, 5} // create a []int slice
	fmt.Println(x)               // prints [3 4 5]
	changeFirst(x)               // slice header is copied, but it references the same backing array
	fmt.Println(x)               // prints [1000 4 5]: the callee's write IS visible here
	/*
		// SLICES: assigning a slice copies only the header; both variables
		// alias the same backing array (length cannot grow without append).
		var x []int = []int{ // [] denotes a slice, followed by the element type
			10,
			100,
		}
		y := x       // not a copy of the elements -- y aliases x's backing array
		y[0] = 5     // visible through x as well
		fmt.Println(x, y)
		// MAP: reference type; assignment aliases, and new keys can be added.
		var x map[string]int = map[string]int{ // map[keyType]valueType pairs keys with values
			"motor": 10,
			"mobil": 5,
		}
		x["oke"]=108 // maps are mutable: this inserts a new key
		y:=x         // no copy -- y is an alias of the same map
		y["dari-y"]=10 // inserting through y is visible through x
		fmt.Println(x,y)
		// ARRAY: a value type -- assignment copies all elements.
		// Note Go requires the type on the literal: it cannot be
		// `var x [3]int = {3, 4,}`; it must be `var x [3]int = [3]int{3, 4,}`.
		// Uncomment the line below to see the compile error:
		// var x [3]int = {3, 4,}
		var x [3]int = [3]int{3, 4} // quite verbose, but required
		// var x []int = []int{1,2,}
		y := x   // full copy: changing y does not affect x
		y[2] = 8 // modifies only the copy
		fmt.Println(x, y)
	*/
}
|
package types
import (
"fmt"
"strings"
codec "github.com/hashrs/blockchain/framework/chain-app/codec"
sdk "github.com/hashrs/blockchain/framework/chain-app/types"
)
const (
// ModuleName is the name of the module
ModuleName = "greeter"
// StoreKey is used to register the module's store
StoreKey = ModuleName
)
var (
// ModuleCdc contains the types for the module that require encoding in amino
ModuleCdc = codec.New()
)
// Greeting contains the data of one greeting. json and yaml tags are used
// to specify field names when marshalled to json.
type Greeting struct {
	Sender    sdk.AccAddress `json:"sender" yaml:"sender"`     // address of the account "sending" the greeting
	Recipient sdk.AccAddress `json:"receiver" yaml:"receiver"` // address of the account "receiving" the greeting
	Body      string         `json:"body" yaml:"body"`         // string body of the greeting
}
// GreetingsList stores all the greetings for a given address
type GreetingsList []Greeting
// NewGreeting constructs a Greeting from sender to receiver with the given
// body text.
func NewGreeting(sender sdk.AccAddress, body string, receiver sdk.AccAddress) Greeting {
	g := Greeting{
		Sender:    sender,
		Recipient: receiver,
		Body:      body,
	}
	return g
}
// String implements fmt.Stringer, rendering sender, recipient and body on
// one trimmed line.
func (g Greeting) String() string {
	formatted := fmt.Sprintf(`Sender: %s Recipient: %s Body: %s`, g.Sender.String(), g.Recipient.String(), g.Body)
	return strings.TrimSpace(formatted)
}
// QueryResGreetings defines the response to our Querier: it maps each
// address (rendered as a string key) to the greetings for that address.
type QueryResGreetings map[string][]Greeting
// String renders the query result as JSON via the module's amino codec.
// Note: MustMarshalJSON panics on marshalling failure.
func (q QueryResGreetings) String() string {
	b := ModuleCdc.MustMarshalJSON(q)
	return string(b)
}
// NewQueryResGreetings constructs a new, empty (non-nil) instance.
func NewQueryResGreetings() QueryResGreetings {
	return QueryResGreetings{}
}
|
package main
import (
"fmt"
)
/*
Go语言中没有类的概念,也不支持类的继承等面向对象的概念,Go语言中通过
结构体的内嵌再配合接口比面向对象具有更高的扩展性和灵活性
自定义类型
type MyInt int //将MyInt定义为int类型
通过type关键字的定义,MyInt是一种新的类型,具有int的特性
类型别名
是Go1.9版本添加的新功能
type TypeAlias = Type //TypeAlias只是Type的别名,本质上TypeAlias与Type是同一个类型
比如系统的
type byte = int8
type rune = int32
结构体
封装多个基本数据类型,struct来定义自己的类型,进而实现面向对象
定义格式
type 类型名 struct{
字段名 字段类型
......
}
结构体实例化时才会真正地分配内存,声明结构体类型格式
var 结构体实例 结构体类型
匿名结构体
在定义一些临时数据结构等场景下可以使用匿名结构体
var user struct{Name string; Age int}
user.Name = "xyz"
user.Age = 22
fmt.Println(user)
创建指针类型结构体
我们可以通过使用new关键字对结构体进行实例化,得到的是结构体的地址,格式如下:
var p2 = new(Person)
fmt.Printf("%T\n",p2) //*main.Person
fmt.Printf("p2=%#v\n",p2) //p2=&main.person{name:"", city:"", age:0}
Go语言中支持对结构体指针直接使用.来访问结构体的成员
使用&对结构体进行取地址相当于对结构体类型进行了一次new实例化操作
结构体初始化
没有初始化的结构体,其成员变量都是的对应的零值
使用键值对初始化
使用键值对对结构体进行初始化时,键对应结构体的字段,值对应该字段的初始值。
也可以对结构体指针进行键值对初始化
当某些字段没有初始值的时候,该字段可以不写。此时,没有指定初始值的字段的值就是该字段类型的零值。
使用值的列表初始化
初始化结构体的时候可以简写,也就是初始化的时候不写键,直接写值
注意
必须初始化结构体的所有字段。
初始值的填充顺序必须与字段在结构体中的声明顺序一致。
该方式不能和键值初始化方式混用。
结构体内存布局
结构体占用一块连续的内存
构造函数
Go语言的结构体没有构造函数,我们可以自己实现。如下:
func newPerson(name, city string, age int8) *person {
return &person{
name: name,
city: city,
age: age,
}//直接返回指针,性能开销小
}
方法和接收者
Go语言中的方法是一种作用于特定类型变量的函数,这种特定类型变量叫做接收者,接收者
的概念类似于其他语言中的this或self
格式如下:
func (接收者变量 接收者类型) 方法名(参数列表) (返回参数){
函数体
}
方法和函数的区别是,函数不属于任何类型,方法属于特定类型
值类型接收者使用情况
1、需要修改接收者中的值
2、接收者是拷贝代价比较大的大对象
3、保证一致性,如果有某个方法使用了指针接收者,那么其他的方法也应该使用指针接收者。
任意类型添加方法
方法的接收者可以是任意类型,不仅仅是结构体,任何类型都可以拥有方法,比如下边的MyInt,NewInt不行
非本地类型不能定义方法,我们不能给别的包的类型定义方法
结构体匿名字段
结构体允许其成员字段在声明时没有字段名而只有类型,这种没有名字的字段就称为匿名字段
//Person 结构体Person类型
type Person struct {
string
int
}
p1 := Person{
"小王子",
18,
}
fmt.Printf("%#v\n", p1) //main.Person{string:"北京", int:18}
fmt.Println(p1.string, p1.int) //北京 18
*/
// MyInt is a custom defined type based on int: it is a new, distinct type
// that keeps int's behavior.
type MyInt int

// NewInt is a type alias for int: NewInt and int are the same type.
type NewInt = int
// Person is a custom struct type with age, city and name fields.
type Person struct {
	age  int8
	city string
	name string
}
// newPerson is a constructor for Person; it returns a pointer so the struct
// is not copied.
func newPerson(name string, city string, age int8) *Person {
	p := &Person{
		name: name,
		city: city,
		age:  age,
	}
	return p
}
// Dream is a pointer-receiver method on Person that prints the person's
// dream using their name and city.
func (p *Person) Dream() {
	fmt.Printf("%s的梦想是干%s\n", p.name, p.city)
}
// test groups four int8 fields; main prints their addresses to show that a
// struct occupies one contiguous block of memory.
type test struct {
	a int8
	b int8
	c int8
	d int8
}
// main demonstrates defined types vs aliases, struct declaration and
// initialization, pointer structs via new, constructors, methods, struct
// memory layout, the range-variable pointer pitfall, and anonymous fields.
func main() {
	var a MyInt
	var b NewInt
	fmt.Printf("type of a:%T\n", a) // main.MyInt: a defined type has its own identity
	fmt.Printf("type of b:%T\n", b) // int: NewInt is only an alias
	var p Person
	p.name = "guan"
	p.city = "beijing"
	p.age = 33
	fmt.Println(p)
	var user struct { // anonymous struct, handy for short-lived data
		Age  int
		Name string
	}
	user.Name = "xyz"
	user.Age = 22
	fmt.Println(user)
	// new returns a pointer to a zero-valued Person.
	var p2 = new(Person)
	fmt.Printf("type of p2 is :%T\n", p2) // *main.Person
	p2.city = "aa"                        // Go allows "." directly on struct pointers
	p2.name = "hello"
	p2.age = 22
	fmt.Println(p2) // &{aa hello 22}
	p3 := newPerson("大黄", "河南", 23)
	p3.Dream()
	var t test
	t = test{
		1, 2, 3, 4,
	}
	// The four int8 fields sit at consecutive addresses: a struct occupies
	// one contiguous block of memory.
	fmt.Printf("t.a %p\n", &t.a)
	fmt.Printf("t.b %p\n", &t.b)
	fmt.Printf("t.c %p\n", &t.c)
	fmt.Printf("t.d %p\n", &t.d)
	m := make(map[string]*student)
	stus := []student{
		{name: "小王子", age: 18},
		{name: "娜扎", age: 23},
		{name: "大王八", age: 9000},
	}
	for _, stu := range stus {
		// stu1 := stu // uncomment (and take &stu1) to give each key its own copy
		m[stu.name] = &stu // pitfall: every entry stores the address of the same loop variable
	}
	for k, v := range m {
		fmt.Println(k, "=>", v.name)
		// All keys print the last element's name, e.g.:
		// 娜扎 => 大王八
		// 大王八 => 大王八
		// 小王子 => 大王八
	}
	// An anonymous field uses its type name as the field name, so a struct
	// can have at most one anonymous field of each type.
	niMing := NiMing{
		string: "sss",
		int:    33,
	}
	fmt.Println(niMing)
}
// student is used by main to demonstrate the range-variable pitfall when
// storing pointers into a map.
type student struct {
	age  int
	name string
}

// NiMing demonstrates anonymous (embedded) fields: the type names int and
// string double as field names.
type NiMing struct {
	int
	string
}
|
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docgen
import (
	"bytes"
	"fmt"
	"sort"
	"strings"

	"github.com/getkin/kin-openapi/openapi3"
	"github.com/olekukonko/tablewriter"
)
// GenerateConsoleDocument generate the document shown on the console.
// Property rows are emitted in sorted key order so repeated runs produce
// identical output (fix: Go map iteration order is randomized, which used
// to shuffle the table rows and the order of nested sections).
func GenerateConsoleDocument(title string, schema *openapi3.Schema) (string, error) {
	var buffer = &bytes.Buffer{}
	var printSubProperties []*openapi3.Schema
	if len(schema.Properties) > 0 {
		var propertiesTable = tablewriter.NewWriter(buffer)
		propertiesTable.SetHeader([]string{"NAME", "TYPE", "DESCRIPTION", "REQUIRED", "OPTIONS", "DEFAULT"})
		// Deterministic iteration order over the properties map.
		keys := make([]string, 0, len(schema.Properties))
		for key := range schema.Properties {
			keys = append(keys, key)
		}
		sort.Strings(keys)
		for _, key := range keys {
			subSchema := schema.Properties[key]
			// Qualify nested property names with their parent title.
			name := subSchema.Value.Title
			if title != "" {
				name = fmt.Sprintf("(%s).%s", title, name)
			}
			defaultValue := fmt.Sprintf("%v", subSchema.Value.Default)
			if subSchema.Value.Default == nil {
				defaultValue = ""
			}
			var options = ""
			for _, enum := range subSchema.Value.Enum {
				options += fmt.Sprintf("%v", enum)
			}
			propertiesTable.Append([]string{
				name,
				subSchema.Value.Type,
				subSchema.Value.Description,
				fmt.Sprintf("%t", strings.Contains(strings.Join(schema.Required, "/"), subSchema.Value.Title)),
				options,
				defaultValue,
			})
			// Properties with their own sub-properties are rendered
			// recursively after this table.
			if len(subSchema.Value.Properties) > 0 {
				printSubProperties = append(printSubProperties, schema.Properties[key].Value)
			}
		}
		buffer.WriteString(title + "\n")
		propertiesTable.Render()
	}
	for _, sub := range printSubProperties {
		next := strings.Join([]string{title, sub.Title}, ".")
		if title == "" {
			next = sub.Title
		}
		re, err := GenerateConsoleDocument(next, sub)
		if err != nil {
			return "", err
		}
		buffer.WriteString(re)
	}
	return buffer.String(), nil
}
|
package btrfs
/*
#include <stdlib.h>
#include <dirent.h>
*/
import "C"
import (
"fmt"
"unsafe"
)
// free releases a C string previously allocated with C.CString.
func free(p *C.char) {
	C.free(unsafe.Pointer(p))
}
// openDir opens the directory at path via the C library's opendir(3).
// The returned *C.DIR must be released with closeDir.
func openDir(path string) (*C.DIR, error) {
	Cpath := C.CString(path)
	defer free(Cpath)
	dir := C.opendir(Cpath)
	if dir == nil {
		return nil, fmt.Errorf("Can't open dir")
	}
	return dir, nil
}
// closeDir closes a directory stream obtained from openDir; a nil dir is a
// no-op.
func closeDir(dir *C.DIR) {
	if dir != nil {
		C.closedir(dir)
	}
}
// getDirFd returns the file descriptor underlying the directory stream,
// via dirfd(3).
func getDirFd(dir *C.DIR) uintptr {
	return uintptr(C.dirfd(dir))
}
|
package model
// Menu is a restaurant's offering for one date: an optional soup, regular
// menu items, and optional special menus, plus source metadata.
// NOTE: field Url is kept as-is (rather than the idiomatic URL) to preserve
// the existing external interface.
type Menu struct {
	Info         string      `json:"info,omitempty"` // free-form note about the menu
	Restaurant   string      `json:"restaurant"`
	Url          string      `json:"url"` // presumably the source page of the menu -- confirm with callers
	Soup         MenuItem    `json:"soup,omitempty"`
	Menus        []MenuItem  `json:"menus,omitempty"`
	SpecialMenus *[]MenuItem `json:"specialMenus,omitempty"` // pointer so an absent list is omitted from JSON
	Date         string      `json:"date,omitempty"`
}
|
package main
// Marker records a candidate subarray: its left start index and its length.
// left == -1 denotes "no subarray".
type Marker struct {
	left int
	len  int
}
// getMaxLen returns the length of the longest contiguous subarray of nums
// whose product is positive, computed by the divide-and-conquer helper
// mlps over the whole array.
func getMaxLen(nums []int) int {
	return mlps(nums, 0, len(nums)).len
}
// mlps returns, for the half-open window nums[j:k), a Marker describing the
// longest contiguous run with positive product, via divide and conquer:
// the answer is the best of (left half, right half, best run crossing the
// midpoint m). Zeros terminate runs; sign is tracked by counting negatives.
func mlps(nums []int, j int, k int) Marker {
	/** Helpers */
	const IntMax = int(^uint(0) >> 1)
	const IntMin = -int(^uint(0)>>1) - 1
	// MaxInt returns the maximum of its arguments (IntMax for no arguments).
	MaxInt := func(args ...int) int {
		if len(args) == 0 {
			return IntMax
		}
		r := IntMin
		for _, e := range args {
			if e > r {
				r = e
			}
		}
		return r
	}
	// MinInt returns the minimum of its arguments (IntMin for no arguments).
	MinInt := func(args ...int) int {
		if len(args) == 0 {
			return IntMin
		}
		r := IntMax
		for _, e := range args {
			if e < r {
				r = e
			}
		}
		return r
	}
	// MaxLenMarker picks the marker with the greatest len.
	MaxLenMarker := func(args ...Marker) Marker {
		r := Marker{-1, 0}
		if len(args) == 0 {
			return r
		}
		for _, e := range args {
			if e.len > r.len {
				r = e
			}
		}
		return r
	}
	// MarkersValid reports whether every marker has non-negative left and len.
	MarkersValid := func(args ...Marker) bool {
		for _, e := range args {
			if e.left < 0 || e.len < 0 {
				return false
			}
		}
		return true
	}
	/** Solution */
	n := k - j
	// Base cases: empty window, or a single element (positive => run of 1).
	if n == 0 {
		return Marker{left: -1, len: 0}
	} else if n == 1 {
		if nums[j] > 0 {
			return Marker{left: j, len: 1}
		} else {
			return Marker{left: -1, len: 0}
		}
	}
	m := j + ((k - j) / 2)
	maxLeft := mlps(nums, j, m)
	maxRight := mlps(nums, m, k)
	// cross left: scan leftwards from the midpoint (stopping at a zero),
	// tracking the negative count and the extreme indices of each sign.
	leftNegCount, leftmostNeg, leftmostPos := 0, IntMax, IntMax
	for i := m - 1; i >= j; i-- {
		if nums[i] == 0 {
			break
		} else if nums[i] < 0 {
			leftNegCount++
			leftmostNeg = i
		} else {
			leftmostPos = i
		}
	}
	// Derive the leftmost start of a positive-product and a negative-product
	// prefix ending at m, based on the parity of the negative count.
	leftmostPosProdIdx := IntMax
	leftmostNegProdIdx := IntMax
	if leftNegCount%2 == 0 {
		leftmostPosProdIdx = MinInt(leftmostNeg, leftmostPos)
		if leftNegCount > 0 {
			leftmostNegProdIdx = leftmostNeg + 1
		}
	} else {
		leftmostNegProdIdx = MinInt(leftmostNeg, leftmostPos)
		if leftmostNegProdIdx < m-1 {
			leftmostPosProdIdx = leftmostNeg + 1
		}
	}
	maxCrossLeftPosProd := Marker{left: -1, len: 0}
	if leftmostPosProdIdx != IntMax {
		maxCrossLeftPosProd = Marker{
			left: leftmostPosProdIdx,
			len:  m - leftmostPosProdIdx,
		}
	}
	maxCrossLeftNegProd := Marker{left: -1, len: 0}
	if leftmostNegProdIdx != IntMax {
		maxCrossLeftNegProd = Marker{
			left: leftmostNegProdIdx,
			len:  m - leftmostNegProdIdx,
		}
	}
	// cross right: mirror scan to the right of the midpoint.
	rightNegCount, rightmostNeg, rightmostPos := 0, IntMin, IntMin
	for i := m; i < k; i++ {
		if nums[i] == 0 {
			break
		} else if nums[i] < 0 {
			rightNegCount++
			rightmostNeg = i
		} else {
			rightmostPos = i
		}
	}
	rightmostPosProdIdx := IntMin
	rightmostNegProdIdx := IntMin
	if rightNegCount%2 == 0 {
		rightmostPosProdIdx = MaxInt(rightmostNeg, rightmostPos)
		if rightNegCount > 0 {
			rightmostNegProdIdx = rightmostNeg - 1
		}
	} else {
		rightmostNegProdIdx = MaxInt(rightmostNeg, rightmostPos)
		if rightmostNegProdIdx > m {
			rightmostPosProdIdx = rightmostNeg - 1
		}
	}
	maxCrossRightPosProd := Marker{left: -1, len: 0}
	if rightmostPosProdIdx != IntMin {
		maxCrossRightPosProd = Marker{
			left: m,
			len:  rightmostPosProdIdx - m + 1,
		}
	}
	// NOTE(review): this sentinel uses len -1 where the others use 0 --
	// presumably so MarkersValid rejects it; confirm that is intentional.
	maxCrossRightNegProd := Marker{-1, -1}
	if rightmostNegProdIdx != IntMin {
		maxCrossRightNegProd = Marker{
			left: m,
			len:  rightmostNegProdIdx - m + 1,
		}
	}
	// Combine the crossing halves: either pos*pos stays positive, or
	// neg*neg becomes positive; pick whichever pairing is longer.
	maxCross := Marker{left: -1, len: 0}
	if MarkersValid(maxCrossLeftPosProd, maxCrossRightPosProd, maxCrossLeftNegProd, maxCrossRightNegProd) {
		if maxCrossLeftPosProd.len+maxCrossRightPosProd.len > maxCrossLeftNegProd.len+maxCrossRightNegProd.len {
			maxCross = Marker{
				left: maxCrossLeftPosProd.left,
				len:  maxCrossLeftPosProd.len + maxCrossRightPosProd.len,
			}
		} else {
			maxCross = Marker{
				left: maxCrossLeftNegProd.left,
				len:  maxCrossLeftNegProd.len + maxCrossRightNegProd.len,
			}
		}
	} else if MarkersValid(maxCrossLeftPosProd, maxCrossRightPosProd) {
		maxCross = Marker{
			left: maxCrossLeftPosProd.left,
			len:  maxCrossLeftPosProd.len + maxCrossRightPosProd.len,
		}
	} else if MarkersValid(maxCrossLeftNegProd, maxCrossRightNegProd) {
		maxCross = Marker{
			left: maxCrossLeftNegProd.left,
			len:  maxCrossLeftNegProd.len + maxCrossRightNegProd.len,
		}
	}
	return MaxLenMarker(maxLeft, maxRight, maxCross)
}
|
package webreg
import (
"bytes"
"errors"
"fmt"
"html/template"
"net/http"
"path"
"strings"
"time"
"github.com/alecthomas/chroma"
"github.com/alecthomas/chroma/formatters/html"
"github.com/alecthomas/chroma/lexers"
"github.com/alecthomas/chroma/styles"
"github.com/sirupsen/logrus"
"k8s.io/test-infra/prow/repoowners"
"github.com/openshift/ci-tools/pkg/api"
"github.com/openshift/ci-tools/pkg/load"
"github.com/openshift/ci-tools/pkg/load/agents"
"github.com/openshift/ci-tools/pkg/registry"
)
// Names of the HTTP query parameters used to identify a ci-operator
// configuration and test. These keys match the query strings emitted by the
// "jobTable" template links (e.g. /job?org=...&repo=...&branch=...&test=...).
const (
OrgQuery = "org"
RepoQuery = "repo"
BranchQuery = "branch"
VariantQuery = "variant"
TestQuery = "test"
)
// htmlPageStart is the shared opening of every rendered HTML page: doctype,
// Bootstrap/jQuery assets, inline styles, the top navigation bar, and an
// opening <div class="container">. It contains a single %s verb for the page
// title and is paired with htmlPageEnd, which closes the elements opened here.
const htmlPageStart = `
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8"><title>%s</title>
<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css" integrity="sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO" crossorigin="anonymous">
<script src="https://code.jquery.com/jquery-3.3.1.slim.min.js" integrity="sha384-q8i/X+965DzO0rT7abK41JStQIAqVgRVzpbzo5smXKp4YfRvH+8abtTE1Pi6jizo" crossorigin="anonymous"></script>
<script src="https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/js/bootstrap.min.js" integrity="sha384-ChfqqxuZUCnJSK3+MXmPNIyE6ZbWh2IMqE241rYiqJxyMiZ6OW/JmZQ5stwEULTy" crossorigin="anonymous"></script>
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<style>
@namespace svg url(http://www.w3.org/2000/svg);
svg|a:link, svg|a:visited {
cursor: pointer;
}
svg|a text,
text svg|a {
fill: #007bff;
text-decoration: none;
background-color: transparent;
-webkit-text-decoration-skip: objects;
}
svg|a:hover text, svg|a:active text {
fill: #0056b3;
text-decoration: underline;
}
pre {
border: 10px solid transparent;
}
h1, h2, p {
padding-top: 10px;
}
h1 a:link,
h2 a:link,
h3 a:link,
h4 a:link,
h5 a:link {
color: inherit;
text-decoration: none;
}
h1 a:hover,
h2 a:hover,
h3 a:hover,
h4 a:hover,
h5 a:hover {
text-decoration: underline;
}
h1 a:visited,
h2 a:visited,
h3 a:visited,
h4 a:visited,
h5 a:visited {
color: inherit;
text-decoration: none;
}
.info {
text-decoration-line: underline;
text-decoration-style: dotted;
text-decoration-color: #c0c0c0;
}
button {
padding:0.2em 1em;
border-radius: 8px;
cursor:pointer;
}
td {
vertical-align: middle;
}
</style>
</head>
<body>
<nav class="navbar navbar-expand-lg navbar-light bg-light">
<a class="navbar-brand" href="/">Openshift CI Step Registry</a>
<button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarSupportedContent" aria-controls="navbarSupportedContent" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarSupportedContent">
<ul class="navbar-nav mr-auto">
<li class="nav-item">
<a class="nav-link" href="/">Home <span class="sr-only">(current)</span></a>
</li>
<li class="nav-item">
<a class="nav-link" href="/search">Jobs</a>
</li>
<li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#" id="navbarDropdown" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false">
Help
</a>
<div class="dropdown-menu" aria-labelledby="navbarDropdown">
<a class="dropdown-item" href="/help">Getting Started</a>
<a class="dropdown-item" href="/help/ci-operator">CI Operator Overview</a>
<a class="dropdown-item" href="/help/leases">Leases and Quota</a>
<a class="dropdown-item" href="/help/private-repositories">Private Repositories</a>
<a class="dropdown-item" href="/help/adding-components">Adding and Changing Content</a>
<a class="dropdown-item" href="/help/release">Contributing to <code>openshift/release</code></a>
<a class="dropdown-item" href="/help/operators">OLM Operator Support</a>
<a class="dropdown-item" href="/help/examples">Examples</a>
<a class="dropdown-item" href="/help/links">Useful links</a>
</div>
</li>
</ul>
<form class="form-inline my-2 my-lg-0" role="search" action="/search" method="get">
<input class="form-control mr-sm-2" type="search" placeholder="Prow Job" aria-label="Search" name="job">
<button class="btn btn-outline-success my-2 my-sm-0" type="submit">Search Jobs</button>
</form>
</div>
</nav>
<div class="container">
`
// htmlPageEnd closes the container <div>, <body>, and <html> elements opened
// by htmlPageStart, appending a footer link to this page's source code.
const htmlPageEnd = `
<p class="small">Source code for this page located on <a href="https://github.com/openshift/ci-tools">GitHub</a></p>
</div>
</body>
</html>`
// errPage is a minimal template body that renders the template data value
// verbatim; used as the page body when an error message must be displayed.
const errPage = `
{{ . }}
`
// mainPage is the registry index page body: it renders the "workflowTable",
// "chainTable", and "referenceTable" sub-templates (defined in
// templateDefinitions) from the .Workflows, .Chains, and .References fields
// of the template data.
const mainPage = `
{{ template "workflowTable" .Workflows }}
{{ template "chainTable" .Chains }}
{{ template "referenceTable" .References}}
`
// referencePage is the detail page for a single registry step (reference):
// its name, documentation, container image, syntax-highlighted source
// (via the syntaxedSource template func), and GitHub/owners metadata.
const referencePage = `
<h2 id="title"><a href="#title">Step:</a> <nobr style="font-family:monospace">{{ .Reference.As }}</nobr></h2>
<p id="documentation">{{ .Reference.Documentation }}</p>
<h2 id="image"><a href="#image">Container image used for this step:</a> <span style="font-family:monospace">{{ .Reference.From }}</span></h2>
<h2 id="source"><a href="#source">Source Code</a></h2>
{{ syntaxedSource .Reference.Commands }}
<h2 id="github"><a href="#github">GitHub Link:</a></h2>{{ githubLink .Metadata.Path }}
{{ ownersBlock .Metadata.Owners }}
`
// chainPage is the detail page for a single registry chain: its name,
// documentation, the ordered table of steps it runs, a rendered step graph
// (via the chainGraph template func), and GitHub/owners metadata.
//
// Note: the title heading uses the singular "Chain:" to match the sibling
// pages ("Step:" on referencePage, "{{ $type }}:" on workflowJobPage),
// since exactly one chain is rendered here.
const chainPage = `
<h2 id="title"><a href="#title">Chain:</a> <nobr style="font-family:monospace">{{ .Chain.As }}</nobr></h2>
<p id="documentation">{{ .Chain.Documentation }}</p>
<h2 id="steps" title="Steps run by the chain, in runtime order"><a href="#steps">Steps</a></h2>
{{ template "stepTable" .Chain.Steps}}
<h2 id="graph" title="Visual representation of steps run by this chain"><a href="#graph">Step Graph</a></h2>
{{ chainGraph .Chain.As }}
<h2 id="github"><a href="#github">GitHub Link:</a></h2>{{ githubLink .Metadata.Path }}
{{ ownersBlock .Metadata.Owners }}
`
// workflowJobPage is the detail-page template shared by jobs and workflows.
// .Workflow.Type supplies the heading label; the cluster profile and
// documentation sections render only when present, and the GitHub/owners
// section is emitted only when the type is exactly "Workflow" (jobs have no
// registry source file of their own).
const workflowJobPage = `
{{ $type := .Workflow.Type }}
<h2 id="title"><a href="#title">{{ $type }}:</a> <nobr style="font-family:monospace">{{ .Workflow.As }}</nobr></h2>
{{ if .Workflow.Documentation }}
<p id="documentation">{{ .Workflow.Documentation }}</p>
{{ end }}
{{ if .Workflow.Steps.ClusterProfile }}
<h2 id="cluster_profile"><a href="#cluster_profile">Cluster Profile:</a> <span style="font-family:monospace">{{ .Workflow.Steps.ClusterProfile }}</span></h2>
{{ end }}
<h2 id="pre" title="Steps run by this {{ toLower $type }} to set up and configure the tests, in runtime order"><a href="#pre">Pre Steps</a></h2>
{{ template "stepTable" .Workflow.Steps.Pre }}
<h2 id="test" title="Steps in the {{ toLower $type }} that run actual tests, in runtime order"><a href="#test">Test Steps</a></h2>
{{ template "stepTable" .Workflow.Steps.Test }}
<h2 id="post" title="Steps run by this {{ toLower $type }} to clean up and teardown test resources, in runtime order"><a href="#post">Post Steps</a></h2>
{{ template "stepTable" .Workflow.Steps.Post }}
<h2 id="graph" title="Visual representation of steps run by this {{ toLower $type }}"><a href="#graph">Step Graph</a></h2>
{{ workflowGraph .Workflow.As .Workflow.Type }}
{{ if eq $type "Workflow" }}
<h2 id="github"><a href="#github">GitHub Link:</a></h2>{{ githubLink .Metadata.Path }}
{{ ownersBlock .Metadata.Owners }}
{{ end }}
`
// jobSearchPage is the job search results page body; it renders the
// "jobTable" sub-template (defined in templateDefinitions) over the
// template data.
const jobSearchPage = `
{{ template "jobTable" . }}
`
// templateDefinitions declares the shared sub-templates used by the page
// bodies above: the "nameWithLink*" link helpers, "stepTable"/"stepList" for
// rendering step sequences, "workflowTable"/"chainTable"/"referenceTable"
// for the registry index, and "jobTable" for search results. These rely on
// template funcs registered by the caller (docsForName, testStepNameAndType,
// noescape, orgSpan, repoSpan, inc). The jobTable rowspan/inc bookkeeping is
// what keeps the nested org/repo/branch/variant rows aligned, so the markup
// order here is load-bearing.
const templateDefinitions = `
{{ define "nameWithLink" }}
<nobr><a href="/{{ .Type }}/{{ .Name }}" style="font-family:monospace">{{ .Name }}</a></nobr>
{{ end }}
{{ define "nameWithLinkReference" }}
<nobr><a href="/reference/{{ . }}" style="font-family:monospace">{{ . }}</a></nobr>
{{ end }}
{{ define "nameWithLinkChain" }}
<nobr><a href="/chain/{{ . }}" style="font-family:monospace">{{ . }}</a></nobr>
{{ end }}
{{ define "nameWithLinkWorkflow" }}
<nobr><a href="/workflow/{{ . }}" style="font-family:monospace">{{ . }}</a></nobr>
{{ end }}
{{ define "stepTable" }}
{{ if not . }}
<p>No test steps configured.</p>
{{ else }}
<table class="table">
<thead>
<tr>
<th title="The name of the step or chain" class="info">Name</th>
<th title="The documentation for the step or chain" class="info">Description</th>
</tr>
</thead>
<tbody>
{{ range $index, $step := . }}
<tr>
{{ $nameAndType := testStepNameAndType $step }}
{{ $doc := docsForName $nameAndType.Name }}
{{ if not $step.LiteralTestStep }}
<td>{{ template "nameWithLink" $nameAndType }}</td>
{{ else }}
<td>{{ $nameAndType.Name }}</td>
{{ end }}
<td>{{ noescape $doc }}</td>
</tr>
{{ end }}
</tbody>
</table>
{{ end }}
{{ end }}
{{ define "stepList" }}
<ul>
{{ range $index, $step := .}}
{{ $nameAndType := testStepNameAndType $step }}
<li>{{ template "nameWithLink" $nameAndType }}</li>
{{ end }}
</ul>
{{ end }}
{{ define "workflowTable" }}
<h2 id="workflows"><a href="#workflows">Workflows</a></h2>
<p>Workflows are the highest level registry components, defining a test from start to finish.</p>
<table class="table">
<thead>
<tr>
<th title="The name of the workflow and what the workflow is supposed to do" class="info">Name and Description</th>
<th title="The registry components used during the Pre, Test, and Post sections of the workflow" class="info">Steps</th>
</tr>
</thead>
<tbody>
{{ range $name, $config := . }}
<tr>
<td><b>Name:</b> {{ template "nameWithLinkWorkflow" $name }}<p>
<b>Description:</b><br>{{ docsForName $name }}
</td>
<td>{{ if gt (len $config.Pre) 0 }}<b>Pre:</b>{{ template "stepList" $config.Pre }}{{ end }}
{{ if gt (len $config.Test) 0 }}<b>Test:</b>{{ template "stepList" $config.Test }}{{ end }}
{{ if gt (len $config.Post) 0 }}<b>Post:</b>{{ template "stepList" $config.Post }}{{ end }}
</td>
</tr>
{{ end }}
</tbody>
</table>
{{ end }}
{{ define "chainTable" }}
<h2 id="chains"><a href="#chains">Chains</a></h2>
<p>Chains are registry components that allow users to string together multiple registry components under one name. These components can be steps and other chains.</p>
<table class="table">
<thead>
<tr>
<th title="The name of the chain" class="info">Name</th>
<th title="What the chain is supposed to do" class="info">Description</th>
<th title="The components (steps and other chains) that the chain runs (in order)" class="info">Steps</th>
</tr>
</thead>
<tbody>
{{ range $name, $config := . }}
<tr>
<td>{{ template "nameWithLinkChain" $name }}</td>
<td>{{ docsForName $name }}</td>
<td>{{ template "stepList" $config.Steps }}</td>
</tr>
{{ end }}
</tbody>
</table>
{{ end }}
{{ define "referenceTable" }}
<h2 id="steps"><a href="#steps">Steps</a></h2>
<p>Steps are the lowest level registry components, defining a command to run and a container to run the command in.</p>
<table class="table">
<thead>
<tr>
<th title="The name of the step" class="info">Name</th>
<th title="The documentation for the step" class="info">Description</th>
</tr>
</thead>
<tbody>
{{ range $name, $config := . }}
<tr>
<td>{{ template "nameWithLinkReference" $name }}</td>
<td>{{ docsForName $name }}</td>
</tr>
{{ end }}
</tbody>
</table>
{{ end }}
{{ define "jobTable" }}
<h2 id="jobs"><a href="#jobs">Jobs</a></h2>
<table class="table">
{{ $containsVariant := .ContainsVariant }}
<thead>
<tr>
<th title="GitHub organization that the job is from" class="info">Org</th>
<th title="GitHub repo that the job is from" class="info">Repo</th>
<th title="GitHub branch that the job is from" class="info">Branch</th>
{{ if $containsVariant }}
<th title="Variant of the ci-operator config" class="info">Variant</th>
{{ end }}
<th title="The multistage tests in the configuration" class="info">Tests</th>
</tr>
</thead>
<tbody>
{{ range $index, $org := .Orgs }}
<tr>
<td rowspan="{{ (orgSpan $org $containsVariant) }}" style="vertical-align: middle;">{{ $org.Name }}</td>
</tr>
{{ range $index, $repo := $org.Repos }}
<tr>
<td rowspan="{{ (repoSpan $repo $containsVariant) }}" style="vertical-align: middle;">{{ $repo.Name }}</td>
</tr>
{{ range $index, $branch := $repo.Branches }}
{{ $branchLen := len $branch.Variants }}
{{ if $containsVariant }}
{{ $branchLen = inc $branchLen}}
{{ end }}
{{ if gt (len $branch.Tests) 0 }}
{{ $branchLen = inc $branchLen}}
{{ end }}
<tr>
<td rowspan="{{ $branchLen }}" style="vertical-align: middle;">{{ $branch.Name }}</td>
{{ if gt (len $branch.Tests) 0 }}
{{ if $containsVariant }}
</tr>
<tr>
<td style="vertical-align: middle;"></td>
{{ end }} <!-- if $containsVariant -->
<td>
<ul>
{{ range $index, $test := $branch.Tests }}
<li><nobr><a href="/job?org={{$org.Name}}&repo={{$repo.Name}}&branch={{$branch.Name}}&test={{$test}}" style="font-family:monospace">{{$test}}</a></nobr></li>
{{ end }}
</ul>
</td>
{{ end }} <!-- if gt (len $branch.Tests) 0 -->
</tr>
{{ range $index, $variant := $branch.Variants }}
<tr>
<td style="vertical-align: middle;">{{ $variant.Name }}</td>
<td>
<ul>
{{ range $index, $test := $variant.Tests }}
<li><nobr><a href="/job?org={{$org.Name}}&repo={{$repo.Name}}&branch={{$branch.Name}}&test={{$test}}&variant={{$variant.Name}}" style="font-family:monospace">{{$test}}</a></nobr></li>
{{ end }}
</ul>
</td>
</tr>
{{ end }}
{{ end }}
{{ end }}
{{ end }}
</tbody>
</table>
{{ end }}
`
// optionalOperatorOverviewPage is the help page describing how ci-operator
// builds and tests OLM operator bundles and indexes. The embedded
// {{ yamlSyntax (index . "...") }} calls render the example YAML constants
// below (optionalOperatorBundleConfig, optionalOperatorIndexConsumerStep,
// optionalOperatorTestConfig) with syntax highlighting.
//
// Every section heading carries an id plus a self-referencing anchor so it
// can be deep-linked, matching the convention used across the help pages.
const optionalOperatorOverviewPage = `<h2 id="title"><a href="#title">Testing Operators Built With The Operator SDK and Deployed Through OLM</a></h2>
<p>
<code>ci-operator</code> supports building, deploying, and testing operator
bundles, whether the operator repository uses the Operator SDK or not. This
document outlines how to configure <code>ci-operator</code> to build bundle and
index images and use those in end-to-end tests.
</p>
<p>
Consult the <code>ci-operator</code> <a href="/help/ci-operator">overview</a> and
the step environment <a href="/help">reference</a> for detailed descriptions of the
broader test infrastructure that an operator test is defined in.
</p>
<h3 id="operator-artifacts"><a href="#operator-artifacts">Building Artifacts for OLM Operators</a></h3>
<p>
Multiple different images are involved in installing and testing
candidate versions of OLM-delivered operators: operand, operator, bundle, and
index images. Operand and operator images are built normally using the
<code>images</code> stanza in <a href="/help/ci-operator#images"><code>ci-operator</code> configuration</a>.
OLM uses bundle and index images to install the desired version of an operator.
<code>ci-operator</code> can build ephemeral versions of these images suitable
for installation and testing, but not for production.
</p>
<h4 id="bundles"><a href="#bundles">Building Operator Bundles</a></h4>
<p>
Configuring <code>ci-operator</code> to build operator bundles from a
repository is as simple as adding a new <code>operator</code> stanza,
specifying the bundles built from the repository, and what sorts of
container image pull specification substitutions are necessary during bundle
build time. Substitutions allow for the operator manifests to refer to images
that were built from the repository during the test or imported from other
sources. The following example builds an operator and then a bundle. While building
the bundle, the operator's pull specification in manifests are replaced with the
operator version built during the test:
</p>
<code>ci-operator</code> configuration:
{{ yamlSyntax (index . "optionalOperatorBundleConfig") }}
<p>
When configuring a bundle build, two options are available:
</p>
<ul>
<li><code>dockerfile_path</code>: a path to the Dockerfile that builds the bundle image, defaulting to <code>bundle.Dockerfile</code></li>
<li><code>context_dir</code>: base directory for the bundle image build, defaulting to the root of the source tree</li>
</ul>
<p>
The <code>operator.bundles</code> stanza is a list, so it is possible to build
multiple bundle images from one repository.
</p>
<h4 id="index"><a href="#index">Building an Index</a></h4>
<p>
When <code>ci-operator</code> builds at least one operator bundle from a
repository, it will also automatically build an ephemeral index image to package
those bundles. Test workloads should consume the bundles via this index
image. The index image is named <code>ci-index</code> and can be exposed to test
steps via the <a href="/help/ci-operator#literal-references"><code>dependencies</code></a> feature.
</p>
<p>
The ephemeral index is built from scratch and only the bundles built in the
current <code>ci-operator</code> run will be added to it, nothing else. The
bundles are added to the index using the <code>semver</code> mode, which means
that the <code>spec.version</code> stanza in the CSV must be a valid semantic
version. Also, if the CSV has a <code>spec.replaces</code> stanza, it is
ignored, because the index will not contain a bundle with the replaced version.
</p>
<h4 id="ci-index-jobs"><a href="#ci-index-jobs">Validating Bundle and Index Builds</a></h4>
<p>
Similarly to how the job generator automatically creates a <code>pull-ci-$ORG-$REPO-$BRANCH-images</code>
job to test image builds when <code>ci-operator</code> configuration has an
<code>images</code> stanza, it will also make a separate job that builds the
configured bundle and index images. This job, named <code>pull-ci-$ORG-$REPO-$BRANCH-ci-index</code>,
is created only when an <code>operator</code> stanza is present.
</p>
<h3 id="tests"><a href="#tests">Running Tests</a></h3>
<p>
Once <code>ci-operator</code> builds the operator bundle and index, they are
available to be used as a <code>CatalogSource</code> by OLM for deploying and
testing the operator. The index image is called <code>ci-index</code> and can
be exposed to multi-stage test workloads via the <a href="/help/ci-operator#literal-references">
<code>dependencies</code> feature</a>:
</p>
Step configuration example:
{{ yamlSyntax (index . "optionalOperatorIndexConsumerStep") }}
<p>
Any test workflow involving such step will require <code>ci-operator</code> to
build the index image before it executes the workflow. The <code>OO_INDEX</code>
environmental variable set for the step will contain the pull specification of
the index image.
</p>
<h3 id="oo-steps"><a href="#oo-steps">Step Registry Content for Operators</a></h3>
<p>
The step registry contains several generic steps and workflows that implement the
common operations involving operators. We encourage operator repositories to
consider using (and possibly improving) these shared steps and workflows over
implementing their own from scratch.
</p>
<h4 id="simple-install"><a href="#simple-install">Simple Operator Installation</a></h4>
<p>
The <code>optional-operators-ci-$CLOUD</code> (<a href="/workflow/optional-operators-ci-aws">aws</a>
, <a href="/workflow/optional-operators-ci-gcp">gcp</a>, <a href="/workflow/optional-operators-ci-azure">azure</a>)
family of workflows take the following steps to set up the test environment:
</p>
<ul>
<li>deploy an ephemeral OpenShift cluster to test against</li>
<li>create a <code>Namespace</code> to install into</li>
<li>create an <code>OperatorGroup</code> and <code>CatalogSource</code> (referring to built index) to configure OLM</li>
<li>create a <code>Subscription</code> for the operator under test</li>
<li>wait for the operator under test to install and deploy</li>
</ul>
<p>
These workflows enhance the general installation workflows (like
<a href="/workflow/ipi-aws">ipi-aws</a>) with an additional
<a href="/reference/optional-operators-ci-subscribe">optional-operators-ci-subscribe</a>
step. Tests using these workflows need to provide the following parameters:
</p>
<table class="table">
<tr>
<th style="white-space: nowrap">Parameter</th>
<th>Description</th>
</tr>
<tr>
<td style="white-space: nowrap"><code>OO_PACKAGE</code></td>
<td>The name of the operator package to be installed.</td>
</tr>
<tr>
<td style="white-space: nowrap"><code>OO_CHANNEL</code></td>
<td>The name of the operator channel to track.</td>
</tr>
<tr>
<td style="white-space: nowrap"><code>OO_INSTALL_NAMESPACE</code></td>
<td>The namespace into which the operator and catalog will be installed. Special, default value <code>!create</code> means that a new namespace will be created.</td>
</tr>
<tr>
<td style="white-space: nowrap"><code>OO_TARGET_NAMESPACES</code></td>
<td>A comma-separated list of namespaces the operator will target. Special,
default value <code>!all</code> means that all namespaces will be targeted.
If no <code>OperatorGroup</code> exists in <code>$OO_INSTALL_NAMESPACE</code>,
a new one will be created with its target namespaces set to <code>$OO_TARGET_NAMESPACES</code>.
Otherwise, the existing <code>OperatorGroup</code>'s target namespace set
will be replaced. The special value <code>!install</code> will set the
target namespace to the operator's installation namespace.</td>
</tr>
</table>
<p>
The combination of <code>OO_INSTALL_NAMESPACE</code> and <code>OO_TARGET_NAMESPACES</code>
values determines the <code>InstallMode</code> when installing the operator. The
default <code>InstallMode</code> is <code>AllNamespaces</code> (the operator will
be installed into a newly created namespace of a random name, targeting all
namespaces).
</p>
<p>
A user-provided test can expect to have <code>${KUBECONFIG}</code> set, with
administrative privileges, and for the operator under test to be fully deployed
at the time that the test begins. The following example runs a test in this manner:
</p>
<code>ci-operator</code> configuration:
{{ yamlSyntax (index . "optionalOperatorTestConfig") }}
`
// optionalOperatorBundleConfig is the example ci-operator configuration shown
// on the operators help page (rendered via yamlSyntax by
// optionalOperatorOverviewPage); it demonstrates the operator.bundles and
// substitutions stanzas. The YAML body is display content — keep it valid.
const optionalOperatorBundleConfig = `base_images:
ubi: # imports the UBI base image for building
namespace: "ocp"
name: "ubi"
tag: "8"
operand: # imports the latest operand image
namespace: "ocp"
name: "operand"
tag: "latest"
images:
- from: "ubi"
to: "tested-operator"
operator:
bundles: # entries create bundle images from Dockerfiles and an index containing all bundles
- dockerfile_path: "path/to/Dockerfile" # defaults to bundle.Dockerfile
context_dir: "path/" # defaults to .
substitutions:
# replace references to the operand with the imported version (base_images stanza)
- pullspec: "quay.io/openshift/operand:1.3"
with: "stable:operand"
# replace references to the operator with the built version (images stanza)
- pullspec: "quay.io/openshift/tested-operator:1.3"
with: "pipeline:tested-operator"
`
// optionalOperatorIndexConsumerStep is the example step configuration shown
// on the operators help page: a registry ref that receives the ephemeral
// ci-index image pull spec through the OO_INDEX dependency variable.
const optionalOperatorIndexConsumerStep = `ref:
as: "step-consuming-ci-index"
from: "cli"
commands: "step-consuming-ci-index.sh"
dependencies:
- env: "OO_INDEX"
name: "ci-index"
documentation: ...
`
// optionalOperatorTestConfig is the example tests stanza shown on the
// operators help page: an end-to-end test that uses the
// optional-operators-ci-aws workflow with the OO_* parameters documented
// there (package, channel, install/target namespaces).
const optionalOperatorTestConfig = `tests:
- as: "operator-e2e"
steps:
workflow: "optional-operators-ci-aws"
cluster_profile: "aws"
env:
OO_CHANNEL: "1.2.0"
OO_INSTALL_NAMESPACE: "kubevirt-hyperconverged"
OO_PACKAGE: "kubevirt-hyperconverged"
OO_TARGET_NAMESPACES: '!install'
test:
- as: "e2e"
from: "src" # the end-to-end tests run in the source repository
commands: "make test-e2e" # the commands to run end-to-end tests
resources:
requests:
cpu: 100m
memory: 200Mi
`
const ciOperatorOverviewPage = `<h2 id="title"><a href="#title">What is <code>ci-operator</code> and how does it work?</a></h2>
<p>
<code>ci-operator</code> is a highly opinionated test workflow execution engine
that knows about how OpenShift is built, released and installed. <code>ci-operator</code>
hides the complexity of assembling an ephemeral OpenShift 4.x release payload,
thereby allowing authors of end-to-end test suites to focus on the content of
their tests and not the infrastructure required for cluster setup and installation.
</p>
<p>
<code>ci-operator</code> allows for components that make up an OpenShift
release to be tested together by allowing each component repository to
test with the latest published versions of all other components. An
integration stream of container images is maintained with the latest
tested versions of every component. A test for any one component snapshots
that stream, replaces any images that are being tested with newer versions,
and creates an ephemeral release payload to support installing an OpenShift
cluster to run end-to-end tests.
</p>
<p>
In addition to giving first-class support for testing OpenShift components,
<code>ci-operator</code> expects to run in an OpenShift cluster and uses
OpenShift features like <code>Builds</code> and <code>ImageStreams</code>
extensively, thereby exemplifying a complex OpenShift user workflow and
making use of the platform itself. Each test with a unique set of inputs
will have a <code>Namespace</code> provisioned to hold the OpenShift objects
that implement the test workflow.
</p>
<p>
<code>ci-operator</code> needs to understand a few important characteristics of
any repository it runs tests for. This document will begin by walking through
those characteristics and how they are exposed in the configuration. With an
understanding of those building blocks, then, the internal workflow of
<code>ci-operator</code> will be presented.
</p>
<h3 id="configuration"><a href="#configuration">Configuring <code>ci-operator</code>: Defining A Repository</a></h3>
<p>
At a high level, when a repository author writes a <code>ci-operator</code>
configuration file, they are describing how a repository produces output
artifacts, how those artifacts fit into the larger OpenShift release and
how those artifacts should be tested. The following examples will describe
the configuration file as well as walk through how <code>ci-operator</code>
creates OpenShift objects to fulfill their intent.
</p>
<h4 id="inputs"><a href="#inputs">Configuring Inputs</a></h4>
<p>
When <code>ci-operator</code> runs tests to verify proposed changes in a pull
request to a component repository, it must first build the output artifacts
from the repository. In order to generate these builds, <code>ci-operator</code>
needs to know the inputs from which they will be created. A number of inputs
can be configured; the following example provides both:
</p>
<ul>
<li><code>base_images</code>: provides a mapping of named <code>ImageStreamTags</code> which will be available for use in container image builds</li>
<li><code>build_root</code>: defines the <code>ImageStreamTag</code> in which dependencies exist for building executables and non-image artifacts</li>
</ul>
<code>ci-operator</code> configuration:
{{ yamlSyntax (index . "ciOperatorInputConfig") }}
<p>
As <code>ci-operator</code> is an OpenShift-native tool, all image references
take the form of an <code>ImageStreamTag</code> on the build farm cluster, not
just a valid pull-spec for an image. <code>ci-operator</code> will import these
<code>ImageStreamTags</code> into the <code>Namespace</code> created for the
test workflow; snapshotting the current state of inputs to allow for reproducible
builds.
</p>
<p>
If an image that is required for building is not yet present on the cluster,
either:
</p>
<ul>
<li>
The correct <code>ImageStream</code> should be declared and committed to
the <code>openshift/release</code> repository <a
href="https://github.com/openshift/release/tree/master/core-services/supplemental-ci-images">here.</a>
</li>
<li>
The image referenced in <code>base_images</code> has to be accessible. The
simplest RBAC rule to achieve this is to allow the
<code>system:authenticated</code> role to <code>get</code>
<code>imagestreams/layers</code> in the namespace that contains the
<code>ImageStream</code>.
</li>
</ul>
<h4 id="buildroot"><a href="#buildroot">Build Root Image</a></h4>
<p>
The build root image must contain all dependencies for building executables and
non-image artifacts. Additionally, <code>ci-operator</code> requires this image
to include a <code>git</code> executable in <code>$PATH</code>. Most repositories
will want to use an image already present in the cluster, using the <code>image_stream_tag</code>
stanza as described in <a href="#inputs">Configuring Inputs</a>.
</p>
<p>
Alternatively, a project can be configured to build a build root image using
a <code>Dockerfile</code> in the repository:
</p>
{{ yamlSyntax (index . "ciOperatorProjectImageBuildroot") }}
<p>
In this case, the <code>Dockerfile</code> will <b>always</b> be obtained from
current <code>HEAD</code> of the given branch, even if ci-operator runs in the
context of a PR that updates that <code>Dockerfile</code>.
</p>
<p>
A third option is to configure the <code>build_root</code> in your repo
alongside the code instead of inside the <code>ci-operator</code> config. The main advantage
of this is that it allows changing both the code and the <code>build_root</code> atomically.
To do so, set the <code>from_repository: true</code> in your <code>ci-operator</code> config:
</p>
{{ yamlSyntax (index . "ciOperatorBuildRootFromRepo") }}
<p>
Afterwards, create a file named <code>.ci-operator.yaml</code> in your repository
that contains the imagestream you want to use for your <code>build_root</code>:
</p>
{{ yamlSyntax (index . "ciOperatorBuildRootInRepo" ) }}
<h4 id="artifacts"><a href="#artifacts">Building Artifacts</a></h4>
<p>
Starting <code>FROM</code> the image described as the <code>build_root</code>,
<code>ci-operator</code> will clone the repository under test and compile
artifacts, committing them as image layers that may be referenced in derivative
builds. The commands which are run to compile artifacts are configured with
<code>binary_build_commands</code> and are run in the root of the cloned
repository. A separate set of commands, <code>test_binary_build_commands</code>,
can be configured for building artifacts to support test execution. The following
<code>ImageStreamTags</code> are created in the test's <code>Namespace</code>:
</p>
<ul>
<li><code>pipeline:root</code>: imports or builds the <code>build_root</code> image</li>
<li><code>pipeline:src</code>: clones the code under test <code>FROM pipeline:root</code></li>
<li><code>pipeline:bin</code>: runs commands in the cloned repository to build artifacts <code>FROM pipeline:src</code></li>
<li><code>pipeline:test-bin</code>: runs a separate set of commands in the cloned repository to build test artifacts <code>FROM pipeline:src</code></li>
</ul>
<code>ci-operator</code> configuration:
{{ yamlSyntax (index . "ciOperatorPipelineConfig") }}
<p>
The content created with these OpenShift <code>Builds</code> is addressable
in the <code>ci-operator</code> configuration simply with the tag. For instance,
the <code>pipeline:bin</code> image can be referenced as <code>bin</code> when
the content in that image is needed in derivative <code>Builds</code>.
</p>
<h4 id="images"><a href="#images">Building Container Images</a></h4>
<p>
Once container images exist with output artifacts for a repository, additional
output container images may be built that make use of those artifacts. Commonly,
the desired output container image will contain only the executables for a
component and not any of the build-time dependencies. Furthermore, most teams
will need to publish their output container images through the automated release
pipeline, which requires that the images are built in Red Hat's production image
build system, OSBS. In order to create an output container image without build-time
dependencies in a manner which is compatible with OSBS, the simplest approach is a
multi-stage <code>Dockerfile</code> build.
</p>
<p>
The standard pattern for a multi-stage <code>Dockerfile</code> is to run a compilation
in a builder image and copy the resulting artifacts into a separate output image base.
For instance, a repository could add this <code>Dockerfile</code> to their source:
</p>
<code>Dockerfile</code>:
{{ dockerfileSyntax (index . "multistageDockerfile") }}
<p>
While such a <code>Dockerfile</code> could simply be built by <code>ci-operator</code>,
a number of optimizations can be configured to speed up the process -- especially if
multiple output images share artifacts. An output container image build is configured
for <code>ci-operator</code> with the <code>images</code> stanza in the configuration.
Any entry in the <code>images</code> stanza can be configured with native OpenShift
<code>Builds</code> options; the full list can be viewed <a href="https://godoc.org/github.com/openshift/ci-tools/pkg/api#ProjectDirectoryImageBuildInputs">here.</a>
In the following example, an output container image is built where the <code>builder</code>
image is replaced with the image layers containing built artifacts in <code>pipeline:bin</code>
and the output image base is replaced with the appropriate entry from <code>base_images</code>.
</p>
<code>ci-operator</code> configuration:
{{ yamlSyntax (index . "ciOperatorImageConfig") }}
<p>
By making use of the previously compiled artifacts in the intermediate <code>pipeline:bin</code>
image, this repository is able to cache the Go build. If multiple output images exist that
rely on a previously built artifact, this caching effect can reduce build times dramatically.
</p>
<h4 id="promotion"><a href="#promotion">Publishing Container Images</a></h4>
<p>
Once <code>ci-operator</code> has built output container images for a repository,
it can publish them to an integration <code>ImageStream</code> so that other
repositories can consume them. For instance, every image that makes up the
OpenShift release payload is incrementally updated in an integration <code>ImageStream</code>.
This allows release payloads to be created incorporating the latest tested version
of every component. In order to publish images to an integration <code>ImageStream</code>,
add the <code>promotion</code> stanza to <code>ci-operator</code> configuration.
</p>
<p>
The <code>promotion</code> stanza declares which container images are published
and defines the integration <code>ImageStream</code> where they will be available.
By default, all container images declared in the <code>images</code> block of a
<code>ci-operator</code> configuration are published when a <code>promotion</code>
stanza is present to define the integration <code>ImageStream</code>. Promotion can
also be configured to include other images by setting <code>additional_images</code>
and to exclude images using <code>excluded_images</code>. For instance, this example
publishes the following images:
</p>
<ul>
<li>the <code>pipeline:src</code> tag, published as <code>ocp/4.5:repo-scripts</code> containing the latest version of the repository</li>
<li>the <code>stable:component</code> tag, published as <code>ocp/4.5:mycomponent</code> containing the output component itself</li>
</ul>
<code>ci-operator</code> configuration:
{{ yamlSyntax (index . "ciOperatorPromotionConfig") }}
<h4 id="release"><a href="#release">Describing Inclusion in an OpenShift Release</a></h4>
<p>
<code>ci-operator</code> gives first-class support to repositories which need to
run end-to-end tests in the context of an OpenShift cluster. <code>ci-operator</code>
supports two mechanisms for testing in the context of an OpenShift release. First, it
is possible to use the container images built as part of the test to build an ephemeral
release payload, allowing repositories that build parts of OpenShift to test versions
that include components under test. Second, it is possible to reference existing release
payloads that have already been created, in order to validate those releases or for
repositories to test their functionality against published versions of OpenShift.
</p>
<h5 id="ephemeral-release"><a href="#ephemeral-release">Testing With an Ephemeral OpenShift Release</a></h5>
<p>
The <code>tag_specification</code> configuration option enables a repository to declare
which version of OpenShift it is a part of by specifying the images that will be used to
create an ephemeral OpenShift release payload for testing. Most commonly, the same integration
<code>ImageStream</code> is specified for <code>tag_specification</code> as is for
<code>promotion</code>.
</p>
<code>ci-operator</code> configuration:
{{ yamlSyntax (index . "ciOperatorTagSpecificationConfig") }}
<p>
When <code>ci-operator</code> begins to test a repository, it will snapshot the current
state of the integration <code>ImageStream</code>, importing all tags into the test
<code>Namespace</code>. Any output image tags built from the repository under test
overwrite those that are imported from the integration <code>ImageStream</code>. An
ephemeral release payload is built from the resulting <code>ImageStream</code>,
containing the latest published versions of all components and the proposed version
of the component under test.
</p>
<h5 id="existing-release"><a href="#existing-release">Testing With an Existing OpenShift Release</a></h5>
<p>
The <code>releases</code> configuration option allows specification of an existing
version of OpenShift that a component will be tested on. Three types of releases
may be referenced: candidate release payloads from a release controller, pre-release
payloads that have yet to be published to Cincinnati, and official releases as
customers would see them.
</p>
<p>
Releases may be named, with two names holding special meaning. In ordinary end-to-end
tests, the <code>latest</code> release describes the version that will be installed
before tests are run. For upgrade end-to-end tests, the <code>initial</code> release
describes the version of OpenShift which is initially installed, after which an upgrade
is executed to the <code>latest</code> release, after which tests are run. The full pull
specification for a release payload is provided to test steps with the <code>${RELEASE_IMAGE_<name>}</code>
environment variable. The following example exposes the following release payloads to tests:
</p>
<ul>
<li>the <code>release:initial</code> tag, holding a release candidate for OKD 4.3, exposed as <code>${RELEASE_IMAGE_INITIAL}</code></li>
<li>the <code>release:latest</code> tag, holding an officially-released payload for OCP 4.4, exposed as <code>${RELEASE_IMAGE_LATEST}</code></li>
<li>the <code>release:previous</code> tag, holding a previous release candidate for OCP 4.5, exposed as <code>${RELEASE_IMAGE_PREVIOUS}</code></li>
<li>the <code>release:custom</code> tag, holding the latest pre-release payload for OCP 4.4, exposed as <code>${RELEASE_IMAGE_CUSTOM}</code></li>
</ul>
<code>ci-operator</code> configuration:
{{ yamlSyntax (index . "ciOperatorReleaseConfig") }}
<h4 id="tests"><a href="#tests">Declaring Tests</a></h4>
<p>
Tests as executed by <code>ci-operator</code> run a set of commands inside of a container;
this is implemented by scheduling a <code>Pod</code> under the hood. <code>ci-operator</code>
can be configured to run one of two types of tests: simple, single-stage container
tests and longer, multi-stage container tests. A single-stage test will schedule one
<code>Pod</code> and execute the commands specified. Note that the default working
directory for any container image in the <code>pipeline</code> <code>ImageStream</code>
is the root of the cloned repository under test. The following example uses this
approach to run static verification of source code:
</p>
<code>ci-operator</code> configuration:
{{ yamlSyntax (index . "ciOperatorContainerTestConfig") }}
<p>
The second approach to describing tests allows for multiple containers to be chained
together and describes a more complicated execution flow between them. This multi-stage
test approach is best suited for end-to-end test suites that require full OpenShift
test clusters to be brought up and torn down. Learn more about this type of test
at the <a href="./">getting started overview</a>.
</p>
<h4 id="test-types"><a href="#test-types">Types of Tests</a></h4>
<h5 id="presubmit"><a href="#presubmit">Pre-submit Tests</a></h5>
<p>
By default, any entry declared in the <code>tests</code> stanza of a <code>ci-operator</code>
configuration file will be a <i>pre-submit</i> test: these tests run before code is
submitted (merged) into the target repository. Pre-submit tests are useful
to give feedback to a developer on the content of their pull request and to gate
merges to the central repository. These tests will fire when a pull request is opened,
when the contents of a pull request are changed, or on demand when a user requests
them.
</p>
<h5 id="postsubmit"><a href="#postsubmit">Post-submit Tests</a></h5>
<p>
When a repository configures <code>ci-operator</code> to build images and publish
them (by declaring container image builds with <code>images</code> and the destination
for them to be published with <code>promotion</code>), a <i>post-submit</i> test will
exist. A post-submit test executes after code is merged to the target repository;
this sort of test type is a good fit for publication of new artifacts after changes to
source code.
</p>
<p>
Adding a custom postsubmit to a repository via the ci-operator config is
supported. To do so, add the <code>postsubmit</code> field to a ci-operator
test config and set it to <code>true</code>. The following example configures
a ci-operator test to run as a postsubmit:
</p>
<code>ci-operator</code> configuration:
{{ yamlSyntax (index . "ciOperatorPostsubmitTestConfig") }}
<p>
One important thing to note is that, unlike presubmit jobs, the postsubmit
tests are configured to not be rehearsable. This means that when the test is
being added or modified by a PR in the <code>openshift/release</code> repo,
the job will not be automatically run against the change in the PR. This is
done to prevent accidental publication of artifacts by rehearsals.
</p>
<h5 id="periodic"><a href="#periodic">Periodic Tests</a></h5>
<p>
A repository may be interested in validating the health of the latest source code,
but not at every moment that the code changes. In these cases, a <i>periodic</i>
test may be configured to run on the latest source code on a schedule. The following
example sets the <code>cron</code> field on an entry in the <code>tests</code> list
to configure that test to run on a schedule, instead of as a pre-submit:
</p>
<code>ci-operator</code> configuration:
{{ yamlSyntax (index . "ciOperatorPeriodicTestConfig") }}
<p>
Note that the build farms used to execute jobs run on UTC time, so time-of-day based
<code>cron</code> schedules must be set with that in mind.
</p>
<h3 id="image-references"><a href="#image-references">Referencing Images</a></h3>
<p>
As <code>ci-operator</code> is OpenShift-native, all images used in a test workflow
are stored as <code>ImageStreamTags</code>. The following <code>ImageStreams</code>
will exist in the <code>Namespace</code> executing a test workflow:
</p>
<table>
<tr>
<th style="white-space: nowrap"><code>ImageStream</code></th>
<th>Description</th>
</tr>
<tr>
<td style="white-space: nowrap"><code>pipeline</code></td>
<td>Input images described with <code>base_images</code> and <code>build_root</code> as well as images holding built artifacts (such as <code>src</code> or <code>bin</code>) and output images as defined in <code>images</code>.</td>
</tr>
<tr>
<td style="white-space: nowrap"><code>release</code></td>
<td>Tags of this <code>ImageStream</code> hold OpenShift release payload images for installing and upgrading ephemeral OpenShift clusters for testing; a tag will be present for every named release configured in <code>releases</code>. If a <code>tag_specification</code> is provided, two tags will be present, <code>:initial</code> and <code>:latest</code>.</td>
</tr>
<tr>
<td style="white-space: nowrap"><code>stable-<name></code></td>
<td>Images composing the <code>release:<name></code> release payload, present when <code><name></code> is configured in <code>releases</code>.</td>
</tr>
<tr>
<td style="white-space: nowrap"><code>stable</code></td>
<td>Same as above, but for the <code>release:latest</code> release payload. Appropriate tags are overridden using the container images built during the test.</td>
</tr>
</table>
<h4 id="config-references"><a href="#config-references">Referring to Images in <code>ci-operator</code> Configuration</a></h4>
<p>
Inside of any <code>ci-operator</code> configuration file all images must be
referenced as an <code>ImageStreamTag</code> (<code>stream:tag</code>), but
may be referenced simply with the tag name. When an image is referenced with
a tag name, the tag will be resolved on the <code>pipeline</code> <code>ImageStream</code>,
if possible, falling back to the <code>stable</code> <code>ImageStream</code>
if not. For example, an image referenced as <code>installer</code> will use
<code>pipeline:installer</code> if that tag is present, falling back to
<code>stable:installer</code> if not. The following configuration fields
use this defaulting mechanism:
</p>
<ul>
<li><code>images[*].from</code>: configuring the base <code>FROM</code> which an image builds</li>
<li><code>promotion.additional_images</code>: configuring which images are published</li>
<li><code>promotion.excluded_images</code>: configuring which images are not published</li>
<li><code>tests[*].container.from</code>: configuring the container image in which a single-stage test runs</li>
<li><code>tests[*].steps.{pre,test,post}[*].from</code>: configuring the container image which some part of a multi-stage test runs</li>
</ul>
<h4 id="literal-references"><a href="#literal-references">Referring to Images in Tests</a></h4>
<p>
<code>ci-operator</code> will run every part of a test as soon as possible, including
imports of external releases, builds of container images and test workflow steps. If a
workflow step runs in a container image that's imported or built in an earlier part of
a test, <code>ci-operator</code> will wait to schedule that test step until the image is
present. In some cases, however, it is necessary for a test command to refer to an image
that was built during the test workflow but not run inside of that container image itself.
In this case, the default scheduling algorithm needs to know that the step requires a
valid reference to exist before running.
</p>
<p>
Test workloads can declare that they require a fully resolved pull specification
as a digest for any image from the <code>pipeline</code>,
<code>stable-<name></code> or <code>release</code>
<code>ImageStreams</code>. Multi-stage tests may opt into having these
environment variables present by declaring <code>dependencies</code> in the
<code>ci-operator</code> configuration for the test. For instance, the example
test below will be able to access the following environment variables:
</p>
<ul>
<li><code>${MACHINE_CONFIG_OPERATOR}</code>: exposing the pull specification of the <code>stable:machine-config-operator</code> <code>ImageStreamTag</code></li>
<li><code>${BINARIES}</code>: exposing the pull specification of the <code>pipeline:bin</code> <code>ImageStreamTag</code></li>
<li><code>${LATEST_RELEASE}</code>: exposing the pull specification of the <code>release:latest</code> payload <code>ImageStreamTag</code></li>
</ul>
<code>ci-operator</code> configuration:
{{ yamlSyntax (index . "ciOperatorContainerTestWithDependenciesConfig") }}
<h5 id="dependency-overrides"><a href="#dependency-overrides">Dependency Overrides</a></h5>
<p>
Dependencies can be defined at the workflows and test level in the registry,
overwriting the source for the pull specification that will populate an environment
variable in a step. These definitions will be propagated from the top-level definition
to individual steps. The following example overrides the content of the <code>${DEP}</code>
environment variable in the <code>test</code> step to point to the pull specification of
<code>pipeline:src</code> instead of the original <code>pipeline:bin</code>.
</p>
{{ yamlSyntax (index . "depsPropagation") }}
`
// ciOperatorInputConfig is the example ci-operator configuration rendered in
// the "Configuring Inputs" section of the documentation page; it demonstrates
// the base_images and build_root stanzas.
const ciOperatorInputConfig = `base_images:
  base: # provides the OpenShift universal base image for other builds to use when they reference "base"
    name: "4.5"
    namespace: "ocp"
    tag: "base"
  cli: # provides an image with the OpenShift CLI for other builds to use when they reference "cli"
    name: "4.5"
    namespace: "ocp"
    tag: "cli"
build_root: # declares that the release:golang-1.13 image has the build-time dependencies
  image_stream_tag:
    name: "release"
    namespace: "openshift"
    tag: "golang-1.13"
`
// ciOperatorPipelineConfig is the example configuration rendered in the
// "Building Artifacts" section; it demonstrates binary_build_commands and
// test_binary_build_commands.
const ciOperatorPipelineConfig = `binary_build_commands: "go build ./cmd/..." # these commands are run to build "pipeline:bin"
test_binary_build_commands: "go test -c -o mytests" # these commands are run to build "pipeline:test-bin"`
const multistageDockerfile = `# this image is replaced by the build system to provide repository source code
FROM registry.svc.ci.openshift.org/ocp/builder:golang-1.13 AS builder
# the repository's source code will be available under $GOPATH of /go
WORKDIR /go/src/github.com/myorg/myrepo
# this COPY bring the repository's source code from the build context into an image layer
COPY . .
# this matches the binary_build_commands but runs against the build cache
RUN go build ./cmd/...
# this is the production output image base and matches the "base" build_root
FROM registry.svc.ci.openshift.org/openshift/origin-v4.5:base
# inject the built artifact into the output
COPY --from=builder /go/src/github.com/myorg/myrepo/mybinary /usr/bin/
`
// ciOperatorImageConfig is the example configuration rendered in the
// "Building Container Images" section; it demonstrates the images stanza with
// builder/base image replacement and input paths.
const ciOperatorImageConfig = `images:
- dockerfile_path: "Dockerfile" # this is a relative path from the root of the repository to the multi-stage Dockerfile
  from: "base" # a reference to the named base_image, used to replace the output FROM in the Dockerfile
  inputs:
    bin: # declares that the "bin" tag is used as the builder image when overwriting that FROM instruction
      as:
      - "registry.svc.ci.openshift.org/ocp/builder:golang-1.13"
  to: "mycomponent" # names the output container image "mycomponent"
- dockerfile_path: "tests/Dockerfile"
  from: "test-bin" # base the build off of the built test binaries
  inputs:
    cli:
      paths:
      - destination_dir: "."
        source_path: "/go/bin/oc" # inject the OpenShift clients into the build context directory
  to: "mytests" # names the output container image "mytests"
`
// ciOperatorPromotionConfig is the example configuration rendered in the
// "Publishing Container Images" section; it demonstrates the promotion stanza
// with additional_images and excluded_images.
const ciOperatorPromotionConfig = `promotion:
  additional_images:
    repo-scripts: "src"    # promotes "src" as "repo-scripts"
  excluded_images:
  - "mytests" # does not promote the test image
  namespace: "ocp"
  name: "4.5"
`
// ciOperatorTagSpecificationConfig is the example configuration rendered in
// the "Testing With an Ephemeral OpenShift Release" section; it demonstrates
// the tag_specification stanza.
const ciOperatorTagSpecificationConfig = `tag_specification:
  cluster: "https://api.ci.openshift.org"
  namespace: "ocp"
  name: "4.5"
`
// ciOperatorReleaseConfig is the example configuration rendered in the
// "Testing With an Existing OpenShift Release" section; it demonstrates the
// releases stanza with candidate, release and prerelease entries.
const ciOperatorReleaseConfig = `releases:
  initial:         # describes the 'initial' release
    candidate:     # references a candidate release payload
      product: okd
      version: "4.3"
  latest:
    release:            # references a version released to customers
      channel: stable   # configures the release channel to search
      version: "4.4"
  previous:
    candidate:
      product: ocp
      architecture: amd64
      stream: nightly   # specifies a candidate release stream
      version: "4.5"
      relative: 1       # resolves to the Nth latest payload in this stream
  custom:
    prerelease:         # references a version that may be published to customers, but is not yet
      product: ocp
      version_bounds:   # bounds the version for the release chosen
        lower: "4.4.0"
        upper: "4.5.0-0"
`
// ciOperatorContainerTestConfig is the example configuration rendered in the
// "Declaring Tests" section; it demonstrates a simple single-stage container
// test.
const ciOperatorContainerTestConfig = `tests:
- as: "vet"                 # names this test "vet"
  commands: "go vet ./..."  # declares which commands to run
  container:
    from: "src"             # runs the commands in "pipeline:src"
`
// ciOperatorContainerTestWithDependenciesConfig is the example configuration
// rendered in the "Referring to Images in Tests" section; it demonstrates a
// multi-stage test step that declares image dependencies exposed as
// environment variables.
const ciOperatorContainerTestWithDependenciesConfig = `tests:
- as: "vet"
  steps:
    test:
    - as: "vet"
      from: "src"
      commands: "test-script.sh ${BINARIES} ${MACHINE_CONFIG_OPERATOR} ${LATEST_RELEASE}"
      resources:
        requests:
          cpu: 100m
          memory: 100Mi
      dependencies:
      - name: "machine-config-operator"
        env: "MACHINE_CONFIG_OPERATOR"
      - name: "bin"
        env: "BINARIES"
      - name: "release:latest"
        env: "LATEST_RELEASE"
`
// depsPropagation is the example configuration rendered in the "Dependency
// Overrides" section; it demonstrates overriding a step's dependency at the
// steps level so ${DEP} resolves to pipeline:src instead of pipeline:bin.
const depsPropagation = `tests:
- as: "example"
  steps:
    dependencies:
      DEP: "pipeline:src" # the override for the definition of ${DEP}
    test:
    - as: "test"
      commands: "make test"
      from: "src"
      resources:
        requests:
          cpu: 100m
          memory: 100Mi
      dependencies:
      - name: "pipeline:bin" # the original definition of ${DEP}
        env: "DEP"
`
// ciOperatorPostsubmitTestConfig is the example configuration rendered in the
// "Post-submit Tests" section; it demonstrates the postsubmit field on a test.
const ciOperatorPostsubmitTestConfig = `tests:
- as: "upload-results"            # names this test "upload-results"
  commands: "make upload-results" # declares which commands to run
  container:
    from: "bin"                   # runs the commands in "pipeline:bin"
  postsubmit: true                # schedule the job to be run as a postsubmit
`
// ciOperatorPeriodicTestConfig is the example configuration rendered in the
// "Periodic Tests" section; it demonstrates the cron field on a test.
const ciOperatorPeriodicTestConfig = `tests:
- as: "sanity"                 # names this test "sanity"
  commands: "go test ./..."    # declares which commands to run
  container:
    from: "src"                # runs the commands in "pipeline:src"
  cron: "0 */6 * * *"          # schedule a run on the hour, every six hours
`
// ciOperatorProjectImageBuildroot is the example configuration rendered in
// the "Build Root Image" section; it demonstrates building the build root
// from a Dockerfile in the repository via project_image.
const ciOperatorProjectImageBuildroot = `build_root:
  project_image:
    dockerfile_path: images/build-root/Dockerfile # Dockerfile for building the build root image
`
// ciOperatorBuildRootFromRepo is the example configuration rendered in the
// "Build Root Image" section; it demonstrates sourcing the build_root from
// the repository's .ci-operator.yaml via from_repository.
const ciOperatorBuildRootFromRepo = `build_root:
  from_repository: true
`
// ciOperatorBuildRootInRepo is the example .ci-operator.yaml content rendered
// in the "Build Root Image" section; it declares the imagestream to use for
// the build_root when from_repository is enabled.
const ciOperatorBuildRootInRepo = `build_root_image:
  namespace: openshift
  name: release
  tag: golang-1.15
`
const gettingStartedPage = `
<h2 id="title"><a href="#title">What is the Multistage Test and the Test Step Registry?</a></h2>
<p>
The multistage test style in the <code>ci-operator</code> is a modular test design that
allows users to create new tests by combining smaller, individual test steps.
These individual steps can be put into a shared registry that other tests can
access. This results in test workflows that are easier to maintain and
upgrade as multiple test workflows can share steps and don’t have to each be
updated individually to fix bugs or add new features. It also reduces the
chances of a mistake when copying a feature from one test workflow to
another.
</p>
<p>
To understand how the multistage tests and registry work, we must first talk
about the three components of the test registry and how to use those components
to create a test:
<ul>
<li>
<a href="#step">Step</a>: A step is the lowest level
component in the test step registry. It describes an individual test
step.
</li>
<li>
<a href="#chain">Chain</a>: A chain is a registry component that
specifies multiple steps to be run. Any item of the chain can be either a
step or another chain.
</li>
<li>
<a href="#workflow">Workflow</a>: A workflow is the highest level
component of the step registry. It contains three chains:
<code>pre</code>, <code>test</code>, <code>post</code>.
</li>
</ul>
</p>
<h3 id="step"><a href="#step">Step</a></h3>
<p>
A step is the lowest level component in the test registry. A
step defines a base container image, the filename of the
shell script to run inside the container, the resource requests and limits
for the container, and documentation for the step. Example of a
step:
</p>
{{ yamlSyntax (index . "refExample") }}
<p>
A step may be referred to in chains, workflows, and <code>ci-operator</code> configs.
</p>
<h4 id="step-image"><a href="#step-image">Configuring the Container Image For a Step</a></h4>
<p>
The container image used to run a test step can be configured in one of two
ways: by referencing an image tag otherwise present in the configuration or
by explicitly referencing an image tag present on the build farm.
</p>
<h5 id="step-from"><a href="#step-from">Referencing Another Configured Image</a></h5>
<p>
A step may execute in a container image already present in the <code>ci-operator</code>
configuration file by identifying the tag with the <code>from</code> configuration
field. Steps should use this mechanism to determine the container image they run in
when that image will vary with the code under test. For example, the container image
could have contents from the code under test (like <code>src</code>); similarly, the
image may need to contain a component matching the version of OpenShift used in the
test (like <code>installer</code>). When using this configuration option, ensure that
the tag is already present in one of the following places:
</p>
<ul>
<li>
<a href="https://github.com/openshift/ci-tools/blob/master/ARCHITECTURE.md#build-graph-traversal">
a pipeline image
</a>
</li>
<li>
<a href="https://github.com/openshift/ci-tools/blob/master/CONFIGURATION.md#base_images">
an external image
</a>
</li>
<li>
<a href="https://github.com/openshift/ci-tools/blob/master/CONFIGURATION.md#images">
an image built by <code>ci-operator</code>
</a>
</li>
<li>
<a href="https://github.com/openshift/ci-tools/blob/master/CONFIGURATION.md#tag_specification">
an image imported from a release <code>ImageStream</code>
</a>
</li>
</ul>
<p>
Note that static validation for this field is limited because the set of images
originating from the release <code>ImageStream</code> is only known at runtime.
</p>
<h5 id="step-from-image"><a href="#step-from-image">Referencing a Literal Image</a></h5>
<p>
A step may also be configured to use an available <code>ImageStreamTag</code> on
the build farm where the test is executed by specifying the details for the tag with
the <code>from_image</code> configuration field. A step should use this option when
the version of the container image to be used does not vary with the code under test
or the version of OpenShift being tested. Using the <code>from_image</code> field is
synonymous with importing the image as a <code>base_image</code> and referencing the
tag with the <code>from</code> field, but allows the step definition to be entirely
self-contained. The following example of a step configuration uses this option:
</p>
{{ yamlSyntax (index . "refFromImageExample") }}
<h4 id="step-commands"><a href="#step-commands"><code>commands</code></a></h4>
<p>
The commands file must contain shell script in a shell language supported by
the <code>shellcheck</code> program used to validate the commands. However,
regardless of the shell language used for the commands, the web UI will
syntax highlight all commands as bash.
</p>
<p>
Note: the shell script file must follow the <a href="#layout">naming convention</a> described later
in this help page.
</p>
<h4 id="execution"><a href="#execution">Step Execution Environment</a></h4>
<p>
While a step simply defines a set of commands to run in a container image,
by virtue of executing within a <code>ci-operator</code> workflow, the commands
have a number of special considerations for their execution environment.
The commands can expect a set of environment variables to exist that inform
them of the context in which they run. Commands in steps can communicate to
other steps via a shared directory in their filesystem.
</p>
<h5 id="env"><a href="#env">Available Environment Variables</a></h5>
<p>
The following environment variables will be available to commands in a step:
</p>
<table class="table">
<tr>
<th style="white-space: nowrap">Variable</th>
<th>Definition</th>
<th>When is it Present?</th>
</tr>
<tr>
<td style="white-space: nowrap"><code>${OPENSHIFT_CI}</code></td>
<td>Set to <code>"true"</code>, should be used to detect that a script is running in a <code>ci-operator</code> environment.</td>
<td>Always.</td>
</tr>
<tr>
<td style="white-space: nowrap"><code>${SHARED_DIR}</code></td>
<td>Directory on the step's filesystem where files shared between steps can be read and written.</td>
<td>Always.</td>
</tr>
<tr>
<td style="white-space: nowrap"><code>${ARTIFACT_DIR}</code></td>
<td>Directory on the step's filesystem where files should be placed to persist them in the job's artifacts.</td>
<td>Always.</td>
</tr>
<tr>
<td style="white-space: nowrap"><code>${CLUSTER_PROFILE_DIR}</code></td>
<td>Directory on the step's filesystem where credentials and configuration from the cluster profile are stored.</td>
<td>When the test as defined in a <code>ci-operator</code> configuration file sets a <code>cluster_profile</code>.</td>
</tr>
<tr>
<td style="white-space: nowrap"><code>${KUBECONFIG}</code></td>
<td>Path to <code>system:admin</code> credentials for the ephemeral OpenShift cluster under test.</td>
<td>After an ephemeral cluster has been installed.</td>
</tr>
<tr>
<td style="white-space: nowrap"><code>${RELEASE_IMAGE_INITIAL}</code></td>
<td>Image pull specification for the initial release payload snapshot when the test began to run.</td>
<td>Always.</td>
</tr>
<tr>
<td style="white-space: nowrap"><code>${RELEASE_IMAGE_LATEST}</code></td>
<td>Image pull specification for the ephemeral release payload used to install the ephemeral OpenShift cluster.</td>
<td>Always.</td>
</tr>
<tr>
<td style="white-space: nowrap"><code>${LEASED_RESOURCE}</code></td>
<td>The name of the resource leased to grant access to cloud quota. See <a href="./help/leases">the documentation</a>.</td>
<td>When the test requires a lease.</td>
</tr>
</table>
<p>
In addition to these variables, commands will also have a number of other
environment variables available to them from
<a href="https://github.com/kubernetes/test-infra/blob/master/prow/jobs.md#job-environment-variables">Prow</a>
as well as from
<a href="https://github.com/openshift/ci-tools/blob/master/TEMPLATES.md#parameters-available-to-templates"><code>ci-operator</code></a>.
If a job is using these variables, however, it may be an indication that
some level of encapsulation has been broken and that a more straightforward
approach exists to achieve the same outcome.
</p>
<p>
<a href="#parameters">Parameters</a> declared by steps and set by tests will
also be available as environment variables.
</p>
<h5 id="data"><a href="#data">Sharing Data Between Steps</a></h5>
<p>
Steps can communicate between each other by using a shared directory on their
filesystems. This directory is available for test processes via
<code>${SHARED_DIR}</code>. When the process finishes executing, the contents
of that directory will be copied and will be available to following
steps. New data will overwrite previous data, absent files will be removed. The
underlying mechanism for this uses Kubernetes concepts; therefore, the total
amount of data that can be shared is capped at 1MB and only a flat file
structure is permissible: no sub-directories are supported. Steps are more
commonly expected to communicate between each other by using state in the
OpenShift cluster under test. For instance, if a step installs some components
or changes configuration, a later step could check for that as a pre-condition
by using <code>oc</code> or the API to view the cluster's configuration.
</p>
<h5 id="kubeconfig"><a href="#kubeconfig">A Note on <code>$KUBECONFIG</code></a></h5>
<p>
In the default execution environment, commands run in steps will be given the
<code>$KUBECONFIG</code> environment variable to allow them to interact with
the ephemeral cluster that was created for testing. It is required that any
steps which execute a cluster installation publish the resulting configuration
file to <code>$SHARED_DIR/kubeconfig</code> to allow the <code>ci-operator</code>
to correctly propagate this configuration to subsequent steps.
</p>
<h5 id="artifacts"><a href="#artifacts">Exposing Artifacts</a></h5>
<p>
Steps can commit artifacts to the output of a job by placing files at the
<code>${ARTIFACT_DIR}</code>. These artifacts will be available for a job
under <code>artifacts/job-name/step-name/</code>. The logs of each container
in a step will also be present at that location.
</p>
<h5 id="credentials"><a href="#credentials">Injecting Custom Credentials</a></h5>
<p>
Steps can inject custom credentials by adding configuration that identifies
which secrets hold the credentials and where the data should be mounted in
the step. For instance, to mount the <code>my-data</code> secret into the
step's filesystem at <code>/var/run/my-data</code>, a step could be configured
in a literal <code>ci-operator</code> configuration, or in the step's configuration
in the registry in the following manner:
</p>
Registry step configuration:
{{ yamlSyntax (index . "credentialExample") }}
<p>
Note that access to read these secrets from the namespace configured must be
granted separately from the configuration being added to a step. By default,
only secrets in the <code>test-credentials</code> namespace will be available
for mounting into test steps.
</p>
<h3 id="chain"><a href="#chain">Chain</a></h3>
<p>
A chain is a registry component that specifies multiple registry components to be run.
Components are run in the order that they are written. Components specified by a chain
    can be either steps or other chains. Example of a chain:
</p>
{{ yamlSyntax (index . "chainExample") }}
<h3 id="workflow"><a href="#workflow">Workflow</a></h3>
<p>
A workflow is the highest level component of the step registry. It is almost
identical to the syntax of the <code>ci-operator</code> configuration for multistage tests and
defines an entire test from start to finish. It has four basic components: a
<code>cluster_profile</code> string (eg: <code>aws</code>, <code>azure4</code>,
<code>gcp</code>), and three chains: <code>pre</code>, <code>test</code>, and
<code>post</code>. The <code>pre</code> chain is intended to be used to set
up a testing environment (such as creating a test cluster), the
<code>test</code> chain is intended to contain all tests that a job wants to
run, and the <code>post</code> chain is intended to be used to clean up any
resources created/used by the test. If a step in <code>pre</code> or
<code>test</code> fails, all pending <code>pre</code> and <code>test</code>
steps are skipped and all <code>post</code> steps are run to ensure that
resources are properly cleaned up. This is an example of a workflow configuration:
</p>
{{ yamlSyntax (index . "workflowExample") }}
<h3 id="config"><a href="#config"><code>ci-operator</code> Test Configuration</a></h3>
<p>
The <code>ci-operator</code> test configuration syntax for multistage tests is very similar to
the registry workflow syntax. The main differences are that the <code>ci-operator</code>
configuration does not have a <code>documentation</code> field, and the <code>ci-operator</code>
configuration can specify a workflow to use. Also, the <code>cluster_profile</code>,
<code>pre</code>, <code>test</code>, and <code>post</code> fields are under a
<code>steps</code> field instead of <code>workflow</code>. Here is an example
of the <code>tests</code> section of a <code>ci-operator</code> configuration using the
multistage test design:
</p>
{{ yamlSyntax (index . "configExample1") }}
<p>
In this example, the <code>ci-operator</code> configuration simply specifies the desired cluster
profile and the <code>origin-e2e</code> workflow shown in the example for the
<code>Workflow</code> section above.
</p>
<p>
Since the <code>ci-operator</code> configuration and workflows share the same fields, it is
possible to override fields specified in a workflow. In cases where both the
workflow and a <code>ci-operator</code> configuration specify the same field, the <code>ci-operator</code> configuration’s
field has priority (i.e. the value from the <code>ci-operator</code> configuration is used).
</p>
Example of a <code>ci-operator</code> configuration that overrides a workflow field.
{{ yamlSyntax (index . "configExample2") }}
The configuration can also override a workflow field with a <a href="#step">full literal step</a>
(not only a reference to a shared step):
{{ yamlSyntax (index . "configExample3") }}
<h2 id="allow-skip-on-success"><a href="#allow-skip-on-success">Options to Change Control Flow</a></h2>
<p>
<code>ci-operator</code> can be configured to skip some or all <code>post</code> steps
when all <code>test</code> steps pass.
Skipping a <code>post</code> step when all tests have passed may be useful to skip
gathering artifacts and save some time at the end of the multistage test.
In order to allow steps to be skipped in a test, the <code>allow_skip_on_success</code> field must
be set in the <code>steps</code> configuration. Individual <code>post</code> steps opt
into being skipped by setting the <code>optional_on_success</code> field. This is an example:
</p>
{{ yamlSyntax (index . "configExample4") }}
<h3 id="layout"><a href="#layout">Registry Layout and Naming Convention</a></h3>
<p>
To prevent naming collisions between all the registry components, the step
registry has a very strict naming scheme and directory layout. First, all
components have a prefix determined by the directory structure, similar to
how the <code>ci-operator</code> configs do. The prefix is the relative directory path
with all `<code>/</code>` characters changed to
`<code>-</code>`. For example, a file under the
    <code>ipi/install/conf</code> directory would have a prefix of
<code>ipi-install-conf</code>. If there is a workflow, chain, or step in
that directory, the <code>as</code> field for that component would need to be
the same as the prefix. Further, only one of step, chain, or workflow
    can be in a subdirectory (otherwise there would be a name conflict).
</p>
<p>
After the prefix, we apply a suffix based on what the file is defining. These
are the suffixes for the four file types that exist in the registry:
<ul style="margin-bottom:0px;">
<li>Step: <code>-ref.yaml</code></li>
<li>Step command script: <code>-commands.sh</code></li>
<li>Chain: <code>-chain.yaml</code></li>
<li>Workflow: <code>-workflow.yaml</code></li>
</ul>
</p>
<p>
Continuing the example above, a step in the
<code>ipi/install/conf</code> subdirectory would have a filename of
<code>ipi-install-conf-ref.yaml</code> and the command would be
<code>ipi-install-conf-commands.sh</code>.
</p>
<p>
Other files that are allowed in the step registry but are not used for
testing are <code>OWNERS</code> files and files that end in <code>.md</code>.
</p>
<h3 id="parameters"><a href="#parameters">Parameters</a></h3>
<p>
Steps, chains, and workflows can declare parameters in their <code>env</code>
section. These can then be set to different values to generate tests that have
small variations between them. For example:
</p>
{{ yamlSyntax (index . "paramsExample") }}
<p>
    A test that utilizes this step must give a value to the
<code>OPENSHIFT_TEST_SUITE</code> parameter, which will be available as an
environment variable when it is executed. Different tests can be generated by
setting different values, which can make generating simple variations easier.
More complex combinations are encouraged to use separate steps instead.
</p>
<p>
Each item in the <code>env</code> section consists of the following fields:
</p>
<ul>
<li><code>name</code>: environment variable name</li>
<li>
<code>default</code> (optional): the value assigned if no other node in the
hierarchy provides one (described below)
</li>
<li>
<code>documentation</code> (optional): a textual description of the
parameter
</li>
</ul>
<h4 id="hierarchical-propagation">
<a href="#hierarchical-propagation">Hierarchical Propagation</a>
</h4>
<p>
Environment variables can be added to chains and workflows in the registry.
These variables will be propagated down the hierarchy. That is: a variable in
the env section of a chain will propagate to all of its sub-chains and
sub-steps, a variable in the env section of a workflow will propagate to all of
its stages.
</p>
{{ yamlSyntax (index . "paramsPropagation") }}
<h4 id="required-parameters">
<a href="#required-parameters">Required Parameters</a>
</h4>
<p>
Any variable that is not assigned a default value is considered required and
must be set at a higher level of the hierarchy. When the configuration is
resolved, tests that do not satisfy this requirement will generate a validation
failure.
</p>
Step definition:
{{ yamlSyntax (index . "paramsRequired") }}
<code>ci-operator</code> configuration:
{{ yamlSyntax (index . "paramsRequiredTest") }}
`
// refExample is an example registry step ("ref") definition, rendered in the
// "Step" section of the getting-started page.
const refExample = `ref:
  as: ipi-conf # name of the step
  from: base # image to run the commands in
  commands: ipi-conf-commands.sh # script file containing the command(s) to be run
  active_deadline_seconds: 7200 # optional duration in seconds that the step pod may be active before it is killed.
  termination_grace_period_seconds: 20 # optional duration in seconds the pod needs to terminate gracefully.
  resources:
    requests:
      cpu: 1000m
      memory: 100Mi
  documentation: |-
    The IPI configure step generates the install-config.yaml file based on the cluster profile and optional input files.`
// refFromImageExample is an example step that runs in a literal
// ImageStreamTag specified via "from_image" rather than a tag from the
// ci-operator configuration.
const refFromImageExample = `ref:
  as: ipi-conf
  from_image: # literal image tag to run the commands in
    namespace: my-namespace
    name: test-image
    tag: latest
  commands: ipi-conf-commands.sh
  resources:
    requests:
      cpu: 1000m
      memory: 100Mi
  documentation: |-
    The IPI configure step generates the install-config.yaml file based on the cluster profile and optional input files.`
// credentialExample is an example step that mounts a custom credential secret
// into the step's filesystem via the "credentials" list.
const credentialExample = `ref:
  as: step
  from: base
  commands: step-commands.sh
  resources:
    requests:
      cpu: 1000m
      memory: 100Mi
  credentials:
  - namespace: test-credentials # this entry injects the custom credential
    name: my-data
    mount_path: /var/run/my-data
  documentation: |-
    The step runs with custom credentials injected.`
// chainExample is an example registry chain definition combining another
// chain and a step as its members.
const chainExample = `chain:
  as: ipi-deprovision # name of this chain
  steps:
  - chain: gather # a chain being used as a step in another chain
  - ref: ipi-deprovision-deprovision # a step being used as a step in a chain
  documentation: |-
    The IPI deprovision step chain contains all the individual steps necessary to deprovision an OpenShift cluster.`
// workflowExample is an example registry workflow definition with the three
// phase chains: pre, test, and post.
const workflowExample = `workflow:
  as: origin-e2e # name of workflow
  steps:
    pre: # "pre" chain used to set up test environment
    - ref: ipi-conf
    - chain: ipi-install
    test: # "test" chain containing actual tests to be run
    - ref: origin-e2e-test
    post: # "post" chain containing cleanup steps
    - chain: ipi-deprovision
  documentation: |-
    The Origin E2E workflow executes the common end-to-end test suite.`
// configExample1 is an example ci-operator test that simply selects a cluster
// profile and an existing registry workflow.
const configExample1 = `tests:
- as: e2e-steps # test name
  steps:
    cluster_profile: aws
    workflow: origin-e2e`
// configExample2 is an example ci-operator test overriding the workflow's
// "test" chain with a reference to a different shared step.
const configExample2 = `tests:
- as: e2e-steps # test name
  steps:
    cluster_profile: aws
    workflow: origin-e2e
    test: # this chain will be run for "test" instead of the one in the origin-e2e workflow
    - ref: origin-e2e-minimal`
// configExample3 is an example ci-operator test overriding the workflow's
// "test" chain with a full literal step definition (not a shared reference).
const configExample3 = `tests:
- as: e2e-steps # test name
  steps:
    cluster_profile: aws
    workflow: origin-e2e
    test: # this chain will be run for "test" instead of the one in the origin-e2e workflow
    - as: e2e-test
      commands: make e2e
      from: src
      resources:
        requests:
          cpu: 100m
          memory: 200Mi`
// configExample4 is an example ci-operator test using allow_skip_on_success
// so that an optional_on_success post step is skipped when all tests pass.
const configExample4 = `tests:
- as: e2e-steps # test name
  steps:
    allow_skip_on_success: true # allows steps to be skipped in this test
    test:
    - as: successful-test-step
      commands: echo Success
      from: os
      resources:
        requests:
          cpu: 100m
          memory: 200Mi
    post:
    - as: gather-must-gather # this step will be skipped as the successful-test-step passes
      optional_on_success: true
      from: cli
      commands: gather-must-gather-commands.sh
      resources:
        requests:
          cpu: 300m
          memory: 300Mi`
// paramsExample is an example step declaring a parameter (env entry) without
// a default, which callers must set.
const paramsExample = `ref:
  as: openshift-e2e-test
  from: tests
  commands: openshift-e2e-test-commands.sh
  resources:
    requests:
      cpu: "3"
      memory: 600Mi
    limits:
      memory: 4Gi
  env:
  - name: OPENSHIFT_TEST_SUITE
`
// paramsPropagation is an example chain showing how an env entry propagates
// down to the chain's sub-steps and sub-chains.
const paramsPropagation = `chain:
  as: some-chain
  steps:
  - ref: some-step # TEST_VARIABLE will propagate to this step
  - chain: other-chain # TEST_VARIABLE will propagate to all elements in this chain
  env:
  - name: TEST_VARIABLE
    default: test value
`
// paramsRequired is an example step with a parameter that has no default and
// is therefore required.
const paramsRequired = `ref:
  as: some-ref
  # …
  env:
  - name: REQUIRED_VARIABLE # automatically considered required
`
// paramsRequiredTest is an example ci-operator configuration contrasting a
// test that satisfies a required parameter with one that does not.
const paramsRequiredTest = `tests:
- as: valid
  steps:
    env:
      REQUIRED_VARIABLE: value
    test:
    - some-ref
- as: invalid
  steps:
    test:
    - some-ref
`
// addingComponentPage is the HTML body of the help page describing how to add
// new components (steps, chains, workflows) to the step registry and how to
// change existing registry content.
const addingComponentPage = `
<h2>Adding and Changing Step Registry Content</h2>
<h3 id="adding-content"><a href="#adding-content">Adding Content</a></h3>
<p>
    Adding a new component (step, chain, or workflow) to the registry is
    quite simple. Descriptions of each of the components as well as the naming
    scheme and directory layout is available at the <a href="/help">
    Getting Started</a> page. To add a new component, add the new files into the
    <code>ci-operator/step-registry</code> directory in
    <code>openshift/release</code> following the naming scheme along with an
    <code>OWNERS</code> file for the new component and open a PR.
</p>
Prow will automatically run a few tests on registry components.
<ul>
    <li>Verify that all required fields are supplied</li>
    <li>Verify that the naming scheme for all components is correct</li>
    <li>Verify that there are no cyclic dependencies (infinite loops) in chains</li>
    <li>Run shellcheck on all shell files used by steps, failing on errors</li>
</ul>
<p>
    If a new test is added that uses the new component as well,
    <code>pj-rehearse</code> will test the new job with the new component.
</p>
<h3 id="changing-content"><a href="#changing-content">Changing Content</a></h3>
<p>
    To change registry content, make the changes in
    <code>openshift/release</code> and open a new PR. Prow will run all of the
    same checks on the registry listed in the above “Adding Content” section and
    run rehearsals for all jobs that use the changed registry component. The
    component will require approval and an lgtm from one of the people listed in
    the <code>OWNERS</code> file for the component, located in the same directory
    as the component.
</p>
`
const releasePage = `
<h2>Contributing CI Configuration to the <code>openshift/release</code> Repository</h2>
<p>
The <a href="https://github.com/openshift/release/"><code>openshift/release</code></a>
repository holds CI configuration for OpenShift component repositories (for both
OKD and OCP) and for many repositories that interact with OpenShift, like
operators. The repository also contains manifests and configuration for various
services that together form the OpenShift CI system.
</p>
<h3 id="pull-requests"><a href="#pull-requests">Pull Requests</a></h3>
<p>
The <code>openshift/release</code> repository contains plenty of different
types of configuration with various impact and different owners. This section
provides the main guidelines for filing and merging pull requests to this
repository.
</p>
<h4 id="reviews"><a href="#reviews">Reviews and Approvals</a></h4>
<p>
This repository heavily uses Prow review and approval plugins together with code
ownership as encoded in <code>OWNERS</code> files. Although the repository's root
<code>OWNERS</code> is the DPTP team, specific content may be owned by different
people or teams. After a PR is filed, the bot assigns two reviewers who should
be suitable to review the PR and are expected to do so. These people are also
the ones to bug when a PR sits there without a review. Teams are expected to own
their CI config, including reviews, and therefore <code>OWNERS</code> file
presence is enforced for some sections of the repository.
</p>
<p>
During the PR lifetime, the bot maintains a comment that summarizes the pull
request's approval status, including the links to the <code>OWNERS</code> files
whose members need to approve the PR. Please pay attention to this comment when
asking for approvals.
<!--TODO: a screenshot would be nice?-->
</p>
<p>
Due to the pull request volume in the repository, DPTP team members review the
pull requests asynchronously when assigned by a bot. Please do not expect a PR
to be reviewed immediately. Unless urgent, do not ping about reviews via Slack.
If a PR sits unreviewed for more than a day, ping via GitHub first via a
mention. If a pull request spends some time in WIP or draft state, it is helpful
to mention the reviewers when the PR is ready for review.
</p>
<h4 id="checks"><a href="#checks">Checks</a></h4>
<h5 id="formatting-checks"><a href="#formatting-checks">Formatting and Generated Content</a></h5>
<p>
    Parts of the repository content are partially or entirely managed by
    automation, and there are checks in place enforcing that the repo stays consistent with
respect to this automation. When these checks fail, they usually advise how to
run the tooling (using containers) to bring the repo to the desired state:
</p>
{{ plaintextSyntax (index . "determinizeCheckExample") }}
<p>
While there are individual <code>make</code> targets for different parts of the
repository, it is easiest to run the <code>make update</code> that runs <em>all</em>
these tools before a pull request submission:
</p>
{{ plaintextSyntax (index . "makeUpdateExample") }}
<h5 id="rehearsals"><a href="#rehearsals">Rehearsals</a></h5>
<p>
In addition to the "normal" checks executed against pull requests on <code>openshift/release</code>,
so-called <em>"rehearsals"</em> trigger whenever a pull request would affect one
or more CI jobs. Jobs affected by such PR are executed as if run against a
target component repository after the changes would be merged. This provides
pull request authors early feedback about how config changes impact CI setup.
</p>
<p>
All pull requests trigger a <code>ci/prow/pj-rehearse</code> job that inspects
the changes in the PR and detects affected jobs. It then submits these jobs for
execution, and they will report to the pull request results via the GitHub
contexts named with the <code>ci/rehearse/$org/$repo/$branch/$test</code>
pattern. Both the "driver" job (<code>ci/prow/pj-rehearse</code>) and the
individual rehearsals do not block merges. This allows merging changes to CI
configuration that affect jobs that fail for reasons unrelated to the change
(like flakes or infrastructure issues). Also, merging a failing job can be
useful when it gives correct signal so that such merge can be followed up in the
target repo with a pull request fixing the failing job.
</p>
<p>
The following changes are considered when triggering rehearsals:
</p>
<ol>
<li>Changes to Prow jobs themselves (<code>ci-operator/jobs</code>)</li>
<li>Changes to <code>ci-operator</code> configuration files (<code>ci-operator/config</code>)</li>
<li>Changes to multi-stage steps (<code>ci-operator/step-registry</code>)</li>
<li>Changes to templates (<code>ci-operator/templates</code>)</li>
<li>Changes to cluster profiles (<code>cluster/test-deploy</code>)</li>
</ol>
<p>
The affected jobs are further filtered down so that jobs are only rehearsed when
it is safe. Only the jobs with <code>pj-rehearse.openshift.io/can-be-rehearsed: "true"</code>
label are rehearsed. All presubmits and periodics generated by <code>make jobs</code>
have this label by default. Generated postsubmits will not contain it because
generated postsubmits are used for promoting images. Handcrafted jobs can opt
to be rehearsable by including this label.
</p>
<p>
It is not possible to rerun individual rehearsal jobs. They do not react to any
trigger commands. Rerunning rehearsals must be done by rerunning the "driver"
job: <code>ci/prow/pj-rehearse</code>, which then triggers all rehearsals of
jobs currently affected by the PR, including the rehearsals that passed before.
</p>
<p>
Certain changes affect many jobs. For example, when a template or a step used
by many jobs is changed, in theory all these jobs could be affected by the change,
but it is unrealistic to rehearse them all. In some of these cases, rehearsals
<em>samples</em> from the set of affected jobs. Unfortunately, the sampled jobs
are sometimes not stable between retests, so it is possible that in a retest,
different jobs are selected for rehearsal than in the previous run. In this case,
results from the previous runs stay on the pull request and because rehearsals
    cannot be individually triggered, they cannot be removed. This is especially
inconvenient when these "stuck" jobs failed. Rehearsals do not block merges, so
these jobs do not prevent configuration changes from merging, but they can lead
to confusing situations.
</p>
<h5 id="sharding"><a href="#sharding"><code>ci-operator</code> Configuration Sharding</a></h5>
<p>
The configuration files under <code>ci-operator/config</code> need to be stored
in the CI cluster before jobs can use them. That is done using the
<a href="https://github.com/kubernetes/test-infra/tree/master/prow/plugins/updateconfig"><code>updateconfig</code></a>
Prow plugin, which maps file path globs to <code>ConfigMap</code>s.
</p>
<p>
Because of size constraints, files are distributed across several <code>ConfigMap</code>s
based on the name of the branch they target. Patterns for the most common names
already exist in the plugin configuration, but it may be necessary to
add entries when adding a file for a branch with an unusual name. The <a href="https://prow.ci.openshift.org/job-history/gs/origin-ci-test/pr-logs/directory/pull-ci-openshift-release-master-correctly-sharded-config"><code>correctly-sharded-config</code></a>
pre-submit job guarantees that each file is added to one (and only one) <code>ConfigMap</code>,
and will fail in case a new entry is necessary. To add one, edit the top-level
<code>config_updater</code> key in the <a href="https://github.com/openshift/release/blob/master/core-services/prow/02_config/_plugins.yaml">plugin configuration</a>.
Most likely, the new entry will be in the format:
</p>
{{ yamlSyntax (index . "updateconfigExample") }}
<p>
The surrounding entries that add files to <code>ci-operator-misc-configs</code>
can be used as reference. When adding a new glob, be careful that it does not
unintentionally match other files by being too generic.
</p>
<h3 id="component-maintainers"><a href="#component-maintainers">Component CI Configuration</a></h3>
<p>
As an owner of a repository for which you want to maintain CI configuration in
<code>openshift/release</code>, you mostly need to interact with the following
locations:
</p>
<ul>
<li>
<code>ci-operator/config/$org/$repo/$org-$repo-$branch.yaml</code>:
contains your ci-operator definition, which describes how the images and
tests in your repo work.
</li>
<li>
<code>ci-operator/jobs/$org/$repo/$org-$repo-$branch-(presubmits|postsubmits|periodics).yaml</code>:
contains Prow job definitions for each repository that are run on PRs,
on merges, or periodically. In most cases, these files are generated
from the <code>ci-operator</code> configuration, and you do not need to
touch them. There are exceptions to this, which are described <a href="#component-jobs">below</a>.
</li>
<li>
<code>core-services/prow/02_config/_{config,plugins}.yaml</code>: contains
the configuration for Prow, including repository-specific configuration
for automated merges, plugin enablement and more. This configuration is
usually set up once when a repository is on-boarded, and then rarely
needs to be changed.
</li>
</ul>
<h4 id="new-repos"><a href="#new-repos">Adding CI Configuration for New Repositories</a></h4>
<p>
When adding CI configuration for new repositories, instead of manually modifying
the files in the locations described above or copy-pasting existing configuration
for other repos, you should use the <code>make new-repo</code> target. It walks
you through the necessary steps and generates the configuration for you:
</p>
{{ plaintextSyntax (index . "makeNewRepoExample") }}
<h4 id="component-configs"><a href="#component-configs"><code>ci-operator</code> Configuration</a></h4>
<p>
The <code>ci-operator</code> configuration files for a repository live in <code>ci-operator/config/$org/$repo</code>
directories. For details about the configuration itself, see this <a href="/help/ci-operator">document</a>.
There is a separate configuration file per branch, and the configuration files
follow the <code>$org-$repo-$branch.yaml</code> pattern:
</p>
{{ plaintextSyntax (index . "ciopConfigDirExample") }}
<p>
For the repositories involved in the <a href="https://docs.google.com/document/d/1USkRjWPVxsRZNLG5BRJnm5Q1LSk-NtBgrxl2spFRRU8/edit#heading=h.3myk8y4544sk">Centralized Release Branching and Config Management</a>,
(this includes all OCP components and some others, see the linked document
for details) the configuration for release branches for the <em>future</em>
releases are managed by automation and should not be changed or added by humans.
</p>
<h5 id="feature-branches"><a href="#feature-branches">Feature Branches</a></h5>
<p>
Any branch whose name has a prefix matching to any branch with a <code>ci-operator</code>
configuration file is considered a <em>"feature branch"</em>. Pull requests to
feature branches trigger the same CI presubmit jobs (but not postsubmits) like
configured for the base branch, without any additional configuration. This also
means that such <em>"feature branches"</em> cannot have a separate, different
<code>ci-operator</code> configuration. For example, if a repo has an <code>org-repo-release-2.0.yaml</code>
config (specifying CI config for the <code>release-2.0</code> branch of that
repository), the same CI presubmits will trigger on pull requests to a <code>release-2.0.1</code>
branch, and the repo cannot have an <code>org-repo-release-2.0.1.yaml</code>
configuration file.
</p>
<h5 id="variants"><a href="#variants">Variants</a></h5>
<p>
It is possible to have multiple <code>ci-operator</code> configuration files for
a single branch. This is useful when a component needs to be built and tested in
multiple different ways from a single branch. In that case, the additional
configuration files must follow the <code>org-repo-branch__VARIANT.yaml</code>
pattern (note the double underscore separating the branch from the variant).
</p>
<h4 id="component-jobs"><a href="#component-jobs">Prowjob Configuration</a></h4>
<p>
Most jobs are generated from the <code>ci-operator</code> configuration, so the
need to interact with actual Prowjob configuration should be quite rare.
Modifying the Prowjob configuration is discouraged unless necessary, and can
result in increased fragility and maintenance costs.
</p>
<h5 id="manual-job-changes"><a href="#manual-job-changes">Tolerated Changes to Generated Jobs</a></h5>
<p>
Generated jobs are enforced to stay in the generated form, so when you attempt
to change them, a check will fail on the pull requests, requiring the jobs to be
regenerated and changed back. However, the generator tolerates these
modifications to allow some commonly needed customizations:
</p>
<table class="table">
<tr>
<th style="white-space: nowrap">Field</th>
<th>Description</th>
<th>Presubmit</th>
<th>Postsubmit</th>
<th>Periodic</th>
</tr>
<tr>
<td style="white-space: nowrap"><code>.always_run</code></td>
<td>Set to <code>false</code> to disable automated triggers of the job on pull requests.</td>
<td>✅</td>
<td></td>
<td></td>
</tr>
<tr>
<td style="white-space: nowrap"><code>.run_if_changed</code></td>
<td>Set a regex to make the job trigger only when a pull request changes a certain path in the repository.</td>
<td>✅</td>
<td></td>
<td></td>
</tr>
<tr>
<td style="white-space: nowrap"><code>.optional</code></td>
<td>Set to <code>true</code> to make the job not block merges.</td>
<td>✅</td>
<td></td>
<td></td>
</tr>
<tr>
<td style="white-space: nowrap"><code>.skip_report</code></td>
<td>Set to <code>true</code> to make the job not report its result to the pull request.</td>
<td>✅</td>
<td></td>
<td></td>
</tr>
<tr>
<td style="white-space: nowrap"><code>.max_concurrency</code></td>
<td>Set to limit how many instances of the job can run simultaneously.</td>
<td>✅</td>
<td>✅</td>
<td>✅</td>
</tr>
<tr>
<td style="white-space: nowrap"><code>.reporter_config</code></td>
<td>Add this stanza to configure Slack alerts (see the <a href="https://github.com/clarketm/kubernetes_test-infra/blob/master/prow/cmd/crier/README.md#slack-reporter">upstream doc</a>).</td>
<td></td>
<td></td>
<td>✅</td>
</tr>
</table>
<h5 id="handcrafted-jobs"><a href="#handcrafted-jobs">Handcrafted Jobs</a></h5>
<p>
It is possible to add entirely handcrafted Prowjobs. The Prowjob configuration
files' content is a YAML list, so adding a job means adding an item to one
of these lists. Creating handcrafted jobs assumes knowledge of Prow, takes you
out of the well-supported path, and is therefore discouraged. You are expected to
maintain and fully own your handcrafted jobs.
</p>
`
// determinizeCheckExample shows the failure output of the Prow job
// config formatting check and the command that fixes it.
const determinizeCheckExample = `...
ERROR: This check enforces Prow Job configuration YAML file format (ordering,
ERROR: linebreaks, indentation) to be consistent over the whole repository. We have
ERROR: automation in place that manipulates these configs and consistent formatting
ERROR: helps reviewing the changes the automation does.
ERROR: Run the following command to re-format the Prow jobs:
ERROR: $ make jobs
`
// makeUpdateExample shows the output of `make update`, which runs the
// individual generation/formatting targets in sequence.
const makeUpdateExample = `$ make update
make jobs
docker pull registry.svc.ci.openshift.org/ci/ci-operator-prowgen:latest
docker run --rm <...> registry.svc.ci.openshift.org/ci/ci-operator-prowgen:latest <...>
docker pull registry.svc.ci.openshift.org/ci/sanitize-prow-jobs:latest
docker run --rm <...> registry.svc.ci.openshift.org/ci/sanitize-prow-jobs:latest <...>
make ci-operator-config
docker pull registry.svc.ci.openshift.org/ci/determinize-ci-operator:latest
docker run --rm -v <...> registry.svc.ci.openshift.org/ci/determinize-ci-operator:latest <...>
make prow-config
docker pull registry.svc.ci.openshift.org/ci/determinize-prow-config:latest
docker run --rm <...> registry.svc.ci.openshift.org/ci/determinize-prow-config:latest <...>
make registry-metadata
docker pull registry.svc.ci.openshift.org/ci/generate-registry-metadata:latest
<...>
docker run --rm -v <...> registry.svc.ci.openshift.org/ci/generate-registry-metadata:latest <...>
`
// ciopConfigDirExample shows a ci-operator config directory listing,
// illustrating the $org-$repo-$branch.yaml naming pattern.
const ciopConfigDirExample = `$ ls -1 ci-operator/config/openshift/api/
openshift-api-master.yaml
openshift-api-release-3.11.yaml
openshift-api-release-4.1.yaml
openshift-api-release-4.2.yaml
openshift-api-release-4.3.yaml
openshift-api-release-4.4.yaml
openshift-api-release-4.5.yaml
openshift-api-release-4.6.yaml
openshift-api-release-4.7.yaml
OWNERS
`
// makeNewRepoExample shows an interactive `make new-repo` session that
// generates CI configuration for a newly on-boarded repository.
const makeNewRepoExample = `make new-repo
docker pull registry.svc.ci.openshift.org/ci/repo-init:latest
<...>
docker run --rm -it <...> registry.svc.ci.openshift.org/ci/repo-init:latest --release-repo <...>
Welcome to the repository configuration initializer.
In order to generate a new set of configurations, some information will be necessary.
Let's start with general information about the repository...
Enter the organization for the repository: openshift
Enter the repository to initialize: new-repo-example
Enter the development branch for the repository: [default: master]
Now, let's determine how the repository builds output artifacts...
Does the repository build and promote container images? [default: no] yes
Does the repository promote images as part of the OpenShift release? [default: no] yes
Do any images build on top of the OpenShift base image? [default: no] yes
Do any images build on top of the CentOS base image? [default: no] no
Now, let's configure how the repository is compiled...
What version of Go does the repository build with? [default: 1.13] 1.15
[OPTIONAL] Enter the Go import path for the repository if it uses a vanity URL (e.g. "k8s.io/my-repo"):
[OPTIONAL] What commands are used to build binaries in the repository? (e.g. "go install ./cmd/...") make awesome
[OPTIONAL] What commands are used to build test binaries? (e.g. "go install -race ./cmd/..." or "go test -c ./test/...") make awesome-test
...
`
// examplesPage is the HTML body of the "Examples" help page: how to run
// the e2e conformance suite on AWS and how to share images between repos.
// The {{ ... }} actions are filled in from sibling example constants.
const examplesPage = `
<h2 id="examples"><a href="#examples">Available Examples</a></h2>
<ul>
<li><a href="#aws">How do I add a job that runs the OpenShift end-to-end conformance suite on AWS?</a></li>
<li><a href="#image">How do I use an image from another repo in my repo’s tests?</a></li>
</ul>
<h3 id="aws"><a href="#aws">How do I add a job that runs the OpenShift end-to-end conformance suite on AWS?</a></h3>
<p>
Use the <code>origin-e2e</code> workflow and set <code>cluster_profile</code>
to <code>aws</code>.
</p>
Example:
{{ yamlSyntax (index . "awsExample") }}
<h3 id="image"><a href="#image">How do I use an image from another repo in my repo’s tests?</a></h3>
<p>
In order to use an image from one repository in the tests of another, it is necessary
to first publish the image from the producer repository and import it in the consumer
repository. Generally, a central <code>ImageStream</code> is used for continuous
integration; a repository opts into using an integration stream with the <code>tag_specification</code>
field in the <code>ci-operator</code> configuration and opts into publishing to the
stream with the <code>promotion</code> field.
</p>
<h4 id="image-publication"><a href="#image-publication">Publishing an Image For Reuse</a></h4>
<p>
When configuring <code>ci-operator</code> for a repository, the <code>promotion</code>
stanza declares which container images are published and defines the integration
<code>ImageStream</code> where they will be available. By default, all container images
declared in the <code>images</code> block of a <code>ci-operator</code> configuration
are published when a <code>promotion</code> stanza is present to define the integration
<code>ImageStream</code>. Promotion can be furthermore configured to include other images,
as well. In the following <code>ci-operator</code> configuration, the following images
are promoted for reuse by other repositories to the <code>ocp/4.4</code> integration
<code>ImageStream</code>:
</p>
<ul>
<li>the <code>pipeline:src</code> tag, published as <code>ocp/4.4:repo-scripts</code> containing the latest version of the repository to allow for executing helper scripts</li>
<li>the <code>pipeline:test-bin</code> tag, published as <code>ocp/4.4:repo-tests</code> containing built test binaries to allow for running the repository's tests</li>
<li>the <code>stable:component</code> tag, published as <code>ocp/4.4:component</code> containing the component itself to allow for deployments and installations in end-to-end scenarios</li>
</ul>
<code>ci-operator</code> configuration:
{{ yamlSyntax (index . "imagePromotionConfig") }}
<h4 id="image-consumption"><a href="#image-consumption">Consuming an Image</a></h4>
<p>
Once a repository is publishing an image for reuse by others, downstream users can
configure <code>ci-operator</code> to use that image in tests by including it as a
<code>base_image</code> or as part of the <code>tag_specification</code>. In general,
images will be available as part of the <code>tag_specification</code> and explicitly
including them as a <code>base_image</code> will only be necessary if the promoting
repository is exposing them to a non-standard <code>ImageStream</code>. Regardless of
which workflow is used to consume the image, the resulting tag will be available under
the <code>stable</code> <code>ImageStream</code>. The following <code>ci-operator</code>
configuration imports a number of images:
</p>
<ul>
<li>the <code>stable:custom-scripts</code> tag, published as <code>myregistry.com/project/custom-scripts:latest</code></li>
<li>the <code>stable:component</code> and <code>:repo-{scripts|tests}</code> tags, by virtue of them being published under <code>ocp/4.4</code> and brought in with the <code>tag_specification</code></li>
</ul>
<code>ci-operator</code> configuration:
{{ yamlSyntax (index . "imageConsumptionConfig") }}
<p>
Once the image has been configured to be an input for the repository's tests in the
<code>ci-operator</code> configuration, either explicitly as a <code>base_image</code>
or implicitly as part of the <code>tag_specification</code>, it can be used in tests
in one of two ways. A registry step can be written to execute the shared tests
in any <code>ci-operator</code> configuration, or a literal test step can be added just to one
repository's configuration to run the shared tests. Two examples follow which add an
execution of shared end-to-end tests using these two approaches. Both examples assume
that we have the <code>ipi</code> workflow available to use.
</p>
<h5 id="adding-step"><a href="#adding-step">Adding a Reusable Test Step</a></h5>
<p>
Full directions for adding a new reusable test step can be found in the overview for
<a href="./adding-components#adding-content">new registry content</a>. An example of the process
is provided here. First, make directory for the test step in the registry:
<code>ci-operator/step-registry/org/repo/e2e</code>.
</p>
Then, declare a reusable step: <code>ci-operator/step-registry/org/repo/e2e/org-repo-e2e-ref.yaml</code>
{{ yamlSyntax (index . "imageExampleRef") }}
Finally, populate a command file for the step: <code>ci-operator/step-registry/org/repo/e2e/org-repo-e2e-commands.sh</code>
{{ bashSyntax (index . "imageExampleCommands") }}
Now the test step is ready for use by any repository. To make use of it, update
<code>ci-operator</code> configuration for a separate repository under
<code>ci-operator/config/org/other/org-other-master.yaml</code>:
{{ yamlSyntax (index . "imageExampleConfig") }}
<h5 id="adding-literal"><a href="#adding-literal">Adding a Literal Test Step</a></h5>
<p>
It is possible to directly declare a test step in the
<code>ci-operator</code> configuration without adding a new registry component.
However, this is usually not recommended for most use cases as commands must
be inlined (making multilined scripts difficult to handle) and the steps are
not reusable by other tests:
</p>
<code>ci-operator</code> configuration:
{{ yamlSyntax (index . "imageExampleLiteral") }}
`
// updateconfigExample is a sample config_updater entry mapping a config
// file glob to the ConfigMap it should be synced into.
const updateconfigExample = `config_updater:
# …
maps:
# …
ci-operator/config/path/to/files-*-branch-name*.yaml:
clusters:
app.ci:
- ci
name: ci-operator-misc-configs
`
// awsExample is a minimal test stanza running the origin-e2e workflow
// with the aws cluster_profile.
const awsExample = `- as: e2e-steps
steps:
cluster_profile: aws
workflow: origin-e2e
`
// imagePromotionConfig is a ci-operator config snippet demonstrating
// promotion of built and additional images to the ocp/4.4 stream.
const imagePromotionConfig = `test_binary_build_commands: go test -race -c -o e2e-tests # will create the test-bin tag
promotion:
additional_images:
repo-scripts: src # promotes "src" as "repo-scripts"
repo-tests: test-bin # promotes "test-bin" as "repo-tests"
namespace: ocp
name: 4.4
images:
- from: ubi8
to: component # promotes "component" by default
context_dir: images/component
`
// imageConsumptionConfig is a ci-operator config snippet demonstrating
// importing images via base_images and tag_specification.
const imageConsumptionConfig = `base_images:
custom-scripts:
cluster: myregistry.com
namespace: project
name: custom-scripts
tag: latest
tag_specification:
namespace: ocp
name: 4.4
`
// imageExampleRef is a sample registry step declaration (ref) for a
// reusable end-to-end test step.
const imageExampleRef = `ref:
as: org-repo-e2e
from: repo-tests
commands: org-repo-e2e-commands.sh
resources:
requests:
cpu: 1000m
memory: 100Mi
documentation: |-
Runs the end-to-end suite published by org/repo.
`
// imageExampleCommands is the shell command file backing the sample
// registry step declared in imageExampleRef.
const imageExampleCommands = `#!/bin/bash
e2e-tests # as built by go test -c
`
// imageExampleConfig is a sample test stanza consuming the reusable
// org-repo-e2e registry step.
const imageExampleConfig = `- as: org-repo-e2e
steps:
cluster_profile: aws
workflow: ipi
test:
- ref: org-repo-e2e
`
// imageExampleLiteral is a sample test stanza with an inline (literal)
// test step instead of a registry reference.
const imageExampleLiteral = `- as: repo-e2e
steps:
cluster_profile: aws
workflow: ipi
test:
- as: e2e
from: repo-tests
commands: |-
#!/bin/bash
e2e-tests # as built by go test -c
resources:
requests:
cpu: 1000m
memory: 2Gi
`
// privateRepositoriesPage is the HTML body of the "Private Repositories"
// help page: CI support for private forks and the automation behind the
// openshift-priv organization.
const privateRepositoriesPage = `
<h2 id="title"><a href="#title">Private Repositories</a></h2>
<p>
OpenShift CI supports setting up CI jobs for private repositories mainly to
allow temporary non-public development on the forks of the otherwise public
repositories. The CI jobs executed for these forks are not shown in the public
Deck instance, and all their artifacts are not public. Access to these jobs is
limited to engineers who need it.
</p>
<p>
Unfortunately, such access cannot be granted to developers of other private
repositories. Therefore, OpenShift CI only allows setting up <em>public</em> CI
jobs for private repositories -- the logs and artifacts executed for such
private repository will be public. <strong>Only set up such jobs when you are
absolutely sure your jobs would not leak any sensitive information</strong>.
</p>
<p>
To allow the CI jobs to access a private repo, drop a following file to the
directory in <code>openshift/release</code> holding the <code>ci-operator</code>
configuration for your repository (usually <code>ci-operator/config/$org/$repo</code>):
</p>
<code>.config.prowgen</code>
{{ yamlSyntax (index . "privateRepoProwgenConfigExample") }}
<h3><code>openshift-priv</code> organization</h3>
<p>
The <code>openshift-priv</code> organization holds private forks of selected
repositories. The purpose of these forks is to allow temporary non-public
development. Their presence, content, settings, and all CI configuration are
managed automatically.
</p>
<p>
<em>Automated tools manage all CI configuration for repositories in <code>openshift-priv</code>
organization. Humans should not change any CI configuration related to these
repositories. All manual changes to this configuration will be overwritten.</em>
</p>
<h4>Involved Repositories</h4>
<p>
The set of repositories that are managed automatically in <code>openshift-priv</code>
is dynamic and consists of the following two subsets:
</p>
<ol>
<li>Repositories with existing CI configuration promoting images to the <code>ocp/4.X</code>
namespace (same criteria like for enrollment into the centralized release
branch management)</li>
<li>Repositories explicitly listed in the
<a href="https://github.com/openshift/release/blob/master/core-services/openshift-priv/_whitelist.yaml">allowlist</a></li>
</ol>
<h4>Automation Architecture</h4>
When a repository is identified to be included in <code>openshift-priv</code>
by having the appropriate promoting configuration or by being present in the
allowlist, the following jobs and tools maintain the existence, repository
settings, repository content, and all necessary CI configuration of the fork in
<code>openshift-priv</code>:
<ol>
<li>The <a href="https://deck-internal-ci.apps.ci.l2s4.p1.openshiftapps.com/?job=periodic-auto-private-org-peribolos-sync">periodic-auto-private-org-peribolos-sync</a>
job runs the <a href="https://github.com/openshift/ci-tools/tree/master/cmd/private-org-peribolos-sync">private-org-peribolos-sync</a>
tool to maintain the GitHub settings for the fork. These settings are asynchronously
consumed by the <a href="https://prow.ci.openshift.org/?job=periodic-org-sync">periodic-org-sync</a>
job running the <a href="https://github.com/kubernetes/test-infra/tree/master/prow/cmd/peribolos">peribolos</a>
tool to create the fork on GitHub and maintain its settings.</li>
<li>The <a href="https://deck-internal-ci.apps.ci.l2s4.p1.openshiftapps.com/?job=periodic-openshift-release-private-org-sync">periodic-openshift-release-private-org-sync</a>
job runs the <a href="https://github.com/openshift/ci-tools/tree/master/cmd/private-org-sync">private-org-sync</a>
tool to synchronize the git content of the fork with the source repository.</li>
<li>The <a href="https://prow.ci.openshift.org/?job=periodic-prow-auto-config-brancher">periodic-prow-auto-config-brancher</a>
runs the <a href="https://github.com/openshift/ci-tools/tree/master/cmd/ci-operator-config-mirror">ci-operator-config-mirror</a>
tool to create and maintain the CI configuration for the fork (<code>ci-operator</code>
configuration files). The same job then generates the CI jobs from the <code>ci-operator</code>
files. This has a caveat of not carrying over handcrafted (non-generated)
jobs and also manual changes to the generated jobs.</li>
<li>The <a href="https://prow.ci.openshift.org/?job=periodic-prow-auto-config-brancher">periodic-prow-auto-config-brancher</a>
also runs the <a href="https://github.com/openshift/ci-tools/tree/master/cmd/private-prow-configs-mirror">private-prow-configs-mirror</a>
tool to mirror the repository-specific Prow configuration, like merging
criteria, plugin enablement, etc.</li>
</ol>
`
// privateRepoProwgenConfigExample is the .config.prowgen content that
// marks a repository as private with publicly exposed job results.
const privateRepoProwgenConfigExample = `private: true
expose: true
`
// quotasAndLeasesPage is the HTML body of the "Cloud Quota and Leases"
// help page: the boskos leasing server, administrator directions, and
// what job authors need to know about cluster_profile leases.
const quotasAndLeasesPage = `<h2 id="title"><a href="#title">How are Cloud Quota and Aggregate Concurrency Limits Handled?</a></h2>
<p>
A centralized locking system is provided to jobs in order to limit concurrent usage of shared resources like third-party
cloud APIs.
</p>
<p>
Jobs that interact with an Infrastructure-as-a-Service (IaaS) cloud provider use credentials shared across the broader
CI platform. Therefore, all jobs interacting with a specific IaaS will use API quota for these cloud providers from a
shared pool. In order to ensure that our job throughput for a provider remains within the aggregate limit imposed by
shared quota, jobs acquire leases for slices of the quota before they run and only relinquish them once all actions are
completed. This document describes the mechanism used to provide leases of quota slices to jobs, how jobs determine
which quota to ask for, how available leases can be configured and how current usage can be monitored.
</p>
<h3 id="boskos"><a href="#boskos">Introducing the <code>boskos</code> Leasing Server</a></h3>
<p>
<code>boskos</code> (βοσκός), translating as "shepherd" from Greek, is a resource management server that apportions
<i>leases</i> of <i>resources</i> to clients and manages the lifecycle of the resources. When considering the
actions of this server, two terms should be defined:
</p>
<table class="table">
<tr>
<th style="white-space: nowrap">Term</th>
<th>Definition</th>
</tr>
<tr>
<td style="white-space: nowrap">resource</td>
<td>An item which may be leased to clients. Resources represent slices of the larger cloud quota.</td>
</tr>
<tr>
<td style="white-space: nowrap">lease</td>
<td>A binding between a resource and a client. When a lease is active, the underlying resource is not available for other clients.</td>
</tr>
</table>
<p>
The process for granting a lease on a resource follows this workflow:
</p>
<ul>
<li>a client (<i>lessee</i>) requests a lease on an available resource</li>
<li>the server (<i>lessor</i>) grants the lease, if possible, or places the client in a FIFO queue to wait for the next available resource</li>
<li>the client emits a heartbeat while the lease is under active use</li>
<li>the client relinquishes the lease once it is no longer in use</li>
<li>the server places the resource back into the available pool for future clients to request</li>
</ul>
<p>
If a client fails to emit a heartbeat for long enough while the client holds a lease, the server will forcibly
relinquish the lease and return the resource to the available pool for other clients. This mechanism
ensures that clients which crash or otherwise fail to remain responsive cannot exhaust resources by holding a
lease indefinitely.
</p>
<h3 id="admins"><a href="#admins">Directions for Cloud Administrators</a></h3>
<p>
An administrator of a cloud platform will interact with the leasing server in order to configure the aggregate limit on
jobs for the platform or inspect the current settings and usage. Care must be taken when configuring the leasing server
in order to ensure that jobs are well-behaved against the cloud provider APIs.
</p>
<h4 id="adding"><a href="#adding">Adding a New Type Of Resource</a></h4>
<p>
In order to add a new type of cloud quota to the system, changes to the <code>boskos</code> leasing server configuration
are required. The configuration is checked into source control <a href="https://github.com/openshift/release/blob/master/core-services/prow/02_config/_boskos.yaml">here.</a>
When adding a new type of quota, a new entry to the <code>resources</code> list is required, for example:
</p>
<code>boskos</code> configuration:
{{ yamlSyntax (index . "dynamicBoskosConfig") }}
<p>
If it is not clear exactly how many concurrent jobs can share the cloud provider at once, the convention is to set the
<code>min-count</code> and <code>max-count</code> to <code>1000</code>, to effectively leave jobs unlimited and allow
for investigation.
</p>
<p>
In addition to registering the volume of concurrent jobs that are allowed against a new cloud platform, it is required
that the leasing server is configured to reap leases which have not seen a recent heartbeat. This is done by adding the
name of the resource type to the <a href="https://github.com/openshift/release/blob/master/core-services/prow/03_deployment/boskos_reaper.yaml#L27">reaper's configuration.</a>
</p>
<h5 id="static"><a href="#static">Configuration for Heterogeneous Resources</a></h5>
<p>
The example configuration above will create <i>dynamic</i> resources and is most appropriate for operating against large
cloud APIs where clients act identically regardless of which slice of the quota they have leased. If the cloud provider
that is being configured has a static pool of resources and jobs are expected to act differently based on the specific
lease that they acquire, it is necessary to create a static list of resources for <code>boskos</code>:
</p>
<code>boskos</code> configuration:
{{ yamlSyntax (index . "staticBoskosConfig") }}
<p>
A test may access the name of the resource that was acquired using the <code>${LEASED_RESOURCE}</code> environment
variable.
</p>
<h4 id="inspecting"><a href="#inspecting">Viewing Lease Activity Over Time</a></h4>
<p>
In order to view the number of concurrent jobs executing against any specific cloud, or to view the states of resources
in the lease system, a <a href="https://grafana-prow-monitoring.svc.ci.openshift.org/d/628a36ebd9ef30d67e28576a5d5201fd/boskos-dashboard?orgId=1">dashboard</a>
exists.
</p>
<h3 id="job-authors"><a href="#job-authors">Directions for Job Authors</a></h3>
<p>
Job authors should generally not be concerned with the process of acquiring a lease or the mechanisms behind it. However,
a quick overview of the process is given here to explain what is happening behind the scenes. Whenever <code>ci-operator</code>
runs a test target that has a <code>cluster_profile</code> set, a lease will be acquired before the test steps are
executed. <code>ci-operator</code> will acquire the lease, present the name of the leased resource to the job in the
<code>${LEASED_RESOURCE}</code> environment variable, send heartbeats as necessary and relinquish the lease when it is
no longer needed. In order for a <code>cluster_profile</code> to be supported, the cloud administrator will need to have
set up the quota slice resources, so by the time a job author uses a <code>cluster_profile</code>, all the infrastructure
should be in place.
</p>
`
// dynamicBoskosConfig is a sample boskos entry for a dynamic (count-based)
// quota-slice resource type.
const dynamicBoskosConfig = `resources:
- type: "my-new-quota-slice"
state: "free"
min-count: 10 # how many concurrent jobs can run against the cloud
max-count: 10 # set equal to min-count
`
// staticBoskosConfig is a sample boskos entry for a static pool of named
// resources (heterogeneous leases).
const staticBoskosConfig = `resources:
- type: "some-static-quota-slice"
state: "free"
names:
- "server01.prod.service.com" # these names should be semantically meaningful to a client
- "server02.prod.service.com"
- "server03.prod.service.com"
`
// linksPage is the HTML body of the "Links" help page: the CI clusters,
// user-facing services, and DPTP contact channels.
const linksPage = `<h2 id="clusters"><a href="#clusters">Clusters</a></h2>
<p>The clusters that currently comprise CI are:</p>
<ul>
<li>
<a href="https://console.svc.ci.openshift.org"><code>api.ci</code></a>:
legacy Openshift 3.11 cluster in GCP. Job execution is being migrated out
of it.
</li>
<li>
<a href="https://console-openshift-console.apps.ci.l2s4.p1.openshiftapps.com"><code>app.ci</code></a>:
Openshift Dedicated 4.x cluster containing most Prow services.
</li>
<li>
<a href="https://console.build01.ci.openshift.org/"><code>build01</code></a>:
Openshift 4.x cluster in AWS that executes a growing subset of the jobs.
</li>
<li>
<a href="https://console.build02.ci.openshift.org/"><code>build02</code></a>:
Openshift 4.x cluster in GCP that executes a growing subset of the jobs.
</li>
<li>
<code>vsphere</code>: external cluster used for vSphere tests, not managed
by DPTP.
</li>
</ul>
<p>
Except for <code>vsphere</code>, these clusters use Github OAuth
authentication: all members of the Openshift organization in Github can log in.
</p>
<h2 id="services"><a href="#services">Services</a></h2>
<p>Below is a non-exhaustive list of CI services.</p>
<ul>
<li>
<a href="https://prow.ci.openshift.org">prow.ci.openshift.org</a>:
main Prow dashboard with information about jobs, pull requests, the merge
queue, etc.
</li>
<li>
<a href="https://amd64.ocp.releases.ci.openshift.org">
amd64.ocp.releases.ci.openshift.org
</a>: OCP AMD 64 release status page.
</li>
<li>
<a href="https://ppc64le.ocp.releases.ci.openshift.org">
ppc64le.ocp.releases.ci.openshift.org
</a>: OCP PowerPC 64 LE release status page.
</li>
<li>
<a href="https://s390x.ocp.releases.ci.openshift.org">
s390x.ocp.releases.ci.openshift.org
</a>: OCP S390x release status page.
</li>
<li>
<a href="https://amd64.origin.releases.ci.openshift.org">
amd64.origin.releases.ci.openshift.org
</a>: OKD release status page.
</li>
<li>
<a href="https://search.ci.openshift.org">search.ci.openshift.org</a>:
search tool for error messages in job logs and Bugzilla bugs.
</li>
<li>
<a href="https://sippy.ci.openshift.org">sippy.ci.openshift.org</a>:
CI release health summary.
</li>
<li>
<a href="https://bugs.ci.openshift.org">bugs.ci.openshift.org</a>:
Bugzilla bug overviews, backporting and release viewer.
</li>
</ul>
<h2 id="contact"><a href="#contact">Contact</a></h2>
<p>DPTP maintains several means of contact:</p>
<ul>
<li>
Slack
<ul>
<li>
<code>#announce-testplatform</code>: general announcements and outages.
Usage is limited to the DPTP team, please do not post messages there.
</li>
<li>
<code>#forum-testplatform</code>: general queries and discussion for
the test platform. For general assistance, ping
<code>@dptp-helpdesk</code>. For reporting an outage, ping
<code>@dptp-triage</code>.
</li>
<li>
<code>#4-dev-triage</code>: queries and discussion for CI issues that
are not caused by the test platform.
</li>
<li>
<code>#forum-release-controller</code>: queries and discussion for the
<a href="https://github.com/openshift/release-controller">
<code>release-controller</code></a>, responsible for generating
Openshift release/update payloads and displaying the release status
pages.
</li>
</ul>
</li>
<li>
<a href="https://issues.redhat.com/projects/DPTP">Jira</a>
<ul>
<li>
<a href="https://issues.redhat.com/browse/DPTP-417">Story template</a>
for feature requests.
</li>
<li>
<a href="https://issues.redhat.com/browse/DPTP-419">Bug template</a>
for bugs and issues.
</li>
<li>
<a href="https://issues.redhat.com/browse/DPTP-897">Consulting
template</a> for long-term, asynchronous discussion.
</li>
</ul>
</li>
</ul>
`
// Labels distinguishing the two kinds of registry entries rendered by
// the shared workflowJob template.
const (
	workflowType = "Workflow"
	jobType      = "Job"
)
// workflowJob is a struct that can define either a workflow or a job:
// it embeds the registry workflow data and labels it with a Type.
type workflowJob struct {
api.RegistryWorkflow
// Type labels the entry — presumably workflowType or jobType; confirm at call sites.
Type string
}
// Jobs is the root of the job-search page data: every org that has
// multi-stage tests, plus a flag recording whether any test carries a
// variant (which changes the rendered table layout).
type Jobs struct {
	ContainsVariant bool
	Orgs            []Org
}

// Org groups the repos of one GitHub organization, sorted by name.
type Org struct {
	Name  string
	Repos []Repo
}

// Repo groups the branches of one repository, sorted by name.
type Repo struct {
	Name     string
	Branches []Branch
}

// Branch holds the multi-stage tests of one branch: variant-less tests in
// Tests, and per-variant test lists in Variants.
type Branch struct {
	Name     string
	Tests    []string
	Variants []Variant
}

// Variant holds the tests configured for one ci-operator config variant.
type Variant struct {
	Name  string
	Tests []string
}
// repoSpan returns how many table rows the repo occupies in the job table:
// one header row for the repo plus one per branch and, when variants are
// rendered, one extra header row per branch plus one row per variant.
func repoSpan(r Repo, containsVariant bool) int {
	if !containsVariant {
		return len(r.Branches) + 1
	}
	total := 1
	for _, branch := range r.Branches {
		total += len(branch.Variants) + 2
	}
	return total
}
// orgSpan returns how many table rows the org occupies: one header row plus
// the span of each of its repos.
func orgSpan(o Org, containsVariant bool) int {
	total := 1
	for _, repo := range o.Repos {
		total += repoSpan(repo, containsVariant)
	}
	return total
}
// githubLink renders a self-labelled HTML anchor to the given step-registry
// path in the openshift/release repository on GitHub.
func githubLink(path string) template.HTML {
	url := "https://github.com/openshift/release/blob/master/ci-operator/step-registry/" + path
	return template.HTML("<a href=\"" + url + "\">" + url + "</a>")
}
// createGitHubUserList renders the given GitHub handles (or org/team names)
// as an HTML unordered list of profile links.
func createGitHubUserList(items []string) string {
	parts := make([]string, 0, len(items)+2)
	parts = append(parts, "<ul>")
	for _, item := range items {
		parts = append(parts, fmt.Sprintf("\n<li><a href=\"https://github.com/%s\">%s</a></li>", item, item))
	}
	parts = append(parts, "</ul>")
	return strings.Join(parts, "")
}
// ownersBlock renders the OWNERS information of a registry component
// (approvers, reviewers, required reviewers and labels) as an HTML fragment.
// Empty sections are omitted.
func ownersBlock(owners repoowners.Config) template.HTML {
	var builder strings.Builder
	builder.WriteString("<h2 id=\"owners\"><a href=\"#owners\">Owners:</a></h2>")
	// section emits one <h4> heading linking to its own anchor, followed by
	// the user list; it does nothing when there are no entries.
	section := func(id, title string, entries []string) {
		if len(entries) == 0 {
			return
		}
		builder.WriteString("<h4 id=\"" + id + "\"><a href=\"#" + id + "\">" + title + ":</a></h4>\n")
		builder.WriteString(createGitHubUserList(entries))
	}
	section("approvers", "Approvers", owners.Approvers)
	section("reviewers", "Reviewers", owners.Reviewers)
	section("required_reviewers", "Required Reviewers", owners.RequiredReviewers)
	// Bug fix: the labels heading previously emitted `href"#labels"` (the
	// `=` was missing), producing an invalid anchor attribute.
	section("labels", "Labels", owners.Labels)
	return template.HTML(builder.String())
}
// getBaseTemplate builds the template shared by all registry pages, wiring
// in helper functions for documentation lookup, SVG graph rendering and the
// rowspan math used by the job tables.
func getBaseTemplate(workflows registry.WorkflowByName, chains registry.ChainByName, docs map[string]string) *template.Template {
	base := template.New("baseTemplate").Funcs(
		template.FuncMap{
			// docsForName returns the stored documentation for a component.
			"docsForName": func(name string) string {
				return docs[name]
			},
			"testStepNameAndType": getTestStepNameAndType,
			// noescape marks a string as safe HTML, bypassing escaping.
			"noescape": func(str string) template.HTML {
				return template.HTML(str)
			},
			"toLower": strings.ToLower,
			// workflowGraph renders the SVG step graph of a workflow; on
			// failure the error text is rendered in its place.
			"workflowGraph": func(as string, wfType string) template.HTML {
				svg, err := WorkflowGraph(as, workflows, chains, wfType)
				if err != nil {
					return template.HTML(err.Error())
				}
				return template.HTML(svg)
			},
			// chainGraph renders the SVG step graph of a chain.
			"chainGraph": func(as string) template.HTML {
				svg, err := ChainGraph(as, chains)
				if err != nil {
					return template.HTML(err.Error())
				}
				return template.HTML(svg)
			},
			"orgSpan":  orgSpan,
			"repoSpan": repoSpan,
			// inc/doubleInc offset indices when templates compute rowspans.
			"inc": func(i int) int {
				return i + 1
			},
			"doubleInc": func(i int) int {
				return i + 2
			},
			"githubLink":  githubLink,
			"ownersBlock": ownersBlock,
		},
	)
	base, err := base.Parse(templateDefinitions)
	if err != nil {
		// Parse failures are logged but a (partial) template is still
		// returned; callers continue with whatever parsed.
		logrus.Errorf("Failed to load step list template: %v", err)
	}
	return base
}
// stepNameAndType identifies a test step by name together with how it is
// defined: "reference", "chain", or empty for a literal step.
type stepNameAndType struct {
	Name string
	Type string
}
// getTestStepNameAndType extracts the display name of a test step and a tag
// describing its kind ("reference", "chain", or empty for a literal step).
func getTestStepNameAndType(step api.TestStep) stepNameAndType {
	var result stepNameAndType
	switch {
	case step.LiteralTestStep != nil:
		result.Name = step.As
	case step.Reference != nil:
		result.Name = *step.Reference
		result.Type = "reference"
	case step.Chain != nil:
		result.Name = *step.Chain
		result.Type = "chain"
	}
	return result
}
// jobToWorkflow converts a multi-stage test configuration into a workflowJob
// so the job can be rendered with the shared workflow template. It returns
// the (possibly replaced) docs map: when the job contains literal steps,
// their commands are syntax-highlighted and added under the step name to a
// fresh copy of docs, leaving the caller's map untouched.
func jobToWorkflow(name string, config api.MultiStageTestConfiguration, workflows registry.WorkflowByName, docs map[string]string) (workflowJob, map[string]string) {
	// Collect all steps once into freshly allocated storage. The previous
	// code appended config.Test/config.Post directly onto config.Pre (twice),
	// which can write into config.Pre's backing array when it has spare
	// capacity and clobber the caller's slices.
	steps := make([]api.TestStep, 0, len(config.Pre)+len(config.Test)+len(config.Post))
	steps = append(steps, config.Pre...)
	steps = append(steps, config.Test...)
	steps = append(steps, config.Post...)
	// Check whether any literal test steps exist before copying docs.
	literalExists := false
	for _, step := range steps {
		if step.LiteralTestStep != nil {
			literalExists = true
			break
		}
	}
	if literalExists {
		// Copy-on-write: never mutate the shared docs map.
		newDocs := make(map[string]string, len(docs)+1)
		for k, v := range docs {
			newDocs[k] = v
		}
		docs = newDocs
		for _, step := range steps {
			if step.LiteralTestStep == nil {
				continue
			}
			baseDoc := fmt.Sprintf(`Container image: <span style="font-family:monospace">%s</span>`, step.From)
			if highlighted, err := syntaxBash(step.Commands); err == nil {
				docs[step.As] = fmt.Sprintf("%s<br>Command: %s", baseDoc, highlighted)
			} else {
				// Fall back to an unhighlighted <pre> block.
				logrus.WithError(err).Errorf("Failed to syntax highlight command %s for job %s", step.As, name)
				docs[step.As] = fmt.Sprintf("%s<br>Command: <pre>%s</pre>", baseDoc, step.Commands)
			}
		}
	}
	// Fill unset phases/profile from the referenced workflow, if any.
	if config.Workflow != nil {
		workflow := workflows[*config.Workflow]
		if config.ClusterProfile == "" {
			config.ClusterProfile = workflow.ClusterProfile
		}
		if config.Pre == nil {
			config.Pre = workflow.Pre
		}
		if config.Test == nil {
			config.Test = workflow.Test
		}
		if config.Post == nil {
			config.Post = workflow.Post
		}
	}
	return workflowJob{
		RegistryWorkflow: api.RegistryWorkflow{
			As:            name,
			Documentation: docs[name],
			Steps:         config,
		},
		Type: jobType}, docs
}
// writeErrorPage renders the shared error template with the given error and
// HTTP status. If the error template itself fails to parse, a plain-text 500
// response is written instead.
func writeErrorPage(w http.ResponseWriter, pageErr error, status int) {
	tmpl, parseErr := template.New("errPage").Parse(errPage)
	if parseErr != nil {
		w.WriteHeader(http.StatusInternalServerError)
		fmt.Fprintf(w, "%s: %v", http.StatusText(http.StatusInternalServerError), parseErr)
		return
	}
	w.WriteHeader(status)
	writePage(w, "Error: Openshift CI Registry", tmpl, fmt.Sprintf("%s: %v", http.StatusText(status), pageErr))
}
// writePage wraps the executed body template in the shared HTML page
// skeleton. A template-execution failure degrades to a plain-text 500
// response (the page start has already been written at that point).
func writePage(w http.ResponseWriter, title string, body *template.Template, data interface{}) {
	fmt.Fprintf(w, htmlPageStart, title)
	execErr := body.Execute(w, data)
	if execErr != nil {
		w.WriteHeader(http.StatusInternalServerError)
		fmt.Fprintf(w, "%s: %v", http.StatusText(http.StatusInternalServerError), execErr)
		return
	}
	fmt.Fprintln(w, htmlPageEnd)
}
// helpHandler renders the static documentation pages served under /help.
// subPath selects the page (the empty string is the getting-started page).
// Each page template can call the syntax-highlighting helpers registered
// below, and receives a map of named example snippets as its data.
func helpHandler(subPath string, w http.ResponseWriter, _ *http.Request) {
	start := time.Now()
	defer func() { logrus.Infof("rendered in %s", time.Since(start)) }()
	helpFuncs := template.New("helpPage").Funcs(
		template.FuncMap{
			// Each *Syntax helper highlights a snippet in the named
			// language; on failure the raw source is returned unchanged.
			"yamlSyntax": func(source string) template.HTML {
				formatted, err := syntaxYAML(source)
				if err != nil {
					logrus.Errorf("Failed to format source file: %v", err)
					return template.HTML(source)
				}
				return template.HTML(formatted)
			},
			"bashSyntax": func(source string) template.HTML {
				formatted, err := syntaxBash(source)
				if err != nil {
					logrus.Errorf("Failed to format source file: %v", err)
					return template.HTML(source)
				}
				return template.HTML(formatted)
			},
			"dockerfileSyntax": func(source string) template.HTML {
				formatted, err := syntaxDockerfile(source)
				if err != nil {
					logrus.Errorf("Failed to format source file: %v", err)
					return template.HTML(source)
				}
				return template.HTML(formatted)
			},
			"plaintextSyntax": func(source string) template.HTML {
				formatted, err := syntaxPlaintext(source)
				if err != nil {
					logrus.Errorf("Failed to format source file: %v", err)
					return template.HTML(source)
				}
				return template.HTML(formatted)
			},
		},
	)
	var helpTemplate *template.Template
	var err error
	data := make(map[string]string)
	// Select the page template and populate the example snippets it expects.
	switch subPath {
	case "":
		helpTemplate, err = helpFuncs.Parse(gettingStartedPage)
		data["refExample"] = refExample
		data["refFromImageExample"] = refFromImageExample
		data["credentialExample"] = credentialExample
		data["chainExample"] = chainExample
		data["workflowExample"] = workflowExample
		data["configExample1"] = configExample1
		data["configExample2"] = configExample2
		data["configExample3"] = configExample3
		data["configExample4"] = configExample4
		data["paramsExample"] = paramsExample
		data["paramsPropagation"] = paramsPropagation
		data["paramsRequired"] = paramsRequired
		data["paramsRequiredTest"] = paramsRequiredTest
	case "/adding-components":
		helpTemplate, err = helpFuncs.Parse(addingComponentPage)
	case "/release":
		data["updateconfigExample"] = updateconfigExample
		data["determinizeCheckExample"] = determinizeCheckExample
		data["makeUpdateExample"] = makeUpdateExample
		data["ciopConfigDirExample"] = ciopConfigDirExample
		data["makeNewRepoExample"] = makeNewRepoExample
		helpTemplate, err = helpFuncs.Parse(releasePage)
	case "/private-repositories":
		data["privateRepoProwgenConfigExample"] = privateRepoProwgenConfigExample
		helpTemplate, err = helpFuncs.Parse(privateRepositoriesPage)
	case "/examples":
		helpTemplate, err = helpFuncs.Parse(examplesPage)
		data["awsExample"] = awsExample
		data["imageExampleRef"] = imageExampleRef
		data["imageExampleCommands"] = imageExampleCommands
		data["imageExampleConfig"] = imageExampleConfig
		data["imageExampleLiteral"] = imageExampleLiteral
		data["imagePromotionConfig"] = imagePromotionConfig
		data["imageConsumptionConfig"] = imageConsumptionConfig
	case "/ci-operator":
		helpTemplate, err = helpFuncs.Parse(ciOperatorOverviewPage)
		data["ciOperatorInputConfig"] = ciOperatorInputConfig
		data["ciOperatorPipelineConfig"] = ciOperatorPipelineConfig
		data["multistageDockerfile"] = multistageDockerfile
		data["ciOperatorImageConfig"] = ciOperatorImageConfig
		data["ciOperatorPromotionConfig"] = ciOperatorPromotionConfig
		data["ciOperatorTagSpecificationConfig"] = ciOperatorTagSpecificationConfig
		data["ciOperatorReleaseConfig"] = ciOperatorReleaseConfig
		data["ciOperatorContainerTestConfig"] = ciOperatorContainerTestConfig
		data["ciOperatorPostsubmitTestConfig"] = ciOperatorPostsubmitTestConfig
		data["ciOperatorPeriodicTestConfig"] = ciOperatorPeriodicTestConfig
		data["ciOperatorProjectImageBuildroot"] = ciOperatorProjectImageBuildroot
		data["ciOperatorBuildRootFromRepo"] = ciOperatorBuildRootFromRepo
		data["ciOperatorBuildRootInRepo"] = ciOperatorBuildRootInRepo
		data["ciOperatorContainerTestWithDependenciesConfig"] = ciOperatorContainerTestWithDependenciesConfig
		data["depsPropagation"] = depsPropagation
	case "/leases":
		helpTemplate, err = helpFuncs.Parse(quotasAndLeasesPage)
		data["dynamicBoskosConfig"] = dynamicBoskosConfig
		data["staticBoskosConfig"] = staticBoskosConfig
	case "/links":
		helpTemplate, err = helpFuncs.Parse(linksPage)
	case "/operators":
		helpTemplate, err = helpFuncs.Parse(optionalOperatorOverviewPage)
		data["optionalOperatorBundleConfig"] = optionalOperatorBundleConfig
		data["optionalOperatorTestConfig"] = optionalOperatorTestConfig
		data["optionalOperatorIndexConsumerStep"] = optionalOperatorIndexConsumerStep
	default:
		writeErrorPage(w, errors.New("Invalid path"), http.StatusNotImplemented)
		return
	}
	if err != nil {
		writeErrorPage(w, err, http.StatusInternalServerError)
		return
	}
	writePage(w, "Step Registry Help Page", helpTemplate, data)
}
// mainPageHandler renders the registry landing page, listing every
// reference, chain, and workflow the registry agent knows about.
func mainPageHandler(agent agents.RegistryAgent, templateString string, w http.ResponseWriter, _ *http.Request) {
	start := time.Now()
	defer func() { logrus.Infof("rendered in %s", time.Since(start)) }()
	w.Header().Set("Content-Type", "text/html;charset=UTF-8")
	refs, chains, wfs, docs, _ := agent.GetRegistryComponents()
	base := getBaseTemplate(wfs, chains, docs)
	page, err := base.Parse(templateString)
	if err != nil {
		writeErrorPage(w, err, http.StatusInternalServerError)
		return
	}
	data := struct {
		References registry.ReferenceByName
		Chains     registry.ChainByName
		Workflows  registry.WorkflowByName
	}{refs, chains, wfs}
	writePage(w, "Step Registry Help Page", page, data)
}
// WebRegHandler returns the top-level HTTP handler for the step-registry UI,
// dispatching on the request path to the page-specific handlers.
func WebRegHandler(regAgent agents.RegistryAgent, confAgent agents.ConfigAgent) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		trimmed := strings.TrimPrefix(req.URL.Path, req.URL.Host)
		// Strip one leading and one trailing slash.
		trimmed = strings.TrimSuffix(strings.TrimPrefix(trimmed, "/"), "/")
		segments := strings.Split(trimmed, "/")
		// strings.Split always yields at least one element, so segments[0]
		// is safe to read.
		if segments[0] == "help" {
			helpHandler(strings.TrimPrefix(trimmed, "help"), w, req)
			return
		}
		switch len(segments) {
		case 1:
			switch segments[0] {
			case "":
				mainPageHandler(regAgent, mainPage, w, req)
			case "search":
				searchHandler(confAgent, w, req)
			case "job":
				jobHandler(regAgent, confAgent, w, req)
			default:
				writeErrorPage(w, errors.New("Invalid path"), http.StatusNotImplemented)
			}
		case 2:
			switch segments[0] {
			case "reference":
				referenceHandler(regAgent, w, req)
			case "chain":
				chainHandler(regAgent, w, req)
			case "workflow":
				workflowHandler(regAgent, w, req)
			default:
				writeErrorPage(w, fmt.Errorf("Component type %s not found", segments[0]), http.StatusNotFound)
			}
		default:
			writeErrorPage(w, errors.New("Invalid path"), http.StatusNotImplemented)
		}
	}
}
// syntax renders source through the chroma highlighter with the "dracula"
// style, inlining the generated CSS ahead of the highlighted markup. On a
// formatting error the partially written output is returned along with the
// error, matching the previous behavior.
func syntax(source string, lexer chroma.Lexer) (string, error) {
	style := styles.Get("dracula")
	// highlighted lines based on linking currently require WithClasses to be used
	formatter := html.New(html.Standalone(false), html.LinkableLineNumbers(true, "line"), html.WithLineNumbers(true), html.WithClasses(true))
	iterator, err := lexer.Tokenise(nil, source)
	if err != nil {
		return "", fmt.Errorf("failed to tokenise source: %w", err)
	}
	var out bytes.Buffer
	out.WriteString("<style>")
	if cssErr := formatter.WriteCSS(&out, style); cssErr != nil {
		return "", fmt.Errorf("failed to write css: %w", cssErr)
	}
	out.WriteString("</style>")
	err = formatter.Format(&out, style, iterator)
	return out.String(), err
}
// syntaxPlaintext highlights source as plain text (line numbers and style
// wrapping only).
func syntaxPlaintext(source string) (string, error) {
	return syntax(source, lexers.Get("plaintext"))
}

// syntaxYAML highlights source as YAML.
func syntaxYAML(source string) (string, error) {
	return syntax(source, lexers.Get("yaml"))
}

// syntaxDockerfile highlights source as a Dockerfile.
func syntaxDockerfile(source string) (string, error) {
	return syntax(source, lexers.Get("Dockerfile"))
}

// syntaxBash highlights source as a bash script.
func syntaxBash(source string) (string, error) {
	return syntax(source, lexers.Get("bash"))
}
// referenceHandler renders the help page for a single registry reference,
// looked up by the final path segment of the request URL.
func referenceHandler(agent agents.RegistryAgent, w http.ResponseWriter, req *http.Request) {
	start := time.Now()
	defer func() { logrus.Infof("rendered in %s", time.Since(start)) }()
	w.Header().Set("Content-Type", "text/html;charset=UTF-8")
	name := path.Base(req.URL.Path)
	page, err := template.New("referencePage").Funcs(
		template.FuncMap{
			// syntaxedSource bash-highlights the reference's commands; on
			// failure the raw source is shown unhighlighted.
			"syntaxedSource": func(source string) template.HTML {
				formatted, err := syntaxBash(source)
				if err != nil {
					logrus.Errorf("Failed to format source file: %v", err)
					return template.HTML(source)
				}
				return template.HTML(formatted)
			},
			"githubLink":  githubLink,
			"ownersBlock": ownersBlock,
		},
	).Parse(referencePage)
	if err != nil {
		writeErrorPage(w, fmt.Errorf("Failed to render page: %w", err), http.StatusInternalServerError)
		return
	}
	refs, _, _, docs, metadata := agent.GetRegistryComponents()
	if _, ok := refs[name]; !ok {
		writeErrorPage(w, fmt.Errorf("Could not find reference `%s`. If you reached this page via a link provided in the logs of a failed test, the failed step may be a literal defined step, which does not exist in the step registry. Please look at the job info page for the failed test instead.", name), http.StatusNotFound)
		return
	}
	// Per-file metadata is stored under the component's filename, derived
	// from the reference name plus the registry suffix.
	refMetadataName := fmt.Sprint(name, load.RefSuffix)
	if _, ok := metadata[refMetadataName]; !ok {
		writeErrorPage(w, fmt.Errorf("Could not find metadata for file `%s`. Please contact the Developer Productivity Test Platform.", refMetadataName), http.StatusInternalServerError)
		return
	}
	ref := struct {
		Reference api.RegistryReference
		Metadata  api.RegistryInfo
	}{
		Reference: api.RegistryReference{
			LiteralTestStep: api.LiteralTestStep{
				As:       name,
				Commands: refs[name].Commands,
				From:     refs[name].From,
			},
			Documentation: docs[name],
		},
		Metadata: metadata[refMetadataName],
	}
	writePage(w, "Registry Step Help Page", page, ref)
}
// chainHandler renders the help page for a single registry chain, looked up
// by the final path segment of the request URL.
func chainHandler(agent agents.RegistryAgent, w http.ResponseWriter, req *http.Request) {
	start := time.Now()
	defer func() { logrus.Infof("rendered in %s", time.Since(start)) }()
	w.Header().Set("Content-Type", "text/html;charset=UTF-8")
	name := path.Base(req.URL.Path)
	_, chains, _, docs, metadata := agent.GetRegistryComponents()
	page := getBaseTemplate(nil, chains, docs)
	page, err := page.Parse(chainPage)
	if err != nil {
		writeErrorPage(w, fmt.Errorf("Failed to render page: %w", err), http.StatusInternalServerError)
		return
	}
	if _, ok := chains[name]; !ok {
		writeErrorPage(w, fmt.Errorf("Could not find chain %s", name), http.StatusNotFound)
		return
	}
	// Per-file metadata is stored under the component's filename, derived
	// from the chain name plus the registry suffix.
	chainMetadataName := fmt.Sprint(name, load.ChainSuffix)
	if _, ok := metadata[chainMetadataName]; !ok {
		writeErrorPage(w, fmt.Errorf("Could not find metadata for file `%s`. Please contact the Developer Productivity Test Platform.", chainMetadataName), http.StatusInternalServerError)
		return
	}
	chain := struct {
		Chain    api.RegistryChain
		Metadata api.RegistryInfo
	}{
		Chain: api.RegistryChain{
			As:            name,
			Documentation: docs[name],
			Steps:         chains[name].Steps,
		},
		Metadata: metadata[chainMetadataName],
	}
	writePage(w, "Registry Chain Help Page", page, chain)
}
// workflowHandler renders the help page for a single registry workflow,
// looked up by the final path segment of the request URL.
func workflowHandler(agent agents.RegistryAgent, w http.ResponseWriter, req *http.Request) {
	start := time.Now()
	defer func() { logrus.Infof("rendered in %s", time.Since(start)) }()
	w.Header().Set("Content-Type", "text/html;charset=UTF-8")
	name := path.Base(req.URL.Path)
	_, chains, workflows, docs, metadata := agent.GetRegistryComponents()
	page := getBaseTemplate(workflows, chains, docs)
	page, err := page.Parse(workflowJobPage)
	if err != nil {
		writeErrorPage(w, fmt.Errorf("Failed to render page: %w", err), http.StatusInternalServerError)
		return
	}
	if _, ok := workflows[name]; !ok {
		writeErrorPage(w, fmt.Errorf("Could not find workflow %s", name), http.StatusNotFound)
		return
	}
	// Per-file metadata is stored under the component's filename, derived
	// from the workflow name plus the registry suffix.
	workflowMetadataName := fmt.Sprint(name, load.WorkflowSuffix)
	if _, ok := metadata[workflowMetadataName]; !ok {
		writeErrorPage(w, fmt.Errorf("Could not find metadata for file `%s`. Please contact the Developer Productivity Test Platform.", workflowMetadataName), http.StatusInternalServerError)
		return
	}
	workflow := struct {
		Workflow workflowJob
		Metadata api.RegistryInfo
	}{
		Workflow: workflowJob{
			RegistryWorkflow: api.RegistryWorkflow{
				As:            name,
				Documentation: docs[name],
				Steps:         workflows[name],
			},
			Type: workflowType},
		Metadata: metadata[workflowMetadataName],
	}
	writePage(w, "Registry Workflow Help Page", page, workflow)
}
// findConfigForJob returns the multi-stage configuration of the named test
// within the given release configuration. It errors when the test is absent
// or is not a multi-stage test.
func findConfigForJob(testName string, config api.ReleaseBuildConfiguration) (api.MultiStageTestConfiguration, error) {
	for i := range config.Tests {
		if config.Tests[i].As != testName {
			continue
		}
		if cfg := config.Tests[i].MultiStageTestConfiguration; cfg != nil {
			return *cfg, nil
		}
		return api.MultiStageTestConfiguration{}, fmt.Errorf("Provided job %s is not a multi stage type test", testName)
	}
	return api.MultiStageTestConfiguration{}, fmt.Errorf("Could not find job %s. Job either does not exist or is not a multi stage test", testName)
}
// MetadataFromQuery extracts the ci-operator metadata (org, repo, branch and
// optional variant) from the request's query parameters. On any validation
// failure the appropriate status and message have already been written to w
// and a non-nil error is returned; callers should simply return.
func MetadataFromQuery(w http.ResponseWriter, r *http.Request) (api.Metadata, error) {
	if r.Method != http.MethodGet {
		w.WriteHeader(http.StatusNotImplemented)
		err := fmt.Errorf("expected GET, got %s", r.Method)
		if _, errWrite := w.Write([]byte(http.StatusText(http.StatusNotImplemented))); errWrite != nil {
			return api.Metadata{}, fmt.Errorf("%s and writing the response body failed with %w", err.Error(), errWrite)
		}
		return api.Metadata{}, err
	}
	// requiredQuery fetches a mandatory query parameter, reporting a
	// 400 Bad Request to the client when it is absent. This replaces three
	// copies of the same check for org/repo/branch.
	requiredQuery := func(name string) (string, error) {
		value := r.URL.Query().Get(name)
		if value == "" {
			missingQuery(w, name)
			return "", fmt.Errorf("missing query %s", name)
		}
		return value, nil
	}
	org, err := requiredQuery(OrgQuery)
	if err != nil {
		return api.Metadata{}, err
	}
	repo, err := requiredQuery(RepoQuery)
	if err != nil {
		return api.Metadata{}, err
	}
	branch, err := requiredQuery(BranchQuery)
	if err != nil {
		return api.Metadata{}, err
	}
	return api.Metadata{
		Org:    org,
		Repo:   repo,
		Branch: branch,
		// The variant is optional; an empty value is allowed.
		Variant: r.URL.Query().Get(VariantQuery),
	}, nil
}
// missingQuery writes a 400 Bad Request response naming the absent or
// malformed query parameter.
func missingQuery(w http.ResponseWriter, field string) {
	w.WriteHeader(http.StatusBadRequest)
	fmt.Fprint(w, field+" query missing or incorrect")
}
// jobHandler renders the workflow-style help page for a single multi-stage
// job: it resolves the job's test configuration via the config agent and
// presents it as if it were a registry workflow.
func jobHandler(regAgent agents.RegistryAgent, confAgent agents.ConfigAgent, w http.ResponseWriter, r *http.Request) {
	start := time.Now()
	defer func() { logrus.Infof("rendered in %s", time.Since(start)) }()
	w.Header().Set("Content-Type", "text/html;charset=UTF-8")
	metadata, err := MetadataFromQuery(w, r)
	if err != nil {
		// MetadataFromQuery has already written the error response.
		return
	}
	test := r.URL.Query().Get(TestQuery)
	if test == "" {
		missingQuery(w, TestQuery)
		return
	}
	configs, err := confAgent.GetMatchingConfig(metadata)
	if err != nil {
		writeErrorPage(w, err, http.StatusNotFound)
		return
	}
	config, err := findConfigForJob(test, configs)
	if err != nil {
		writeErrorPage(w, err, http.StatusNotFound)
		return
	}
	// TODO(apavel): support jobs other than presubmits
	name := metadata.JobName("pull", test)
	_, chains, workflows, docs, _ := regAgent.GetRegistryComponents()
	jobWorkflow, docs := jobToWorkflow(name, config, workflows, docs)
	// Register the synthesized workflow under the job name in a copy of the
	// workflows map so the shared template helpers (e.g. workflowGraph) can
	// resolve it without mutating the agent's map.
	updatedWorkflows := make(registry.WorkflowByName)
	for k, v := range workflows {
		updatedWorkflows[k] = v
	}
	updatedWorkflows[name] = jobWorkflow.Steps
	page := getBaseTemplate(updatedWorkflows, chains, docs)
	page, err = page.Parse(workflowJobPage)
	if err != nil {
		writeErrorPage(w, fmt.Errorf("Failed to render page: %w", err), http.StatusInternalServerError)
		return
	}
	workflow := struct {
		Workflow workflowJob
		Metadata api.RegistryInfo
	}{
		Workflow: jobWorkflow,
		// Jobs have no registry file, so the metadata is empty.
		Metadata: api.RegistryInfo{},
	}
	writePage(w, "Job Test Workflow Help Page", page, workflow)
}
// addJob adds a test to the specified org, repo, and branch in the Jobs struct in alphabetical order
func (j *Jobs) addJob(orgName, repoName, branchName, variantName, testName string) {
	// Each level below performs the same find-or-insert: walk the sorted
	// list until the name matches (exists) or a greater name is found
	// (the index is the insertion point), then insert if absent.
	orgIndex := 0
	orgExists := false
	for _, currOrg := range j.Orgs {
		if diff := strings.Compare(currOrg.Name, orgName); diff == 0 {
			orgExists = true
			break
		} else if diff > 0 {
			break
		}
		orgIndex++
	}
	if !orgExists {
		newOrg := Org{Name: orgName}
		j.Orgs = append(j.Orgs[:orgIndex], append([]Org{newOrg}, j.Orgs[orgIndex:]...)...)
	}
	// Find or insert the repo within the org.
	repoIndex := 0
	repoExists := false
	for _, currRepo := range j.Orgs[orgIndex].Repos {
		if diff := strings.Compare(currRepo.Name, repoName); diff == 0 {
			repoExists = true
			break
		} else if diff > 0 {
			break
		}
		repoIndex++
	}
	if !repoExists {
		newRepo := Repo{Name: repoName}
		repos := j.Orgs[orgIndex].Repos
		j.Orgs[orgIndex].Repos = append(repos[:repoIndex], append([]Repo{newRepo}, repos[repoIndex:]...)...)
	}
	// Find or insert the branch within the repo.
	branchIndex := 0
	branchExists := false
	for _, currBranch := range j.Orgs[orgIndex].Repos[repoIndex].Branches {
		if diff := strings.Compare(currBranch.Name, branchName); diff == 0 {
			branchExists = true
			break
		} else if diff > 0 {
			break
		}
		branchIndex++
	}
	if !branchExists {
		newBranch := Branch{Name: branchName}
		branches := j.Orgs[orgIndex].Repos[repoIndex].Branches
		j.Orgs[orgIndex].Repos[repoIndex].Branches = append(branches[:branchIndex], append([]Branch{newBranch}, branches[branchIndex:]...)...)
	}
	// variantIndex == -1 marks "no variant": the test then goes into the
	// branch's own Tests list rather than a variant's.
	variantIndex := -1
	if variantName != "" {
		j.ContainsVariant = true
		variantIndex = 0
		variantExists := false
		for _, currVariant := range j.Orgs[orgIndex].Repos[repoIndex].Branches[branchIndex].Variants {
			if diff := strings.Compare(currVariant.Name, variantName); diff == 0 {
				variantExists = true
				break
			} else if diff > 0 {
				break
			}
			variantIndex++
		}
		if !variantExists {
			newVariant := Variant{Name: variantName}
			variants := j.Orgs[orgIndex].Repos[repoIndex].Branches[branchIndex].Variants
			j.Orgs[orgIndex].Repos[repoIndex].Branches[branchIndex].Variants = append(variants[:variantIndex], append([]Variant{newVariant}, variants[variantIndex:]...)...)
		}
	}
	// a single test shouldn't be added multiple times, but that case should be handled correctly just in case
	testIndex := 0
	testExists := false
	var testsArr []string
	if variantIndex == -1 {
		testsArr = j.Orgs[orgIndex].Repos[repoIndex].Branches[branchIndex].Tests
	} else {
		testsArr = j.Orgs[orgIndex].Repos[repoIndex].Branches[branchIndex].Variants[variantIndex].Tests
	}
	for _, currTestName := range testsArr {
		if diff := strings.Compare(currTestName, testName); diff == 0 {
			testExists = true
			break
		} else if diff > 0 {
			break
		}
		testIndex++
	}
	if !testExists {
		if variantIndex == -1 {
			j.Orgs[orgIndex].Repos[repoIndex].Branches[branchIndex].Tests = append(testsArr[:testIndex], append([]string{testName}, testsArr[testIndex:]...)...)
		} else {
			j.Orgs[orgIndex].Repos[repoIndex].Branches[branchIndex].Variants[variantIndex].Tests = append(testsArr[:testIndex], append([]string{testName}, testsArr[testIndex:]...)...)
		}
	}
}
// getAllMultiStageTests returns every multi-stage test known to the config
// agent, organized by org, repo, branch and variant.
func getAllMultiStageTests(confAgent agents.ConfigAgent) *Jobs {
	result := &Jobs{}
	for org, orgConfigs := range confAgent.GetAll() {
		for repo, repoConfigs := range orgConfigs {
			for _, releaseConfig := range repoConfigs {
				for _, test := range releaseConfig.Tests {
					if test.MultiStageTestConfiguration == nil {
						continue
					}
					result.addJob(org, repo, releaseConfig.Metadata.Branch, releaseConfig.Metadata.Variant, test.As)
				}
			}
		}
	}
	return result
}
// searchHandler renders the job search page, optionally filtered by the
// "job" query parameter.
func searchHandler(confAgent agents.ConfigAgent, w http.ResponseWriter, req *http.Request) {
	start := time.Now()
	defer func() { logrus.Infof("rendered in %s", time.Since(start)) }()
	w.Header().Set("Content-Type", "text/html;charset=UTF-8")
	matches := getAllMultiStageTests(confAgent)
	if term := req.URL.Query().Get("job"); term != "" {
		matches = searchJobs(matches, term)
	}
	base := getBaseTemplate(nil, nil, nil)
	page, err := base.Parse(jobSearchPage)
	if err != nil {
		writeErrorPage(w, fmt.Errorf("Failed to render page: %w", err), http.StatusInternalServerError)
		return
	}
	writePage(w, "Job Search Page", page, matches)
}
// searchJobs filters jobs down to the tests whose fully qualified job name
// contains the search string. Common prow job-name prefixes are stripped
// from the search term first so users can paste complete job names.
func searchJobs(jobs *Jobs, search string) *Jobs {
	search = strings.TrimPrefix(search, "pull-ci-")
	search = strings.TrimPrefix(search, "branch-ci-")
	matches := &Jobs{}
	for _, org := range jobs.Orgs {
		for _, repo := range org.Repos {
			for _, branch := range repo.Branches {
				for _, test := range branch.Tests {
					name := strings.Join([]string{org.Name, repo.Name, branch.Name, test}, "-")
					if strings.Contains(name, search) {
						matches.addJob(org.Name, repo.Name, branch.Name, "", test)
					}
				}
				for _, variant := range branch.Variants {
					for _, test := range variant.Tests {
						name := strings.Join([]string{org.Name, repo.Name, branch.Name, variant.Name, test}, "-")
						if strings.Contains(name, search) {
							matches.addJob(org.Name, repo.Name, branch.Name, variant.Name, test)
						}
					}
				}
			}
		}
	}
	return matches
}
|
package solcast
import (
"os"
)
// BaseUrl is the root endpoint of the Solcast API.
const BaseUrl = "https://api.solcast.com.au"

// Solcast_API_KeyName names the environment variable that holds the API key.
// NOTE(review): the underscored name is not idiomatic Go, but it is exported
// and renaming it would break callers.
const Solcast_API_KeyName = "SOLCAST_API_KEY"

// Config carries the API endpoint and the credential used by the client.
type Config struct {
	Url    string
	APIKey string
}

// Read builds a Config from the compiled-in endpoint and the API key found
// in the environment (empty when the variable is unset).
func Read() Config {
	cfg := Config{Url: BaseUrl}
	cfg.APIKey = os.Getenv(Solcast_API_KeyName)
	return cfg
}
|
package main
import "fmt"
// main converts a hard-coded length in feet to meters and prints it with
// three decimal places, e.g. "20.154 m".
func main() {
	// Fix: the original (Russian) comments called this factor "millimeters";
	// 0.3048 is the number of meters in one foot, and the printed unit is
	// meters. The redundant bare block and split print calls are folded into
	// a single Printf with identical output.
	const metersPerFoot float64 = 0.3048
	var feet = 66.12 // the length to convert, in feet
	fmt.Printf("%.3f m\n", feet*metersPerFoot)
}
|
package database
import (
"ewallet/models"
"github.com/jinzhu/gorm"
)
// Migrate drops and recreates every e-wallet table, then re-adds the
// foreign-key constraints. NOTE(review): this is destructive — it wipes all
// existing data on every run; presumably intended for development/test
// setups only — confirm before running against real data.
func Migrate(con *gorm.DB) {
	// Tables are dropped child-first (histories and balances before their
	// owners) and recreated parent-first.
	con.DropTableIfExists(models.UserBalanceHistory{}, models.UserBalance{}, models.User{}, models.BankBalanceHistory{}, models.BankBalance{})
	con.AutoMigrate(models.User{}, models.UserBalance{}, models.UserBalanceHistory{}, models.BankBalance{}, models.BankBalanceHistory{})
	//add foreign key
	con.Model(&models.UserBalance{}).AddForeignKey("user_id", "users(id)", "CASCADE", "RESTRICT")
	con.Model(&models.UserBalanceHistory{}).AddForeignKey("user_balance_id", "user_balances(id)", "CASCADE", "RESTRICT")
	con.Model(&models.BankBalanceHistory{}).AddForeignKey("bank_balance_id", "bank_balances(id)", "CASCADE", "RESTRICT" )
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.