text stringlengths 11 4.05M |
|---|
package stmt
import (
"go/ast"
"fmt"
"github.com/sky0621/go-testcode-autogen/inspect/result"
)
type GoStmtInspector struct{}
func (i *GoStmtInspector) IsTarget(node ast.Node) bool {
switch node.(type) {
case *ast.GoStmt:
return true
}
return false
}
// Inspect logs details of a go-statement node. It returns an error when
// node is not an *ast.GoStmt; callers are expected to gate on IsTarget.
// Nothing is written to aggregater yet (see FIXME).
func (i *GoStmtInspector) Inspect(node ast.Node, aggregater *result.Aggregater) error {
	gs, ok := node.(*ast.GoStmt)
	if !ok {
		// Error string lower-cased per Go convention (staticcheck ST1005).
		return fmt.Errorf("not target node: %#v", node)
	}
	// FIXME: temporary debug output; replace with aggregater recording.
	fmt.Println("===== GoStmtInspector ===================================================================================")
	fmt.Printf("GoStmt: %#v\n", gs)
	return nil
}
|
package main
import "fmt"
// main demonstrates goroutine completion signalling via channel close.
//
// Fix: the original read `state` before receiving from `wait`, racing with
// the goroutine's write (undefined behavior under the Go memory model; the
// race detector flags it). Receiving from the closed channel first
// establishes a happens-before edge, making the write to state visible.
func main() {
	state := 0
	wait := make(chan struct{})
	go func() {
		total := 0
		for i := 0; i < 12; i++ {
			total = 2*i + total
		}
		state = 1
		close(wait)
	}()
	// Wait for the goroutine; close(wait) happens-before this receive,
	// so the write state = 1 is guaranteed visible below.
	<-wait
	if state == 1 {
		fmt.Println(state)
	}
}
|
/*
Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudify
import (
rest "github.com/cloudify-incubator/cloudify-rest-go-client/cloudify/rest"
)
// Version - information about manager version as reported by the
// manager's "version" REST endpoint (see Client.GetVersion).
type Version struct {
	rest.BaseMessage
	Date    string `json:"date"`    // release date string
	Edition string `json:"edition"` // e.g. community/premium
	Version string `json:"version"`
	Build   string `json:"build"`
	Commit  string `json:"commit"` // source commit the build was cut from
}
// InstanceStatus - system service status for a single service instance.
// NOTE(review): tag casing is mixed ("state" vs "MainPID" etc.) — presumably
// mirrors the server payload; confirm against the REST API before changing.
type InstanceStatus struct {
	LoadState   string `json:"LoadState"`
	Description string `json:"Description"`
	State       string `json:"state"`
	MainPID     uint   `json:"MainPID"`
	ID          string `json:"Id"`
	ActiveState string `json:"ActiveState"`
	SubState    string `json:"SubState"`
}

// InstanceService - information about system service started on manager.
type InstanceService struct {
	Instances   []InstanceStatus `json:"instances"`
	DisplayName string           `json:"display_name"`
}

// Status returns the aggregated state of the service: the state of the
// last instance seen, except that "failed" is sticky — once any instance
// reports "failed", that is the result. "unknown" when there are no
// instances.
func (s InstanceService) Status() string {
	state := "unknown"
	for _, instance := range s.Instances {
		state = instance.State
		if state == "failed" {
			break // failed is terminal; later instances cannot override it
		}
	}
	return state
}
// Status - response from server about current status: the overall status
// message plus the per-service breakdown returned by the "status" endpoint
// (see Client.GetStatus).
type Status struct {
	rest.StrStatusMessage
	Services []InstanceService `json:"services"`
}
// GetVersion - manager version. Issues a GET against the "version"
// endpoint and decodes the response into a Version.
func (cl *Client) GetVersion() (*Version, error) {
	ver := &Version{}
	if err := cl.Get("version", ver); err != nil {
		return nil, err
	}
	return ver, nil
}
// GetStatus - manager status. Issues a GET against the "status" endpoint
// and decodes the response into a Status.
func (cl *Client) GetStatus() (*Status, error) {
	stat := &Status{}
	if err := cl.Get("status", stat); err != nil {
		return nil, err
	}
	return stat, nil
}
|
package slacknotifier
import "github.com/odpf/siren/domain"
// SlackMessage is the notifier-local representation of a Slack message,
// mirroring domain.SlackMessage (see fromDomain).
type SlackMessage struct {
	ReceiverName string `json:"receiver_name"` // channel or user the message targets
	ReceiverType string `json:"receiver_type"`
	Entity       string `json:"entity"`
	Message      string `json:"message"` // message body text
}
// fromDomain copies all fields from the domain-layer message into the
// receiver and returns the receiver for call chaining.
func (message *SlackMessage) fromDomain(m *domain.SlackMessage) *SlackMessage {
	message.ReceiverName = m.ReceiverName
	message.ReceiverType = m.ReceiverType
	message.Message = m.Message
	message.Entity = m.Entity
	return message
}
// SlackNotifier delivers a SlackMessage; the string argument's meaning is
// not visible here (presumably a token or channel — confirm at call sites).
type SlackNotifier interface {
	Notify(*SlackMessage, string) error
}
|
package main
import (
	"flag"
	"fmt"
	"io/ioutil"
	"os"

	"github.com/bouncepaw/mycomarkup/v2"
	"github.com/bouncepaw/mycomarkup/v2/globals"
	"github.com/bouncepaw/mycomarkup/v2/mycocontext"
)
// main reads a mycomarkup document from the file named by the flags and
// prints its HTML rendering to stdout.
//
// Fix: the original discarded the read error (`_ = fmt.Errorf(...)` neither
// prints nor returns) and then rendered the nil contents. Now the error is
// reported on stderr and the process exits non-zero.
func main() {
	hyphaName, filename := parseFlags()
	contents, err := ioutil.ReadFile(filename)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// TODO: provide a similar function but for []byte and use it here.
	ctx, _ := mycocontext.ContextFromStringInput(hyphaName, string(contents))
	ast := mycomarkup.BlockTree(ctx)
	fmt.Println(mycomarkup.BlocksToHTML(ctx, ast))
}
// parseFlags registers and parses the CLI flags, returning the hypha name
// and the input filename (stdin by default). It also marks the library as
// running in shell mode.
func parseFlags() (hyphaName, filename string) {
	globals.CalledInShell = true
	flag.StringVar(&filename, "filename", "/dev/stdin", "File with mycomarkup.")
	flag.StringVar(&hyphaName, "hypha-name", "", "Set hypha name. Relative links depend on it.")
	flag.Parse()
	return hyphaName, filename
}
|
package io
import (
"os"
"strconv"
"testing"
)
// cleanup removes the fixtures left behind by TestRmdirEmptyAll.
// Errors are deliberately ignored: the paths may simply not exist.
func cleanup() {
	_ = os.Remove("testdata/file")
	for _, dir := range []string{"testdata/a", "testdata/dirempty"} {
		_ = os.RemoveAll(dir)
	}
}
// TestMain cleans fixtures before and after the test run and honors the
// DEBUG environment variable to set the package debug level.
func TestMain(m *testing.M) {
	cleanup()
	if sdebug := os.Getenv("DEBUG"); sdebug != "" {
		_debug, _ = strconv.Atoi(sdebug)
	}
	code := m.Run()
	cleanup()
	os.Exit(code)
}
|
package main
import (
"encoding/json"
"fmt"
msamodule "github.com/MySocialApp/mysocialapp-event-handler/modules"
"gopkg.in/yaml.v2"
"log"
"net/http"
"reflect"
)
// Config is the YAML-backed application configuration.
type Config struct {
	// HTTP listen settings. NOTE(review): the struct field is Http but the
	// YAML key is "http-bind" — confirm this matches the shipped config files.
	Http struct {
		Bind string `yaml:"bind"`
	} `yaml:"http-bind"`
	// Default language used when an event carries none.
	Language struct {
		Default string `yaml:"default"`
	} `yaml:"language"`
	// Registered event handlers, one per route.
	EventHandler []Handler `yaml:"handlers"`
	// Optional action modules (only email is configured here).
	Modules struct {
		Email *msamodule.EmailModule `yaml:"email"`
	} `yaml:"modules"`
}
// Handler binds an event type to an HTTP route and a list of actions.
type Handler struct {
	EventType string        `yaml:"type"`
	Path      string        `yaml:"path"`
	Method    string        `yaml:"method"`
	Actions   []interface{} `yaml:"actions"`
}

// GetMethod returns the configured HTTP method, defaulting to POST when
// none was set.
func (h *Handler) GetMethod() string {
	if h.Method == "" {
		return http.MethodPost
	}
	return h.Method
}
// Action is a unit of work triggered by an incoming event.
type Action interface {
	// Init prepares the action before use.
	Init()
	// Do executes the action against the event using the app config.
	Do(event interface{}, config *Config) error
}
// GetAction resolves the Action implementation named by the event's "type"
// key. It returns nil (after logging) when the event is not a map, lacks a
// type, or names an unknown action.
//
// Fixes: guard v.Kind() before MapIndex (reflect.Value.MapIndex panics on
// non-map values), and drop the unreachable `break` after `return`.
func GetAction(event interface{}) Action {
	v := reflect.ValueOf(event)
	if !v.IsValid() {
		log.Printf("error reflecting value for %v", event)
		return nil
	}
	if v.Kind() != reflect.Map {
		log.Printf("event is not a map (%s) for %v", v.Kind(), event)
		return nil
	}
	typeValue := v.MapIndex(reflect.ValueOf("type"))
	if !typeValue.IsValid() || typeValue.IsNil() {
		log.Printf("type value invalid for %v", event)
		return nil
	}
	switch fmt.Sprintf("%s", typeValue.Interface()) {
	case "email":
		var a ActionEmail
		ConvertUsingYaml(event, &a)
		return &a
	default:
		log.Printf("action type not found (%s) for %v", typeValue.Interface(), event)
	}
	return nil
}
// ConvertUsingYaml copies event into action by round-tripping through YAML.
// Errors are deliberately ignored (best effort); on failure action is left
// unchanged. TODO: must have a way to improve this.
func ConvertUsingYaml(event interface{}, action interface{}) {
	raw, _ := yaml.Marshal(event)
	_ = yaml.Unmarshal(raw, action)
}
// ConvertUsingJson copies event into action by round-tripping through JSON.
// Errors are deliberately ignored (best effort); on failure action is left
// unchanged.
func ConvertUsingJson(event interface{}, action interface{}) {
	raw, _ := json.Marshal(event)
	_ = json.Unmarshal(raw, action)
}
|
package main
import (
"fmt"
"math"
)
// main demonstrates numeric constant conversion and precision loss.
func main() {
	// Store the untyped constant math.Pi narrowed to float32.
	var c float32 = math.Pi
	fmt.Println(c)
	// Converting to int truncates the fraction — precision is lost.
	fmt.Println(int(c))
	fmt.Println(math.Pi)
	// Note: boolean values cannot be converted to other types.
}
|
package types
// Task is a task: a to-do item with an identifier, a title, and a
// completion flag.
type Task struct {
	ID    int    // unique task identifier
	Title string // human-readable description
	Done  bool   // true once the task is completed
}
|
package main
import (
"fmt"
)
// main starts the file watcher in a goroutine and blocks forever.
// NOTE(review): nothing ever sends on or closes `done`, so main blocks
// indefinitely; if NewFileWatcher's goroutine exits, the runtime's
// all-goroutines-asleep deadlock detector will panic — confirm this is
// the intended lifetime.
func main() {
	done := make(chan struct{})
	fmt.Printf("%s\n", "Starting program")
	go NewFileWatcher()
	<-done
}
|
package leetcode
import "testing"
// TestSearch checks binary search over a sorted slice: one hit and one miss.
func TestSearch(t *testing.T) {
	cases := []struct {
		nums   []int
		target int
		want   int
	}{
		{[]int{-1, 0, 3, 5, 9, 12}, 9, 4},  // present → its index
		{[]int{-1, 0, 3, 5, 9, 12}, 2, -1}, // absent → -1
	}
	for _, c := range cases {
		if search(c.nums, c.target) != c.want {
			t.Fatal()
		}
	}
}
|
package repl
import "fmt"
// T is the carriage-return control sequence used to redraw the current line.
const T string = "\r"

// history keeps previously entered REPL inputs and a cursor into them.
type history struct {
	store []string // entries in insertion order
	pos   int      // index of the currently displayed entry
}
// goUp moves one entry back in history and re-renders that entry on the
// current line; it is a no-op at the oldest entry.
func (h *history) goUp() {
	next := h.pos + 1
	if next >= len(h.store) {
		return
	}
	h.pos = next
	fmt.Printf("%v%v", T, h.store[h.pos])
}
// save appends the raw text of the given input to the history store.
func(h *history) save(in *input) {
	h.store = append(h.store, in.in)
}
func newHistory() *history {
h := history{pos: 0}
return &h
} |
package main
import (
"fmt"
"io/ioutil"
"os"
"strconv"
"sync"
"time"
)
const (
	// gb is one gibibyte in bytes (1024^3).
	gb = 1024 * 1024 * 1024
)
// genFileData returns fileSize bytes of deterministic test data: the byte
// at index i is i modulo 256.
func genFileData(fileSize int) []byte {
	data := make([]byte, fileSize)
	for i := range data {
		data[i] = byte(i % 256)
	}
	return data
}
// FileData describes a file data: the bytes to write and the destination
// path on disk.
type FileData struct {
	Data []byte // file contents
	Path string // destination path
}
// MbsPerSec returns the throughput in megabytes per second given a number
// of bytes and the duration it took to process them.
//
// Fix: the original computed (mbs * dur) / second, i.e. megabyte-seconds,
// not a rate. Throughput is megabytes divided by elapsed seconds.
func MbsPerSec(nBytes int64, dur time.Duration) float64 {
	mbs := float64(nBytes) / (1024 * 1024)
	return mbs / dur.Seconds()
}
// test splits d into `parts` chunks, writes each chunk as its own file via
// `workers` concurrent writer goroutines, reports the measured write
// throughput, and removes the files afterwards.
func test(pathBase string, d []byte, parts int, workers int) {
	// Partition d into `parts` near-equal slices; the last part absorbs
	// any remainder so every byte is covered.
	var fileDatas []FileData
	partSize := len(d) / parts
	for i := 0; i < parts; i++ {
		start := i * partSize
		var data []byte
		if i == parts-1 {
			data = d[start:]
		} else {
			end := start + partSize
			data = d[start:end]
		}
		fd := FileData{
			Data: data,
			Path: pathBase + "." + strconv.Itoa(i),
		}
		fileDatas = append(fileDatas, fd)
	}
	// Timing starts before worker startup so the measurement includes
	// goroutine/channel overhead, not just raw disk writes.
	timeStart := time.Now()
	var wg sync.WaitGroup
	c := make(chan FileData)
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			// Drain the channel until it is closed; any write failure
			// aborts the whole benchmark.
			for fd := range c {
				err := ioutil.WriteFile(fd.Path, fd.Data, 0644)
				if err != nil {
					panic(err.Error())
				}
			}
			wg.Done()
		}()
	}
	for _, fd := range fileDatas {
		c <- fd
	}
	// Closing the channel releases the workers' range loops.
	close(c)
	wg.Wait()
	dur := time.Since(timeStart)
	nBytes := int64(len(d))
	mbsPerSec := MbsPerSec(nBytes, dur)
	fmt.Printf("Files: %4d, workers: %2d, time: %s, %.2f MBs/sec\n", parts, workers, dur, mbsPerSec)
	// Best-effort cleanup; removal errors are ignored.
	for _, fd := range fileDatas {
		os.Remove(fd.Path)
	}
}
// main benchmarks writing 1 GiB with increasing file counts and worker
// parallelism.
func main() {
	d := genFileData(1 * gb)
	cases := []struct {
		base    string
		parts   int
		workers int
	}{
		{"foo.txt.1", 1, 1},
		{"foo.txt.2", 1024, 1},
		{"foo.txt.3", 1024, 16},
		{"foo.txt.4", 1024, 64},
	}
	for _, c := range cases {
		test(c.base, d, c.parts, c.workers)
	}
}
|
package main
import (
"github.com/hashicorp/terraform/plugin"
"github.com/kradalby/terraform-provider-opnsense/opnsense"
)
// main registers and serves the OPNsense Terraform provider plugin.
func main() {
	opts := &plugin.ServeOpts{
		ProviderFunc: opnsense.Provider,
	}
	plugin.Serve(opts)
}
|
package kv_query_util
import (
"bufio"
"io"
"log"
"os"
"sync"
"github.com/transactional-cloud-serving-benchmark/tcsb/serialization_util"
)
// IPCDriver bundles the stdio pipes of a child benchmark-client process.
type IPCDriver struct {
	stdin  io.WriteCloser // child's stdin (we write queries here)
	stdout io.ReadCloser  // child's stdout (replies are decoded from here)
	stderr *bufio.Reader  // child's stderr, buffered for line reads
}

// NewIPCDriver wraps the given pipes; stderr is wrapped in a buffered
// reader so log lines can be read with ReadBytes('\n').
func NewIPCDriver(stdin io.WriteCloser, stdout, stderr io.ReadCloser) IPCDriver {
	d := IPCDriver{stdin: stdin, stdout: stdout}
	d.stderr = bufio.NewReader(stderr)
	return d
}
// RunIPCDriver pumps our stdin into the child's stdin, relays the child's
// stderr to the log, and decodes replies from the child's stdout into a
// reply collector until EOF.
//
// Fixes: the original log.Fatalf calls ("0 ", "2 ", "bad decoding: ") had
// no format verbs, so the error values were dropped with a vet warning;
// the `break` after a Fatalf was unreachable (Fatalf never returns).
func RunIPCDriver(ipcDriver IPCDriver, nValidation uint64, validationFilename string, nBurnIn uint64) {
	stdin := bufio.NewReader(os.Stdin)
	wg := &sync.WaitGroup{}
	// Forward our stdin to the child, then close its stdin to signal EOF.
	wg.Add(1)
	go func() {
		_, err := io.Copy(ipcDriver.stdin, stdin)
		ipcDriver.stdin.Close()
		if err == io.ErrClosedPipe {
			// not a problem
		} else if err != nil {
			log.Fatalf("stdin copy error: %v", err)
		}
		wg.Done()
	}()
	// Relay the child's stderr lines into our log until the pipe closes.
	wg.Add(1)
	go func() {
		for {
			data, err := ipcDriver.stderr.ReadBytes('\n')
			log.Printf("client log: %s", data)
			if err == io.ErrClosedPipe || err == io.EOF {
				break
				// not a problem
			} else if err != nil {
				log.Fatalf("stderr read error: %v", err)
			}
		}
		wg.Done()
	}()
	// Decode replies from the child's stdout on this goroutine.
	collector := NewReplyCollector(nBurnIn, nValidation, validationFilename)
	buf := make([]byte, 0, 4096)
	for {
		reply, err := serialization_util.DecodeNextReply(ipcDriver.stdout, &buf)
		if err == io.EOF {
			break
		} else if err != nil {
			log.Fatalf("bad decoding: %v", err)
		}
		collector.Update(reply)
	}
	wg.Wait()
	collector.Finish()
}
|
package main
import (
"log"
"strconv"
"time"
)
// ClaimLoop repeatedly claims rewards, sleeping until each claim window
// opens. On any error it defers to handleClaimError and returns.
//
// Fix: the original recursed into itself on every successful claim; Go
// does not eliminate tail calls, so a long-running bot grew its stack
// without bound. An explicit loop keeps the stack flat. (Also fixed the
// "recusively" typo in the doc comment.)
func ClaimLoop(game Game, api *GameAPI) {
	for {
		resetTime, err := time.Parse(time.RFC3339, game.ClaimReset)
		if err != nil {
			handleClaimError(api, err)
			return
		}
		duration := calcClaimWaitDuration(resetTime)
		if duration > 0 {
			log.Println("Making next claim in " + duration.String() + "...")
			time.Sleep(duration)
		}
		game, err = api.ClaimReward()
		if err != nil {
			handleClaimError(api, err)
			return
		}
		log.Println("Claimed reward, new score: " + strconv.Itoa(game.Score) + ".")
	}
}
// calcClaimWaitDuration returns how long to sleep before the next claim:
// the time remaining until resetTime plus a 5s safety margin plus up to 3
// random seconds of jitter.
//
// Fix: time.Until(t) is the idiomatic form of t.Sub(time.Now()).
func calcClaimWaitDuration(resetTime time.Time) time.Duration {
	return time.Until(resetTime) + 5*time.Second + randomSeconds(3)
}
// handleClaimError logs the error, waits 15 seconds, refreshes the game
// state and resumes the claim loop.
//
// Fix: the original fell through after the recursive handleClaimError
// call, so once the nested recovery eventually succeeded, the outer frame
// ALSO called ClaimLoop — with its stale zero-value game. Return after
// delegating so only one recovery path proceeds.
func handleClaimError(api *GameAPI, err error) {
	log.Println("Error in claim loop occurred, restarting in 15 seconds: " + err.Error())
	time.Sleep(15 * time.Second)
	game, err := api.GetGameInfo()
	if err != nil {
		handleClaimError(api, err)
		return
	}
	ClaimLoop(game, api)
}
|
package main
import (
"encoding/json"
"fmt"
)
// Message is a JSON-serializable chat message. Time is excluded from the
// JSON encoding via the "-" tag; the `valid` tags presumably drive an
// external validator — confirm which library consumes them.
type Message struct {
	Name string `json:"name" valid:"required"`
	Body string `json:"body" valid:"required"`
	Time int64  `json:"-" valid:"required"` // unix nanoseconds; never marshaled
}
// main marshals a sample Message and prints the JSON (Time is omitted by
// its `json:"-"` tag).
func main() {
	msg := Message{Name: "Alice", Body: "Hello", Time: 1294706395881547000}
	encoded, _ := json.Marshal(msg)
	fmt.Println(string(encoded))
}
|
package msgHandler
import (
"bytes"
"encoding/json"
"fmt"
"github.com/HNB-ECO/HNB-Blockchain/HNB/appMgr"
appComm "github.com/HNB-ECO/HNB-Blockchain/HNB/appMgr/common"
cmn "github.com/HNB-ECO/HNB-Blockchain/HNB/consensus/algorand/common"
"github.com/HNB-ECO/HNB-Blockchain/HNB/consensus/algorand/types"
"github.com/HNB-ECO/HNB-Blockchain/HNB/consensus/consensusManager/comm/consensusType"
"github.com/HNB-ECO/HNB-Blockchain/HNB/ledger"
"github.com/HNB-ECO/HNB-Blockchain/HNB/msp"
"github.com/HNB-ECO/HNB-Blockchain/HNB/p2pNetwork"
"github.com/HNB-ECO/HNB-Blockchain/HNB/p2pNetwork/message/reqMsg"
"github.com/HNB-ECO/HNB-Blockchain/HNB/txpool"
"github.com/json-iterator/go"
"time"
)
// Consensus message phase flags.
const (
	FlagZero  = 0 // propose
	FlagOne   = 1 // prevote
	FlagTwo   = 2 // precommit
	FlagThree = 3 // block
)
// checkContinue reports whether the consensus loop may keep processing
// messages: false while the peer is syncing or consensus has been halted.
//
// Fix: dropped the redundant `== true` comparison (gocritic boolExprSimplify).
func (h *TDMMsgHandler) checkContinue() bool {
	if h.isSyncStatus.Get() {
		ConsLog.Infof(LOGTABLE_CONS, "peer is syncing")
		return false
	}
	if !h.IsContinueConsensus {
		ConsLog.Errorf(LOGTABLE_CONS, "%s can not ContinueConsensus", appComm.HNB)
		return false
	}
	return true
}
// DeliverMsg is the handler's main event loop. It multiplexes five event
// sources until Quit fires: new-transaction notifications, peer consensus
// messages, internally generated consensus messages, block-sync results,
// and step timeouts. Registered on allRoutineExitWg so shutdown can wait
// for it.
func (h *TDMMsgHandler) DeliverMsg() {
	h.allRoutineExitWg.Add(1)
	var peerMsg *cmn.PeerMessage
	var err error
	for {
		select {
		// Transactions became available: try to propose at that height.
		case height := <-h.TxsAvailable:
			if !h.checkContinue() {
				continue
			}
			h.handleTxsAvailable(height)
		// A consensus message arrived from another peer: unwrap the two
		// envelope layers (ConsensusMsg -> TDMMessage), verify, dispatch.
		case peerMsg = <-h.PeerMsgQueue:
			if !h.checkContinue() {
				continue
			}
			if peerMsg == nil {
				ConsLog.Warningf(LOGTABLE_CONS, "(msgDeliver) peerMsg is nil")
				continue
			}
			var consensusMsg = &consensusType.ConsensusMsg{}
			var json = jsoniter.ConfigCompatibleWithStandardLibrary
			err = json.Unmarshal(peerMsg.Msg, &consensusMsg)
			if err != nil {
				ConsLog.Errorf(LOGTABLE_CONS, "(dbftMgr) unmarshal conMsg err %v", err)
				continue
			}
			tdmMsg := &cmn.TDMMessage{}
			if err := json.Unmarshal(consensusMsg.Payload, tdmMsg); err != nil {
				ConsLog.Errorf(LOGTABLE_CONS, "(msgDeliver) unmarshal tdmMsg err %v", err)
				continue
			}
			// Authenticate the sender's signature before acting on it.
			err := h.Verify(tdmMsg, peerMsg.Sender)
			if err != nil {
				ConsLog.Errorf(LOGTABLE_CONS, "(msgDeliver) Verify tdmMsg err %v", err)
				continue
			}
			ConsLog.Infof(LOGTABLE_CONS, "#(%v-%v) (msgDeliver) recv peerMsg <- %s type %v", h.Height, h.Round, msp.PeerIDToString(peerMsg.Sender), tdmMsg.Type)
			switch tdmMsg.Type {
			case cmn.TDMType_MsgCheckFork:
				err = h.HandleCheckFork(tdmMsg, peerMsg.PeerID)
				if err != nil {
					ConsLog.Errorf(LOGTABLE_CONS, "HandleCheckFork err %v", err.Error())
				}
			default:
				// Non-fork messages are only honored between members of the
				// current BFT group.
				if !h.isPeerInbgGroup(peerMsg.Sender) || !h.inbgGroup() {
					ConsLog.Warningf(LOGTABLE_CONS, "(msgDeliver) recv peerMsg <- %s not in bftGroup type %v ", msp.PeerIDToString(peerMsg.Sender), tdmMsg.Type)
					continue
				}
				err = h.processOuterMsg(tdmMsg, peerMsg.Sender, peerMsg.PeerID)
				if err != nil {
					ConsLog.Errorf(LOGTABLE_CONS, "(msgDeliver) process peerMsg<-%s err %v", peerMsg.Sender, err)
				}
			}
		// A locally generated message: broadcast it to peers, then feed it
		// back through the inner-message pipeline.
		case internalMsg := <-h.InternalMsgQueue:
			if !h.checkContinue() {
				continue
			}
			if internalMsg == nil {
				ConsLog.Warningf(LOGTABLE_CONS, "(msgDeliver) internalMsg is nil")
				continue
			}
			h.BroadcastMsgToAll(internalMsg)
			var consensusMsg = &consensusType.ConsensusMsg{}
			var json = jsoniter.ConfigCompatibleWithStandardLibrary
			err := json.Unmarshal(internalMsg.Msg, &consensusMsg)
			if err != nil {
				ConsLog.Warningf(LOGTABLE_CONS, "(dbftMgr) unmarshal conMsg err %v", err)
				continue
			}
			tdmMsg := &cmn.TDMMessage{}
			if err := json.Unmarshal(consensusMsg.Payload, tdmMsg); err != nil {
				ConsLog.Errorf(LOGTABLE_CONS, "(msgDeliver) unmarshal tdmMsg err %v", err)
				continue
			}
			ConsLog.Infof(LOGTABLE_CONS, "(msgDeliver) recv internalMsg type %s", tdmMsg.Type)
			err = h.processInnerMsg(tdmMsg, h.ID)
			if err != nil {
				ConsLog.Errorf(LOGTABLE_CONS, "(msgDeliver) process internalMsg err %v", err)
			}
		// A block arrived from the sync subsystem: validate it, apply it,
		// and update the committed state; any failure resets the sync.
		case syncData := <-h.recvSyncChan:
			if !h.IsContinueConsensus {
				ConsLog.Errorf(LOGTABLE_CONS, "%s can not ContinueConsensus", appComm.HNB)
				continue
			}
			// Stale messages from a previous sync round are dropped.
			if syncData.Version != h.syncHandler.Version {
				ConsLog.Errorf(LOGTABLE_CONS, "%s sync version not match %d != %d", CHAINID, syncData.Version, h.syncHandler.Version)
				continue
			}
			syncBlkCntPoint := h.cacheSyncBlkCount[CHAINID]
			syncBlktargetV := syncBlkCntPoint.SyncBlkCountTarget.GetCurrentIndex()
			syncBlkCntPoint.SyncBlkCountComplete.GetNextIndex()                     // increment the completed-sync block counter
			nowSyncBlkCnt := syncBlkCntPoint.SyncBlkCountComplete.GetCurrentIndex() // number of blocks synced so far
			ConsLog.Infof(LOGTABLE_CONS, "sync blk accepting syncBlktargetV<-nowSyncBlkCnt:[%d-%d],finishFlag %v v %d",
				syncBlktargetV, nowSyncBlkCnt, syncData.FinishFlag, h.syncHandler.Version)
			if syncData.FinishFlag {
				// Sync complete: leave sync mode and resume consensus.
				ConsLog.Infof(LOGTABLE_CONS, "sync succ")
				h.isSyncStatus.SetFalse()
				h.stopSyncTimer()
				h.syncHandler.Version++
				h.newStep()
			} else {
				block := syncData.Block
				if block == nil {
					ConsLog.Warningf(LOGTABLE_CONS, "sync err block is nil")
					h.resetSync()
					continue
				}
				blkNum := block.Header.BlockNum
				ConsLog.Infof(LOGTABLE_CONS, "recv sync blk %d FinishFlag %t", blkNum, syncData.FinishFlag)
				tdmBlk, err := types.Standard2Cons(block)
				if err != nil {
					ConsLog.Errorf(LOGTABLE_CONS, "conver err %s", err.Error())
					h.resetSync()
					continue
				}
				status, err := h.LoadLastCommitStateFromBlkAndMem(block)
				if err != nil {
					ConsLog.Errorf(LOGTABLE_CONS, "load status err %s", err)
					h.resetSync()
					continue
				}
				ConsLog.Infof(LOGTABLE_CONS, "(sync blk) %d vals change height last %d curr %d",
					blkNum, h.LastCommitState.LastHeightValidatorsChanged, status.LastHeightValidatorsChanged)
				// Adopt a newer validator set before validating the block.
				if status.LastHeightValidatorsChanged > h.LastCommitState.LastHeightValidatorsChanged {
					ConsLog.Infof(LOGTABLE_CONS, "(sync blk) %d vals changed %v", blkNum, status.Validators)
					h.LastCommitState.Validators = status.Validators
				}
				if err := h.blockExec.ValidateBlock(h.LastCommitState, tdmBlk); err != nil {
					ConsLog.Errorf(LOGTABLE_CONS, "(sync blk) %d validate err %v", blkNum, err)
					h.resetSync()
					continue
				}
				appMgr.BlockProcess(block)
				// NOTE(review): BlockProcess's return value is discarded and
				// this err is stale from LoadLastCommitStateFromBlkAndMem —
				// likely should be `err = appMgr.BlockProcess(block)`; confirm.
				if err != nil {
					ConsLog.Errorf(LOGTABLE_CONS, "(sync blk) %d fallBlock failed error %s", blkNum, err)
					h.resetSync()
					continue
				} else {
					hash, err := ledger.CalcBlockHash(block)
					if err != nil {
						ConsLog.Errorf(LOGTABLE_CONS, "(sync blk) %d CalcBlockHash error %s", blkNum, err)
						h.resetSync()
						continue
					}
					status.PreviousHash = hash
					// Drop the block's transactions from the local tx pool.
					if len(block.Txs) > 0 {
						txpool.DelTxs(CHAINID, block.Txs)
					}
					ConsLog.Infof(LOGTABLE_CONS, "(sync blk) %d fallBlock success", blkNum)
					ConsLog.Infof(LOGTABLE_CONS, h.PrintTDMBlockInfo(tdmBlk))
					err = h.updateToState(*status)
					if err != nil {
						ConsLog.Errorf(LOGTABLE_CONS, "update last status err %s", err)
						h.resetSync()
						continue
					}
					err = h.reconstructLastCommit(*status, block)
					if err != nil {
						ConsLog.Errorf(LOGTABLE_CONS, "update last commit err %s", err)
						h.resetSync()
						continue
					}
					h.execConsSucceedFuncs(tdmBlk)
					h.execStatusUpdatedFuncs()
					ConsLog.Infof(LOGTABLE_CONS, "(sync blk) %d lastValsChange %d", blkNum, h.LastCommitState.LastHeightValidatorsChanged)
				}
			}
		// A consensus step timed out: advance the state machine.
		case ti := <-h.timeoutTicker.Chan():
			if !h.checkContinue() {
				continue
			}
			h.handleTimeout(ti, h.RoundState)
		// Shutdown requested.
		case <-h.Quit():
			h.allRoutineExitWg.Done()
			ConsLog.Infof(LOGTABLE_CONS, "(msgDeliver) tdmMsgHandler consensus service stopped ")
			return
		}
	}
}
// BroadcastMsg drains EventMsgQueue, marshaling each event and transmitting
// it to the network, until Quit fires. Registered on allRoutineExitWg so
// shutdown can wait for it.
//
// Fix: the original discarded json.Marshal's error (`m, _ :=`) and would
// have transmitted a nil payload on failure; now the error is logged and
// the message skipped.
func (h *TDMMsgHandler) BroadcastMsg() {
	h.allRoutineExitWg.Add(1)
	for {
		select {
		case broadcastMsg := <-h.EventMsgQueue:
			m, err := json.Marshal(broadcastMsg)
			if err != nil {
				ConsLog.Errorf(LOGTABLE_CONS, "(msgHandler) marshal broadcastMsg err %v", err)
				continue
			}
			p2pNetwork.Xmit(reqMsg.NewConsMsg(m), true)
		case <-h.Quit():
			h.allRoutineExitWg.Done()
			ConsLog.Infof(LOGTABLE_CONS, "(msgHandler) broadcast routine stopped ")
			return
		}
	}
}
// getOtherVals returns every validator in the current set except this node
// itself (matched by digest address).
//
// Fixes: dropped the redundant else-after-continue branch and pre-sized the
// result slice to avoid repeated growth.
func (h *TDMMsgHandler) getOtherVals() []*types.Validator {
	vals := make([]*types.Validator, 0, len(h.Validators.Validators))
	for _, val := range h.Validators.Validators {
		if bytes.Equal(val.Address, h.digestAddr) {
			continue // skip ourselves
		}
		vals = append(vals, val)
	}
	return vals
}
// handleTxsAvailable reacts to new transactions: if this node is in the
// current BFT group, it enters the propose step for the given height at
// round 0. Serialized under h.mtx.
func (h *TDMMsgHandler) handleTxsAvailable(height uint64) {
	h.mtx.Lock()
	defer h.mtx.Unlock()
	if h.inbgGroup() {
		h.enterPropose(height, 0)
	}
}
// scheduleTimeout arms the timeout ticker to fire after duration for the
// given (height, round, step); the tick is consumed in DeliverMsg.
func (h *TDMMsgHandler) scheduleTimeout(duration time.Duration, height uint64, round int32, step types.RoundStepType) {
	h.timeoutTicker.ScheduleTimeout(timeoutInfo{duration, height, round, step})
}
// handleTimeout advances the consensus state machine when a step's timer
// expires. Stale timeouts — for an old height, an earlier round, or an
// earlier step of the current round — are ignored. Serialized under h.mtx.
func (h *TDMMsgHandler) handleTimeout(ti timeoutInfo, rs types.RoundState) {
	// Drop timers that no longer match the current (height, round, step).
	if ti.Height != h.Height || ti.Round < h.Round || (ti.Round == h.Round && ti.Step < h.Step) {
		return
	}
	h.mtx.Lock()
	defer h.mtx.Unlock()
	switch ti.Step {
	case types.RoundStepNewHeight:
		h.enterNewRound(ti.Height, 0)
	case types.RoundStepNewRound:
		h.timeState.EndConsumeTime(h)
		h.enterPropose(ti.Height, ti.Round)
	case types.RoundStepPropose:
		h.timeState.EndConsumeTime(h)
		h.enterPrevote(ti.Height, ti.Round)
	case types.RoundStepPrevote:
		h.timeState.EndConsumeTime(h)
		h.enterPrevoteWait(ti.Height, ti.Round)
	case types.RoundStepPrevoteWait:
		h.enterPrecommit(ti.Height, ti.Round)
	case types.RoundStepPrecommit:
		h.timeState.EndConsumeTime(h)
		h.enterPrecommitWait(ti.Height, ti.Round)
	case types.RoundStepPrecommitWait:
		// Precommit wait expired without a decision: start the next round.
		h.enterNewRound(ti.Height, ti.Round+1)
	default:
		ConsLog.Warningf(LOGTABLE_CONS, "Invalid timeout step: %v", ti.Step)
	}
}
// processOuterMsg dispatches a verified consensus message received from
// another peer to the matching handler by message type. Unknown types are
// logged and ignored; a handler error is wrapped and returned.
func (h *TDMMsgHandler) processOuterMsg(tdmMsg *cmn.TDMMessage, pubKeyID []byte, peerID uint64) error {
	var err error
	switch tdmMsg.Type {
	case cmn.TDMType_MsgProposal:
		err = h.HandleOuterProposalMsg(tdmMsg, pubKeyID)
	case cmn.TDMType_MsgVote:
		err = h.HandleOutterVoteMsg(tdmMsg, pubKeyID)
	case cmn.TDMType_MsgBlockPart:
		err = h.HandleBlockPartMsg(tdmMsg)
	case cmn.TDMType_MsgProposalHeartBeat:
		err = h.HandleProposalHeartBeatMsg(tdmMsg)
	case cmn.TDMType_MsgNewRoundStep:
		err = h.HandleNewRoundStepMsg(tdmMsg)
	case cmn.TDMType_MsgCommitStep:
		err = h.HandleCommitStepMsg(tdmMsg)
	case cmn.TDMType_MsgProposalPOL:
		err = h.HandleProposalPOLMsg(tdmMsg)
	case cmn.TDMType_MsgHasVote:
		err = h.HandleHasVoteMsg(tdmMsg)
	case cmn.TDMType_MsgVoteSetMaj23:
		err = h.HandleVoteSetMaj23Msg(tdmMsg)
	case cmn.TDMType_MsgVoteSetBits:
		err = h.HandleVoteSetBitsMsg(tdmMsg)
	case cmn.TDMType_MsgHeightReq:
		err = h.SendBlockHeightReqToPeer(tdmMsg, peerID)
	case cmn.TDMType_MsgHeihtResp:
		err = h.ReciveHeihtResp(tdmMsg, peerID)
	default:
		ConsLog.Warningf(LOGTABLE_CONS, "(processOuterMsg) type %v not supported", tdmMsg.Type)
	}
	if err != nil {
		return fmt.Errorf("(processOuterMsg) handle tdmMsg err %v", err)
	}
	return nil
}
// processInnerMsg dispatches a locally generated consensus message to the
// matching handler by message type. Unknown types are logged and ignored;
// a handler error is wrapped and returned.
//
// Fix: the default-case warning and the wrapped error were copy-pasted
// from processOuterMsg and mislabeled "(processOuterMsg)", which misdirects
// log-based debugging; relabeled to "(processInnerMsg)".
func (h *TDMMsgHandler) processInnerMsg(tdmMsg *cmn.TDMMessage, pubkeyID []byte) error {
	var err error
	switch tdmMsg.Type {
	case cmn.TDMType_MsgProposal:
		err = h.HandleInnerProposalMsg(tdmMsg)
	case cmn.TDMType_MsgVote:
		vote, _ := h.buildTypeFromVoteMsg(tdmMsg)
		ConsLog.Infof(LOGTABLE_CONS, "(processInnerMsg) VoteMsg height=%v,round=%v,vote=%v", vote.Height, vote.Round, vote)
		err = h.HandleInnerVoteMsg(tdmMsg, pubkeyID)
	case cmn.TDMType_MsgBlockPart:
		err = h.HandleBlockPartMsg(tdmMsg)
	case cmn.TDMType_MsgProposalHeartBeat:
		err = h.HandleProposalHeartBeatMsg(tdmMsg)
	case cmn.TDMType_MsgNewRoundStep:
		err = h.HandleNewRoundStepMsg(tdmMsg)
	case cmn.TDMType_MsgCommitStep:
		err = h.HandleCommitStepMsg(tdmMsg)
	case cmn.TDMType_MsgProposalPOL:
		err = h.HandleProposalPOLMsg(tdmMsg)
	case cmn.TDMType_MsgHasVote:
		err = h.HandleHasVoteMsg(tdmMsg)
	case cmn.TDMType_MsgVoteSetMaj23:
		err = h.HandleVoteSetMaj23Msg(tdmMsg)
	case cmn.TDMType_MsgVoteSetBits:
		err = h.HandleVoteSetBitsMsg(tdmMsg)
	default:
		ConsLog.Warningf(LOGTABLE_CONS, "(processInnerMsg) type %v not supported", tdmMsg.Type)
	}
	if err != nil {
		return fmt.Errorf("(processInnerMsg) handle tdmMsg err %v", err)
	}
	return nil
}
// getBftGroupID returns the identifier of the BFT group this node's
// validator set belongs to.
func (h *TDMMsgHandler) getBftGroupID() uint64 {
	return h.Validators.BgID
}
// recvForkSearch services fork-search messages until Quit fires: hash
// requests are answered directly, hash responses are forwarded to the
// fork-search worker (dropped if its channel is full). Registered on
// allRoutineExitWg so shutdown can wait for it.
func (h *TDMMsgHandler) recvForkSearch() {
	h.allRoutineExitWg.Add(1)
	var err error
	for {
		select {
		case forkSearchMsg := <-h.ForkSearchMsgQueue:
			// Unwrap the two envelope layers: ConsensusMsg -> TDMMessage.
			var consensusMsg = &consensusType.ConsensusMsg{}
			var json = jsoniter.ConfigCompatibleWithStandardLibrary
			err = json.Unmarshal(forkSearchMsg.Msg, &consensusMsg)
			if err != nil {
				ConsLog.Errorf(LOGTABLE_CONS, "(dbftMgr) unmarshal conMsg err %v", err)
				continue
			}
			tdmMsg := &cmn.TDMMessage{}
			if err := json.Unmarshal(consensusMsg.Payload, tdmMsg); err != nil {
				ConsLog.Errorf(LOGTABLE_CONS, "(msgDeliver) unmarshal tdmMsg err %v", err)
				continue
			}
			switch tdmMsg.Type {
			case cmn.TDMType_MsgBinaryBlockHashReq:
				err = h.forkSearchWorker.SendBinaryBlockHashResp(tdmMsg, forkSearchMsg.PeerID)
				if err != nil {
					ConsLog.Errorf(LOGTABLE_CONS, "forkSearch sendBinaryBlockHashResp err %v", err)
				}
			case cmn.TDMType_MsgBinaryBlockHashResp:
				// Non-blocking hand-off: drop the response if the worker
				// is not keeping up.
				select {
				case h.forkSearchWorker.ResChan <- tdmMsg:
				default:
					ConsLog.Warningf(LOGTABLE_CONS, "forkSearch ResChan full")
				}
			}
		case <-h.Quit():
			ConsLog.Infof(LOGTABLE_CONS, "forkSearch rountine stopped")
			h.allRoutineExitWg.Done()
			return
		}
	}
}
|
package lexer
import (
"github.com/kzbandai/playground/go/src/interpreter/token"
"testing"
)
// TestNextToken feeds a small Monkey program through the lexer and checks
// that every token (type and literal) comes out in the expected order.
func TestNextToken(t *testing.T) {
	input := `let five = 5;
let ten = 10;
let add = fn(x, y) {
x + y;
};
let result = add(five, ten);
!-/*5;
5 < 10 > 5;
if (5 < 10) {
return true;
} else {
return false;
}
10 == 10;
10 != 9;
`
	// Expected token stream, in source order.
	tests := []struct {
		expectedLiteral string
		expectedType    token.TokenType
	}{
		{"let", token.LET},
		{"five", token.IDENTIFIER},
		{"=", token.ASSIGN},
		{"5", token.INT},
		{";", token.SEMICOLON},
		{"let", token.LET},
		{"ten", token.IDENTIFIER},
		{"=", token.ASSIGN},
		{"10", token.INT},
		{";", token.SEMICOLON},
		{"let", token.LET},
		{"add", token.IDENTIFIER},
		{"=", token.ASSIGN},
		{"fn", token.FUNCTION},
		{"(", token.LPAREN},
		{"x", token.IDENTIFIER},
		{",", token.COMMA},
		{"y", token.IDENTIFIER},
		{")", token.RPAREN},
		{"{", token.LBRACE},
		{"x", token.IDENTIFIER},
		{"+", token.PLUS},
		{"y", token.IDENTIFIER},
		{";", token.SEMICOLON},
		{"}", token.RBRACE},
		{";", token.SEMICOLON},
		{"let", token.LET},
		{"result", token.IDENTIFIER},
		{"=", token.ASSIGN},
		{"add", token.IDENTIFIER},
		{"(", token.LPAREN},
		{"five", token.IDENTIFIER},
		{",", token.COMMA},
		{"ten", token.IDENTIFIER},
		{")", token.RPAREN},
		{";", token.SEMICOLON},
		{"!", token.BANG},
		{"-", token.MINUS},
		{"/", token.SLASH},
		{"*", token.ASTERISK},
		{"5", token.INT},
		{";", token.SEMICOLON},
		{"5", token.INT},
		{"<", token.LT},
		{"10", token.INT},
		{">", token.GT},
		{"5", token.INT},
		{";", token.SEMICOLON},
		{"if", token.IF},
		{"(", token.LPAREN},
		{"5", token.INT},
		{"<", token.LT},
		{"10", token.INT},
		{")", token.RPAREN},
		{"{", token.LBRACE},
		{"return", token.RETURN},
		{"true", token.TRUE},
		{";", token.SEMICOLON},
		{"}", token.RBRACE},
		{"else", token.ELSE},
		{"{", token.LBRACE},
		{"return", token.RETURN},
		{"false", token.FALSE},
		{";", token.SEMICOLON},
		{"}", token.RBRACE},
		{"10", token.INT},
		{"==", token.EQUAL},
		{"10", token.INT},
		{";", token.SEMICOLON},
		{"10", token.INT},
		{"!=", token.NOT_EQUAL},
		{"9", token.INT},
		{";", token.SEMICOLON},
	}
	l := New(input)
	for i, tt := range tests {
		tok := l.NextToken()
		if tok.Type != tt.expectedType {
			t.Fatalf("tests[%d] - token type is wrong. expected=%q, got=%q", i, tt.expectedType, tok.Type)
		}
		if tok.Literal != tt.expectedLiteral {
			t.Fatalf("tests[%d] - literal is wrong. expected=%q, got=%q", i, tt.expectedLiteral, tok.Literal)
		}
	}
}
|
package main
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"github.com/spf13/viper"
"github.com/spf13/cobra"
)
// Example invocations:
// ./Integration-with-Viper.exe get -u foo -p bar https://httpbin.org/basic-auth/foo/bar
// ./Integration-with-Viper.exe get https://httpbin.org/basic-auth/Jamal/gizli
// ./Integration-with-Viper.exe post -c "Post content to check Code" https://httpbin.org/post

// cmd is the root command; it only hosts the get/post subcommands and
// refuses to run on its own.
var cmd = &cobra.Command{
	Use:   "cobraintro",
	Short: "This tool gets a URL basic auth",
	Run: func(cmd *cobra.Command, args []string) {
		log.Fatalln("must use a subcommand")
	},
}
// getCmd performs an HTTP GET against the single URL argument, applying
// basic auth when username/password are configured, and prints the body.
//
// Fix: the original fatal messages discarded the underlying error, making
// failures undiagnosable; the error value is now included in each message.
var getCmd = &cobra.Command{
	Use:   "get",
	Short: "Get a URL",
	Run: func(cmd *cobra.Command, args []string) {
		if len(args) != 1 {
			log.Fatalln("must set URL!")
		}
		client := http.Client{}
		req, err := http.NewRequest("GET", args[0], nil)
		if err != nil {
			log.Fatalln("unable to build request:", err)
		}
		// Credentials come from flags or the config file via viper.
		username := viper.GetString("username")
		password := viper.GetString("password")
		if username != "" && password != "" {
			req.SetBasicAuth(username, password)
		}
		resp, err := client.Do(req)
		if err != nil {
			log.Fatalln("unable to get response:", err)
		}
		defer resp.Body.Close()
		content, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			log.Fatalln("unable to read body:", err)
		}
		fmt.Println(string(content))
	},
}
// postCmd performs an HTTP POST against the single URL argument, sending
// the -c/--content flag value as the body and applying basic auth when
// username/password are configured; the response body is printed.
//
// Fix: the original fatal messages discarded the underlying error, making
// failures undiagnosable; the error value is now included in each message.
var postCmd = &cobra.Command{
	Use:   "post",
	Short: "Post a URL",
	Run: func(cmd *cobra.Command, args []string) {
		if len(args) != 1 {
			log.Fatalln("must set URL!")
		}
		client := http.Client{}
		// A nil body is sent when no content was provided.
		var contentReader io.Reader
		content := viper.GetString("content")
		if content != "" {
			contentReader = bytes.NewReader([]byte(content))
		}
		req, err := http.NewRequest("POST", args[0], contentReader)
		if err != nil {
			log.Fatalln("unable to build request:", err)
		}
		username := viper.GetString("username")
		password := viper.GetString("password")
		if username != "" && password != "" {
			req.SetBasicAuth(username, password)
		}
		resp, err := client.Do(req)
		if err != nil {
			log.Fatalln("unable to get response:", err)
		}
		defer resp.Body.Close()
		respContent, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			log.Fatalln("unable to read body:", err)
		}
		fmt.Println(string(respContent))
	},
}
// main wires flags to viper keys, registers the subcommands, and runs the
// root command.
//
// Fixes: BindPFlag errors were silently ignored (a mis-bound flag would
// have gone unnoticed), and cmd.Execute's error was dropped so the process
// exited 0 on failure; both are now handled.
func main() {
	cmd.PersistentFlags().StringP("username", "u",
		viper.GetString("credentials.username"), "Username for credentials")
	cmd.PersistentFlags().StringP("password", "p",
		viper.GetString("credentials.password"), "Password for credentials")
	if err := viper.BindPFlag("username", cmd.PersistentFlags().Lookup("username")); err != nil {
		log.Fatalln("binding username flag:", err)
	}
	if err := viper.BindPFlag("password", cmd.PersistentFlags().Lookup("password")); err != nil {
		log.Fatalln("binding password flag:", err)
	}
	postCmd.PersistentFlags().StringP("content", "c", "", "Content for POST")
	if err := viper.BindPFlag("content", postCmd.PersistentFlags().Lookup("content")); err != nil {
		log.Fatalln("binding content flag:", err)
	}
	cmd.AddCommand(getCmd)
	cmd.AddCommand(postCmd)
	if err := cmd.Execute(); err != nil {
		log.Fatalln(err)
	}
}
// init points viper at an optional "cobra.*" config file in the working
// directory. ReadInConfig's error is deliberately ignored: the file is
// optional and flags/defaults still apply when it is absent.
func init() {
	viper.AddConfigPath(".")
	viper.SetConfigName("cobra")
	viper.ReadInConfig()
}
|
package models
// import (
// "github.com/messagedb/messagedb/meta/schema"
// "github.com/messagedb/messagedb/meta/utils"
// log "github.com/Sirupsen/logrus"
// "gopkg.in/mgo.v2"
// "gopkg.in/mgo.v2/bson"
// )
// var Team *TeamModel
// type TeamModel struct {
// *storage.Model
// }
// func (m *TeamModel) New() *schema.Team {
// return storage.CreateDocument(&schema.Team{}).(*schema.Team)
// }
// func (m *TeamModel) FindById(id interface{}) (*schema.Team, error) {
// team := &schema.Team{}
// err := m.FindId(id).One(team)
// if err != nil {
// return nil, err
// }
// return team, nil
// }
// func (m *TeamModel) FindByOrganizationIdAndName(orgId interface{}, name string) (*schema.Team, error) {
// team := &schema.Team{}
// err := m.Find(bson.M{"org_id": utils.ObjectId(orgId), "name": name}).One(team)
// if err != nil {
// return nil, err
// }
// return team, nil
// }
// func (m *TeamModel) FindOwnerTeamByOrganizationId(orgId interface{}) (*schema.Team, error) {
// return m.FindByOrganizationIdAndName(orgId, "owner")
// }
// func (m *TeamModel) FindAllByOrganizationId(orgId interface{}) ([]*schema.Team, error) {
// teams := []*schema.Team{}
// err := m.Find(bson.M{"org_id": utils.ObjectId(orgId)}).All(&teams)
// if err != nil {
// return nil, err
// }
// return teams, nil
// }
// func init() {
// Team = storage.RegisterModel(schema.Team{}, "teams", func(col *mgo.Collection) interface{} {
// return &TeamModel{storage.NewModel(col)}
// }).(*TeamModel)
// // create required indexes in MongoDB
// indexes := []mgo.Index{}
// indexes = append(indexes, mgo.Index{
// Key: []string{"org_id", "name"},
// Unique: true,
// DropDups: true,
// Background: true, // See notes.
// Sparse: false,
// })
// for _, index := range indexes {
// err := Team.EnsureIndex(index)
// if err != nil {
// log.Panicf("Failed to ensure index on 'users' Collection: %v", index)
// }
// }
// }
|
package booking
import (
"fmt"
"time"
)
// Schedule returns a time.Time parsed from a date in "M/DD/YYYY hh:mm:ss"
// form. A parse failure yields the zero time (error intentionally ignored).
func Schedule(date string) time.Time {
	appointment, _ := time.Parse("1/02/2006 15:04:05", date)
	return appointment
}
// HasPassed reports whether a date in "Month D, YYYY hh:mm:ss" form lies
// in the past. A parse failure yields the zero time, which always counts
// as passed.
func HasPassed(date string) bool {
	appointment, _ := time.Parse("January 2, 2006 15:04:05", date)
	return time.Now().After(appointment)
}
// IsAfternoonAppointment reports whether a date in
// "Weekday, Month D, YYYY hh:mm:ss" form falls between noon (inclusive)
// and 18:00 (exclusive).
func IsAfternoonAppointment(date string) bool {
	appointment, _ := time.Parse("Monday, January 2, 2006 15:04:05", date)
	hour := appointment.Hour()
	return 12 <= hour && hour < 18
}
// Description returns a human-readable sentence describing the appointment
// parsed from a date in "M/D/YYYY hh:mm:ss" form.
func Description(date string) string {
	appointment, _ := time.Parse("1/2/2006 15:04:05", date)
	formatted := appointment.Format("Monday, January 2, 2006, at 15:04.")
	return "You have an appointment on " + formatted
}
// AnniversaryDate returns a Time with this year's anniversary
func AnniversaryDate() time.Time {
mTime, _ := time.Parse("2006-01-2", fmt.Sprintf("%d-09-15", time.Now().Year()))
return mTime
}
|
package main
import (
"fmt"
)
// max returns the larger of two ints.
func max(a, b int) int {
	if b > a {
		return b
	}
	return a
}

// canJump reports whether the last index of nums is reachable when each
// element gives the maximum jump length from that position. It tracks the
// furthest index reachable so far; the moment the scan position passes that
// frontier, the end cannot be reached. Empty and single-element slices are
// trivially reachable (the loop simply never fails for them).
func canJump(nums []int) bool {
	reach := 0
	for i, step := range nums {
		if i > reach {
			return false
		}
		reach = max(reach, i+step)
	}
	return true
}
// test1 prints the result for a configuration whose last index is reachable.
func test1() {
	input := []int{2, 3, 1, 1, 4}
	fmt.Printf("can jump? %v\n", canJump(input))
}
// test2 prints the result for a configuration stuck at the zero element.
func test2() {
	input := []int{3, 2, 1, 0, 4}
	fmt.Printf("can jump? %v\n", canJump(input))
}
// test3 prints the result for the single-element edge case.
func test3() {
	input := []int{0}
	fmt.Printf("can jump? %v\n", canJump(input))
}
// main runs the three sample cases in order.
func main() {
	for _, tc := range []func(){test1, test2, test3} {
		tc()
	}
}
|
package order
import (
"context"
"tpay_backend/adminapi/internal/common"
"tpay_backend/model"
"tpay_backend/adminapi/internal/svc"
"tpay_backend/adminapi/internal/types"
"github.com/tal-tech/go-zero/core/logx"
)
// GetMerchantWithdrawOrderListLogic carries the request-scoped logger,
// context, and service dependencies for listing merchant withdraw orders.
type GetMerchantWithdrawOrderListLogic struct {
	logx.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}
// NewGetMerchantWithdrawOrderListLogic wires the request context and service
// context into a new logic value (returned by value, matching the
// surrounding go-zero style).
func NewGetMerchantWithdrawOrderListLogic(ctx context.Context, svcCtx *svc.ServiceContext) GetMerchantWithdrawOrderListLogic {
	return GetMerchantWithdrawOrderListLogic{
		Logger: logx.WithContext(ctx),
		ctx:    ctx,
		svcCtx: svcCtx,
	}
}
// GetMerchantWithdrawOrderList returns one page of merchant withdraw orders
// matching the filters in req, mapped into the API response shape.
func (l *GetMerchantWithdrawOrderListLogic) GetMerchantWithdrawOrderList(req types.GetMerchantWithdrawOrderListRequest) (*types.GetMerchantWithdrawOrderListResponse, error) {
	// Copy the request filters into the model-layer query object.
	f := model.FindWithdrawOrderList{
		Page:            req.Page,
		PageSize:        req.PageSize,
		StartCreateTime: req.StartCreateTime,
		EndCreateTime:   req.EndCreateTime,
		MerchantName:    req.MerchantName,
		OrderNo:         req.OrderNo,
		OrderStatus:     req.OrderStatus,
	}
	data, total, err := model.NewMerchantWithdrawOrderModel(l.svcCtx.DbEngine).FindList(f)
	if err != nil {
		// Log message translates to: "failed to query merchant withdraw
		// order list"; the caller receives a generic DB error code.
		l.Errorf("查询商户提现订单列表失败, err=%v", err)
		return nil, common.NewCodeError(common.SysDBGet)
	}
	// Map each database row onto the API list-item type. Note the upstream
	// fields are renamed on the way out (TransferOrderNo -> UpstreamOrderNo,
	// ChannelName -> UpstreamChannelName).
	var list []types.MerchantWithdrawOrderList
	for _, v := range data {
		list = append(list, types.MerchantWithdrawOrderList{
			OrderNo:             v.OrderNo,
			MerchantName:        v.MerchantName,
			OrderAmount:         v.OrderAmount,
			MerchantFee:         v.MerchantFee,
			RealAmount:          v.RealAmount,
			Remark:              v.Remark,
			BankName:            v.BankName,
			PayeeName:           v.PayeeName,
			CardNumber:          v.CardNumber,
			BranchName:          v.BranchName,
			AuditRemark:         v.AuditRemark,
			CreateTime:          v.CreateTime,
			AuditTime:           v.AuditTime,
			OrderStatus:         v.OrderStatus,
			UpstreamOrderNo:     v.TransferOrderNo,
			UpstreamChannelName: v.ChannelName,
			Currency:            v.Currency,
			DeductionMethod:     v.DeductionMethod,
		})
	}
	return &types.GetMerchantWithdrawOrderListResponse{
		Total: total,
		List:  list,
	}, nil
}
|
/*
Copyright © 2020 Denis Rendler <connect@rendler.me>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logs
import (
"bytes"
"io"
"os"
"strings"
"github.com/lajosbencz/glo"
)
// EOL is the line delimiter trimmed from each line before logging.
const EOL = "\n"

var (
	// Writer is the package-wide logging facility; handlers are attached in
	// init and can be swapped via UpdateStdIoHandler.
	Writer glo.Facility
	stdIo  = os.Stdout
	errIo  = os.Stderr
)
// init wires the default handlers: Info-level messages go to stdout, and
// Error through Emergency messages go to stderr.
func init() {
	Writer = glo.NewFacility()
	ho := glo.NewHandler(stdIo).PushFilter(glo.NewFilterLevelRange(glo.Info, glo.Info))
	he := glo.NewHandler(errIo).PushFilter(glo.NewFilterLevelRange(glo.Error, glo.Emergency))
	Writer.PushHandler(ho).PushHandler(he)
}
// UpdateStdIoHandler resets the Stdout and Stderr loggers based on the
// verbose flag: stdout filtering is delegated to NewVerbosityFilter
// (declared elsewhere in this package) while stderr keeps the fixed
// Error..Emergency range.
func UpdateStdIoHandler(verbose bool) {
	Writer.ClearHandlers()
	ho := glo.NewHandler(stdIo).PushFilter(NewVerbosityFilter(verbose))
	he := glo.NewHandler(errIo).PushFilter(glo.NewFilterLevelRange(glo.Error, glo.Emergency))
	Writer.PushHandler(ho).PushHandler(he)
}
// LogBuffer logs a buffered output line by line, using \n as the delimiter.
//
// Fix: the original logged one spurious empty entry whenever the buffer
// ended with a newline, because the final ReadString returned ("", io.EOF)
// and the empty line was logged before the EOF check. Empty lines in the
// middle of the buffer (explicit blank lines) are still logged as before.
func LogBuffer(lvl glo.Level, b bytes.Buffer) {
	for {
		line, err := b.ReadString('\n')
		// Nothing left after the last delimiter: stop without logging.
		if err == io.EOF && line == "" {
			return
		}
		_ = Writer.Log(lvl, strings.TrimRight(line, EOL))
		if err == io.EOF {
			return
		}
	}
}
|
/*
Write a function that pairs the first number in an array with the last, the second number with the second to last, etc.
Examples
pairs([1, 2, 3, 4, 5, 6, 7]) ➞ [[1, 7], [2, 6], [3, 5], [4, 4]]
pairs([1, 2, 3, 4, 5, 6]) ➞ [[1, 6], [2, 5], [3, 4]]
pairs([5, 9, 8, 1, 2]) ➞ [[5, 2], [9, 1], [8, 8]]
pairs([]) ➞ []
Notes
If the array has an odd length, repeat the middle element twice for the last pair.
Return an empty array if the input is an empty array.
*/
package main
// main exercises pairs against the example cases from the problem statement;
// eq panics on the first mismatch.
func main() {
	eq(pairs([]int{1, 2, 3, 4, 5, 6, 7}), [][2]int{{1, 7}, {2, 6}, {3, 5}, {4, 4}})
	eq(pairs([]int{1, 2, 3, 4, 5, 6}), [][2]int{{1, 6}, {2, 5}, {3, 4}})
	eq(pairs([]int{5, 9, 8, 1, 2}), [][2]int{{5, 2}, {9, 1}, {8, 8}})
	eq(pairs([]int{5, 6, 7}), [][2]int{{5, 7}, {6, 6}})
	eq(pairs([]int{1, 1, 4, 4, 5, 5}), [][2]int{{1, 5}, {1, 5}, {4, 4}})
	eq(pairs([]int{9, 9, 9, 9, 3, 3, 9}), [][2]int{{9, 9}, {9, 3}, {9, 3}, {9, 9}})
	eq(pairs([]int{5, 6}), [][2]int{{5, 6}})
	eq(pairs([]int{5}), [][2]int{{5, 5}})
	eq(pairs([]int{}), [][2]int{})
}
// pairs matches the first element with the last, the second with the
// second-to-last, and so on. For an odd-length slice the middle element is
// paired with itself; an empty slice yields no pairs.
func pairs(a []int) [][2]int {
	var out [][2]int
	n := len(a)
	for lo, hi := 0, n-1; lo < hi; lo, hi = lo+1, hi-1 {
		out = append(out, [2]int{a[lo], a[hi]})
	}
	if n%2 == 1 {
		mid := a[n/2]
		out = append(out, [2]int{mid, mid})
	}
	return out
}
// assert panics with a fixed message when the condition is false.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// eq asserts that two pair slices have the same length and identical
// elements in order, panicking (via assert) on any mismatch.
func eq(a, b [][2]int) {
	assert(len(a) == len(b))
	for i, got := range a {
		assert(got == b[i])
	}
}
|
package main
import (
"fmt"
)
// WorkerOverseer blocks until every worker has reported its exit on
// exitChan, then logs the total and shuts the process down via GracefulExit.
// (workerDeadCounter, maxWorkers, exitChan, verbose, workers and
// GracefulExit are package-level declarations not visible in this file —
// presumably flags and shared state; verify in the rest of the package.)
func WorkerOverseer() {
	for workerDeadCounter < *maxWorkers {
		// Each received value is the id of a worker that just finished.
		deadId := <-exitChan
		if *verbose {
			fmt.Printf("Worker %x ended (Sent %d requests) \n", deadId, workers[deadId].RequestCounter)
		}
		workerDeadCounter += 1
	}
	fmt.Printf("All (%d) workers died", workerDeadCounter)
	GracefulExit()
}
|
//go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 deployment_install_client.go InstallStrategyDeploymentInterface
package wrappers
import (
"context"
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorclient"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/operatorlister"
"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/ownerutil"
)
// ErrNilObject is returned when a nil Kubernetes object is supplied.
var ErrNilObject = errors.New("Bad object supplied: <nil>")

// InstallStrategyDeploymentInterface abstracts the Kubernetes operations an
// install strategy needs: creating RBAC objects, managing deployments and
// service accounts, and exposing the underlying client and lister.
type InstallStrategyDeploymentInterface interface {
	CreateRole(role *rbacv1.Role) (*rbacv1.Role, error)
	CreateRoleBinding(roleBinding *rbacv1.RoleBinding) (*rbacv1.RoleBinding, error)
	EnsureServiceAccount(serviceAccount *corev1.ServiceAccount, owner ownerutil.Owner) (*corev1.ServiceAccount, error)
	CreateDeployment(deployment *appsv1.Deployment) (*appsv1.Deployment, error)
	CreateOrUpdateDeployment(deployment *appsv1.Deployment) (*appsv1.Deployment, error)
	DeleteDeployment(name string) error
	GetServiceAccountByName(serviceAccountName string) (*corev1.ServiceAccount, error)
	FindAnyDeploymentsMatchingNames(depNames []string) ([]*appsv1.Deployment, error)
	FindAnyDeploymentsMatchingLabels(label labels.Selector) ([]*appsv1.Deployment, error)
	GetOpClient() operatorclient.ClientInterface
	GetOpLister() operatorlister.OperatorLister
}
// InstallStrategyDeploymentClientForNamespace implements
// InstallStrategyDeploymentInterface scoped to a single namespace.
type InstallStrategyDeploymentClientForNamespace struct {
	opClient operatorclient.ClientInterface
	opLister operatorlister.OperatorLister
	Namespace string
}

// Compile-time check that the concrete client satisfies the interface.
var _ InstallStrategyDeploymentInterface = &InstallStrategyDeploymentClientForNamespace{}
// NewInstallStrategyDeploymentClient returns a namespace-scoped client for
// carrying out install-strategy operations.
func NewInstallStrategyDeploymentClient(opClient operatorclient.ClientInterface, opLister operatorlister.OperatorLister, namespace string) InstallStrategyDeploymentInterface {
	return &InstallStrategyDeploymentClientForNamespace{
		opClient: opClient,
		opLister: opLister,
		Namespace: namespace,
	}
}
// GetOpClient returns the wrapped operator client.
func (c *InstallStrategyDeploymentClientForNamespace) GetOpClient() operatorclient.ClientInterface {
	return c.opClient
}

// GetOpLister returns the wrapped operator lister.
func (c *InstallStrategyDeploymentClientForNamespace) GetOpLister() operatorlister.OperatorLister {
	return c.opLister
}
// CreateRole creates role in the client's namespace.
func (c *InstallStrategyDeploymentClientForNamespace) CreateRole(role *rbacv1.Role) (*rbacv1.Role, error) {
	return c.opClient.KubernetesInterface().RbacV1().Roles(c.Namespace).Create(context.TODO(), role, metav1.CreateOptions{})
}

// CreateRoleBinding creates roleBinding in the client's namespace.
func (c *InstallStrategyDeploymentClientForNamespace) CreateRoleBinding(roleBinding *rbacv1.RoleBinding) (*rbacv1.RoleBinding, error) {
	return c.opClient.KubernetesInterface().RbacV1().RoleBindings(c.Namespace).Create(context.TODO(), roleBinding, metav1.CreateOptions{})
}
// EnsureServiceAccount makes sure serviceAccount exists in the client's
// namespace and carries an owner reference to owner. A nil input yields
// ErrNilObject. If the account is absent it is created (losing a creation
// race to another actor returns the caller's object unchanged); if it exists
// but is not yet owned by owner, a non-blocking owner reference is added and
// the account is updated.
func (c *InstallStrategyDeploymentClientForNamespace) EnsureServiceAccount(serviceAccount *corev1.ServiceAccount, owner ownerutil.Owner) (*corev1.ServiceAccount, error) {
	if serviceAccount == nil {
		return nil, ErrNilObject
	}
	foundAccount, err := c.opLister.CoreV1().ServiceAccountLister().ServiceAccounts(c.Namespace).Get(serviceAccount.Name)
	// NotFound is expected and handled below; anything else is fatal here.
	if err != nil && !apierrors.IsNotFound(err) {
		return nil, errors.Wrap(err, "checking for existing serviceacccount failed")
	}
	// create if not found
	if err != nil && apierrors.IsNotFound(err) {
		serviceAccount.SetNamespace(c.Namespace)
		createdAccount, err := c.opClient.CreateServiceAccount(serviceAccount)
		if err != nil && !apierrors.IsAlreadyExists(err) {
			return nil, errors.Wrap(err, "creating serviceacccount failed")
		}
		// Creation race: someone else created it between the lister read and
		// our create; treat the caller's object as the result.
		if apierrors.IsAlreadyExists(err) {
			return serviceAccount, nil
		}
		return createdAccount, nil
	}
	// if found, ensure ownerreferences
	if ownerutil.IsOwnedBy(foundAccount, owner) {
		return foundAccount, nil
	}
	// set owner if missing
	ownerutil.AddNonBlockingOwner(foundAccount, owner)
	return c.opClient.UpdateServiceAccount(foundAccount)
}
// CreateDeployment creates deployment via the wrapped operator client.
func (c *InstallStrategyDeploymentClientForNamespace) CreateDeployment(deployment *appsv1.Deployment) (*appsv1.Deployment, error) {
	return c.opClient.CreateDeployment(deployment)
}
// DeleteDeployment deletes the named deployment from the client's namespace
// with a one-second grace period and cascading foreground propagation.
func (c *InstallStrategyDeploymentClientForNamespace) DeleteDeployment(name string) error {
	foregroundDelete := metav1.DeletePropagationForeground // cascading delete
	// Note(tflannag): See https://bugzilla.redhat.com/show_bug.cgi?id=1939294.
	immediate := int64(1)
	immediateForegroundDelete := &metav1.DeleteOptions{GracePeriodSeconds: &immediate, PropagationPolicy: &foregroundDelete}
	return c.opClient.DeleteDeployment(c.Namespace, name, immediateForegroundDelete)
}
// CreateOrUpdateDeployment creates the deployment when it does not exist
// yet, otherwise updates it in place. Any lookup error other than NotFound
// is returned unchanged.
func (c *InstallStrategyDeploymentClientForNamespace) CreateOrUpdateDeployment(deployment *appsv1.Deployment) (*appsv1.Deployment, error) {
	_, err := c.opClient.GetDeployment(deployment.Namespace, deployment.Name)
	if err != nil {
		if !apierrors.IsNotFound(err) {
			return nil, err
		}
		created, err := c.CreateDeployment(deployment)
		if err != nil {
			return nil, err
		}
		return created, err
	}
	return c.opClient.KubernetesInterface().AppsV1().Deployments(deployment.GetNamespace()).Update(context.TODO(), deployment, metav1.UpdateOptions{})
}
// GetServiceAccountByName fetches the named service account from the lister
// cache for the client's namespace.
func (c *InstallStrategyDeploymentClientForNamespace) GetServiceAccountByName(serviceAccountName string) (*corev1.ServiceAccount, error) {
	return c.opLister.CoreV1().ServiceAccountLister().ServiceAccounts(c.Namespace).Get(serviceAccountName)
}
// FindAnyDeploymentsMatchingNames returns every deployment in the client's
// namespace whose name appears in depNames. Names with no matching
// deployment are skipped silently; any other lookup error aborts the scan
// and is returned alongside the deployments gathered so far.
func (c *InstallStrategyDeploymentClientForNamespace) FindAnyDeploymentsMatchingNames(depNames []string) ([]*appsv1.Deployment, error) {
	var deployments []*appsv1.Deployment
	for _, depName := range depNames {
		fetchedDep, err := c.opLister.AppsV1().DeploymentLister().Deployments(c.Namespace).Get(depName)
		if apierrors.IsNotFound(err) {
			// A missing deployment is not an error for this lookup.
			continue
		}
		if err != nil {
			return deployments, err
		}
		deployments = append(deployments, fetchedDep)
	}
	return deployments, nil
}
// FindAnyDeploymentsMatchingLabels lists the deployments in the client's
// namespace whose labels match the given selector.
func (c *InstallStrategyDeploymentClientForNamespace) FindAnyDeploymentsMatchingLabels(label labels.Selector) ([]*appsv1.Deployment, error) {
	deployments, err := c.opLister.AppsV1().DeploymentLister().Deployments(c.Namespace).List(label)
	if err != nil {
		return nil, err
	}
	return deployments, nil
}
|
package main
import (
"log"
"net"
"github.com/xtaci/gaio"
)
// main runs a minimal TCP echo server on top of the gaio event watcher:
// every completed read is echoed back to the client, and every completed
// write re-arms the next read on that connection.
func main() {
	ln, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("echo server listening on", ln.Addr())
	// One watcher (4096-byte internal buffer) drives all connections.
	w, err := gaio.CreateWatcher(4096)
	if err != nil {
		log.Fatal(err)
	}
	// Read and write completions arrive on separate channels.
	chRx := make(chan gaio.OpResult)
	chTx := make(chan gaio.OpResult)
	go func() {
		for {
			select {
			case res := <-chRx:
				// handle unexpected read error
				if res.Err != nil {
					log.Println("read error")
					w.StopWatch(res.Fd)
					continue
				}
				// handle connection close
				if res.Size == 0 {
					log.Println("client closed")
					w.StopWatch(res.Fd)
					continue
				}
				// write the data, we won't start to read again until write completes.
				// Copy out of res.Buffer because the watcher reuses it.
				buf := make([]byte, res.Size)
				copy(buf, res.Buffer[:res.Size])
				w.Write(res.Fd, buf, chTx) // NOTE(review): error return ignored — confirm acceptable
			case res := <-chTx:
				// handle unexpected write error
				if res.Err != nil {
					log.Println("write error")
					w.StopWatch(res.Fd)
					continue
				}
				// write complete, start read again
				w.Read(res.Fd, nil, chRx) // NOTE(review): error return ignored — confirm acceptable
			}
		}
	}()
	// Accept loop: register each new connection and kick off its first read.
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Println(err)
			return
		}
		fd, err := w.Watch(conn)
		if err != nil {
			log.Println(err)
			return
		}
		log.Println("new client", conn.RemoteAddr())
		// kick off the first read action on this conn
		err = w.Read(fd, nil, chRx)
		if err != nil {
			log.Println(err)
			return
		}
	}
}
|
package queries
import (
"database/sql"
"log"
"gitlab.com/semestr-6/projekt-grupowy/backend/obsluga-formularzy/configuration"
"gitlab.com/semestr-6/projekt-grupowy/backend/obsluga-formularzy/energy_resources/models"
)
// EDIT_ENERGY_RESOURCE_ATTRIBUTE_SQL updates one row of
// energy_resources."EnergyResourcesAttributes", identified by
// "EnergyResourceAttributeId" ($6).
const EDIT_ENERGY_RESOURCE_ATTRIBUTE_SQL = `
UPDATE energy_resources."EnergyResourcesAttributes"
SET
"EnergyResourceId" = $1
,"SourceId" = $2
,"CO2Value" = $3
,"NCVValue" = $4
,"CO2UnitId" = $5
WHERE
"EnergyResourceAttributeId" = $6`

// EDIT_ENERGY_RESOURCE_SQL updates one row of
// energy_resources."EnergyResources", identified by "EnergyResourceId" ($3).
const EDIT_ENERGY_RESOURCE_SQL = `
UPDATE energy_resources."EnergyResources"
SET
"EnergyResourceName" = $1
,"GUSResourceId" = $2
WHERE
"EnergyResourceId" = $3;`
// EditEnergyResourceAttribute updates one energy-resource attribute row and
// its parent energy-resource row inside a single transaction: both updates
// commit together or not at all.
//
// Fixes over the original: log.Fatal terminated the whole process on every
// error, making the named error result and the `return` statements after it
// unreachable — errors are now logged and returned to the caller; the
// transaction is rolled back on any early exit; and each prepared statement
// is closed (the first one previously leaked when the second was prepared).
func EditEnergyResourceAttribute(energyResourceEdit models.EnergyResourceAttributeEdit) (err error) {
	db, err := sql.Open("postgres", configuration.ConnectionString)
	if err != nil {
		log.Print(err)
		return
	}
	defer db.Close()
	transaction, err := db.Begin()
	if err != nil {
		log.Print(err)
		return
	}
	// Roll back on any early error; after a successful Commit this is a
	// no-op returning sql.ErrTxDone, which is deliberately ignored.
	defer transaction.Rollback()
	err = execInTx(transaction, EDIT_ENERGY_RESOURCE_ATTRIBUTE_SQL,
		energyResourceEdit.EnergyResourceId,
		energyResourceEdit.SourceId,
		energyResourceEdit.CO2Value,
		energyResourceEdit.NCVValue,
		energyResourceEdit.CO2UnitId,
		energyResourceEdit.EnergyResourceAttributeId,
	)
	if err != nil {
		log.Print(err)
		return
	}
	err = execInTx(transaction, EDIT_ENERGY_RESOURCE_SQL,
		energyResourceEdit.EnergyResourceName,
		energyResourceEdit.GUSResourceId,
		energyResourceEdit.EnergyResourceId,
	)
	if err != nil {
		log.Print(err)
		return
	}
	err = transaction.Commit()
	if err != nil {
		log.Print(err)
	}
	return
}

// execInTx prepares query on tx, executes it with args, and always closes
// the prepared statement before returning.
func execInTx(tx *sql.Tx, query string, args ...interface{}) error {
	stmt, err := tx.Prepare(query)
	if err != nil {
		return err
	}
	defer stmt.Close()
	_, err = stmt.Exec(args...)
	return err
}
|
package main
import (
"fmt"
kyu6 "github.com/imskojs/learn_go_lang/00-Toy_problems/codewars/6kyu"
)
// main runs the kyu6 interpreter over a sample program and prints the
// result (via an interface{} so Println handles whatever Parse returns).
func main() {
	var answer interface{} = kyu6.Parse("iiisdoso")
	fmt.Println(answer)
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-lambda-go/lambda"
"github.com/dachanh/daita-serverless/user_api/model"
"github.com/dachanh/daita-serverless/user_api/storage"
"github.com/google/uuid"
"net/http"
)
// main starts the Lambda runtime with Handler as the entry point.
func main() {
	lambda.Start(Handler)
}
// Handler processes the create-user API Gateway request: it assigns a fresh
// UUID and the default "normal" role, overlays any fields supplied in the
// JSON request body, persists the user, and replies with the user name.
// All failures are reported via the HTTP response; the Lambda error is
// always nil.
func Handler(req events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {
	// Defaults are set before unmarshalling so the body may override them.
	id := uuid.New()
	user := model.User{
		ID:   id.String(),
		Role: "normal",
	}
	fmt.Println(req.Body)
	err := json.Unmarshal([]byte(req.Body), &user)
	if err != nil {
		return response("Couldn't unmarshal ", http.StatusBadRequest), nil
	}
	// NOTE(review): storage.CrateUser looks like a typo for CreateUser, but
	// the name is declared in another package and cannot be changed here.
	err = storage.CrateUser(user)
	if err != nil {
		// Fixed typo in the user-facing message ("Could't" -> "Couldn't").
		return response("Couldn't Create User"+err.Error(), http.StatusBadRequest), nil
	}
	return response(user.UserName, http.StatusOK), nil
}
// response builds an APIGatewayProxyResponse carrying the given body and
// status code, with a permissive CORS header.
func response(body string, statusCode int) events.APIGatewayProxyResponse {
	return events.APIGatewayProxyResponse{
		StatusCode: statusCode,
		// body is already a string; the original's string(body) conversion
		// was a no-op and has been removed.
		Body: body,
		Headers: map[string]string{
			"Access-Control-Allow-Origin": "*",
		},
	}
}
|
package random
import (
crand "crypto/rand"
"fmt"
"math/big"
"math/rand"
"sync"
"time"
)
// NewMathematical returns a *Mathematical backed by a dedicated math/rand
// source seeded from the current time. (It does not call rand.Seed or touch
// the global math/rand state, contrary to an earlier version of this
// comment.)
func NewMathematical() *Mathematical {
	return &Mathematical{
		rand: rand.New(rand.NewSource(time.Now().UnixNano())), //nolint:gosec
		lock: &sync.Mutex{},
	}
}

// Mathematical is the random.Provider which uses math/rand and is COMPLETELY
// UNSAFE FOR PRODUCTION IN MOST SITUATIONS. Use random.Cryptographical
// instead. The mutex serialises access to the non-thread-safe *rand.Rand.
type Mathematical struct {
	rand *rand.Rand
	lock *sync.Mutex
}

// Read implements the io.Reader interface: it fills p with pseudo-random
// bytes under the lock. math/rand's Read always fills p fully and returns a
// nil error.
func (r *Mathematical) Read(p []byte) (n int, err error) {
	r.lock.Lock()
	defer r.lock.Unlock()
	return r.rand.Read(p)
}
// BytesErr returns random data as bytes with the standard random.DefaultN
// length (declared elsewhere in this package) and can contain any byte
// values (including unreadable byte values). If an error is returned from
// the random read this function returns it.
func (r *Mathematical) BytesErr() (data []byte, err error) {
	data = make([]byte, DefaultN)
	if _, err = r.Read(data); err != nil {
		return nil, err
	}
	return data, nil
}

// Bytes returns random data as bytes with the standard random.DefaultN length and can contain any byte values
// (including unreadable byte values). If an error is returned from the random read this function ignores it.
func (r *Mathematical) Bytes() (data []byte) {
	data, _ = r.BytesErr()
	return data
}

// BytesCustomErr returns random data as bytes with n length and can contain only byte values from the provided
// values. If n is less than 1 then DefaultN is used instead. If an error is returned from the random read this function
// returns it.
//
// NOTE(review): the charset selection below (data[i] % byte(t)) has modulo
// bias, and byte(t) truncates when len(charset) >= 256 (t == 256 would even
// make the divisor 0). Presumably acceptable for this explicitly non-secure
// provider — confirm before reusing elsewhere.
func (r *Mathematical) BytesCustomErr(n int, charset []byte) (data []byte, err error) {
	if n < 1 {
		n = DefaultN
	}
	data = make([]byte, n)
	if _, err = r.Read(data); err != nil {
		return nil, err
	}
	t := len(charset)
	for i := 0; i < n; i++ {
		data[i] = charset[data[i]%byte(t)]
	}
	return data, nil
}

// StringCustomErr is an overload of BytesCustomWithErr which takes a characters string and returns a string.
func (r *Mathematical) StringCustomErr(n int, characters string) (data string, err error) {
	var d []byte
	if d, err = r.BytesCustomErr(n, []byte(characters)); err != nil {
		return "", err
	}
	return string(d), nil
}

// BytesCustom returns random data as bytes with n length and can contain only byte values from the provided values.
// If n is less than 1 then DefaultN is used instead. If an error is returned from the random read this function
// ignores it.
func (r *Mathematical) BytesCustom(n int, charset []byte) (data []byte) {
	data, _ = r.BytesCustomErr(n, charset)
	return data
}

// StringCustom is an overload of BytesCustom which takes a characters string and returns a string.
func (r *Mathematical) StringCustom(n int, characters string) (data string) {
	return string(r.BytesCustom(n, []byte(characters)))
}
// Intn returns a random int in [0, n). Like math/rand, it panics when
// n <= 0; use IntnErr for an error-returning variant.
func (r *Mathematical) Intn(n int) int {
	r.lock.Lock()
	defer r.lock.Unlock()
	return r.rand.Intn(n)
}

// IntnErr returns a random int error combination with a maximum of n.
func (r *Mathematical) IntnErr(n int) (output int, err error) {
	if n <= 0 {
		return 0, fmt.Errorf("n must be more than 0")
	}
	return r.Intn(n), nil
}

// Int returns a random *big.Int with a maximum of max. On any error it
// returns -1 instead of propagating the error.
func (r *Mathematical) Int(max *big.Int) (value *big.Int) {
	var err error
	if value, err = r.IntErr(max); err != nil {
		return big.NewInt(-1)
	}
	return value
}

// IntErr returns a random *big.Int error combination with a maximum of max.
//
// NOTE(review): max.Int64() is only meaningful while max fits in 64 bits,
// and int(max.Int64()) can overflow on 32-bit platforms — confirm callers
// never pass values that large.
func (r *Mathematical) IntErr(max *big.Int) (value *big.Int, err error) {
	if max == nil {
		return nil, fmt.Errorf("max is required")
	}
	if max.Int64() <= 0 {
		return nil, fmt.Errorf("max must be 1 or more")
	}
	return big.NewInt(int64(r.Intn(int(max.Int64())))), nil
}

// Prime returns a number of the given bit length that is prime with high probability. Prime will return error for any
// error returned by rand.Read or if bits < 2. The candidate bytes come from
// this (non-cryptographic) provider via crypto/rand.Prime.
func (r *Mathematical) Prime(bits int) (prime *big.Int, err error) {
	return crand.Prime(r, bits)
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"text/tabwriter"
"time"
)
// main reads a Facebook Messenger export (message_1.json), prints the total
// message count, and renders a table of completed calls with caller,
// duration, and derived start/finish times. (FacebookExport, FMTCall,
// FMTGeneric and FMTShare are declared elsewhere in this package.)
func main() {
	raw, errRead := ioutil.ReadFile("message_1.json")
	if errRead != nil {
		panic(errRead)
	}
	var fbExport FacebookExport
	if errUm := json.Unmarshal(raw, &fbExport); errUm != nil {
		panic(errUm)
	}
	fmt.Printf("Message count: %d\n\n", len(fbExport.Messages))
	tw := tabwriter.NewWriter(os.Stdout, 1, 2, 2, ' ', tabwriter.TabIndent)
	fmt.Fprintln(tw, "Caller\tDuration\tStart time\tFinish time")
	for _, msg := range fbExport.Messages {
		switch msg.Type {
		case FMTCall:
			// Only completed (non-missed) calls are listed.
			if !msg.Missed {
				// The export timestamp marks the call's end; the start is
				// derived by subtracting the duration (seconds).
				finishTime := epochMsToHumanReadable(msg.TimestampMs)
				duration, errParse := time.ParseDuration(fmt.Sprintf("%ds", msg.CallDuration))
				if errParse != nil {
					panic(errParse)
				}
				startTime := finishTime.Add(-1 * duration)
				fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", msg.SenderName, duration, startTime, finishTime)
			}
		case FMTGeneric: // ignored message types
		case FMTShare:
		default:
			panic("unknown Facebook message type")
		}
	}
	tw.Flush()
}
// epochMsToHumanReadable converts a Unix timestamp expressed in milliseconds
// into a time.Time, truncating any sub-second remainder.
func epochMsToHumanReadable(epoch int64) time.Time {
	seconds := epoch / 1000
	return time.Unix(seconds, 0)
}
|
package stringutils
import (
"strconv"
"strings"
"github.com/axgle/mahonia"
)
// SplitLine splits str into lines, treating both '\n' and '\r' as
// separators; empty segments are dropped (strings.FieldsFunc semantics), so
// "\r\n" sequences do not produce blank entries.
func SplitLine(str string) []string {
	isLineBreak := func(r rune) bool {
		return r == '\n' || r == '\r'
	}
	return strings.FieldsFunc(str, isLineBreak)
}
// ParseMemory parses a memory size string such as "6M" or "7G" and returns
// the value expressed in units of K, plus whether parsing succeeded.
// Recognised (case-insensitive) suffixes: K, M, G, T; scanning stops at the
// first suffix found, so trailing characters are ignored. A missing suffix
// or a bare suffix with no leading digits fails.
//
// Fix: the multiplication now happens in int64 (the original multiplied two
// ints, which can overflow for T-sized values on 32-bit platforms).
func ParseMemory(s string) (int64, bool) {
	find := -1
	var mult int64 = 1
	for idx, c := range s {
		switch c {
		case 'k', 'K':
			mult = 1
			find = idx
		case 'm', 'M':
			mult = 1024
			find = idx
		case 'g', 'G':
			mult = 1024 * 1024
			find = idx
		case 't', 'T':
			mult = 1024 * 1024 * 1024
			find = idx
		}
		if find >= 0 {
			break
		}
	}
	// find == -1: no suffix at all; find == 0: suffix with no digits.
	if find <= 0 {
		return 0, false
	}
	num, err := strconv.Atoi(s[:find])
	if err != nil {
		return 0, false
	}
	return int64(num) * mult, true
}
// CharsetConvert converts src from srcCharset to dstCharset: the source is
// first decoded to UTF-8, then translated using a decoder for the
// destination charset.
//
// NOTE(review): using mahonia.NewDecoder for the *destination* charset looks
// suspect — producing dstCharset output would normally use
// mahonia.NewEncoder. Confirm against callers before changing.
func CharsetConvert(src string, srcCharset string, dstCharset string) (string, error) {
	srcDecoder := mahonia.NewDecoder(srcCharset)
	utf8String := srcDecoder.ConvertString(src)
	dstDecoder := mahonia.NewDecoder(dstCharset)
	_, dstString, err := dstDecoder.Translate([]byte(utf8String), true)
	return string(dstString), err
}
|
/*
Copyright AppsCode Inc. and Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"k8s.io/apimachinery/pkg/labels"
"kmodules.xyz/client-go/apiextensions"
"x-helm.dev/apimachinery/apis"
"x-helm.dev/apimachinery/crds"
)
// CustomResourceDefinition returns the CRD registered for the Plan resource.
func (_ Plan) CustomResourceDefinition() *apiextensions.CustomResourceDefinition {
	return crds.MustCustomResourceDefinition(GroupVersion.WithResource(ResourcePlans))
}
// ResetLabels replaces the plan's entire label set with the four well-known
// plan labels (any previously set labels are discarded).
func (plan *Plan) ResetLabels(planName, planID, prodID, phase string) {
	labelMap := map[string]string{
		apis.LabelPlanName:  planName,
		apis.LabelPlanID:    planID,
		apis.LabelProductID: prodID,
		apis.LabelPlanPhase: phase,
	}
	plan.ObjectMeta.SetLabels(labelMap)
}
// FormatLabels builds a label selector from the given values; empty strings
// are simply omitted from the selector set.
func (_ Plan) FormatLabels(planName, planID, prodID, phase string) labels.Selector {
	set := map[string]string{}
	add := func(key, value string) {
		if value != "" {
			set[key] = value
		}
	}
	add(apis.LabelPlanName, planName)
	add(apis.LabelPlanID, planID)
	add(apis.LabelProductID, prodID)
	add(apis.LabelPlanPhase, phase)
	return labels.SelectorFromSet(set)
}
|
package shamirutil
import (
"math/rand"
"github.com/renproject/secp256k1"
"github.com/renproject/shamir"
)
// RandomCommitment constructs and returns a random commitment with the given
// number of curve points.
func RandomCommitment(k int) shamir.Commitment {
	c := make(shamir.Commitment, k)
	for i := range c {
		c[i] = secp256k1.RandomPoint()
	}
	return c
}
// RandomIndices initialises and returns a slice of n indices, each of which
// is random. Often it is desired that each index is distinct. This function
// does not guarantee this, however the chance of two indices being equal is
// negligible for low n.
func RandomIndices(n int) []secp256k1.Fn {
	indices := make([]secp256k1.Fn, n)
	for i := range indices {
		indices[i] = secp256k1.RandomFn()
	}
	return indices
}
// SequentialIndices initialises and returns a slice of n indices, where the
// slice index i is equal to i+1 in the field.
func SequentialIndices(n int) []secp256k1.Fn {
	indices := make([]secp256k1.Fn, n)
	for i := range indices {
		indices[i].SetU16(uint16(i) + 1)
	}
	return indices
}
// Shuffle randomises the order of the given shares in the slice, in place.
func Shuffle(shares shamir.Shares) {
	rand.Shuffle(len(shares), func(i, j int) {
		shares[i], shares[j] = shares[j], shares[i]
	})
}
// AddDuplicateIndex picks two random (distinct) positions in the given slice
// of shares and sets the share index of the second to be equal to that of
// the first.
//
// Fix: with fewer than two shares no distinct pair exists — the original
// looped forever for a single share (first == second always) and panicked
// via rand.Intn(0) for an empty slice. Such slices are now left untouched.
func AddDuplicateIndex(shares shamir.Shares) {
	if len(shares) < 2 {
		return
	}
	// Pick two distinct array indices.
	first, second := rand.Intn(len(shares)), rand.Intn(len(shares))
	for first == second {
		second = rand.Intn(len(shares))
	}
	// Set the second share to have the same index as the first.
	shares[second].Index = shares[first].Index
}
// SharesAreConsistent returns true if the given shares are found to be
// consistent. Consistency is defined as all points lying on some polynomial
// of degree less than `k`. Fewer than k shares are vacuously consistent.
// The check opens every sliding window of k shares and requires each window
// to reconstruct the same secret as the first.
func SharesAreConsistent(shares shamir.Shares, k int) bool {
	if len(shares) < k {
		return true
	}
	secret := shamir.Open(shares[:k])
	for i := 1; i <= len(shares)-k; i++ {
		recon := shamir.Open(shares[i : i+k])
		if !recon.Eq(&secret) {
			return false
		}
	}
	return true
}
// PerturbIndex modifies the given verifiable share to have a random index.
func PerturbIndex(vs *shamir.VerifiableShare) {
	vs.Share.Index = secp256k1.RandomFn()
}

// PerturbValue modifies the given verifiable share to have a random value.
func PerturbValue(vs *shamir.VerifiableShare) {
	vs.Share.Value = secp256k1.RandomFn()
}

// PerturbDecommitment modifies the given verifiable share to have a random
// decommitment value.
func PerturbDecommitment(vs *shamir.VerifiableShare) {
	vs.Decommitment = secp256k1.RandomFn()
}
// VsharesAreConsistent is a wrapper around SharesAreConsistent for the
// VerifiableShares type.
func VsharesAreConsistent(
	vshares shamir.VerifiableShares,
	k int,
) bool {
	return SharesAreConsistent(vshares.Shares(), k)
}
|
package products
import (
"errors"
"net/http"
"time"
"cinemo.com/shoping-cart/internal/errorcode"
)
// Product representation in app
type Product struct {
	ID        int64     `json:"id,omitempty"`
	Name      string    `json:"name,omitempty"`
	Details   string    `json:"details,omitempty"`
	Amount    int64     `json:"amount,omitempty"`
	Image     string    `json:"image,omitempty"`
	CreatedAt time.Time `json:"-"`
	UpdatedAt time.Time `json:"-"`
}

// ProductDiscount describes a discount rule for a product, bounded by a
// quantity range and optionally tied to a combo package.
type ProductDiscount struct {
	ID             int64
	ProductID      int64
	MinQuantity    int
	MaxQuantity    int
	PriceType      PriceType
	ComboPackageID int64
	Discount       int
	Active         bool
	CreatedAt      time.Time
	UpdatedAt      time.Time
}

// Stock representation in app
type Stock struct {
	ID        int64
	ProductID int64
	Quantity  int
	CreatedAt time.Time
	UpdatedAt time.Time
}

// Order representation in app
type Order struct {
	ID        int64
	Name      string
	Details   string
	Amount    int
	CreatedAt time.Time
	UpdatedAt time.Time
}

// ComboPackage is a named bundle that groups products sold together.
type ComboPackage struct {
	ID        int64
	Name      string
	CreatedAt time.Time
	UpdatedAt time.Time
}

// ComboPackagedWith links a product with another product it is packaged with.
type ComboPackagedWith struct {
	ID                    int64
	ProductID             int64
	PackagedWithProductID int64
	CreatedAt             time.Time
	UpdatedAt             time.Time
}
// statusAndErrorCodeForServiceError maps a service-layer error onto an HTTP
// status plus an application error code: validation errors become 400,
// database errors become 500 with a DB-specific code, and everything else is
// a generic internal error.
func statusAndErrorCodeForServiceError(err error) (int, string) {
	var validationErr errorcode.ValidationError
	var dbErr errorcode.DBError
	switch {
	case errors.As(err, &validationErr):
		return http.StatusBadRequest, errorcode.ErrorsInRequestData
	case errors.As(err, &dbErr):
		return http.StatusInternalServerError, errorcode.DatabaseProcessError
	default:
		return http.StatusInternalServerError, errorcode.InternalError
	}
}
|
package main
import (
"fmt"
"time"
"github.com/yanzay/tbot"
)
// createPet is middleware for the pet-creation flow: it intercepts messages
// while the chat's pet is dead or still missing a type or a name, walking
// the player through (re)creation. Once the pet has both an emoji and a
// name, updates flow through unchanged to the wrapped handler f.
func (app *application) createPet(f tbot.UpdateHandler) tbot.UpdateHandler {
	return func(u *tbot.Update) {
		// Non-message updates are passed straight through.
		if u.Message == nil {
			f(u)
			return
		}
		m := u.Message
		pet := app.petStore.Get(m.Chat.ID)
		if !pet.Alive {
			// Reset storage with a fresh pet and offer to create a new one.
			app.petStore.Set(m.Chat.ID, NewPet(m.Chat.ID))
			buttons := tbot.Buttons([][]string{{"Create"}})
			content, err := contentFromTemplate(rootTemplate, pet)
			if err != nil {
				// NOTE(review): template failure is silently swallowed here —
				// confirm this is intentional.
				return
			}
			app.client.SendMessage(m.Chat.ID, content, tbot.OptParseModeMarkdown)
			app.client.SendMessage(m.Chat.ID, "Your pet is dead. Create new one?",
				tbot.OptReplyKeyboardMarkup(buttons))
			return
		}
		// Fully configured pet: creation is complete, delegate to f.
		if pet.Name != "" && pet.Emoji != "" {
			f(u)
			return
		}
		// Persist whatever the handlers below mutate on pet.
		defer app.petStore.Set(m.Chat.ID, pet)
		if pet.AskType {
			// The previous prompt asked for a pet type; map the reply text
			// onto the matching emoji.
			switch m.Text {
			case Chicken.String():
				pet.Emoji = Chicken.Emoji
			case Penguin.String():
				pet.Emoji = Penguin.Emoji
			case Dog.String():
				pet.Emoji = Dog.Emoji
			case Monkey.String():
				pet.Emoji = Monkey.Emoji
			case Fox.String():
				pet.Emoji = Fox.Emoji
			case Panda.String():
				pet.Emoji = Panda.Emoji
			case Pig.String():
				pet.Emoji = Pig.Emoji
			case Rabbit.String():
				pet.Emoji = Rabbit.Emoji
			case Mouse.String():
				pet.Emoji = Mouse.Emoji
			default:
				app.client.SendMessage(m.Chat.ID, fmt.Sprintf("Wrong pet type %s", m.Text))
			}
			pet.AskType = false
		}
		if pet.AskName {
			// The previous prompt asked for a name; the reply becomes the
			// name and the pet comes alive.
			pet.Name = m.Text
			pet.AskName = false
			pet.Born = time.Now()
			pet.Alive = true
			app.petStore.Set(pet.PlayerID, pet)
			app.rootHandler(m)
		}
		if pet.Emoji == "" {
			// Ask for the pet type with a one-time keyboard of options.
			pet.AskType = true
			pets := tbot.Buttons([][]string{
				{Chicken.String(), Penguin.String(), Dog.String()},
				{Monkey.String(), Fox.String(), Panda.String()},
				{Pig.String(), Rabbit.String(), Mouse.String()},
			})
			pets.OneTimeKeyboard = true
			app.client.SendMessage(m.Chat.ID, "Choose your pet:",
				tbot.OptReplyKeyboardMarkup(pets))
			return
		}
		if pet.Name == "" {
			// Ask for the pet's name next.
			pet.AskName = true
			app.client.SendMessage(m.Chat.ID, "Name your pet:")
			return
		}
	}
}
|
package knowledge
import "github.com/clems4ever/go-graphkb/internal/query"
// QueryWhereVisitor is a visitor for the where clauses; it collects the
// variable names encountered while visiting expressions.
type QueryWhereVisitor struct {
	ExpressionVisitorBase
	Variables  []string
	queryGraph *QueryGraph
}
// NewQueryWhereVisitor creates an instance of query where visitor bound to
// the given query graph.
func NewQueryWhereVisitor(queryGraph *QueryGraph) *QueryWhereVisitor {
	return &QueryWhereVisitor{
		queryGraph: queryGraph,
	}
}
// ParseExpression builds and returns the expression string for the given
// query expression via an ExpressionBuilder over the visitor's query graph.
// (The previous comment claimed it "returns whether the expression requires
// aggregation", which does not match the (string, error) signature.)
func (qwv *QueryWhereVisitor) ParseExpression(q *query.QueryExpression) (string, error) {
	expression, err := NewExpressionBuilder(qwv.queryGraph).Build(q)
	if err != nil {
		return "", err
	}
	return expression, nil
}
// OnVariable is the handler called when a variable is visited in the where
// clause; the name is recorded in Variables.
func (qwv *QueryWhereVisitor) OnVariable(name string) error {
	qwv.Variables = append(qwv.Variables, name)
	return nil
}
|
package main
import (
"fmt"
)
// main kicks off the recursive demo; the program deliberately ends in a
// panic once the recursion drives x down to 0 (see Mustf).
func main() {
	Mustf(3)
}
// Mustf prints f(x) and recurses toward zero. The "Must" prefix signals that
// the caller must not pass invalid input. When x reaches 0 the expression
// 0/x panics with an integer division by zero, and the deferred prints then
// run in reverse order as the stack unwinds — a panic/defer demonstration.
func Mustf(x int) {
	fmt.Printf("f(%d) \n", x+0/x) // panics if x == 0
	defer fmt.Printf("defer %d\n", x)
	Mustf(x - 1)
}
|
package matchmaker
import (
"encoding/json"
"log"
"net/http"
"github.com/garyburd/redigo/redis"
"github.com/gorilla/mux"
"github.com/pkg/errors"
predis "github.com/ryank90/matchmaker-sample/pkg/redis"
)
// version identifies this build in the startup log.
const version string = "alpha-0.0.1"

// Server is the http server instance.
type Server struct {
	srv      *http.Server
	p        *redis.Pool
	sessAddr string
}

// handler is the server-aware request-handler signature wrapped by
// Server.middleware.
type handler func(*Server, http.ResponseWriter, *http.Request) error
// NewServer returns the HTTP server instance, wired to a redis pool and the
// session service address, listening (once started) on hostAddr.
func NewServer(hostAddr, redisAddr string, sessionAddr string) *Server {
	s := &Server{
		p:        predis.NewPool(redisAddr),
		sessAddr: sessionAddr,
	}
	r := s.routes()
	s.srv = &http.Server{
		Handler: r,
		Addr:    hostAddr,
	}
	log.Printf("[log][server] connecting to server: %v on port: %v", version, hostAddr)
	log.Printf("[log][server] connecting to redis: %v", redisAddr)
	log.Printf("[log][server] connecting to sessions: %v", sessionAddr)
	return s
}
// Start blocks until redis is reachable, then serves HTTP until the server
// stops; any failure is returned wrapped with context.
func (s *Server) Start() error {
	err := predis.WaitForConnection(s.p)
	if err != nil {
		return errors.Wrap(err, "could not connect to redis")
	}
	return errors.Wrap(s.srv.ListenAndServe(), "error starting the server")
}
// routes wires up the HTTP endpoints: a redis readiness probe, game
// matchmaking (POST /game) and game lookup (GET /game/{id}).
func (s *Server) routes() http.Handler {
	router := mux.NewRouter()
	router.HandleFunc("/healthz", predis.NewReadinessProbe(s.p))
	router.HandleFunc("/game", s.middleware(gameHandler)).Methods("POST")
	router.HandleFunc("/game/{id}", s.middleware(getHandler)).Methods("GET")
	return router
}
// middleware adapts a fallible handler to http.HandlerFunc, logging any
// returned error and answering 500 with the error text.
func (s *Server) middleware(h handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if err := h(s, w, r); err != nil {
			log.Printf("[error][server] %+v", err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}
}
// gameHandler matches the caller into a game: it pops an open game from redis
// and attaches a session to it; when no open game exists it creates and
// stores a fresh one, answering 201 Created. The resulting game is written
// back as JSON either way.
func gameHandler(s *Server, w http.ResponseWriter, r *http.Request) error {
	c := s.p.Get()
	defer c.Close()
	log.Print("[info][route] match to a game")
	g, err := removeOpenGame(c)
	if err != nil {
		// BUG FIX: the original compared err against a freshly constructed
		// errors.New("game not found"); errors.New returns a distinct value
		// each call, so the comparison was always unequal and the create-game
		// branch was unreachable. Compare the message instead.
		// NOTE(review): ideally removeOpenGame would export a sentinel error
		// to check with errors.Cause — confirm against its implementation.
		if err.Error() != "game not found" {
			return err
		}
		g = NewGame()
		if err := addOpenGame(c, g); err != nil {
			return err
		}
		w.WriteHeader(http.StatusCreated)
	} else {
		g, err = s.generateSessionForGame(c, g)
		if err != nil {
			return err
		}
		if err := updateGame(c, g); err != nil {
			return err
		}
	}
	return errors.Wrap(json.NewEncoder(w).Encode(g), "error encoding to json")
}
// getHandler returns, as JSON, the game whose id is given in the URL path.
func getHandler(s *Server, w http.ResponseWriter, r *http.Request) error {
	c := s.p.Get()
	defer c.Close()
	vars := mux.Vars(r)
	id := vars["id"]
	log.Printf("[info][route] retrieving game: %v", id)
	// BUG FIX: the original passed "" instead of id, so the URL parameter was
	// extracted and logged but never used for the lookup.
	g, err := getGame(c, id)
	if err != nil {
		return err
	}
	return errors.Wrap(json.NewEncoder(w).Encode(g), "error encoding game to json")
}
|
package assert
// TestInterface abstracts the subset of *testing.T that assertion helpers
// need, so they can also be exercised against a fake in their own tests.
type TestInterface interface {
	Errorf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
}
|
package main
import (
"crypto/rsa"
"crypto/rand"
"crypto/md5"
"fmt"
"encoding/base64"
)
// crypt demonstrates RSA-OAEP: it generates a throwaway 1024-bit key pair,
// encrypts a short message with the public key, and decrypts it with the
// private key, printing the base64 ciphertext and the recovered plaintext.
// NOTE(review): 1024-bit RSA and MD5 are weak by modern standards; fine for
// a demo, do not use for real data.
func crypt() {
	// Create the private key (the public key is embedded in it).
	priv, err := rsa.GenerateKey(rand.Reader, 1024)
	if err != nil {
		fmt.Println("generate key:", err)
		return
	}
	// Extract the public key.
	pub := priv.PublicKey
	org := []byte("hello jason")
	cipherTxt, err := rsa.EncryptOAEP(md5.New(), rand.Reader, &pub, org, nil)
	if err != nil {
		fmt.Println("encrypt:", err)
		return
	}
	fmt.Println("密文为:", base64.StdEncoding.EncodeToString(cipherTxt))
	plainTxt, err := rsa.DecryptOAEP(md5.New(), rand.Reader, priv, cipherTxt, nil)
	if err != nil {
		fmt.Println("decrypt:", err)
		return
	}
	fmt.Println("明文为:", string(plainTxt))
}
// main runs the RSA-OAEP encrypt/decrypt demonstration.
func main() {
	crypt()
}
|
package spa
import (
"path"
"strings"
"github.com/labstack/echo/v4"
echomiddleware "github.com/labstack/echo/v4/middleware"
)
// DefaultIndexFilename is the filename used as the default index.
const DefaultIndexFilename = "index.html"

// IndexConfig defines the config for the middleware which determines the
// path to load the SPA index file from.
type IndexConfig struct {
	// Skipper defines a function to skip the middleware.
	echomiddleware.Skipper
	// DomainName is required to support redirects and branch builds.
	DomainName string
	// SubDomainMode can be enabled to serve branch builds from a folder in a
	// static files store or route, keyed by the request's sub-domain.
	SubDomainMode bool
	// IndexFilename is the name of the file used as the index; defaults to
	// DefaultIndexFilename when empty.
	IndexFilename string
}
// IndexWithConfig returns middleware that rewrites directory-style request
// paths (ending in "/") to the configured index file, optionally prefixing
// the path with a branch folder derived from the request's sub-domain when
// SubDomainMode is enabled. The rewrite mutates the request URL in place.
func IndexWithConfig(cfg IndexConfig) echo.MiddlewareFunc {
	if cfg.Skipper == nil {
		cfg.Skipper = echomiddleware.DefaultSkipper
	}
	if cfg.IndexFilename == "" {
		cfg.IndexFilename = DefaultIndexFilename
	}
	return func(next echo.HandlerFunc) echo.HandlerFunc {
		return func(c echo.Context) error {
			if cfg.Skipper(c) {
				return next(c)
			}
			requestURL := c.Request().URL
			requestPath := requestURL.Path
			prefix := ""
			if cfg.SubDomainMode {
				// Hostname() strips any port, unlike Host which can carry
				// "hostname:port".
				prefix = extractPathPrefix(cfg.DomainName, requestURL.Hostname())
			}
			if strings.HasSuffix(requestPath, "/") {
				// Directory-style request: serve the index file instead.
				requestURL.Path = path.Join("/", prefix, cfg.IndexFilename)
			} else {
				requestURL.Path = path.Join("/", prefix, requestPath)
			}
			return next(c)
		}
	}
}
// extractPathPrefix derives the sub-domain portion of host relative to
// domainName, e.g. ("example.com", "branch.example.com") -> "branch".
// It returns "" when host is the domain itself or not a sub-domain of it.
func extractPathPrefix(domainName, host string) string {
	if host == domainName {
		return ""
	}
	sub := strings.TrimSuffix(host, domainName)
	if strings.HasSuffix(sub, ".") {
		return strings.TrimSuffix(sub, ".")
	}
	return ""
}
|
package bean
import (
"github.com/astaxie/beego/orm"
"fmt"
"time"
)
// AchievementAttr is the ORM row for a numeric achievement attribute. The
// primary key is the attribute name concatenated with the role id.
type AchievementAttr struct {
	AchieveName  string `orm:"pk;column(achieveName)"` // attribute name + role id
	AchieveValue int32  `orm:"column(achieveValue)"`   // current attribute value
}
// AchievementUnLock is the ORM row recording when an achievement was
// unlocked. The primary key is the achievement name concatenated with the
// role id.
// NOTE(review): Date maps to column "data" — looks like a typo for "date";
// confirm against the actual schema before changing either side.
type AchievementUnLock struct {
	AchieveName string `orm:"pk;column(achieveName)"` // achievement name + role id
	Date        string `orm:"column(data)"`           // unlock date string (YYYY/MM/DD)
}
// TableName maps AchievementAttr to the "achievement" table.
func (self *AchievementAttr) TableName() string {
	return "achievement"
}
// ---------------------------------------------------------------------------
// GetAchievement returns the stored value of the achievement attribute for
// the given role, or 0 when the row does not exist or the read fails.
func GetAchievement(uid uint32, name string) int32 {
	record := AchievementAttr{AchieveName: fmt.Sprintf("%s%d", name, uid)}
	if err := defaultOrm.Read(&record); err != nil {
		// Both "no rows" and real read errors fall back to zero.
		return 0
	}
	return record.AchieveValue
}
// SetAchievement stores value for the achievement attribute of the given
// role. An existing row is only updated when value is strictly greater than
// the stored one; a missing row is inserted. Persistence errors are ignored
// (best effort), matching the original behaviour.
func SetAchievement(uid uint32, name string, value int32) {
	record := AchievementAttr{AchieveName: fmt.Sprintf("%s%d", name, uid)}
	switch err := defaultOrm.Read(&record); err {
	case nil:
		if record.AchieveValue < value {
			record.AchieveValue = value
			defaultOrm.Update(&record)
		}
	case orm.ErrNoRows:
		record.AchieveValue = value
		defaultOrm.Insert(&record)
	}
}
// UnLockAchievement records today's local date as the unlock date of the
// named achievement for the given role and returns that date string
// (format YYYY/MM/DD). The write is best effort; errors are ignored.
func UnLockAchievement(uid uint32, name string) string {
	dateString := time.Now().Format("2006/01/02")
	record := AchievementUnLock{
		AchieveName: fmt.Sprintf("%s%d", name, uid),
		Date:        dateString,
	}
	defaultOrm.InsertOrUpdate(&record)
	return dateString
}
// GetUnLockAchieveDate returns the recorded unlock date string of the named
// achievement for the given role, or "" when no record exists or the read
// fails.
func GetUnLockAchieveDate(uid uint32, name string) string {
	record := AchievementUnLock{AchieveName: fmt.Sprintf("%s%d", name, uid)}
	if err := defaultOrm.Read(&record); err != nil {
		// Missing rows and read errors both map to the empty string.
		return ""
	}
	return record.Date
}
|
package docs_test
import (
"path/filepath"
"runtime"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/werf/werf/integration/pkg/utils"
)
// Smoke test: regenerating the CLI docs into a committed git work tree must
// produce no diff, i.e. the checked-in docs are up to date.
var _ = Describe("docs", func() {
	BeforeEach(func() {
		// The fixture layout relies on symlinks/paths not reliable on Windows.
		if runtime.GOOS == "windows" {
			Skip("skip on windows")
		}
		// Resolve symlinks so the copied fixture matches what git will see.
		resolvedExpectationPath, err := filepath.EvalSymlinks(utils.FixturePath("cli", "docs"))
		Ω(err).ShouldNot(HaveOccurred())
		utils.CopyIn(resolvedExpectationPath, filepath.Join(SuiteData.TestDirPath, "docs"))
		// Commit the fixtures so the later `git diff HEAD` only reflects what
		// the docs command changed.
		utils.RunSucceedCommand(
			SuiteData.TestDirPath,
			"git",
			"init",
		)
		utils.RunSucceedCommand(
			SuiteData.TestDirPath,
			"git",
			"add", "-A",
		)
		utils.RunSucceedCommand(
			SuiteData.TestDirPath,
			"git",
			"commit", "-m", "+",
		)
		// Neutralise docker/werf environment that could leak into the output.
		SuiteData.Stubs.UnsetEnv("DOCKER_CONFIG")
		SuiteData.Stubs.UnsetEnv("WERF_DOCKER_CONFIG")
		SuiteData.Stubs.SetEnv("WERF_LOG_TERMINAL_WIDTH", "100")
	})
	It("should be without changes", func() {
		// Regenerate the docs in place.
		_, _ = utils.RunCommandWithOptions(
			SuiteData.TestDirPath,
			SuiteData.WerfBinPath,
			[]string{"docs", "--dir", SuiteData.TestDirPath},
			utils.RunCommandOptions{ShouldSucceed: true, ExtraEnv: []string{"HOME=~", "WERF_PROJECT_NAME="}},
		)
		// Fail if regeneration produced any diff against the committed state.
		utils.RunSucceedCommand(
			SuiteData.TestDirPath,
			"git",
			"add", "-A",
		)
		utils.RunSucceedCommand(
			SuiteData.TestDirPath,
			"git",
			"diff", "--exit-code", "HEAD", "--",
		)
	})
})
|
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package finalizer
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/types"
)
// Table-driven tests for the sliceContains and sliceRemove helpers.
var _ = Describe("Util", func() {
	// Each builder pairs a fixed (slice, element) input with the
	// caller-supplied expectation for that case.
	WithMatchingElements := func(expected interface{}) TableEntry {
		return Entry(
			"with matching elements",
			[]string{"foo", "bar", "foo", "baz"}, "foo", expected,
		)
	}
	WithoutMatchingElements := func(expected interface{}) TableEntry {
		return Entry(
			"without matching elements",
			[]string{"foo", "bar"}, "baz", expected,
		)
	}
	WithEmptySlice := func(expected interface{}) TableEntry {
		return Entry(
			"with empty slice",
			[]string{}, "foo", expected,
		)
	}
	WithNilSlice := func(expected interface{}) TableEntry {
		return Entry(
			"with nil slice",
			[]string(nil), "foo", expected,
		)
	}
	Describe("sliceContains", func() {
		DescribeTable("checking if element is in slice",
			func(slice []string, element string, match types.GomegaMatcher) {
				Expect(sliceContains(slice, element)).To(match)
			},
			WithMatchingElements(BeTrue()),
			WithoutMatchingElements(BeFalse()),
			WithEmptySlice(BeFalse()),
			WithNilSlice(BeFalse()),
		)
	})
	Describe("sliceRemove", func() {
		DescribeTable("removing an element from slice",
			func(slice []string, element string, expected []string) {
				Expect(sliceRemove(slice, element)).To(Equal(expected))
			},
			// Note the expectations below: empty/nil inputs are expected to
			// come back as nil, not as an empty non-nil slice.
			WithMatchingElements([]string{"bar", "baz"}),
			WithoutMatchingElements([]string{"foo", "bar"}),
			WithEmptySlice([]string(nil)),
			WithNilSlice([]string(nil)),
		)
	})
})
|
package ircserver
import "gopkg.in/sorcix/irc.v2"
// init registers the server-to-server KICK command, which requires at least
// two parameters (channel and nick).
func init() {
	Commands["server_KICK"] = &ircCommand{
		Func:      (*IRCServer).cmdServerKick,
		MinParams: 2,
	}
}
// cmdServerKick handles a KICK originating from a services server,
// e.g. “:ChanServ KICK #noname-ev blArgh_ :get out”.
// Params[0] is the channel, Params[1] the nick to kick.
func (i *IRCServer) cmdServerKick(s *Session, reply *Replyctx, msg *irc.Message) {
	channelname := msg.Params[0]
	c, ok := i.channels[ChanToLower(channelname)]
	if !ok {
		// Unknown channel: report the error back to services only.
		i.sendServices(reply, &irc.Message{
			Prefix:  i.ServerPrefix,
			Command: irc.ERR_NOSUCHCHANNEL,
			Params:  []string{msg.Prefix.Name, channelname, "No such nick/channel"},
		})
		return
	}
	if _, ok := c.nicks[NickToLower(msg.Params[1])]; !ok {
		// The target user is not in the channel.
		i.sendServices(reply, &irc.Message{
			Prefix:  i.ServerPrefix,
			Command: irc.ERR_USERNOTINCHANNEL,
			Params:  []string{msg.Prefix.Name, msg.Params[1], channelname, "They aren't on that channel"},
		})
		return
	}
	// Must exist since c.nicks contains the nick.
	session, _ := i.nicks[NickToLower(msg.Params[1])]
	// Announce the KICK to the channel members and echo it to services.
	i.sendServices(reply, i.sendChannel(c, reply, &irc.Message{
		Prefix: &irc.Prefix{
			Name: msg.Prefix.Name,
			User: "services",
			Host: "services",
		},
		Command: irc.KICK,
		Params:  []string{msg.Params[0], msg.Params[1], msg.Trailing()},
	}))
	// TODO(secure): reduce code duplication with cmdPart()
	// Remove the membership on both sides and drop the channel if empty.
	delete(c.nicks, NickToLower(msg.Params[1]))
	i.maybeDeleteChannelLocked(c)
	delete(session.Channels, ChanToLower(channelname))
}
|
/*
Given a string, return a new string where "not " has been added to the front. However, if the string already begins with "not", return the string unchanged.
*/
package main
import (
"fmt"
"strings"
)
// not_string prefixes s with "not " unless s already begins with "not", in
// which case s is returned unchanged.
func not_string(s string) string {
	if strings.HasPrefix(s, "not") {
		return s
	}
	return "not " + s
}
// main runs four self-checks against not_string and prints "OK" only when
// all of them pass.
func main() {
	var status int = 0
	if not_string("candy") == "not candy" {
		status += 1
	}
	if not_string("x") == "not x" {
		status += 1
	}
	if not_string("not bad") == "not bad" {
		status += 1
	}
	if not_string("notnot") == "notnot" {
		status += 1
	}
	// BUG FIX: there are four checks, so success is status == 4. The original
	// compared against 3 and printed "NOT OK" even when every check passed.
	if status == 4 {
		fmt.Println("OK")
	} else {
		fmt.Println("NOT OK")
	}
}
|
package cli
import (
"testing"
"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
"github.com/cosmos/cosmos-sdk/testutil/testdata"
"github.com/stretchr/testify/assert"
"github.com/cosmos/cosmos-sdk/client"
sdk "github.com/cosmos/cosmos-sdk/types"
)
// Test_splitAndCall_NoMessages checks that splitting a nil message list is a
// no-op that returns no error.
func Test_splitAndCall_NoMessages(t *testing.T) {
	clientCtx := client.Context{}
	err := newSplitAndApply(nil, clientCtx, nil, 10)
	assert.NoError(t, err, "")
}
// Test_splitAndCall_Splitting checks that five messages split with chunk
// size 2 are delivered in exactly three calls of sizes 2, 2 and 1.
func Test_splitAndCall_Splitting(t *testing.T) {
	clientCtx := client.Context{}
	addr := sdk.AccAddress(secp256k1.GenPrivKey().PubKey().Address())
	// Add five messages
	msgs := []sdk.Msg{
		testdata.NewTestMsg(addr),
		testdata.NewTestMsg(addr),
		testdata.NewTestMsg(addr),
		testdata.NewTestMsg(addr),
		testdata.NewTestMsg(addr),
	}
	// Keep track of number of calls
	const chunkSize = 2
	callCount := 0
	err := newSplitAndApply(
		func(clientCtx client.Context, msgs ...sdk.Msg) error {
			callCount++
			assert.NotNil(t, clientCtx)
			assert.NotNil(t, msgs)
			// The first two chunks carry 2 messages, the final one carries 1.
			if callCount < 3 {
				assert.Equal(t, len(msgs), 2)
			} else {
				assert.Equal(t, len(msgs), 1)
			}
			return nil
		},
		clientCtx, msgs, chunkSize)
	assert.NoError(t, err, "")
	assert.Equal(t, 3, callCount)
}
|
// Package engine provides the all-encompassing interface to the Orbit
// background operations. This includes replicated state management, gossip
// control, and ensuring that the state is maintained for the respective nodes.
package engine
import (
"log"
"os"
"path/filepath"
)
// Engine is the primary all-encompassing struct for the primary Orbit
// operations. This means that all of the top-level features such as the
// replicated state store and REST API are located here.
type Engine struct {
	APIServer  *APIServer // REST API front end
	RPCServer  *RPCServer // inter-node RPC endpoint
	Watcher    *Watcher   // background state watcher
	Store      *Store     // replicated state store
	Status     Status     // current lifecycle state
	DataPath   string     // root directory for engine data (e.g. raft/)
	ConfigFile string     // config file name
}
// New creates a new engine instance with default paths and wires up its
// sub-components: store, API server, RPC server and watcher.
func New() *Engine {
	engine := &Engine{
		Status:     StatusInit,
		DataPath:   "/var/orbit",
		ConfigFile: "config.json",
	}
	engine.Store = NewStore(engine)
	engine.APIServer = NewAPIServer(engine)
	engine.RPCServer = NewRPCServer(engine)
	engine.Watcher = NewWatcher(engine)
	return engine
}
// Status is an enum describing the engine's lifecycle state.
type Status uint8

const (
	// StatusInit is the first opening state of the engine and means that the
	// config has not yet been loaded.
	StatusInit Status = iota
	// StatusSetup is when the engine has not yet been bootstrapped.
	StatusSetup
	// StatusReady is when the engine has been successfully bootstrapped, but
	// before it has been fully configured with a domain name or user.
	StatusReady
	// StatusRunning is when the store has been successfully bootstrapped and
	// a user has set themselves up fully.
	StatusRunning
)

// String returns the lower-case name of the status, or "" for any value
// outside the known range.
func (s Status) String() string {
	names := [...]string{"init", "setup", "ready", "running"}
	if int(s) < len(names) {
		return names[s]
	}
	return ""
}
// SetupStatus reports the current setup (mode, stage) pair — e.g.
// ("bootstrap", "domain") — derived from the engine status and the
// replicated store state.
func (e *Engine) SetupStatus() (mode, stage string) {
	// If the engine is in running mode, then there is nothing to do.
	if e.Status == StatusRunning {
		return "complete", "complete"
	}
	// If the engine is in setup mode, then there is nothing to do.
	if e.Status == StatusSetup {
		return "bootstrap", "welcome"
	}
	// If the engine is not ready, don't do anything. It's only if the engine is
	// ready that all of the following conditions apply about the setup location.
	// (Named results: this returns ("", "").)
	if e.Status != StatusReady {
		return
	}
	// If there is only one node that the cluster is aware of, it means that this
	// node must be the one responsible for establishing the cluster. Otherwise,
	// it means that this node must be joining the cluster, which means that
	// because the engine is not running, they must be in the node configuration
	// stage.
	if len(e.Store.state.Nodes) > 1 {
		return "join", "node"
	}
	// If there are no routers, that means that we must need to set up the domain
	// that is used for routing all Orbit traffic.
	if len(e.Store.state.Routers) == 0 {
		return "bootstrap", "domain"
	}
	// No users yet: the bootstrap user still has to be created.
	if len(e.Store.state.Users) == 0 {
		return "bootstrap", "user"
	}
	// If the single node that is in the cluster does not have any roles, we can
	// assume that this hasn't yet been configured and so this is the final stage
	// of the system.
	if !e.Store.state.Nodes[0].HasRole(RoleManager) {
		return "bootstrap", "node"
	}
	// Otherwise, the store state must be complete.
	return "bootstrap", "complete"
}
// Start starts the engine and all of its subcomponents. This is dependent on
// state, so for example if the cluster still has yet to be set up, then it
// won't start the store. It blocks until one of the monitored components
// reports an error.
func (e *Engine) Start() error {
	log.Println("[INFO] engine: Starting...")
	errCh := make(chan error) // Main error channel closure
	// Ensure that required directories exist. This also involves creating a
	// blank directory for the root directory just for the sake of completion.
	dirs := []string{"", "raft"}
	for _, dir := range dirs {
		path := filepath.Join(e.DataPath, dir)
		if _, err := os.Stat(path); !os.IsNotExist(err) {
			continue
		}
		log.Printf("[INFO] engine: Creating new directory %s", path)
		// BUG FIX: the directories were created with mode 0644 (no execute
		// bit), which makes them untraversable, and the error was silently
		// ignored. Use 0755 and surface any failure.
		if err := os.MkdirAll(path, 0755); err != nil {
			return err
		}
	}
	// Read in the config.
	if err := e.readConfig(); err != nil {
		return err
	}
	// Start the API server.
	go func() { errCh <- e.APIServer.Start() }()
	// If the engine is ready, start the RPC server and the store.
	if e.Status >= StatusReady {
		go func() { errCh <- e.RPCServer.Start() }()
		go func() { errCh <- e.Store.Open() }()
	}
	// Monitor started progress on each component.
	go func() {
		<-e.APIServer.Started()
		if e.Status >= StatusReady {
			<-e.Store.Started()
			<-e.RPCServer.Started()
		}
		log.Println("[INFO] engine: Started")
	}()
	// Start the engine watcher process to ensure the node updates its state
	// correctly. This does not need to be monitored.
	go e.Watcher.Start()
	return <-errCh
}
// Stop will stop the operation of the engine instance.
// NOTE(review): currently only logs; no sub-component shutdown is performed.
func (e *Engine) Stop() error {
	log.Println("[INFO] engine: Stopping...")
	log.Println("[INFO] engine: Stopped")
	return nil
}
// Reset will reset all engine properties and wipe all local data files.
// NOTE(review): currently a stub — it performs no work and always succeeds.
func (e *Engine) Reset() error {
	return nil
}
|
/*
Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package tests - fake classes for api testing.
*/
package tests
// FakeClient is a test double for the REST client: each verb records the
// arguments it was called with and returns the canned response/error the
// test pre-loaded into the matching fields.
type FakeClient struct {
	// last Get call and its canned result
	GetURL      string
	GetType     string
	GetResponse []byte
	GetError    error
	// last Delete call and its canned result
	DeleteURL      string
	DeleteType     string
	DeleteData     []byte
	DeleteResponse []byte
	DeleteError    error
	// last Post call and its canned result
	PostURL      string
	PostType     string
	PostData     []byte
	PostResponse []byte
	PostError    error
	// last Put call and its canned result
	PutURL      string
	PutType     string
	PutData     []byte
	PutResponse []byte
	PutError    error
	// debug flag as set via SetDebug
	DebugState bool
}

// Get records the request and returns the canned GET result.
func (cl *FakeClient) Get(url, acceptedContentType string) ([]byte, error) {
	cl.GetURL, cl.GetType = url, acceptedContentType
	return cl.GetResponse, cl.GetError
}

// Delete records the request and returns the canned DELETE result.
func (cl *FakeClient) Delete(url, providedContentType string, data []byte) ([]byte, error) {
	cl.DeleteURL, cl.DeleteType, cl.DeleteData = url, providedContentType, data
	return cl.DeleteResponse, cl.DeleteError
}

// Post records the request and returns the canned POST result.
func (cl *FakeClient) Post(url, providedContentType string, data []byte) ([]byte, error) {
	cl.PostURL, cl.PostType, cl.PostData = url, providedContentType, data
	return cl.PostResponse, cl.PostError
}

// Put records the request and returns the canned PUT result.
func (cl *FakeClient) Put(url, providedContentType string, data []byte) ([]byte, error) {
	cl.PutURL, cl.PutType, cl.PutData = url, providedContentType, data
	return cl.PutResponse, cl.PutError
}

// SetDebug stores the debug flag.
func (cl *FakeClient) SetDebug(state bool) {
	cl.DebugState = state
}

// GetDebug reports the stored debug flag.
func (cl *FakeClient) GetDebug() bool {
	return cl.DebugState
}
|
package mongomodel
import (
"time"
)
// ShutdownModel aggregates a day's shutdown counters together with the
// mapping from raw shutdown type codes to view indices.
type ShutdownModel struct {
	View    *DailyShutDownView // per-reason counters for the day
	Typemap map[int]int        // raw type code (multiples of 10) -> view index
}
// NewShutdownModel builds a ShutdownModel for the given date. The Typemap
// translates the raw shutdown type codes 0, 10, 20, ..., 140 to their view
// indices 0..14.
func NewShutdownModel(date time.Time) *ShutdownModel {
	model := &ShutdownModel{
		View:    newDailyShutDownView(date),
		Typemap: make(map[int]int, 15),
	}
	for i := 0; i <= 14; i++ {
		model.Typemap[i*10] = i
	}
	return model
}
// DailyShutDownView holds one day's shutdown counters, one item per known
// shutdown reason.
type DailyShutDownView struct {
	Date     time.Time // day the counters belong to
	ViewData []item    // per-reason counters, ordered by type code
}
// newDailyShutDownView returns a DailyShutDownView for date with one
// zero-valued counter per known shutdown reason, in type-code order.
func newDailyShutDownView(date time.Time) *DailyShutDownView {
	keys := []string{
		"Unknown", "StartButtonPressHold", "ResetAp", "BatteryVCT",
		"UpdateFirmware", "BatteryForceShutdown", "ScheduledReset",
		"SleepTimeout", "BatteryFault", "PrepareShutdownTimeout",
		"DockDisconnectedAccident", "McuProxyUnableToStart",
		"DockDisconnectedPower", "MTShutdownTimeout", "MidnightResetTimeout",
	}
	view := &DailyShutDownView{
		Date:     date,
		ViewData: make([]item, 0, len(keys)),
	}
	for _, key := range keys {
		view.ViewData = append(view.ViewData, item{Key: key, Value: 0})
	}
	return view
}
|
package structs
import "time"
// BaseStruct carries the common xorm columns shared by all tables: an
// auto-increment primary key, create/update/delete audit fields, and a
// status flag defaulting to 1.
type BaseStruct struct {
	Id          int64     `xorm:"pk autoincr"`
	CreateTime  time.Time `xorm:"created"` // set automatically on insert
	CreateUser  int64
	UpdateTime  time.Time `xorm:"updated"` // set automatically on update
	UpdateUser  int64
	DeletedTime time.Time `xorm:"deleted"` // soft-delete marker
	Status      int64     `xorm:"default 1"`
}
package btree
type TreeNode struct {
Val int
Left *TreeNode
Right *TreeNode
}
func zigzagLevelOrder(root *TreeNode) [][]int {
var levelOrder [][]int
if root == nil {
return levelOrder
}
var queue []*TreeNode
queue = append(queue, root)
count := 0
for len(queue) > 0 {
count++
var level []int
size := len(queue)
for i := 0; i < size; i++ {
node := queue[0]
queue = queue[1:]
level = append(level, node.Val)
if node.Left != nil {
queue = append(queue, node.Left)
}
if node.Right != nil {
queue = append(queue, node.Right)
}
}
if count%2 == 0 {
n := len(level)
for i := 0; i < n/2; i++ {
level[i], level[n-1-i] = level[n-1-i], level[i]
}
}
levelOrder = append(levelOrder, level)
}
return levelOrder
}
|
// Copyright 2020 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mockstore
import (
"github.com/pingcap/errors"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/mockstore/mockcopr"
"github.com/pingcap/tidb/store/mockstore/mockstorage"
"github.com/tikv/client-go/v2/testutils"
"github.com/tikv/client-go/v2/tikv"
)
// newMockTikvStore creates a mocked tikv store; the path is the file path to
// store the data. If path is an empty string, a memory storage will be created.
func newMockTikvStore(opt *mockOptions) (kv.Storage, error) {
	// Build the in-process mock TiKV cluster and PD client.
	client, cluster, pdClient, err := testutils.NewMockTiKV(opt.path, mockcopr.NewCoprRPCHandler())
	if err != nil {
		return nil, errors.Trace(err)
	}
	// Let the caller shape the mock cluster before the store is created.
	opt.clusterInspector(cluster)
	kvstore, err := tikv.NewTestTiKVStore(newClientRedirector(client), pdClient, opt.clientHijacker, opt.pdClientHijacker, opt.txnLocalLatches)
	if err != nil {
		return nil, err
	}
	return mockstorage.NewMockStorage(kvstore)
}
|
package main
import (
"fmt"
"log"
"os"
"runtime"
"sync"
"gSSHToInnobackupex/gotossh"
"github.com/WangJiemin/gocomm"
)
var (
	// SSH connection settings for the remote host.
	sshUser, sshPass, sshHost, sshPort string
	// MySQL connection settings passed to xtrabackup.
	xtrabackupUser, xtrabackupPass, xtrabackupHost, xtrabackupPort string
	// xtrabackup invocation settings: config file, temp dir, target dir, binary.
	xtrabackupConfig, xtrabackupTmpDir, xtrabackupBackupDir, xtrabackupCommand string
	// wg tracks the background SSH goroutines.
	wg sync.WaitGroup
)
// checkErr aborts the program via log.Fatal when err is non-nil; it is a
// no-op for a nil error.
func checkErr(err error) {
	if err == nil {
		return
	}
	log.Fatal(err)
}
// initialize copies the parsed config map into the package-level settings.
// Missing keys leave the corresponding setting empty.
func initialize(config map[string]string) {
	sshUser = config["sshuser"]
	sshPass = config["sshpass"]
	sshHost = config["sshhost"]
	sshPort = config["sshport"]
	xtrabackupUser = config["xtrabackupuser"]
	xtrabackupPass = config["xtrabackuppass"]
	xtrabackupHost = config["xtrabackuphost"]
	xtrabackupPort = config["xtrabackupport"]
	xtrabackupConfig = config["xtrabackupconfig"]
	xtrabackupTmpDir = config["xtrabackuptmpdir"]
	xtrabackupBackupDir = config["xtrabackupbackupdir"]
	xtrabackupCommand = config["xtrabackupcommand"]
}
func RunSSH() {
sshExceSession := gotossh.New(sshHost, sshUser, sshPass, sshPort)
defer sshExceSession.Close()
innobackupexCommand := fmt.Sprintf("nohup %s --defaults-file=%s --user=%s --password=%s --host=%s --port=%s"+
" --stream=xbstream --encrypt-threads=6 --compress --compress-threads=8 --tmpdir=%s"+
" %s &", xtrabackupCommand, xtrabackupConfig, xtrabackupUser, xtrabackupPass, xtrabackupHost, xtrabackupPort, xtrabackupTmpDir, xtrabackupBackupDir)
fmt.Println(innobackupexCommand)
exceErr := sshExceSession.Start(innobackupexCommand)
checkErr(exceErr)
wg.Done()
}
func RunSSHTerminal() {
sshExceSession := gotossh.New(sshHost, sshUser, sshPass, sshPort)
sshExceSession.RunTerminal("top", os.Stdout, os.Stdin)
wg.Done()
}
// main reads the app config, then runs an interactive SSH terminal session
// and waits for it to finish.
func main() {
	conf := gocomm.ReadConfig("./conf/app.cnf")
	initialize(conf)
	// BUG FIX: runtime.GOMAXPROCS requires an int argument; calling it with
	// none does not compile. Use all available CPUs.
	runtime.GOMAXPROCS(runtime.NumCPU())
	//wg.Add(1)
	//go RunSSH()
	//wg.Wait()
	wg.Add(1)
	go RunSSHTerminal()
	wg.Wait()
}
|
package jwt
import (
"time"
"github.com/dgrijalva/jwt-go"
)
const (
	IDKey     = "id"      // claim: unique user identifier
	ExpireKey = "expire"  // claim: expiry unix timestamp
	SignTSKey = "sign_ts" // claim: token issue unix timestamp
)

// JwtSigner issues, verifies and refreshes HMAC-signed JWTs.
type JwtSigner struct {
	// signing algorithm - possible values are HS256, HS384, HS512
	// Optional, default is HS256.
	SignAlgorithm string
	Key           []byte        // HMAC secret; required
	Timeout       time.Duration // token lifetime; defaults to one hour
	MaxRefresh    time.Duration // extra window after expiry during which refresh is allowed
	PayloadFunc   func(data interface{}) jwt.MapClaims // maps caller data to claims
}
// Init validates and normalises the signer: an unrecognised algorithm falls
// back to HS256, a zero timeout becomes one hour, and a default PayloadFunc
// (mapping data to the ID claim) is installed when none is set. It fails
// when no signing key has been provided.
func (j *JwtSigner) Init() error {
	if j.SignAlgorithm != "HS256" && j.SignAlgorithm != "HS384" && j.SignAlgorithm != "HS512" {
		j.SignAlgorithm = "HS256"
	}
	if j.Timeout == 0 {
		j.Timeout = time.Hour
	}
	if len(j.Key) == 0 {
		return ErrMissingSecretKey
	}
	if j.PayloadFunc == nil {
		j.PayloadFunc = func(data interface{}) jwt.MapClaims {
			return jwt.MapClaims{IDKey: data}
		}
	}
	return nil
}
// Gen signs and returns a new token whose claims come from PayloadFunc(data)
// plus the expiry and issue timestamps.
func (j *JwtSigner) Gen(data interface{}) (string, error) {
	token := jwt.New(jwt.GetSigningMethod(j.SignAlgorithm))
	claims := token.Claims.(jwt.MapClaims)
	if j.PayloadFunc != nil {
		for key, value := range j.PayloadFunc(data) {
			claims[key] = value
		}
	}
	now := time.Now()
	claims[SignTSKey] = now.Unix()
	claims[ExpireKey] = now.Add(j.Timeout).Unix()
	return token.SignedString(j.Key)
}
// Verification parses and checks the token. On success it returns a copy of
// the claims plus a flag telling the caller the token is past its expiry but
// still refreshable. Tokens expired beyond the refresh window yield an error.
func (j *JwtSigner) Verification(token string) (jwt.MapClaims, bool, error) {
	claims, needRefresh, err := j.checkIfTokenExpire(token)
	if err != nil {
		return nil, false, err
	}
	copied := make(jwt.MapClaims, len(claims))
	for key, value := range claims {
		copied[key] = value
	}
	return copied, needRefresh, nil
}
// Refresh issues a new token carrying the given claims with freshly reset
// expiry and issue timestamps.
func (j *JwtSigner) Refresh(claims jwt.MapClaims) (string, error) {
	// Create the token and copy every existing claim over.
	newToken := jwt.New(jwt.GetSigningMethod(j.SignAlgorithm))
	newClaims := newToken.Claims.(jwt.MapClaims)
	for key, value := range claims {
		newClaims[key] = value
	}
	now := time.Now()
	newClaims[SignTSKey] = now.Unix()
	newClaims[ExpireKey] = now.Add(j.Timeout).Unix()
	return newToken.SignedString(j.Key)
}
// checkIfTokenExpire parses t and classifies it into one of three states:
// valid (claims, false, nil), expired but inside the refresh window
// (claims, true, nil), or expired beyond refresh (nil, false, ErrExpiredToken).
// NOTE(review): the type assertions below panic if the "expire"/"sign_ts"
// claims are missing or not numeric — confirm every issued token carries them.
func (j *JwtSigner) checkIfTokenExpire(t string) (jwt.MapClaims, bool, error) {
	token, err := j.parseToken(t)
	if err != nil {
		return nil, false, err
	}
	claims := token.Claims.(jwt.MapClaims)
	// JSON numbers decode as float64; convert back to a unix timestamp.
	expTS := int64(claims[ExpireKey].(float64))
	if expTS > time.Now().Unix() {
		return claims, false, nil
	}
	signTS := int64(claims[SignTSKey].(float64))
	// Expired, but still refreshable when issued within MaxRefresh + Timeout.
	if signTS > time.Now().Add(-(j.MaxRefresh + j.Timeout)).Unix() {
		return claims, true, nil
	}
	return nil, false, ErrExpiredToken
}
// parseToken parses the raw token string into a *jwt.Token, rejecting tokens
// whose signing method differs from the signer's configured algorithm.
func (j *JwtSigner) parseToken(token string) (*jwt.Token, error) {
	return jwt.Parse(token, func(t *jwt.Token) (interface{}, error) {
		if jwt.GetSigningMethod(j.SignAlgorithm) != t.Method {
			return nil, ErrInvalidSigningAlgorithm
		}
		return j.Key, nil
	})
}
|
package operatorlister
import (
"fmt"
"sync"
"k8s.io/apimachinery/pkg/labels"
v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
aregv1 "k8s.io/kube-aggregator/pkg/client/listers/apiregistration/v1"
)
// UnionAPIServiceLister is a custom implementation of an APIService lister
// that allows a new Lister to be registered on the fly. Access to the
// delegate is guarded by an RWMutex.
type UnionAPIServiceLister struct {
	apiServiceLister aregv1.APIServiceLister // delegate; nil until registered
	apiServiceLock   sync.RWMutex            // guards apiServiceLister
}
// List lists all APIServices in the indexer matching selector, failing when
// no delegate lister has been registered yet.
func (ual *UnionAPIServiceLister) List(selector labels.Selector) (ret []*v1.APIService, err error) {
	ual.apiServiceLock.RLock()
	defer ual.apiServiceLock.RUnlock()
	lister := ual.apiServiceLister
	if lister == nil {
		return nil, fmt.Errorf("no apiService lister registered")
	}
	return lister.List(selector)
}
// Get retrieves the APIService with the given name, failing when no delegate
// lister has been registered yet.
func (ual *UnionAPIServiceLister) Get(name string) (*v1.APIService, error) {
	ual.apiServiceLock.RLock()
	defer ual.apiServiceLock.RUnlock()
	lister := ual.apiServiceLister
	if lister == nil {
		return nil, fmt.Errorf("no apiService lister registered")
	}
	return lister.Get(name)
}
// RegisterAPIServiceLister registers a new APIServiceLister as the delegate,
// replacing any previously registered one.
func (ual *UnionAPIServiceLister) RegisterAPIServiceLister(lister aregv1.APIServiceLister) {
	ual.apiServiceLock.Lock()
	defer ual.apiServiceLock.Unlock()
	ual.apiServiceLister = lister
}
// RegisterAPIServiceLister delegates registration to the embedded union lister.
func (l *apiRegistrationV1Lister) RegisterAPIServiceLister(lister aregv1.APIServiceLister) {
	l.apiServiceLister.RegisterAPIServiceLister(lister)
}

// APIServiceLister exposes the union lister as an APIServiceLister.
func (l *apiRegistrationV1Lister) APIServiceLister() aregv1.APIServiceLister {
	return l.apiServiceLister
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/aws/aws-lambda-go/events"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
"github.com/aws/aws-sdk-go/service/kms"
"github.com/aws/aws-sdk-go/service/kms/kmsiface"
"github.com/stretchr/testify/assert"
"net/http"
"os"
"testing"
)
// Test fixtures: an email the mock DB knows, one it does not, and the
// password stored for the known account.
const (
	TEST_EXISTING_EMAIL     = "orest@test.com"
	TEST_NOT_EXISTING_EMAIL = "test@test.com"
	TEST_PASSWORD           = "password"
)

// MockDynamoDB stubs the DynamoDB API used by the handler under test.
type MockDynamoDB struct {
	dynamodbiface.DynamoDBAPI
}

// MockKMS stubs the KMS API used for password encryption.
type MockKMS struct {
	kmsiface.KMSAPI
}
// Query fakes DynamoDB Query: the existing test email yields one matching
// item, the not-existing test email yields an empty result, and any other
// email simulates a database error.
func (mockDynamoDb *MockDynamoDB) Query(input *dynamodb.QueryInput) (*dynamodb.QueryOutput, error) {
	email := *input.KeyConditions["email"].AttributeValueList[0].S
	switch email {
	case TEST_EXISTING_EMAIL:
		count := int64(1)
		record := map[string]*dynamodb.AttributeValue{
			"email": {
				S: aws.String(TEST_EXISTING_EMAIL),
			},
			"password": {
				S: aws.String(TEST_PASSWORD),
			},
		}
		return &dynamodb.QueryOutput{
			Items: []map[string]*dynamodb.AttributeValue{record},
			Count: &count,
		}, nil
	case TEST_NOT_EXISTING_EMAIL:
		count := int64(0)
		return &dynamodb.QueryOutput{Count: &count}, nil
	default:
		return nil, fmt.Errorf("test error")
	}
}
// PutItem fakes DynamoDB PutItem: storing the not-yet-existing test email
// succeeds; any other email simulates a write failure.
func (mockDynamoDb *MockDynamoDB) PutItem(input *dynamodb.PutItemInput) (*dynamodb.PutItemOutput, error) {
	if email := *input.Item["email"].S; email != TEST_NOT_EXISTING_EMAIL {
		return nil, fmt.Errorf("error occurred while saving to db")
	}
	return &dynamodb.PutItemOutput{}, nil
}
// Encrypt fakes KMS encryption by echoing the plaintext back as the
// ciphertext blob, so tests can compare values directly.
func (mockKms *MockKMS) Encrypt(input *kms.EncryptInput) (*kms.EncryptOutput, error) {
	return &kms.EncryptOutput{
		CiphertextBlob: input.Plaintext,
	}, nil
}
// init swaps the real AWS clients for mocks and sets the environment
// variables the handler reads, before any test runs.
func init() {
	ddbClient = &MockDynamoDB{}
	kmsClient = &MockKMS{}
	os.Setenv(REGION, "us-west-2")
	os.Setenv(TABLE_NAME, "test table")
}
func TestCheckIfAccountInNew(t *testing.T) {
isNew, _ := checkIfAccountInNew(TEST_NOT_EXISTING_EMAIL)
assert.True(t, isNew)
}
// TestCheckIfAccountIsNotNew: an existing email yields a 409 Conflict response.
func TestCheckIfAccountIsNotNew(t *testing.T) {
	isNew, response := checkIfAccountInNew(TEST_EXISTING_EMAIL)
	assert.False(t, isNew)
	assert.Equal(t, http.StatusConflict, response.StatusCode)
}
// TestEncryptPassword: with the echo mock, the "ciphertext" equals the input.
func TestEncryptPassword(t *testing.T) {
	encryptedPassword, _ := encryptPassword(TEST_PASSWORD)
	assert.Equal(t, TEST_PASSWORD, string(encryptedPassword))
}
// TestCreateAccountInDb: persisting a not-yet-existing account succeeds.
func TestCreateAccountInDb(t *testing.T) {
	created, _ := createAccountInDb(Account{
		Name:     "Test account",
		Email:    TEST_NOT_EXISTING_EMAIL,
		Password: TEST_PASSWORD,
	})
	assert.True(t, created)
}
// TestCreateAccountInDbFailed: a PutItem failure maps to 500 Internal Server Error.
func TestCreateAccountInDbFailed(t *testing.T) {
	created, response := createAccountInDb(Account{
		Name:     "Test account",
		Email:    TEST_EXISTING_EMAIL,
		Password: TEST_PASSWORD,
	})
	assert.False(t, created)
	assert.Equal(t, http.StatusInternalServerError, response.StatusCode)
}
// TestHandleRequest: the full happy path — new email, echo-mock encryption,
// successful save — returns 200 OK.
func TestHandleRequest(t *testing.T) {
	body, _ := json.Marshal(Account{
		Email:    TEST_NOT_EXISTING_EMAIL,
		Password: TEST_PASSWORD,
		Name:     "Test name",
	})
	request := events.APIGatewayProxyRequest{Body: string(body)}
	response, _ := HandleRequest(request)
	assert.Equal(t, http.StatusOK, response.StatusCode)
}
|
package main
import (
"fmt"
"llvvlv00.org/zinx/ziface"
"llvvlv00.org/zinx/znet"
)
// Example server-side application built on the zinx framework.
// "ping test" custom routers follow.
// PingRouter handles message ID 0 and answers each request with a ping reply.
type PingRouter struct {
	znet.BaseRouter
}
// HelloZinxRouter handles message ID 1 and answers with a hello reply.
type HelloZinxRouter struct {
	znet.BaseRouter
}
// Handle reads the client's message, logs it, and replies on msg ID 200
// with a ping payload. (Receiver renamed from `this` to the idiomatic `r`.)
func (r *PingRouter) Handle(request ziface.IRequest) {
	fmt.Println("Call Router Handle")
	// Read the client's data first, then echo back ping... ping... ping...
	fmt.Println("recv from client: msgID=", request.GetMsgID(),
		", data=", string(request.GetData()))
	err := request.GetConnection().SendMsg(200, []byte("ping... ping... ping..."))
	if err != nil {
		fmt.Println(err)
	}
}
// DoConnectionBegin is the OnConnStart hook: it greets the new connection
// on msg ID 202 and stores a few demo properties on it for DoConnectionLost
// to read back.
func DoConnectionBegin(conn ziface.IConnection) {
	fmt.Println("====>DoConnectionBegin is Called ...")
	if err := conn.SendMsg(202, []byte("DoConnection BEGIN")); err != nil {
		fmt.Println(err)
	}
	// Attach some properties to the new connection.
	fmt.Println("Set Conn Name: Hoe ...")
	conn.SetProperty("Name", "llvvlv00-Shawn-davi")
	conn.SetProperty("GitHub", "https://github.com/llvvlv00")
	conn.SetProperty("Home", "http://www.shawndavi.top")
}
// DoConnectionLost is the OnConnStop hook, run just before a connection is
// torn down: it logs the connection ID and the properties attached in
// DoConnectionBegin.
func DoConnectionLost(conn ziface.IConnection) {
	fmt.Println("====>DoConnectionLost is Called ...")
	fmt.Println(" conn ID = ", conn.GetConnID())
	// Read back the connection properties set on connect.
	if name, err := conn.GetProperty("Name"); err == nil {
		fmt.Println("Name =", name)
	}
	if github, err := conn.GetProperty("GitHub"); err == nil {
		// Fixed log label: was misspelled "GetHub".
		fmt.Println("GitHub =", github)
	}
	if home, err := conn.GetProperty("Home"); err == nil {
		fmt.Println("Home =", home)
	}
}
// Handle reads the client's message, logs it, and replies on msg ID 201
// with a hello payload. (Receiver renamed from `this` to the idiomatic `r`.)
func (r *HelloZinxRouter) Handle(request ziface.IRequest) {
	fmt.Println("Call HelloZinxRouter Handle")
	// Read the client's data first, then write the hello reply.
	fmt.Println("recv from client: msgID=", request.GetMsgID(),
		", data=", string(request.GetData()))
	err := request.GetConnection().SendMsg(201, []byte("hello zinx router"))
	if err != nil {
		fmt.Println(err)
	}
}
// main boots the example zinx server: create it, install the connection
// lifecycle hooks, register the routers by message ID, and serve forever.
func main() {
	// 1. Create a server handle via the zinx API.
	s := znet.NewServer("[zinx V0.8]")
	// 2. Register the connection start/stop hook functions.
	s.SetOnConnStart(DoConnectionBegin)
	s.SetOnConnStop(DoConnectionLost)
	// 3. Add the routers (msg ID 0 -> ping, msg ID 1 -> hello).
	s.AddRouter(0, &PingRouter{})
	s.AddRouter(1, &HelloZinxRouter{})
	// 4. Start the server (blocks).
	s.Serve()
} |
package cidranger
import (
"net"
"testing"
"github.com/stretchr/testify/assert"
)
// TestInsert verifies that IPv4 and IPv6 networks land in their respective
// maps, keyed by CIDR string. Insert errors are now asserted instead of
// silently ignored.
func TestInsert(t *testing.T) {
	ranger := newBruteRanger().(*bruteRanger)
	_, networkIPv4, _ := net.ParseCIDR("0.0.1.0/24")
	_, networkIPv6, _ := net.ParseCIDR("8000::/96")
	assert.NoError(t, ranger.Insert(*networkIPv4))
	assert.NoError(t, ranger.Insert(*networkIPv6))
	assert.Equal(t, 1, len(ranger.ipV4Networks))
	assert.Equal(t, *networkIPv4, ranger.ipV4Networks["0.0.1.0/24"])
	assert.Equal(t, 1, len(ranger.ipV6Networks))
	assert.Equal(t, *networkIPv6, ranger.ipV6Networks["8000::/96"])
}
// TestInsertError: an IP lengthened past a valid address size (17 bytes)
// must be rejected with ErrInvalidNetworkInput.
func TestInsertError(t *testing.T) {
	bRanger := newBruteRanger().(*bruteRanger)
	_, networkIPv4, _ := net.ParseCIDR("0.0.1.0/24")
	networkIPv4.IP = append(networkIPv4.IP, byte(4)) // corrupt the IP length
	err := bRanger.Insert(*networkIPv4)
	assert.Equal(t, ErrInvalidNetworkInput, err)
}
// TestRemove: removing an inserted network returns it and empties the map;
// removing a network that was never inserted returns nil without error.
// The insert/remove interleaving is deliberate: each family is emptied
// before the final length assertions.
func TestRemove(t *testing.T) {
	ranger := newBruteRanger().(*bruteRanger)
	_, networkIPv4, _ := net.ParseCIDR("0.0.1.0/24")
	_, networkIPv6, _ := net.ParseCIDR("8000::/96")
	_, notInserted, _ := net.ParseCIDR("8000::/96")
	ranger.Insert(*networkIPv4)
	deletedIPv4, err := ranger.Remove(*networkIPv4)
	assert.NoError(t, err)
	ranger.Insert(*networkIPv6)
	deletedIPv6, err := ranger.Remove(*networkIPv6)
	assert.NoError(t, err)
	// notInserted parses to the same CIDR, but IPv6 was already removed above.
	network, err := ranger.Remove(*notInserted)
	assert.NoError(t, err)
	assert.Nil(t, network)
	assert.Equal(t, networkIPv4, deletedIPv4)
	assert.Equal(t, 0, len(ranger.ipV4Networks))
	assert.Equal(t, networkIPv6, deletedIPv6)
	assert.Equal(t, 0, len(ranger.ipV6Networks))
}
// TestRemoveError: removing a network with a corrupted (over-long) IP
// must fail with ErrInvalidNetworkInput.
func TestRemoveError(t *testing.T) {
	r := newBruteRanger().(*bruteRanger)
	_, invalidNetwork, _ := net.ParseCIDR("0.0.1.0/24")
	invalidNetwork.IP = append(invalidNetwork.IP, byte(4)) // corrupt the IP length
	_, err := r.Remove(*invalidNetwork)
	assert.Equal(t, ErrInvalidNetworkInput, err)
}
// TestContains exercises membership checks for IPv4 and IPv6, plus the
// invalid-IP error path. Test-case names fixed: "houldn't" typo, and the
// third case expects containment so its name now says "should contain".
func TestContains(t *testing.T) {
	r := newBruteRanger().(*bruteRanger)
	_, network, _ := net.ParseCIDR("0.0.1.0/24")
	_, network1, _ := net.ParseCIDR("8000::/112")
	r.Insert(*network)
	r.Insert(*network1)
	cases := []struct {
		ip       net.IP
		contains bool
		err      error
		name     string
	}{
		{net.ParseIP("0.0.1.255"), true, nil, "IPv4 should contain"},
		{net.ParseIP("0.0.0.255"), false, nil, "IPv4 shouldn't contain"},
		{net.ParseIP("8000::ffff"), true, nil, "IPv6 should contain"},
		{net.ParseIP("8000::1:ffff"), false, nil, "IPv6 shouldn't contain"},
		{append(net.ParseIP("8000::1:ffff"), byte(0)), false, ErrInvalidNetworkInput, "Invalid IP"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			contains, err := r.Contains(tc.ip)
			if tc.err != nil {
				assert.Equal(t, tc.err, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, tc.contains, contains)
			}
		})
	}
}
// TestContainingNetworks checks that an IP reports every network that
// contains it (nested /24+/25 and /112+/113 pairs), order-insensitively.
// Test-case name typo fixed: "constain" -> "contain".
func TestContainingNetworks(t *testing.T) {
	r := newBruteRanger().(*bruteRanger)
	_, network1, _ := net.ParseCIDR("0.0.1.0/24")
	_, network2, _ := net.ParseCIDR("0.0.1.0/25")
	_, network3, _ := net.ParseCIDR("8000::/112")
	_, network4, _ := net.ParseCIDR("8000::/113")
	r.Insert(*network1)
	r.Insert(*network2)
	r.Insert(*network3)
	r.Insert(*network4)
	cases := []struct {
		ip                 net.IP
		containingNetworks []net.IPNet
		err                error
		name               string
	}{
		{net.ParseIP("0.0.1.255"), []net.IPNet{*network1}, nil, "IPv4 should contain"},
		{net.ParseIP("0.0.1.127"), []net.IPNet{*network1, *network2}, nil, "IPv4 should contain both"},
		{net.ParseIP("0.0.0.127"), []net.IPNet{}, nil, "IPv4 should contain none"},
		{net.ParseIP("8000::ffff"), []net.IPNet{*network3}, nil, "IPv6 should contain"},
		{net.ParseIP("8000::7fff"), []net.IPNet{*network3, *network4}, nil, "IPv6 should contain both"},
		{net.ParseIP("8000::1:7fff"), []net.IPNet{}, nil, "IPv6 should contain none"},
		{append(net.ParseIP("8000::1:7fff"), byte(0)), nil, ErrInvalidNetworkInput, "Invalid IP"},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			networks, err := r.ContainingNetworks(tc.ip)
			if tc.err != nil {
				assert.Equal(t, tc.err, err)
			} else {
				assert.NoError(t, err)
				assert.Equal(t, len(tc.containingNetworks), len(networks))
				for _, network := range tc.containingNetworks {
					assert.Contains(t, networks, network)
				}
			}
		})
	}
}
|
package qprob
// classifyAnal.go
import (
"encoding/json"
"fmt"
iou "io/ioutil"
"qutil"
"sort"
)
const AnalNoClassSpecified = TClassId(-9999)
// Structures to report on results
// and make them easy to analyze
// Some of these are also used
// by the optimizer. Not to be confused
// with ResultForRow which contains
// more detail such as probability for
// membership in each class.
// testRowRes records one test row's actual versus predicted class.
type testRowRes struct {
	rowNum    int32    // index of the row in the test set
	actClass  TClassId // class from the labeled data
	predClass TClassId // class the classifier predicted
}
// testRes aggregates per-row results into simple accuracy counts.
type testRes struct {
	rows        []testRowRes // one entry per classified test row
	cntRow      int32        // total rows classified
	cntCorrect  int32        // rows where predClass == actClass
	percCorrect float32      // cntCorrect / cntRow
}
// TestClassifyAnal is a placeholder smoke entry point for this module.
func TestClassifyAnal() {
	fmt.Print("Hello World!\n")
}
/*
// Function Create Summary Results
func (fier *Classifier) createSummaryResults(astr string) *summaryResult {
// NOTE: Some of this code already exists in ClassifyFiles
return nil
}
func (sumRes *summaryResult) ToSimpleRowCSV(fier *Classifier) string {
return ""
}
// function to build statistics by class
// from a given result set.
*/
// AnalResByFeat holds the per-feature outcome of the bucket-count analysis:
// the bucket range chosen, overall counts/precision, and a per-class breakdown.
type AnalResByFeat struct {
	FeatNdx    int
	FeatWeight float32 // assigned by the weight-adjustment pass
	ColName    string
	MinNumBuck int16
	MaxNumBuck int16
	EffMinVal  float32
	EffMaxVal  float32
	TotCnt     int32
	SucCnt     int32
	Prec       float32
	TargClass  TClassId // class the analysis was optimized for (or sentinel)
	TargPrec   float32  // precision achieved for TargClass
	ByClasses  map[TClassId]ResByClass
}
// AnalResults is the whole-run analysis output: one entry per column plus
// totals for the full feature set. Serialized to JSON by DoPreAnalyze.
type AnalResults struct {
	Cols   []AnalResByFeat
	TotCnt int32
	SucCnt int32
	Prec   float32
}
// makeAnalResults allocates an AnalResults with one entry per column,
// each pre-seeded with an empty per-class map, default weight 1, and the
// "unfilled" feature index sentinel -1.
func makeAnalResults(numCol int) *AnalResults {
	tout := &AnalResults{Cols: make([]AnalResByFeat, numCol)}
	for i := range tout.Cols {
		col := &tout.Cols[i]
		col.ByClasses = make(map[TClassId]ResByClass)
		col.FeatWeight = 1
		col.FeatNdx = -1
	}
	return tout
}
// srtAnalResByFeatSpecClass sorts features by precision of the chosen target
// class, descending. Implements sort.Interface.
type srtAnalResByFeatSpecClass []AnalResByFeat

// Len reports the number of features.
func (v srtAnalResByFeatSpecClass) Len() int {
	return len(v)
}

// Swap exchanges two features.
func (v srtAnalResByFeatSpecClass) Swap(i, j int) {
	v[i], v[j] = v[j], v[i]
}

// Less orders descending by target-class precision with tie-breaks below.
// Unfilled entries (FeatNdx == -1) always sort last.
func (v srtAnalResByFeatSpecClass) Less(i, j int) bool {
	if v[i].FeatNdx == -1 {
		return false
	}
	// Priority 1 = target-class precision
	// tie-break = SucCnt
	// tie-break = total feature precision
	// tie-break = FeatNdx
	if v[i].TargPrec == v[j].TargPrec {
		if v[i].SucCnt == v[j].SucCnt {
			if v[i].Prec == v[j].Prec {
				return v[i].FeatNdx > v[j].FeatNdx
			}
			return v[i].Prec > v[j].Prec
		}
		return v[i].SucCnt > v[j].SucCnt
	}
	return v[i].TargPrec > v[j].TargPrec
}
// srtAnalResByFeat sorts features by precision over the total data set,
// descending. Implements sort.Interface.
type srtAnalResByFeat []AnalResByFeat

// Len reports the number of features.
func (v srtAnalResByFeat) Len() int {
	return len(v)
}

// Swap exchanges two features.
func (v srtAnalResByFeat) Swap(i, j int) {
	v[i], v[j] = v[j], v[i]
}

// Less orders descending by total precision, breaking ties on FeatNdx.
// Unfilled entries (FeatNdx == -1) always sort last.
func (v srtAnalResByFeat) Less(i, j int) bool {
	if v[i].FeatNdx == -1 {
		return false
	}
	if v[i].Prec == v[j].Prec {
		return v[i].FeatNdx > v[j].FeatNdx
	}
	return v[i].Prec > v[j].Prec
}
// If targClass != AnalNoClassSpecified, the results for that class drive the
// search: as long as precision stays >= targPrecis, increasing recall is
// preferred; otherwise increasing precision is preferred. When targClass ==
// AnalNoClassSpecified, increasing precision over the entire data set is
// chosen. This is used to pre-set MaxNumBuck for each column. In some cases
// it could set MinNumBuck as well, because if a low bucket count such as 2
// yields poor results for a given class we do not want the results module
// to fall back to those lower numbers.
//
// NOTE(review): currently a stub — always returns nil.
func (fier *Classifier) TestColumnNumBuck(targClass int16, targPrecis float32, trainRow [][]float32, testRow [][]float32) []*AnalResults {
	return nil
}
// LoadSavedAnal loads a previously saved analysis file (JSON AnalResults) and
// uses its values to override each column's MinNumBuck, MaxNumBuck and
// FeatWeight. Read or parse failures are logged and otherwise ignored
// (best-effort load).
func (fier *Classifier) LoadSavedAnal(fiName string) {
	fmt.Printf("L108: Attempt Load Saved Analysis settings from %s\n", fiName)
	ba, err := iou.ReadFile(fiName)
	if err != nil {
		fmt.Printf("L105: LoadSavedAnal() Error Reading %s err=%v \n", fiName, err)
	} else {
		tobj := AnalResults{}
		if err := json.Unmarshal(ba, &tobj); err != nil {
			fmt.Printf("L111: LoadSavedAnal() Error JSON Parsing from %s err=%v\n", fiName, err)
		} else {
			// Copy the important values from the saved set back into the
			// classifier so it can use them to adjust its classification
			// results.
			// WARN: The MaxBuck used when the saved values were generated
			// must be the same or smaller than the MaxBuck used in this run
			// or an index error will occur. The number of features in the
			// training file must be identical to those used when the
			// saved settings were generated.
			for _, acol := range tobj.Cols {
				featndx := acol.FeatNdx
				// Skip the class column itself and unfilled entries (-1).
				if featndx != fier.ClassCol && featndx != -1 {
					feat := fier.ColDef[featndx]
					// Growing MaxNumBuck requires re-allocating the buckets.
					if acol.MaxNumBuck > feat.MaxNumBuck {
						feat.MaxNumBuck = acol.MaxNumBuck
						feat.initializeBuckets()
					}
					feat.MinNumBuck = acol.MinNumBuck
					feat.FeatWeight = acol.FeatWeight
					fmt.Printf("L182 Load ndx=%v, name=%v MinNB=%v MaxNB=%v weight=%v\n",
						featndx, feat.Spec.ColName, feat.MinNumBuck, feat.MaxNumBuck, feat.FeatWeight)
				}
			}
		}
	}
}
// sortColumnsByPrecision is an unimplemented placeholder (see TODOs above).
func sortColumnsByPrecision(col []AnalResByFeat) {
}
// setFeatWeightByOrderedPrecision is an unimplemented placeholder (see TODOs above).
func setFeatWeightByOrderedPrecision(col []AnalResByFeat) {
}
// DoPreAnalyze splits the training data into train/test portions per the
// request settings, runs the per-column bucket analysis, writes the JSON
// results to analFiName, and (when a real split was used) re-runs a full
// classification with the tuned settings.
func (fier *Classifier) DoPreAnalyze(analFiName string) {
	req := fier.Req
	// Pre-analyze each column to try and find the sweet spot
	// for precision and recall as number of buckets.
	origTrainRows := fier.GetTrainRowsAsArr(OneGig)
	testRows := origTrainRows
	trainRows := origTrainRows
	if req.AnalTestPort == 100 {
		// 100 means: test on the full training set (no split).
		fmt.Printf("L139: DoPreAnalyze() Use entire training set as test numRow=%v", len(origTrainRows))
	} else if req.AnalSplitType == 1 {
		// Pull test records from the body of the data taking 1 row every
		// so often. Gives a good sampling for data that is not a
		// time series.
		// NOTE(review): len/(len*port) simplifies to 1/port — confirm intended.
		oneEvery := int(float32(len(origTrainRows)) / (float32(len(origTrainRows)) * req.AnalTestPort))
		fmt.Printf("Analyze SplitOneEvery=%v portSet=%v\n", oneEvery, req.AnalTestPort)
		trainRows, testRows = qutil.SplitFloatArrOneEvery(origTrainRows, 1, oneEvery)
	} else {
		// Pull records from the end of the data. Best for
		// time series when predicting on records near the end.
		fmt.Printf("Analyze splitEnd PortSet=%v", req.AnalTestPort)
		trainRows, testRows = qutil.SplitFloatArrTail(origTrainRows, req.AnalTestPort)
	}
	// NOTE(review): this result is never read — sumRows is only used after
	// being reassigned inside the final if block. Possibly wasted work.
	_, sumRows := fier.ClassifyRows(testRows, fier.ColDef)
	// Have to retrain based on the newly split data.
	fmt.Printf("L215: Analyze #TrainRow=%v #TestRow=%v analFiName=%v\n", len(trainRows), len(testRows), analFiName)
	fier.Retrain(trainRows)
	anaRes := fier.TestIndividualColumnsNB(AnalNoClassSpecified, -1.0, trainRows, testRows)
	jsonRes, err := json.Marshal(anaRes)
	if err != nil {
		fmt.Printf("L220: Error converting analyze res to JSON err=%v analRes=%v\n", err, anaRes)
	} else {
		// Persist the analysis so LoadSavedAnal can reuse it on later runs.
		barr := []byte(jsonRes)
		fmt.Printf("L256: write %v bytes to %v\n", len(barr), analFiName)
		err := iou.WriteFile(analFiName, barr, 0666)
		if err != nil {
			fmt.Printf("L92: Error writting analyzis output file %s err=%v\n", analFiName, err)
		}
	}
	if req.AnalTestPort != 100 {
		// Rerun the classification using all our columns but with the new
		// bucket counts and weights for each column.
		fier.Retrain(origTrainRows)
		_, sumRows = fier.ClassifyRows(testRows, fier.ColDef)
		fmt.Printf("L178: DoPreAnalyze() Precision with all columns = %v\n Based on training data\n", sumRows.Precis)
	}
}
// TODO: Add the save feature.
// TODO: Add a human-legible save feature.
// TODO: Add the ability to sort the list of features by precision.
// TODO: Convert the analysis result structure to direct storage to allow JSON conversion.
// TODO: Need a smaller JSON structure that is just enough to reload later classification runs.
// TODO: We should take the highest-precision feature, give it a high weight,
//       then assign each lower-precision feature a lower weight.
// TODO: Need a human-editable structure to turn feature columns off.
// Analyze individual columns' predictive power. This can help identify
// columns with better predictive input, and columns with low predictive
// input so they can be removed from the dataset.
//
// Runs each feature independently, varying its bucket count and keeping
// the bucket range that returns the best results for that feature.
//
// Fixes in this revision: the max-bucket class-mode condition contained a
// redundant OR disjunct (the second clause implied the first) and has been
// simplified behavior-identically; the two sort comments near the end were
// swapped relative to their branches.
func (fier *Classifier) TestIndividualColumnsNB(targClass TClassId, targPrecis float32, trainRows [][]string, testRows [][]string) *AnalResults {
	// Question: how do you quantify "better"? If precision is high but
	// recall is very low, which wins? You must fix one as a minimum value
	// and allow the other to vary.
	req := fier.Req
	featSet := make([]*Feature, 1) // reused one-feature slice for isolated runs
	specClass := req.AnalClassId
	if specClass != AnalNoClassSpecified {
		fmt.Printf("Analyze for ClassId=%v\n", specClass)
	} else {
		fmt.Printf("Analyze for Total Set\n")
	}
	numCol := len(fier.ColDef)
	tout := makeAnalResults(numCol)
	for _, feat := range fier.ColDef {
		featNum := feat.ColNum
		// Skip the class column itself and disabled features.
		if featNum == fier.ClassCol || feat.Enabled == false {
			continue
		}
		startMaxNumBuck := feat.MaxNumBuck
		startMinNumBuck := feat.MinNumBuck
		featSet[0] = feat
		// Baseline: classify using only this feature at its current buckets.
		_, sumRows := fier.ClassifyRows(testRows, featSet)
		startMaxPrec := sumRows.Precis
		startMaxRecall := float32(0.0)
		bestMaxPrecis := sumRows.Precis
		bestMaxBuck := startMaxNumBuck
		bestMaxRecall := startMaxRecall
		if specClass != AnalNoClassSpecified {
			// Re-seed the baseline from the target class's own stats.
			clasSum := fier.MakeByClassStats(sumRows, testRows)
			tclass := clasSum.ByClass[specClass]
			startMaxRecall = tclass.Recall
			startMaxPrec = tclass.Prec
			bestMaxRecall = tclass.Recall
			bestMaxPrecis = tclass.Prec
		}
		// Phase 1: shrink MaxNumBuck, keeping the best-scoring value.
		for maxNumBuck := feat.MaxNumBuck; maxNumBuck >= fier.Req.MinNumBuck; maxNumBuck-- {
			feat.MaxNumBuck = maxNumBuck
			_, sumRows := fier.ClassifyRows(testRows, featSet)
			if req.AnalClassId == AnalNoClassSpecified {
				if sumRows.Precis >= bestMaxPrecis {
					// Measure by accuracy when all rows are forced to be
					// classified, eg: recall is forced to 100% for the set
					// by making the classifier take its best guess per row.
					bestMaxBuck = maxNumBuck
					bestMaxPrecis = sumRows.Precis
				}
			} else {
				// Measure by the target class.
				clasSum := fier.MakeByClassStats(sumRows, testRows)
				tclass := clasSum.ByClass[specClass]
				// Simplified from "(Prec >= best && Recall >= best) ||
				// (Prec >= best && Recall > best)" — the second disjunct
				// was subsumed by the first, so this is behavior-identical.
				// NOTE(review): the min-bucket loop below uses a stricter
				// mixed >/>= form; confirm which tie-break was intended.
				if tclass.Prec >= bestMaxPrecis && tclass.Recall >= bestMaxRecall {
					bestMaxRecall = tclass.Recall
					bestMaxPrecis = tclass.Prec
					bestMaxBuck = maxNumBuck
				}
			}
		} // for maxNumBuck
		feat.MaxNumBuck = bestMaxBuck
		// Phase 2: tighten MinNumBuck up toward MaxNumBuck.
		bestMinBuck := startMinNumBuck
		bestMinPrecis := startMaxPrec
		bestMinRecall := startMaxRecall
		for minNumBuck := startMinNumBuck; minNumBuck <= feat.MaxNumBuck; minNumBuck++ {
			feat.MinNumBuck = minNumBuck
			_, sumRows := fier.ClassifyRows(testRows, featSet)
			if req.AnalClassId == AnalNoClassSpecified {
				if sumRows.Precis >= bestMinPrecis {
					// Measure by forced-classification accuracy, as above.
					bestMinBuck = minNumBuck
					bestMinPrecis = sumRows.Precis
				}
			} else {
				// Measure by the target class.
				clasSum := fier.MakeByClassStats(sumRows, testRows)
				tclass := clasSum.ByClass[specClass]
				if (tclass.Prec > bestMinPrecis && tclass.Recall >= bestMinRecall) || (tclass.Prec >= bestMinPrecis && tclass.Recall > bestMinRecall) {
					bestMinRecall = tclass.Recall
					bestMinPrecis = tclass.Prec
					bestMinBuck = minNumBuck
				}
			}
		} // for minNumBuck
		feat.MinNumBuck = bestMinBuck
		// Final per-feature run with the chosen bucket range.
		_, sumRows = fier.ClassifyRows(testRows, featSet)
		fmt.Printf("L158: After Analyze ColNum=%v colName=%v\n startPrecis=%v endPrecis=%v\n",
			feat.ColNum, feat.Spec.ColName, startMaxPrec, bestMinPrecis)
		if req.AnalClassId != AnalNoClassSpecified {
			fmt.Printf(" startRecall=%v endRecall=%v\n", startMaxRecall, bestMinRecall)
		}
		fmt.Printf(" startMaxNumBuck=%v endMaxNumBuck=%v\n startMinNumBuck=%v endMinNumBuck=%v\n",
			startMaxNumBuck, bestMaxBuck, startMinNumBuck, bestMinBuck)
		// Update the output structure for this feature.
		clasSum := fier.MakeByClassStats(sumRows, testRows)
		targPrec := float32(0.0)
		for classId, aclass := range clasSum.ByClass {
			tout.Cols[featNum].ByClasses[classId] = *aclass
			if aclass.ClassId == req.AnalClassId {
				targPrec = aclass.Prec
			}
		}
		col := &tout.Cols[featNum]
		col.FeatNdx = feat.ColNum
		col.ColName = feat.Spec.ColName
		col.EffMinVal = feat.EffMinVal
		col.EffMaxVal = feat.EffMaxVal
		col.MinNumBuck = bestMinBuck
		col.MaxNumBuck = bestMaxBuck
		col.TotCnt = sumRows.TotCnt
		col.SucCnt = sumRows.SucCnt
		col.Prec = sumRows.Precis
		col.TargPrec = targPrec
		col.TargClass = req.AnalClassId
	} // for features
	// Update the output structure for the entire feature set.
	_, sumRows := fier.ClassifyRows(testRows, fier.ColDef)
	tout.Prec = sumRows.Precis
	tout.SucCnt = sumRows.SucCnt
	tout.TotCnt = sumRows.TotCnt
	if req.AnalClassId != AnalNoClassSpecified {
		// Sort features by the specified class's performance.
		sort.Sort(srtAnalResByFeatSpecClass(tout.Cols))
	} else {
		// Sort features by total-set precision.
		sort.Sort(srtAnalResByFeat(tout.Cols))
	}
	// Adjust the feature weights so the most beneficial feature carries the
	// highest weight, decaying by 0.8 per rank.
	if req.AnalAdjFeatWeight == true {
		currWeight := float32(10.0)
		for tndx, acol := range tout.Cols {
			if acol.FeatNdx != -1 {
				feat := fier.ColDef[acol.FeatNdx]
				if feat.Enabled == true {
					tout.Cols[tndx].FeatWeight = currWeight // assign directly: acol is a copy
					feat.FeatWeight = currWeight
					currWeight = currWeight * 0.8
					fmt.Printf("L450: featNdx=%v name=%v MinNumBuck=%v maxNumBuck=%v featWeight=%v\n",
						feat.ColNum, feat.Spec.ColName, feat.MinNumBuck, feat.MaxNumBuck, feat.FeatWeight)
				}
			}
		}
	}
	fmt.Printf("L274: After analyze setPrec all Feat enabled = %v\n", sumRows.Precis)
	return tout
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package apigee
import (
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
)
// DCLOrganizationSchema returns the declarative-resource schema for an
// Apigee Organization: its CRUD paths plus the full property map of the
// "Organization" component. The literal is generated-style data; do not
// hand-edit values without checking against the Apigee API reference.
func DCLOrganizationSchema() *dcl.Schema {
	return &dcl.Schema{
		Info: &dcl.Info{
			Title:       "Apigee/Organization",
			Description: "The Apigee Organization resource",
			StructName:  "Organization",
		},
		// Standard CRUD path descriptions; each takes the full resource.
		Paths: &dcl.Paths{
			Get: &dcl.Path{
				Description: "The function used to get information about a Organization",
				Parameters: []dcl.PathParameters{
					dcl.PathParameters{
						Name:        "organization",
						Required:    true,
						Description: "A full instance of a Organization",
					},
				},
			},
			Apply: &dcl.Path{
				Description: "The function used to apply information about a Organization",
				Parameters: []dcl.PathParameters{
					dcl.PathParameters{
						Name:        "organization",
						Required:    true,
						Description: "A full instance of a Organization",
					},
				},
			},
			Delete: &dcl.Path{
				Description: "The function used to delete a Organization",
				Parameters: []dcl.PathParameters{
					dcl.PathParameters{
						Name:        "organization",
						Required:    true,
						Description: "A full instance of a Organization",
					},
				},
			},
			DeleteAll: &dcl.Path{
				Description: "The function used to delete all Organization",
			},
			List: &dcl.Path{
				Description: "The function used to list information about many Organization",
			},
		},
		Components: &dcl.Components{
			Schemas: map[string]*dcl.Component{
				"Organization": &dcl.Component{
					Title:           "Organization",
					ID:              "organizations/{{name}}",
					ParentContainer: "project",
					HasCreate:       true,
					ApplyTimeout:    4800,
					DeleteTimeout:   4800,
					SchemaProperty: dcl.Property{
						Type: "object",
						Required: []string{
							"analyticsRegion",
							"runtimeType",
						},
						Properties: map[string]*dcl.Property{
							"addonsConfig": &dcl.Property{
								Type:        "object",
								GoName:      "AddonsConfig",
								GoType:      "OrganizationAddonsConfig",
								Description: "Addon configurations of the Apigee organization.",
								Properties: map[string]*dcl.Property{
									"advancedApiOpsConfig": &dcl.Property{
										Type:        "object",
										GoName:      "AdvancedApiOpsConfig",
										GoType:      "OrganizationAddonsConfigAdvancedApiOpsConfig",
										Description: "Configuration for the Advanced API Ops add-on.",
										Properties: map[string]*dcl.Property{
											"enabled": &dcl.Property{
												Type:        "boolean",
												GoName:      "Enabled",
												Description: "Flag that specifies whether the Advanced API Ops add-on is enabled.",
											},
										},
									},
									"monetizationConfig": &dcl.Property{
										Type:        "object",
										GoName:      "MonetizationConfig",
										GoType:      "OrganizationAddonsConfigMonetizationConfig",
										Description: "Configuration for the Monetization add-on.",
										Properties: map[string]*dcl.Property{
											"enabled": &dcl.Property{
												Type:        "boolean",
												GoName:      "Enabled",
												Description: "Flag that specifies whether the Monetization add-on is enabled.",
											},
										},
									},
								},
							},
							"analyticsRegion": &dcl.Property{
								Type:        "string",
								GoName:      "AnalyticsRegion",
								Description: "Required. Primary GCP region for analytics data storage. For valid values, see (https://cloud.google.com/apigee/docs/api-platform/get-started/create-org).",
								Immutable:   true,
							},
							"authorizedNetwork": &dcl.Property{
								Type:        "string",
								GoName:      "AuthorizedNetwork",
								Description: "Compute Engine network used for Service Networking to be peered with Apigee runtime instances. See (https://cloud.google.com/vpc/docs/shared-vpc). To use a shared VPC network, use the following format: `projects/{host-project-id}/{region}/networks/{network-name}`. For example: `projects/my-sharedvpc-host/global/networks/mynetwork` **Note:** Not supported for Apigee hybrid.",
								ResourceReferences: []*dcl.PropertyResourceReference{
									&dcl.PropertyResourceReference{
										Resource: "Compute/Network",
										Field:    "name",
									},
								},
							},
							"billingType": &dcl.Property{
								Type:        "string",
								GoName:      "BillingType",
								GoType:      "OrganizationBillingTypeEnum",
								ReadOnly:    true,
								Description: "Output only. Billing type of the Apigee organization. See (https://cloud.google.com/apigee/pricing). Possible values: BILLING_TYPE_UNSPECIFIED, SUBSCRIPTION, EVALUATION",
								Immutable:   true,
								Enum: []string{
									"BILLING_TYPE_UNSPECIFIED",
									"SUBSCRIPTION",
									"EVALUATION",
								},
							},
							"caCertificate": &dcl.Property{
								Type:        "string",
								GoName:      "CaCertificate",
								ReadOnly:    true,
								Description: "Output only. Base64-encoded public certificate for the root CA of the Apigee organization. Valid only when (#RuntimeType) is `CLOUD`.",
								Immutable:   true,
							},
							"createdAt": &dcl.Property{
								Type:        "integer",
								Format:      "int64",
								GoName:      "CreatedAt",
								ReadOnly:    true,
								Description: "Output only. Time that the Apigee organization was created in milliseconds since epoch.",
								Immutable:   true,
							},
							"description": &dcl.Property{
								Type:        "string",
								GoName:      "Description",
								Description: "Description of the Apigee organization.",
							},
							"displayName": &dcl.Property{
								Type:        "string",
								GoName:      "DisplayName",
								Description: "Display name for the Apigee organization.",
							},
							"environments": &dcl.Property{
								Type:        "array",
								GoName:      "Environments",
								ReadOnly:    true,
								Description: "Output only. List of environments in the Apigee organization.",
								Immutable:   true,
								ListType:    "list",
								Items: &dcl.Property{
									Type:   "string",
									GoType: "string",
								},
							},
							"expiresAt": &dcl.Property{
								Type:        "integer",
								Format:      "int64",
								GoName:      "ExpiresAt",
								ReadOnly:    true,
								Description: "Output only. Time that the Apigee organization is scheduled for deletion.",
								Immutable:   true,
							},
							"lastModifiedAt": &dcl.Property{
								Type:        "integer",
								Format:      "int64",
								GoName:      "LastModifiedAt",
								ReadOnly:    true,
								Description: "Output only. Time that the Apigee organization was last modified in milliseconds since epoch.",
								Immutable:   true,
							},
							"name": &dcl.Property{
								Type:                     "string",
								GoName:                   "Name",
								Description:              "Output only. Name of the Apigee organization.",
								Immutable:                true,
								ServerGeneratedParameter: true,
							},
							"project": &dcl.Property{
								Type:                "string",
								GoName:              "Project",
								Description:         "Required. Name of the GCP project in which to associate the Apigee organization. Pass the information as a query parameter using the following structure in your request: projects/<project> Authorization requires the following IAM permission on the specified resource parent: apigee.organizations.create",
								Immutable:           true,
								ForwardSlashAllowed: true,
								ResourceReferences: []*dcl.PropertyResourceReference{
									&dcl.PropertyResourceReference{
										Resource: "Cloudresourcemanager/Project",
										Field:    "name",
										Parent:   true,
									},
								},
							},
							"projectId": &dcl.Property{
								Type:        "string",
								GoName:      "ProjectId",
								ReadOnly:    true,
								Description: "Output only. Project ID associated with the Apigee organization.",
								Immutable:   true,
							},
							"properties": &dcl.Property{
								Type: "object",
								AdditionalProperties: &dcl.Property{
									Type: "string",
								},
								GoName:      "Properties",
								Description: "Properties defined in the Apigee organization profile.",
							},
							"runtimeDatabaseEncryptionKeyName": &dcl.Property{
								Type:        "string",
								GoName:      "RuntimeDatabaseEncryptionKeyName",
								Description: "Cloud KMS key name used for encrypting the data that is stored and replicated across runtime instances. Update is not allowed after the organization is created. Required when (#RuntimeType) is `TRIAL`, a Google-Managed encryption key will be used. For example: \"projects/foo/locations/us/keyRings/bar/cryptoKeys/baz\". **Note:** Not supported for Apigee hybrid.",
								ResourceReferences: []*dcl.PropertyResourceReference{
									&dcl.PropertyResourceReference{
										Resource: "Cloudkms/CryptoKey",
										Field:    "name",
									},
								},
							},
							"runtimeType": &dcl.Property{
								Type:        "string",
								GoName:      "RuntimeType",
								GoType:      "OrganizationRuntimeTypeEnum",
								Description: "Required. Runtime type of the Apigee organization based on the Apigee subscription purchased. Possible values: RUNTIME_TYPE_UNSPECIFIED, CLOUD, HYBRID",
								Immutable:   true,
								Enum: []string{
									"RUNTIME_TYPE_UNSPECIFIED",
									"CLOUD",
									"HYBRID",
								},
							},
							"state": &dcl.Property{
								Type:        "string",
								GoName:      "State",
								GoType:      "OrganizationStateEnum",
								ReadOnly:    true,
								Description: "Output only. State of the organization. Values other than ACTIVE means the resource is not ready to use. Possible values: SNAPSHOT_STATE_UNSPECIFIED, MISSING, OK_DOCSTORE, OK_SUBMITTED, OK_EXTERNAL, DELETED",
								Immutable:   true,
								Enum: []string{
									"SNAPSHOT_STATE_UNSPECIFIED",
									"MISSING",
									"OK_DOCSTORE",
									"OK_SUBMITTED",
									"OK_EXTERNAL",
									"DELETED",
								},
							},
							"subscriptionType": &dcl.Property{
								Type:        "string",
								GoName:      "SubscriptionType",
								GoType:      "OrganizationSubscriptionTypeEnum",
								ReadOnly:    true,
								Description: "Output only. DEPRECATED: This will eventually be replaced by BillingType. Subscription type of the Apigee organization. Valid values include trial (free, limited, and for evaluation purposes only) or paid (full subscription has been purchased). See (https://cloud.google.com/apigee/pricing/). Possible values: SUBSCRIPTION_TYPE_UNSPECIFIED, PAID, TRIAL",
								Immutable:   true,
								Enum: []string{
									"SUBSCRIPTION_TYPE_UNSPECIFIED",
									"PAID",
									"TRIAL",
								},
							},
						},
					},
				},
			},
		},
	}
}
|
package main
import "math"
import "fmt"
// Shape is implemented by any figure that can report its area.
type Shape interface {
	area() float64
}
// Circle is a circle centered at (x, y) with the given radius.
// (Formatting fixed to gofmt style.)
type Circle struct {
	x, y, radius float64
}
// Rectangle is an axis-aligned rectangle with the given width and height.
// (Formatting fixed to gofmt style.)
type Rectangle struct {
	width, height float64
}
// area returns the circle's area (π·r²). Formatting fixed to gofmt style.
func (circle Circle) area() float64 {
	return math.Pi * circle.radius * circle.radius
}
// area returns the rectangle's area (width·height). Formatting fixed to gofmt style.
func (rect Rectangle) area() float64 {
	return rect.width * rect.height
}
// getArea computes the area of any Shape via interface dispatch.
// Formatting fixed to gofmt style.
func getArea(shape Shape) float64 {
	return shape.area()
}
// main demonstrates polymorphism: both concrete shapes are passed to
// getArea through the Shape interface. Formatting fixed to gofmt style;
// output strings are unchanged.
func main() {
	circle := Circle{x: 0, y: 0, radius: 6}
	rectangle := Rectangle{width: 10, height: 5}
	fmt.Printf("circle area :%f \n", getArea(circle))
	fmt.Printf("Rectangle area:%f\n", getArea(rectangle))
}
|
package main
import (
"flag"
"fmt"
"strings"
"github.com/antchfx/htmlquery"
"golang.org/x/net/html"
"zliu.org/goutil"
)
var (
	// url is the page whose links are extracted and printed.
	url = flag.String("url", "http://www.qq.com/", "url to fetch and parse")
)
// main fetches the page named by -url, extracts every <a href>, resolves each
// link to an absolute URL, and prints the http(s), non-.exe links with an index.
func main() {
	// Bug fix: flag.Parse() was missing, so the -url flag was silently
	// ignored and the default URL was always fetched.
	flag.Parse()
	doc, err := htmlquery.LoadURL(*url)
	if err != nil {
		panic(err)
	}
	var links []string
	htmlquery.FindEach(doc, "//a", func(i int, node *html.Node) {
		link := htmlquery.SelectAttr(node, "href")
		if u, err := goutil.MakeAbsoluteUrl(link, *url); err == nil {
			// Keep only web links and skip direct executable downloads.
			if strings.HasPrefix(u, "http") && !strings.HasSuffix(u, ".exe") {
				links = append(links, u)
			}
		}
	})
	for i, link := range links {
		fmt.Println(i, link)
	}
}
|
package basic
import (
"fmt"
"sync"
)
// producer writes the integers 0..4 into out (send-only channel) and closes
// it afterwards; the close must follow the final send so a range loop on the
// receiving end terminates cleanly.
func producer(out chan<- int) {
	defer close(out)
	i := 0
	for i < 5 {
		fmt.Println("produce: ", i)
		out <- i // blocks until a receiver is ready when the channel is unbuffered
		i++
	}
}
// consumer drains in (receive-only channel), printing each value, and returns
// once the channel is closed and empty.
func consumer(in <-chan int) {
	for {
		num, ok := <-in
		if !ok {
			return // channel closed and drained
		}
		fmt.Println("consume: ", num)
	}
}
// producerConsumer wires an unbuffered channel between producer (run as a
// goroutine) and consumer (run on the calling goroutine), so the function
// returns only after the channel has been closed and fully drained.
func producerConsumer() {
	ch := make(chan int)
	go producer(ch)
	consumer(ch)
	fmt.Println("done")
}
// writeTwoChan deliberately demonstrates losing a goroutine's output because
// the caller does not wait for the goroutine to finish.
func writeTwoChan() {
	ch := make(chan string)
	go func() {
		for m := range ch {
			fmt.Println("processed:", m)
			//global.Logger.Println("processed:", m)
		}
	}()
	/*
		This is bug #2 (intentional, for demonstration).
		Normally the main goroutine does the reading; here both sends are on
		the main goroutine on purpose, and this function is meant to run last.
		Observed output is only
			processed: cmd.1
		"processed: cmd.2" never appears: after the second send is accepted,
		the main goroutine closes the channel, prints its own line and exits
		almost immediately — before the reader goroutine is scheduled to
		consume and print the second message, so that output is lost.
		Two ways to observe the full output:
		  1. sleep after the last send: time.Sleep(2 * time.Second)
		  2. have the goroutine log to a file instead of stdout:
		     global.Logger.Println("processed:", m)
		bug #1 and bug #2 share one root cause: the program exits while the
		goroutine is still running.
	*/
	ch <- "cmd.1"
	ch <- "cmd.2" //won't be processed
	//time.Sleep(2*time.Second)
	close(ch)
	fmt.Println("writeTwoChan end")
}
// closeChan deliberately demonstrates that close() unblocks readers of a
// channel, but the unblocked goroutine may not run before the caller moves on.
func closeChan() {
	done := make(chan struct{})
	go func() {
		/*
			This is bug #1 (intentional, for demonstration).
			"<- done closeChan routine exit" is typically never printed, so
			this goroutine does not finish before the program exits.
			close(done) does release every blocked read of done, but the
			released goroutine still has to be scheduled; by then the main
			goroutine has already finished, so the goroutine's print is lost.
			Lesson: closing a channel unblocks readers, just not necessarily
			before the program ends — so every goroutine should print an exit
			log line (not only in debug builds) so its clean termination can
			be verified from the logs.
		*/
		<-done
		fmt.Println("<- done closeChan routine exit")
	}()
	close(done)
	fmt.Println("closeChan programe exit")
}
// closeChan1 is the for/select variant of closeChan: the goroutine waits on
// the done channel inside a select and returns once close(done) releases the
// receive. As with closeChan, the goroutine's print can be lost because the
// caller does not wait for the goroutine to finish.
func closeChan1() {
	done := make(chan struct{})
	go func() {
		for {
			select {
			case <-done: // a closed channel is always ready to receive
				fmt.Println("<- done closeChan1 routine exit")
				return
			}
		}
	}()
	close(done)
	fmt.Println("closeChan1 programe exit")
}
/*
Template program for channel usage:
  - wq channel: producer/consumer work queue
  - done channel: tells every goroutine to start shutting down
  - wg: waits until every goroutine has finished shutting down
*/
func chanStddPrograme() {
	/*
		Naming conventions:
		  done — pure signalling channel, so it carries empty structs
		  wq   — "work queue" for producer/consumer data; interface{} because
		         the payload type is open-ended
		  wg   — WaitGroup used to wait for every goroutine to finish
	*/
	done := make(chan struct{})
	wq := make(chan interface{})
	var wg sync.WaitGroup
	workerCount := 2
	for i := 0; i < workerCount; i++ {
		wg.Add(1)
		go consumer2(i, wq, done, &wg)
	}
	/*
		Channels play two roles here:
		  1. producer/consumer — recognizable by repeated sends; that is wq
		     (two sends below), short for "work queue"
		  2. signalling — at most one send, or only a close; that is done
	*/
	for i := 0; i < workerCount; i++ {
		wq <- i
	}
	/*
		close(done) releases every read on done, however many there are: two
		consumer goroutines block on it here, and one close unblocks both.
	*/
	close(done)
	/*
		Why wait on wg afterwards:
		close(done) tells each goroutine to shut down, but the main goroutine
		would otherwise race ahead and exit first, taking the console/log file
		with it while the goroutines are still finishing up, so their output
		would be lost. Hence the standard ending pattern:
			close(done)
			wg.Wait()
		close starts every goroutine's shutdown; wg.Wait() lets the shutdowns
		complete before the caller proceeds.
	*/
	wg.Wait()
	fmt.Println("all done!")
}
// consumer2 is a worker goroutine: it consumes items from wq until the done
// channel is closed, then returns, reporting completion through wg.
func consumer2(routineId int, wq <-chan interface{}, done <-chan struct{}, wg *sync.WaitGroup) {
	fmt.Printf("routine[%v] is running\n", routineId)
	Logger.Printf("[%v] is running\n", routineId)
	/*
		defer runs as the very last step before this function returns — useful
		because it is often not obvious where the function will exit.
	*/
	defer wg.Done()
	/*
		Standard receive pattern:
		  start the worker with `go`, and inside the goroutine use
		  for {
		      select {
		  — an endless for loop because usually more than one message is
		  expected, with two cases:
		    one case consumes work (reads wq)
		    one case receives the shutdown signal (reads done) and ends the
		    whole goroutine with return
	*/
	for {
		select {
		/*
			Reading from wq is what makes this goroutine a consumer.
		*/
		case product := <-wq:
			fmt.Printf("routine[%v] product[%v] is consumed \n", routineId, product)
			Logger.Printf("routine[%v] product[%v] is consumed \n", routineId, product)
		case <-done:
			fmt.Printf("[%v] is done\n", routineId)
			Logger.Printf("[%v] is done\n", routineId)
			return
		}
	}
}
func Chan() {
fmt.Println("<------------------------- Chan begin -------------------->")
producerConsumer()
closeChan1()
chanStddPrograme()
closeChan()
writeTwoChan()
fmt.Println("<------------------------- Chan end -------------------->")
} |
package main
import (
"context"
"fmt"
"github.com/mailgun/mailgun-go/v3"
)
// VerifyEmail sends a signup-confirmation mail via Mailgun containing a link
// built from veriToken. It returns VerifyEmailError when sending fails.
func (e *SendEmailInfo) VerifyEmail(email string, veriToken string) error {
	link := fmt.Sprintf("%s://%s/api/confirm-email/%s", e.Scheme, e.ServerDomain, veriToken)
	sender := fmt.Sprintf("Crypto Tracker<mailgun@%s>", e.EmailDomain)
	body := "You're one click away from getting latest information on cryptocurrencies! \n\nPlease click on the link below to verify your account: \n" + link
	mg := mailgun.NewMailgun(e.EmailDomain, e.EmailAPIKey)
	msg := mg.NewMessage(sender, "Confirm Your Registration", body, email)
	if _, _, err := mg.Send(context.Background(), msg); err != nil {
		fmt.Println("signup error: ", err)
		return VerifyEmailError
	}
	return nil
}
// ResetPassword sends a "forgot password" mail via Mailgun containing a reset
// link built from resetPassToken. It returns ResetPasswordError when sending
// fails.
func (e *SendEmailInfo) ResetPassword(email string, resetPassToken string) error {
	link := fmt.Sprintf("%s://%s/api/reset-password/%s", e.Scheme, e.ServerDomain, resetPassToken)
	sender := fmt.Sprintf("Crypto Tracker<mailgun@%s>", e.EmailDomain)
	body := "You've requested a password reset.\n To change your password, click on the link below: \n" + link
	mg := mailgun.NewMailgun(e.EmailDomain, e.EmailAPIKey)
	msg := mg.NewMessage(sender, "Reset Your Password", body, email)
	if _, _, err := mg.Send(context.Background(), msg); err != nil {
		return ResetPasswordError
	}
	return nil
}
|
package main
import "fmt"
// double multiplies its argument by two, but Go passes ints by value, so only
// the local copy changes and the result is discarded.
// NOTE(review): as written this function has no observable effect; callers
// wanting the doubled value need a pointer parameter or a return value.
func double(number int) {
	number = number * 2
}
// main calls double on a local variable and prints it; because double works
// on a copy, the printed value is the original 6.
func main() {
	value := 6
	double(value)
	fmt.Print(value)
}
|
//Description - This program is used to add the stock details to the list
//dynamically or to delete the stock dynamiclly.
package main
import "fmt"
//stock - Declaring a structure of stock datatype.
// A stock doubles as a singly-linked-list node via the unexported next pointer.
type stock struct {
	Name string `json:"Name"`
	Number_of_shares int `json:"Number_of_shares"`
	Share_price int `json:"Share_price"`
	next *stock
}
//LinkedList - A structure.
// head points at the first stock node; count tracks the number of nodes.
type LinkedList struct {
	head *stock
	count int
}
//DeleteStock - This function is used to Delete the particular node(In this program stock).
// Fixes over the previous version:
//   - a single-element list was emptied even when the name did not match;
//   - a name absent from the list caused a nil-pointer dereference while
//     walking past the tail.
func (l *LinkedList) DeleteStock(name string) {
	if l.count == 0 {
		fmt.Println("Delete not possible, add stock details") //if there are no elements in List
		return
	}
	// Head match also covers the single-element list.
	if l.head.Name == name {
		l.head = l.head.next
		l.count--
		fmt.Println("Delete successful..")
		return
	}
	// Walk the remainder of the list, stopping safely at the tail.
	prev := l.head
	for current := l.head.next; current != nil; current = current.next {
		if current.Name == name {
			prev.next = current.next
			l.count--
			fmt.Println("Delete successful..")
			return
		}
		prev = current
	}
	fmt.Println("Stock not found:", name)
}
//AddStock - Add new element to the list.
// The node is appended at the tail so display order matches insertion order.
func (l *LinkedList) AddStock(s1 *stock) {
	if l.count == 0 {
		// Empty list: the new node becomes the head.
		l.head = s1
	} else {
		tail := l.head
		for tail.next != nil {
			tail = tail.next
		}
		tail.next = s1
	}
	l.count++
	fmt.Println("Added successfully")
}
//Display-Display the list Items
// Walks the list from head to tail, printing one formatted record per stock.
func (l *LinkedList) Display() {
	if l.count == 0 {
		fmt.Println("No items in the stock")
		return
	}
	fmt.Println("..........The stock report is......")
	for node := l.head; node != nil; node = node.next {
		fmt.Println("\n-----------------------------------------\n")
		fmt.Printf("\nName : %s", node.Name)
		fmt.Printf("\nShare_Price : %d", node.Share_price)
		fmt.Printf("\nShares : %d", node.Number_of_shares)
		fmt.Println("\n----------------------------------------\n")
	}
}
//main - In this user will have the options to add,delete or view the stock details.
// Menu loop: repeats until the user picks 4 (Exit), then prints the head node.
func main() {
	list := &LinkedList{}
	var choice int
	for {
		fmt.Println("..............................")
		fmt.Println("1.Add stocks to the list")
		fmt.Println("2.Delete stocks from the list")
		fmt.Println("3.View the stocks in the list")
		fmt.Println("4.Exit")
		fmt.Print("Enter your choice :")
		fmt.Scanf("%d", &choice)
		if choice == 4 {
			break //exit
		}
		switch choice {
		case 1:
			// Read one stock record from the user and append it to the list.
			var shares, price int
			var name string
			fmt.Print("Enter the name : ")
			fmt.Scanf("%s", &name)
			fmt.Print("Enter the Number_of_shares : ")
			fmt.Scanf("%d", &shares)
			fmt.Print("Enter the Share_price : ")
			fmt.Scanf("%d", &price)
			list.AddStock(&stock{
				Name:             name,
				Number_of_shares: shares,
				Share_price:      price,
			})
		case 2:
			// Read the stock name and remove the matching node.
			var name string
			fmt.Print("Enter the name of the stock you want to delete : ")
			fmt.Scanf("%s", &name)
			list.DeleteStock(name)
		case 3:
			list.Display() //Display the list items
		}
	}
	fmt.Printf("First: %v\n", list.head)
}
|
// Package core contains the core Bazelisk logic, as well as abstractions for Bazel repositories.
package core
// TODO: split this file into multiple smaller ones in dedicated packages (e.g. execution, incompatible, ...).
import (
	"bufio"
	"crypto/rand"
	"crypto/sha256"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"os/exec"
	"os/signal"
	"path/filepath"
	"regexp"
	"runtime"
	"sort"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/bazelbuild/bazelisk/config"
	"github.com/bazelbuild/bazelisk/httputil"
	"github.com/bazelbuild/bazelisk/platforms"
	"github.com/bazelbuild/bazelisk/versions"
	"github.com/bazelbuild/bazelisk/ws"
	"github.com/mitchellh/go-homedir"
)
const (
	// bazelReal is the env var through which a delegated wrapper script can
	// find the real Bazel binary Bazelisk resolved.
	bazelReal = "BAZEL_REAL"
	// skipWrapperEnv, when non-empty, stops Bazelisk from delegating to the
	// workspace's tools/bazel wrapper; it is also set on child processes to
	// prevent recursion.
	skipWrapperEnv = "BAZELISK_SKIP_WRAPPER"
	// wrapperPath is the workspace-relative location of the optional wrapper.
	wrapperPath = "./tools/bazel"
	// maxDirLength bounds generated directory names — presumably to respect
	// common filesystem name limits; its usage is outside this excerpt.
	maxDirLength = 255
)
var (
	// BazeliskVersion is filled in via x_defs when building a release.
	BazeliskVersion = "development"
	// fileConfig and fileConfigOnce look like a lazy cache for rc-file
	// configuration. NOTE(review): neither is referenced in this excerpt —
	// confirm they are still used before relying on them.
	fileConfig map[string]string
	fileConfigOnce sync.Once
)
// ArgsFunc is a function that receives a resolved Bazel version and returns the arguments to invoke
// Bazel with.
type ArgsFunc func(resolvedBazelVersion string) []string
// MakeDefaultConfig layers the environment configuration with the optional
// workspace and user .bazeliskrc files, in that precedence order. A file that
// cannot be located is skipped; a file that exists but fails to parse aborts
// the process.
func MakeDefaultConfig() config.Config {
	configs := []config.Config{config.FromEnv()}
	appendFile := func(path string, locateErr error) {
		if locateErr != nil {
			return // no such config file — nothing to layer
		}
		c, err := config.FromFile(path)
		if err != nil {
			log.Fatal(err)
		}
		configs = append(configs, c)
	}
	appendFile(config.LocateWorkspaceConfigFile())
	appendFile(config.LocateUserConfigFile())
	return config.Layered(configs...)
}
// RunBazelisk runs the main Bazelisk logic for the given arguments and Bazel repositories.
// It returns Bazel's exit code (-1 on setup failure) together with any error.
func RunBazelisk(args []string, repos *Repositories) (int, error) {
	return RunBazeliskWithArgsFunc(func(_ string) []string { return args }, repos)
}
// RunBazeliskWithArgsFunc runs the main Bazelisk logic for the given ArgsFunc and Bazel
// repositories.
// The default layered configuration (environment + workspace + user rc files) is used.
func RunBazeliskWithArgsFunc(argsFunc ArgsFunc, repos *Repositories) (int, error) {
	return RunBazeliskWithArgsFuncAndConfig(argsFunc, repos, MakeDefaultConfig())
}
// RunBazeliskWithArgsFuncAndConfig runs the main Bazelisk logic for the given ArgsFunc and Bazel
// repositories and config.
//
// Flow: resolve BAZELISK_HOME → resolve the requested Bazel version →
// download it (or link a local binary) → handle the Bazelisk-specific leading
// arguments (--print_env, --strict, --migrate, --bisect, version) → run Bazel
// and return its exit code.
func RunBazeliskWithArgsFuncAndConfig(argsFunc ArgsFunc, repos *Repositories, config config.Config) (int, error) {
	httputil.UserAgent = getUserAgent(config)
	// BAZELISK_HOME overrides the cache root; default is <user cache>/bazelisk.
	bazeliskHome := config.Get("BAZELISK_HOME")
	if len(bazeliskHome) == 0 {
		userCacheDir, err := os.UserCacheDir()
		if err != nil {
			return -1, fmt.Errorf("could not get the user's cache directory: %v", err)
		}
		bazeliskHome = filepath.Join(userCacheDir, "bazelisk")
	}
	err := os.MkdirAll(bazeliskHome, 0755)
	if err != nil {
		return -1, fmt.Errorf("could not create directory %s: %v", bazeliskHome, err)
	}
	bazelVersionString, err := getBazelVersion(config)
	if err != nil {
		return -1, fmt.Errorf("could not get Bazel version: %v", err)
	}
	// The version string may instead be a filesystem path (possibly ~-prefixed).
	bazelPath, err := homedir.Expand(bazelVersionString)
	if err != nil {
		return -1, fmt.Errorf("could not expand home directory in path: %v", err)
	}
	// If the Bazel version is an absolute path to a Bazel binary in the filesystem, we can
	// use it directly. In that case, we don't know which exact version it is, though.
	// NOTE(review): resolvedBazelVersion stays "unknown" on the download path
	// too — downloadBazel does not report the resolved version back here, so
	// argsFunc always receives "unknown". Confirm whether that is intended.
	resolvedBazelVersion := "unknown"
	// If we aren't using a local Bazel binary, we'll have to parse the version string and
	// download the version that the user wants.
	if !filepath.IsAbs(bazelPath) {
		bazelPath, err = downloadBazel(bazelVersionString, bazeliskHome, repos, config)
		if err != nil {
			return -1, fmt.Errorf("could not download Bazel: %v", err)
		}
	} else {
		baseDirectory := filepath.Join(bazeliskHome, "local")
		bazelPath, err = linkLocalBazel(baseDirectory, bazelPath)
		if err != nil {
			return -1, fmt.Errorf("could not link local Bazel: %v", err)
		}
	}
	args := argsFunc(resolvedBazelVersion)
	// --print_env must be the first argument.
	if len(args) > 0 && args[0] == "--print_env" {
		// print environment variables for sub-processes
		cmd := makeBazelCmd(bazelPath, args, nil, config)
		for _, val := range cmd.Env {
			fmt.Println(val)
		}
		return 0, nil
	}
	// --strict and --migrate and --bisect must be the first argument.
	if len(args) > 0 && (args[0] == "--strict" || args[0] == "--migrate") {
		cmd, err := getBazelCommand(args)
		if err != nil {
			return -1, err
		}
		newFlags, err := getIncompatibleFlags(bazelPath, cmd, config)
		if err != nil {
			return -1, fmt.Errorf("could not get the list of incompatible flags: %v", err)
		}
		if args[0] == "--migrate" {
			migrate(bazelPath, args[1:], newFlags, config)
		} else {
			// When --strict is present, it expands to the list of --incompatible_ flags
			// that should be enabled for the given Bazel version.
			args = insertArgs(args[1:], newFlags)
		}
	} else if len(args) > 0 && strings.HasPrefix(args[0], "--bisect") {
		// When --bisect is present, we run the bisect logic.
		if !strings.HasPrefix(args[0], "--bisect=") {
			return -1, fmt.Errorf("Error: --bisect must have a value. Expected format: '--bisect=<good bazel commit>..<bad bazel commit>'")
		}
		value := args[0][len("--bisect="):]
		commits := strings.Split(value, "..")
		if len(commits) == 2 {
			bisect(commits[0], commits[1], args[1:], bazeliskHome, repos, config)
		} else {
			return -1, fmt.Errorf("Error: Invalid format for --bisect. Expected format: '--bisect=<good bazel commit>..<bad bazel commit>'")
		}
	}
	// print bazelisk version information if "version" is the first argument
	// bazel version is executed after this command
	if len(args) > 0 && args[0] == "version" {
		// Check if the --gnu_format flag is set, if that is the case,
		// the version is printed differently
		var gnuFormat bool
		for _, arg := range args {
			if arg == "--gnu_format" {
				gnuFormat = true
				break
			}
		}
		if gnuFormat {
			fmt.Printf("Bazelisk %s\n", BazeliskVersion)
		} else {
			fmt.Printf("Bazelisk version: %s\n", BazeliskVersion)
		}
	}
	exitCode, err := runBazel(bazelPath, args, nil, config)
	if err != nil {
		return -1, fmt.Errorf("could not run Bazel: %v", err)
	}
	return exitCode, nil
}
// getBazelCommand returns the first non-flag argument, which is taken to be
// the Bazel command (e.g. "build"); it fails when args contains only flags.
func getBazelCommand(args []string) (string, error) {
	for _, arg := range args {
		if strings.HasPrefix(arg, "-") {
			continue
		}
		return arg, nil
	}
	return "", fmt.Errorf("could not find a valid Bazel command in %q. Please run `bazel help` if you need help on how to use Bazel", strings.Join(args, " "))
}
// getUserAgent returns the HTTP User-Agent: the BAZELISK_USER_AGENT override
// when configured, otherwise "Bazelisk/<version>".
func getUserAgent(config config.Config) string {
	if agent := config.Get("BAZELISK_USER_AGENT"); len(agent) > 0 {
		return agent
	}
	return fmt.Sprintf("Bazelisk/%s", BazeliskVersion)
}
// TODO(go 1.18): remove backport of strings.Cut
// cutString splits s around the first occurrence of sep, reporting whether
// sep was present (mirrors strings.Cut).
func cutString(s, sep string) (before, after string, found bool) {
	i := strings.Index(s, sep)
	if i < 0 {
		return s, "", false
	}
	return s[:i], s[i+len(sep):], true
}
// getBazelVersion determines which Bazel version (or local binary path) to
// use, consulting USE_BAZEL_VERSION, then the workspace's .bazelversion file,
// and finally USE_BAZEL_FALLBACK_VERSION (a "mode:version" string whose mode
// is silent, warn, or error).
func getBazelVersion(config config.Config) (string, error) {
	// Check in this order:
	// - env var "USE_BAZEL_VERSION" is set to a specific version.
	// - workspace_root/.bazeliskrc exists -> read contents, in contents:
	//   var "USE_BAZEL_VERSION" is set to a specific version.
	// - env var "USE_NIGHTLY_BAZEL" or "USE_BAZEL_NIGHTLY" is set -> latest
	//   nightly. (TODO)
	// - env var "USE_CANARY_BAZEL" or "USE_BAZEL_CANARY" is set -> latest
	//   rc. (TODO)
	// - the file workspace_root/tools/bazel exists -> that version. (TODO)
	// - workspace_root/.bazelversion exists -> read contents, that version.
	// - workspace_root/WORKSPACE contains a version -> that version. (TODO)
	// - env var "USE_BAZEL_FALLBACK_VERSION" is set to a fallback version format.
	// - workspace_root/.bazeliskrc exists -> read contents, in contents:
	//   var "USE_BAZEL_FALLBACK_VERSION" is set to a fallback version format.
	// - fallback version format "silent:latest"
	bazelVersion := config.Get("USE_BAZEL_VERSION")
	if len(bazelVersion) != 0 {
		return bazelVersion, nil
	}
	workingDirectory, err := os.Getwd()
	if err != nil {
		return "", fmt.Errorf("could not get working directory: %v", err)
	}
	workspaceRoot := ws.FindWorkspaceRoot(workingDirectory)
	if len(workspaceRoot) != 0 {
		bazelVersionPath := filepath.Join(workspaceRoot, ".bazelversion")
		if _, err := os.Stat(bazelVersionPath); err == nil {
			f, err := os.Open(bazelVersionPath)
			if err != nil {
				return "", fmt.Errorf("could not read %s: %v", bazelVersionPath, err)
			}
			defer f.Close()
			// Only the first line of .bazelversion is significant.
			scanner := bufio.NewScanner(f)
			scanner.Scan()
			bazelVersion := scanner.Text()
			if err := scanner.Err(); err != nil {
				// Bug fix: the message previously interpolated the (possibly
				// empty) version contents instead of the file path.
				return "", fmt.Errorf("could not read version from file %s: %v", bazelVersionPath, err)
			}
			if len(bazelVersion) != 0 {
				return bazelVersion, nil
			}
		}
	}
	// Fallback format is "mode:version"; a bare value means "silent:<value>",
	// and an empty version part defaults to "latest".
	fallbackVersionFormat := config.Get("USE_BAZEL_FALLBACK_VERSION")
	fallbackVersionMode, fallbackVersion, hasFallbackVersionMode := cutString(fallbackVersionFormat, ":")
	if !hasFallbackVersionMode {
		fallbackVersionMode, fallbackVersion, hasFallbackVersionMode = "silent", fallbackVersionMode, true
	}
	if len(fallbackVersion) == 0 {
		fallbackVersion = "latest"
	}
	if fallbackVersionMode == "error" {
		return "", fmt.Errorf("not allowed to use fallback version %q", fallbackVersion)
	}
	if fallbackVersionMode == "warn" {
		log.Printf("Warning: used fallback version %q\n", fallbackVersion)
		return fallbackVersion, nil
	}
	if fallbackVersionMode == "silent" {
		return fallbackVersion, nil
	}
	return "", fmt.Errorf("invalid fallback version format %q (effectively %q)", fallbackVersionFormat, fmt.Sprintf("%s:%s", fallbackVersionMode, fallbackVersion))
}
// parseBazelForkAndVersion splits "fork/version" into its two parts; a bare
// "version" maps to the upstream Bazel fork. More than one slash is an error.
func parseBazelForkAndVersion(bazelForkAndVersion string) (string, string, error) {
	parts := strings.Split(bazelForkAndVersion, "/")
	switch len(parts) {
	case 1:
		return versions.BazelUpstream, parts[0], nil
	case 2:
		return parts[0], parts[1], nil
	default:
		return "", "", fmt.Errorf("invalid version \"%s\", could not parse version with more than one slash", bazelForkAndVersion)
	}
}
// downloadBazel resolves bazelVersionString (optionally "fork/version") to a
// concrete release and downloads it under bazeliskHome, returning the path of
// the runnable binary.
func downloadBazel(bazelVersionString string, bazeliskHome string, repos *Repositories, config config.Config) (string, error) {
	fork, version, err := parseBazelForkAndVersion(bazelVersionString)
	if err != nil {
		return "", fmt.Errorf("could not parse Bazel fork and version: %v", err)
	}
	resolved, downloader, err := repos.ResolveVersion(bazeliskHome, fork, version)
	if err != nil {
		return "", fmt.Errorf("could not resolve the version '%s' to an actual version number: %v", version, err)
	}
	// Downloads from a custom base URL are cached under a directory derived
	// from that URL; otherwise the fork name is used.
	dirName := dirForURL(config.Get(BaseURLEnv))
	if len(dirName) == 0 {
		dirName = fork
	}
	return downloadBazelIfNecessary(resolved, bazeliskHome, dirName, repos, config, downloader)
}
// downloadBazelIfNecessary returns a path to a bazel which can be run, which may have been cached.
// The directory it returns may depend on version and bazeliskHome, but does not depend on bazelForkOrURLDirName.
// This is important, as the directory may be added to $PATH, and varying the path for equivalent files may cause unnecessary repository rule cache invalidations.
// Where a file was downloaded from shouldn't affect cache behaviour of Bazel invocations.
//
// The structure of the downloads directory is as follows ([]s indicate variables):
//
// downloads/metadata/[fork-or-url]/bazel-[version-os-etc] is a text file containing a hex sha256 of the contents of the downloaded bazel file.
// downloads/sha256/[sha256]/bin/bazel[extension] contains the bazel with a particular sha256.
func downloadBazelIfNecessary(version string, bazeliskHome string, bazelForkOrURLDirName string, repos *Repositories, config config.Config, downloader DownloadFunc) (string, error) {
	pathSegment, err := platforms.DetermineBazelFilename(version, false)
	if err != nil {
		return "", fmt.Errorf("could not determine path segment to use for Bazel binary: %v", err)
	}
	destFile := "bazel" + platforms.DetermineExecutableFilenameSuffix()
	// Fast path: the metadata file maps (fork-or-url, version, platform) to a
	// digest; if the corresponding CAS entry exists, reuse it.
	mappingPath := filepath.Join(bazeliskHome, "downloads", "metadata", bazelForkOrURLDirName, pathSegment)
	digestFromMappingFile, err := os.ReadFile(mappingPath)
	if err == nil {
		pathToBazelInCAS := filepath.Join(bazeliskHome, "downloads", "sha256", string(digestFromMappingFile), "bin", destFile)
		if _, err := os.Stat(pathToBazelInCAS); err == nil {
			return pathToBazelInCAS, nil
		}
	}
	// Slow path: download into the CAS, check an optionally pinned digest,
	// then record the mapping for the next invocation.
	pathToBazelInCAS, downloadedDigest, err := downloadBazelToCAS(version, bazeliskHome, repos, config, downloader)
	if err != nil {
		return "", fmt.Errorf("failed to download bazel: %w", err)
	}
	// BAZELISK_VERIFY_SHA256 pins the expected binary digest (hex, case-insensitive).
	expectedSha256 := strings.ToLower(config.Get("BAZELISK_VERIFY_SHA256"))
	if len(expectedSha256) > 0 {
		if expectedSha256 != downloadedDigest {
			return "", fmt.Errorf("%s has sha256=%s but need sha256=%s", pathToBazelInCAS, downloadedDigest, expectedSha256)
		}
	}
	// Write the metadata mapping last so it never references a missing CAS entry.
	if err := atomicWriteFile(mappingPath, []byte(downloadedDigest), 0644); err != nil {
		return "", fmt.Errorf("failed to write mapping file after downloading bazel: %w", err)
	}
	return pathToBazelInCAS, nil
}
func atomicWriteFile(path string, contents []byte, perm os.FileMode) error {
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return fmt.Errorf("failed to MkdirAll parent of %s: %w", path, err)
}
tmpPath := path + ".tmp"
if err := os.WriteFile(tmpPath, contents, perm); err != nil {
return fmt.Errorf("failed to write file %s: %w", tmpPath, err)
}
if err := os.Rename(tmpPath, path); err != nil {
return fmt.Errorf("failed to rename %s to %s: %w", tmpPath, path, err)
}
return nil
}
// downloadBazelToCAS downloads the requested Bazel into the content-addressed
// store at <bazeliskHome>/downloads/sha256/<digest>/bin/ and returns the final
// path together with the lowercase hex sha256 of the downloaded binary.
func downloadBazelToCAS(version string, bazeliskHome string, repos *Repositories, config config.Config, downloader DownloadFunc) (string, string, error) {
	downloadsDir := filepath.Join(bazeliskHome, "downloads")
	temporaryDownloadDir := filepath.Join(downloadsDir, "_tmp")
	casDir := filepath.Join(bazeliskHome, "downloads", "sha256")
	// Download under a random temporary name so concurrent bazelisk processes
	// cannot collide on the same file.
	tmpDestFileBytes := make([]byte, 32)
	if _, err := rand.Read(tmpDestFileBytes); err != nil {
		return "", "", fmt.Errorf("failed to generate temporary file name: %w", err)
	}
	tmpDestFile := fmt.Sprintf("%x", tmpDestFileBytes)
	var tmpDestPath string
	var err error
	// The download source is selected by configuration: a format URL, a base
	// URL, or the repository's own downloader. The two URL options are
	// mutually exclusive.
	baseURL := config.Get(BaseURLEnv)
	formatURL := config.Get(FormatURLEnv)
	if baseURL != "" && formatURL != "" {
		return "", "", fmt.Errorf("cannot set %s and %s at once", BaseURLEnv, FormatURLEnv)
	} else if formatURL != "" {
		tmpDestPath, err = repos.DownloadFromFormatURL(config, formatURL, version, temporaryDownloadDir, tmpDestFile)
	} else if baseURL != "" {
		tmpDestPath, err = repos.DownloadFromBaseURL(baseURL, version, temporaryDownloadDir, tmpDestFile)
	} else {
		tmpDestPath, err = downloader(temporaryDownloadDir, tmpDestFile)
	}
	if err != nil {
		return "", "", fmt.Errorf("failed to download bazel: %w", err)
	}
	// Hash the downloaded file to determine its location in the CAS.
	f, err := os.Open(tmpDestPath)
	if err != nil {
		return "", "", fmt.Errorf("failed to open downloaded bazel to digest it: %w", err)
	}
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		f.Close()
		return "", "", fmt.Errorf("cannot compute sha256 of %s after download: %v", tmpDestPath, err)
	}
	f.Close()
	actualSha256 := strings.ToLower(fmt.Sprintf("%x", h.Sum(nil)))
	pathToBazelInCAS := filepath.Join(casDir, actualSha256, "bin", "bazel"+platforms.DetermineExecutableFilenameSuffix())
	if err := os.MkdirAll(filepath.Dir(pathToBazelInCAS), 0755); err != nil {
		return "", "", fmt.Errorf("failed to MkdirAll parent of %s: %w", pathToBazelInCAS, err)
	}
	// Two-step move: first place the file next to its final name, then rename
	// onto the final name so the last step swaps it into place in one step.
	tmpPathInCorrectDirectory := pathToBazelInCAS + ".tmp"
	if err := os.Rename(tmpDestPath, tmpPathInCorrectDirectory); err != nil {
		return "", "", fmt.Errorf("failed to move %s to %s: %w", tmpDestPath, tmpPathInCorrectDirectory, err)
	}
	if err := os.Rename(tmpPathInCorrectDirectory, pathToBazelInCAS); err != nil {
		return "", "", fmt.Errorf("failed to move %s to %s: %w", tmpPathInCorrectDirectory, pathToBazelInCAS, err)
	}
	return pathToBazelInCAS, actualSha256, nil
}
func copyFile(src, dst string, perm os.FileMode) error {
srcFile, err := os.Open(src)
if err != nil {
return err
}
defer srcFile.Close()
dstFile, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE, perm)
if err != nil {
return err
}
defer dstFile.Close()
_, err = io.Copy(dstFile, srcFile)
return err
}
// linkLocalBazel exposes a local Bazel binary under baseDirectory at a stable
// ".../bin/bazel" location, symlinking when possible and falling back to a
// copy. An already-present destination is reused as-is.
func linkLocalBazel(baseDirectory string, bazelPath string) (string, error) {
	binDir := filepath.Join(baseDirectory, dirForURL(bazelPath), "bin")
	if err := os.MkdirAll(binDir, 0755); err != nil {
		return "", fmt.Errorf("could not create directory %s: %v", binDir, err)
	}
	dest := filepath.Join(binDir, "bazel"+platforms.DetermineExecutableFilenameSuffix())
	if _, err := os.Stat(dest); err == nil {
		return dest, nil // already linked or copied
	}
	if err := os.Symlink(bazelPath, dest); err != nil {
		// Symlinks may be unavailable (e.g. unprivileged Windows); copy instead.
		if err := copyFile(bazelPath, dest, 0755); err != nil {
			return "", fmt.Errorf("could not copy file from %s to %s: %v", bazelPath, dest, err)
		}
	}
	return dest, nil
}
// maybeDelegateToWrapperFromDir returns the workspace wrapper script to run
// instead of bazel, if one exists under wd's workspace root and delegation is
// not disabled via BAZELISK_SKIP_WRAPPER; otherwise it returns bazel.
func maybeDelegateToWrapperFromDir(bazel string, wd string, config config.Config) string {
	if config.Get(skipWrapperEnv) != "" {
		return bazel
	}
	root := ws.FindWorkspaceRoot(wd)
	// A POSIX wrapper must be a regular file with an execute bit set.
	candidate := filepath.Join(root, wrapperPath)
	if info, err := os.Stat(candidate); err == nil && !info.Mode().IsDir() && info.Mode().Perm()&0111 != 0 {
		return candidate
	}
	if runtime.GOOS == "windows" {
		// Execute bits are meaningless on Windows: accept any regular
		// PowerShell or batch wrapper, preferring .ps1.
		for _, ext := range []string{".ps1", ".bat"} {
			scripted := filepath.Join(root, wrapperPath+ext)
			if info, err := os.Stat(scripted); err == nil && !info.Mode().IsDir() {
				return scripted
			}
		}
	}
	return bazel
}
// maybeDelegateToWrapper is maybeDelegateToWrapperFromDir anchored at the
// current working directory; any Getwd failure returns bazel unchanged.
func maybeDelegateToWrapper(bazel string, config config.Config) string {
	cwd, err := os.Getwd()
	if err != nil {
		return bazel
	}
	return maybeDelegateToWrapperFromDir(bazel, cwd, config)
}
func prependDirToPathList(cmd *exec.Cmd, dir string) {
found := false
for idx, val := range cmd.Env {
splits := strings.Split(val, "=")
if len(splits) != 2 {
continue
}
if strings.EqualFold(splits[0], "PATH") {
found = true
cmd.Env[idx] = fmt.Sprintf("PATH=%s%s%s", dir, string(os.PathListSeparator), splits[1])
break
}
}
if !found {
cmd.Env = append(cmd.Env, fmt.Sprintf("PATH=%s", dir))
}
}
// makeBazelCmd builds the exec.Cmd for the resolved Bazel binary, possibly
// delegating to a workspace wrapper script. The child is marked with
// BAZELISK_SKIP_WRAPPER=true (so a wrapper re-invoking bazelisk does not
// recurse), the real binary is exposed via BAZEL_REAL when delegating, and
// the executable's directory is put at the front of PATH. Stdout goes to out
// when non-nil, otherwise to the parent's stdout.
func makeBazelCmd(bazel string, args []string, out io.Writer, config config.Config) *exec.Cmd {
	execPath := maybeDelegateToWrapper(bazel, config)
	cmd := exec.Command(execPath, args...)
	cmd.Env = append(os.Environ(), skipWrapperEnv+"=true")
	if execPath != bazel {
		// Running a wrapper: tell it where the real Bazel lives.
		cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", bazelReal, bazel))
	}
	prependDirToPathList(cmd, filepath.Dir(execPath))
	cmd.Stdin = os.Stdin
	if out == nil {
		cmd.Stdout = os.Stdout
	} else {
		cmd.Stdout = out
	}
	cmd.Stderr = os.Stderr
	return cmd
}
// runBazel starts the resolved Bazel binary (or wrapper) with args, wiring
// stdio (stdout optionally redirected to out), and returns Bazel's exit code.
// SIGINT is intentionally not forwarded — only SIGTERM triggers a Kill of the
// child process.
func runBazel(bazel string, args []string, out io.Writer, config config.Config) (int, error) {
	cmd := makeBazelCmd(bazel, args, out, config)
	err := cmd.Start()
	if err != nil {
		return 1, fmt.Errorf("could not start Bazel: %v", err)
	}
	// Bug fix: signal.Notify performs non-blocking sends, so the previously
	// unbuffered channel could drop a signal arriving before the goroutine
	// below was receiving (this is also flagged by `go vet`). Buffer size 1
	// guarantees delivery; signal.Stop deregisters the channel on return so
	// repeated runBazel calls don't accumulate stale handlers.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt, syscall.SIGTERM)
	defer signal.Stop(c)
	go func() {
		s := <-c
		// Only forward SIGTERM to our child process.
		if s != os.Interrupt {
			cmd.Process.Kill()
		}
	}()
	err = cmd.Wait()
	if err != nil {
		if exitError, ok := err.(*exec.ExitError); ok {
			waitStatus := exitError.Sys().(syscall.WaitStatus)
			return waitStatus.ExitStatus(), nil
		}
		return 1, fmt.Errorf("could not launch Bazel: %v", err)
	}
	return 0, nil
}
// getIncompatibleFlags returns all incompatible flags for the current Bazel command in alphabetical order.
// The list comes from BAZELISK_INCOMPATIBLE_FLAGS (comma-separated) when set,
// otherwise from scraping the output of `bazel help <cmd> --short`.
func getIncompatibleFlags(bazelPath, cmd string, config config.Config) ([]string, error) {
	if override := config.Get("BAZELISK_INCOMPATIBLE_FLAGS"); len(override) > 0 {
		return strings.Split(override, ","), nil
	}
	var out strings.Builder
	if _, err := runBazel(bazelPath, []string{"help", cmd, "--short"}, &out, config); err != nil {
		return nil, fmt.Errorf("unable to determine incompatible flags with binary %s: %v", bazelPath, err)
	}
	// Lines of the form "--[no]incompatible_foo" identify incompatible flags.
	re := regexp.MustCompile(`(?m)^\s*--\[no\](incompatible_\w+)$`)
	flags := make([]string, 0)
	for _, m := range re.FindAllStringSubmatch(out.String(), -1) {
		flags = append(flags, "--"+m[1])
	}
	sort.Strings(flags)
	return flags, nil
}
// insertArgs will insert newArgs in baseArgs. If baseArgs contains the
// "--" argument, newArgs will be inserted before that. Otherwise, newArgs
// is appended. (Everything after "--" must remain target patterns, so new
// flags cannot go there.)
func insertArgs(baseArgs []string, newArgs []string) []string {
	cut := -1
	for i, arg := range baseArgs {
		if arg == "--" {
			cut = i
			break
		}
	}
	if cut < 0 {
		merged := append([]string{}, baseArgs...)
		return append(merged, newArgs...)
	}
	var result []string
	result = append(result, baseArgs[:cut]...)
	result = append(result, newArgs...)
	result = append(result, baseArgs[cut:]...)
	return result
}
// parseStartupOptions returns the arguments that precede the first Bazel
// command — those are Bazel startup options (e.g. --output_base=...).
func parseStartupOptions(baseArgs []string) []string {
	isCommand := map[string]bool{
		"analyze-profile": true,
		"aquery": true,
		"build": true,
		"canonicalize-flags": true,
		"clean": true,
		"coverage": true,
		"cquery": true,
		"dump": true,
		"fetch": true,
		"help": true,
		"info": true,
		"license": true,
		"mobile-install": true,
		"mod": true,
		"print_action": true,
		"query": true,
		"run": true,
		"shutdown": true,
		"sync": true,
		"test": true,
		"version": true,
	}
	var startup []string
	// Everything before the first recognized command is a startup option.
	for _, arg := range baseArgs {
		if isCommand[arg] {
			break
		}
		startup = append(startup, arg)
	}
	return startup
}
// shutdownIfNeeded runs `bazel shutdown` (with the given startup options)
// when BAZELISK_SHUTDOWN is set, terminating the process if the command fails.
func shutdownIfNeeded(bazelPath string, startupOptions []string, config config.Config) {
	bazeliskShutdown := config.Get("BAZELISK_SHUTDOWN")
	if len(bazeliskShutdown) == 0 {
		return
	}
	// Bug fix: copy before appending — appending directly to the caller's
	// slice can clobber its backing array when it has spare capacity
	// (slice aliasing), corrupting the caller's startup options.
	args := append(append([]string(nil), startupOptions...), "shutdown")
	fmt.Printf("bazel %s\n", strings.Join(args, " "))
	exitCode, err := runBazel(bazelPath, args, nil, config)
	fmt.Printf("\n")
	if err != nil {
		log.Fatalf("failed to run bazel shutdown: %v", err)
	}
	if exitCode != 0 {
		fmt.Printf("Failure: shutdown command failed.\n")
		os.Exit(exitCode)
	}
}
// cleanIfNeeded runs `bazel clean --expunge` (with the given startup options)
// when BAZELISK_CLEAN is set, terminating the process if the command fails.
func cleanIfNeeded(bazelPath string, startupOptions []string, config config.Config) {
	bazeliskClean := config.Get("BAZELISK_CLEAN")
	if len(bazeliskClean) == 0 {
		return
	}
	// Bug fix: copy before appending — appending directly to the caller's
	// slice can clobber its backing array when it has spare capacity
	// (slice aliasing), corrupting the caller's startup options.
	args := append(append([]string(nil), startupOptions...), "clean", "--expunge")
	fmt.Printf("bazel %s\n", strings.Join(args, " "))
	exitCode, err := runBazel(bazelPath, args, nil, config)
	fmt.Printf("\n")
	if err != nil {
		log.Fatalf("failed to run clean: %v", err)
	}
	if exitCode != 0 {
		fmt.Printf("Failure: clean command failed.\n")
		os.Exit(exitCode)
	}
}
// ParentCommit identifies one parent of a commit in the GitHub compare API response.
type ParentCommit struct {
	SHA string `json:"sha"`
}
// Commit is a single commit entry from the GitHub compare API; commits with
// more than one parent are merge commits.
type Commit struct {
	SHA string `json:"sha"`
	PARENTS []ParentCommit `json:"parents"`
}
// CompareResponse models the subset of GitHub's compare-commits API payload
// used for bisecting Bazel versions.
type CompareResponse struct {
	Commits []Commit `json:"commits"`
	BaseCommit Commit `json:"base_commit"`
	MergeBaseCommit Commit `json:"merge_base_commit"`
}
// sendRequest issues a GET to url, attaching the BAZELISK_GITHUB_TOKEN (when
// configured) as an Authorization header to raise GitHub API rate limits.
//
// Fix: the client previously had no timeout (the zero-value http.Client never
// times out), so a stalled GitHub API call could hang bisection forever.
func sendRequest(url string, config config.Config) (*http.Response, error) {
	client := &http.Client{Timeout: 30 * time.Second}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	if githubToken := config.Get("BAZELISK_GITHUB_TOKEN"); len(githubToken) != 0 {
		req.Header.Set("Authorization", fmt.Sprintf("token %s", githubToken))
	}
	return client.Do(req)
}
// getBazelCommitsBetween fetches, via the GitHub compare API, the list of
// non-merge commit SHAs in (goodCommit, badCommit]. When goodCommit is not
// an ancestor of badCommit it is overridden with the merge base commit; the
// (possibly updated) good commit is returned together with the list.
func getBazelCommitsBetween(goodCommit string, badCommit string, config config.Config) (string, []string, error) {
	commitList := make([]string, 0)
	page := 1
	perPage := 250 // 250 is the maximum number of commits per page
	for {
		url := fmt.Sprintf("https://api.github.com/repos/bazelbuild/bazel/compare/%s...%s?page=%d&per_page=%d", goodCommit, badCommit, page, perPage)
		response, err := sendRequest(url, config)
		if err != nil {
			return goodCommit, nil, fmt.Errorf("Error fetching commit data: %v", err)
		}
		// Read and close the body immediately: the previous `defer` inside
		// this loop kept every page's connection open until the function
		// returned, leaking one response body per page.
		body, err := ioutil.ReadAll(response.Body)
		response.Body.Close()
		if err != nil {
			return goodCommit, nil, fmt.Errorf("Error reading response body: %v", err)
		}
		if response.StatusCode == http.StatusNotFound {
			return goodCommit, nil, fmt.Errorf("repository or commit not found: %s", string(body))
		} else if response.StatusCode == 403 {
			return goodCommit, nil, fmt.Errorf("github API rate limit hit, consider setting BAZELISK_GITHUB_TOKEN: %s", string(body))
		} else if response.StatusCode != http.StatusOK {
			return goodCommit, nil, fmt.Errorf("unexpected response status code %d: %s", response.StatusCode, string(body))
		}
		var compareResponse CompareResponse
		err = json.Unmarshal(body, &compareResponse)
		if err != nil {
			return goodCommit, nil, fmt.Errorf("Error unmarshaling JSON: %v", err)
		}
		if len(compareResponse.Commits) == 0 {
			break
		}
		mergeBaseCommit := compareResponse.MergeBaseCommit.SHA
		if mergeBaseCommit != compareResponse.BaseCommit.SHA {
			fmt.Printf("The good Bazel commit is not an ancestor of the bad Bazel commit, overriding the good Bazel commit to the merge base commit %s\n", mergeBaseCommit)
			goodCommit = mergeBaseCommit
		}
		for _, commit := range compareResponse.Commits {
			// If it has only one parent commit, add it to the list, otherwise it's a merge commit and we ignore it
			if len(commit.PARENTS) == 1 {
				commitList = append(commitList, commit.SHA)
			}
		}
		// Check if there are more commits to fetch
		if len(compareResponse.Commits) < perPage {
			break
		}
		page++
	}
	if len(commitList) == 0 {
		return goodCommit, nil, fmt.Errorf("no commits found between (%s, %s], the good commit should be first, maybe try with --bisect=%s..%s ?", goodCommit, badCommit, badCommit, goodCommit)
	}
	fmt.Printf("Found %d commits between (%s, %s]\n", len(commitList), goodCommit, badCommit)
	return goodCommit, commitList, nil
}
// bisect finds the first bad Bazel commit in (goodCommit, badCommit] by
// binary search: it verifies the good commit still passes, then repeatedly
// builds/downloads Bazel at the midpoint commit and runs the given args.
// Terminates the process with the result.
func bisect(goodCommit string, badCommit string, args []string, bazeliskHome string, repos *Repositories, config config.Config) {
	// 1. Get the list of commits between goodCommit and badCommit
	fmt.Printf("\n\n--- Getting the list of commits between %s and %s\n\n", goodCommit, badCommit)
	goodCommit, commitList, err := getBazelCommitsBetween(goodCommit, badCommit, config)
	if err != nil {
		// log.Fatalf already calls os.Exit(1); the extra os.Exit calls the
		// old code had after each Fatalf were unreachable dead code.
		log.Fatalf("Failed to get commits: %v", err)
	}
	// 2. Check if goodCommit is actually good
	fmt.Printf("\n\n--- Verifying if the given good Bazel commit (%s) is actually good\n\n", goodCommit)
	bazelExitCode, err := testWithBazelAtCommit(goodCommit, args, bazeliskHome, repos, config)
	if err != nil {
		log.Fatalf("could not run Bazel: %v", err)
	}
	if bazelExitCode != 0 {
		fmt.Printf("Failure: Given good bazel commit is already broken.\n")
		os.Exit(1)
	}
	// 3. Bisect commits: invariant is that commits at indexes < left succeed
	// and commits at indexes >= right fail (right==len means "none failed").
	fmt.Printf("\n\n--- Start bisecting\n\n")
	left := 0
	right := len(commitList)
	for left < right {
		mid := (left + right) / 2
		midCommit := commitList[mid]
		fmt.Printf("\n\n--- Testing with Bazel built at %s, %d commits remaining...\n\n", midCommit, right-left)
		bazelExitCode, err := testWithBazelAtCommit(midCommit, args, bazeliskHome, repos, config)
		if err != nil {
			log.Fatalf("could not run Bazel: %v", err)
		}
		if bazelExitCode == 0 {
			fmt.Printf("\n\n--- Succeeded at %s\n\n", midCommit)
			left = mid + 1
		} else {
			fmt.Printf("\n\n--- Failed at %s\n\n", midCommit)
			right = mid
		}
	}
	// 4. Print the result
	fmt.Printf("\n\n--- Bisect Result\n\n")
	if right == len(commitList) {
		fmt.Printf("first bad commit not found, every commit succeeded.\n")
	} else {
		firstBadCommit := commitList[right]
		fmt.Printf("first bad commit is https://github.com/bazelbuild/bazel/commit/%s\n", firstBadCommit)
	}
	os.Exit(0)
}
// testWithBazelAtCommit downloads (or reuses) Bazel built at bazelCommit,
// optionally shuts down / cleans the workspace, then runs the given args and
// returns Bazel's exit code. On error the exit code is -1 and must be
// ignored (the old code inconsistently returned 1 for download failures and
// -1 for run failures; callers only look at err, so -1 is now used for both).
func testWithBazelAtCommit(bazelCommit string, args []string, bazeliskHome string, repos *Repositories, config config.Config) (int, error) {
	bazelPath, err := downloadBazel(bazelCommit, bazeliskHome, repos, config)
	if err != nil {
		return -1, fmt.Errorf("could not download Bazel: %v", err)
	}
	startupOptions := parseStartupOptions(args)
	shutdownIfNeeded(bazelPath, startupOptions, config)
	cleanIfNeeded(bazelPath, startupOptions, config)
	fmt.Printf("bazel %s\n", strings.Join(args, " "))
	bazelExitCode, err := runBazel(bazelPath, args, nil, config)
	if err != nil {
		return -1, fmt.Errorf("could not run Bazel: %v", err)
	}
	return bazelExitCode, nil
}
// migrate will run Bazel with each flag separately and report which ones are failing.
// Procedure: (1) run with all incompatible flags — success means no
// migration is needed; (2) run with none as a sanity check; (3) run once per
// flag, bucketing each into pass/fail; (4) print the report and exit
// non-zero so CI surfaces pending migrations.
func migrate(bazelPath string, baseArgs []string, flags []string, config config.Config) {
	var startupOptions = parseStartupOptions(baseArgs)
	// 1. Try with all the flags.
	args := insertArgs(baseArgs, flags)
	fmt.Printf("\n\n--- Running Bazel with all incompatible flags\n\n")
	shutdownIfNeeded(bazelPath, startupOptions, config)
	cleanIfNeeded(bazelPath, startupOptions, config)
	fmt.Printf("bazel %s\n", strings.Join(args, " "))
	exitCode, err := runBazel(bazelPath, args, nil, config)
	if err != nil {
		log.Fatalf("could not run Bazel: %v", err)
	}
	if exitCode == 0 {
		fmt.Printf("Success: No migration needed.\n")
		os.Exit(0)
	}
	// 2. Try with no flags, as a sanity check.
	args = baseArgs
	fmt.Printf("\n\n--- Running Bazel with no incompatible flags\n\n")
	shutdownIfNeeded(bazelPath, startupOptions, config)
	cleanIfNeeded(bazelPath, startupOptions, config)
	fmt.Printf("bazel %s\n", strings.Join(args, " "))
	exitCode, err = runBazel(bazelPath, args, nil, config)
	if err != nil {
		log.Fatalf("could not run Bazel: %v", err)
	}
	if exitCode != 0 {
		fmt.Printf("Failure: Command failed, even without incompatible flags.\n")
		os.Exit(exitCode)
	}
	// 3. Try with each flag separately.
	var passList []string
	var failList []string
	for _, arg := range flags {
		args = insertArgs(baseArgs, []string{arg})
		fmt.Printf("\n\n--- Running Bazel with %s\n\n", arg)
		shutdownIfNeeded(bazelPath, startupOptions, config)
		cleanIfNeeded(bazelPath, startupOptions, config)
		fmt.Printf("bazel %s\n", strings.Join(args, " "))
		exitCode, err = runBazel(bazelPath, args, nil, config)
		if err != nil {
			log.Fatalf("could not run Bazel: %v", err)
		}
		if exitCode == 0 {
			passList = append(passList, arg)
		} else {
			failList = append(failList, arg)
		}
	}
	// Renamed from `print`, which shadowed the predeclared built-in.
	printFlags := func(l []string) {
		for _, arg := range l {
			fmt.Printf("  %s\n", arg)
		}
	}
	// 4. Print report
	fmt.Printf("\n\n+++ Result\n\n")
	fmt.Printf("Command was successful with the following flags:\n")
	printFlags(passList)
	fmt.Printf("\n")
	fmt.Printf("Migration is needed for the following flags:\n")
	printFlags(failList)
	os.Exit(1)
}
// nonAlnumRE matches every character not allowed in cache directory names.
// Compiled once at package scope so dirForURL does not recompile the pattern
// on every call (regexp.MustCompile in a hot path was the only cost here).
var nonAlnumRE = regexp.MustCompile("[[:^alnum:]]")

// dirForURL derives a filesystem-safe directory name from a URL.
// Disallowed characters are replaced with "-"; names longer than
// maxDirLength are truncated and suffixed with a sha256 hash of the full
// URL to keep them unique.
func dirForURL(url string) string {
	// Replace all characters that might not be allowed in filenames with "-".
	dir := nonAlnumRE.ReplaceAllString(url, "-")
	// Work around length limit on some systems by truncating and then appending
	// a sha256 hash of the URL.
	if len(dir) > maxDirLength {
		suffix := fmt.Sprintf("...%x", sha256.Sum256([]byte(url)))
		dir = dir[:maxDirLength-len(suffix)] + suffix
	}
	return dir
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"sync"
)
// Trees is the top-level JSON document shape: a single "trees" array.
type Trees struct {
	Trees []Tree `json:"trees"`
}

// Tree is one input record. Result is filled in by the workers with the
// value computed by FindPrimeNumber (no json tag: it is output-only).
type Tree struct {
	Type   string  `json:"type"`
	Age    int     `json:"age"`
	Height float64 `json:"height_m"`
	Result int
}
// wg tracks how many trees are still being processed by the workers.
var wg = sync.WaitGroup{}

// FilterValue: trees whose computed value exceeds this are dropped.
var FilterValue int = 9294100

// ThreadsCount is the number of worker goroutines started by main.
var ThreadsCount int = 10000

// FilePath / ResultPath are the input JSON and output text file locations.
var FilePath string = "data/IFF72_ZubowiczE_L1_dat_1.json"
var ResultPath string = "data/IFF72_ZubowiczE_L1_rez.txt"
// main reads the tree list, filters it on ThreadsCount worker goroutines
// and writes both the original and the filtered data to the result file.
func main() {
	// Reads file and creates slice of tree struct
	var trees = ReadJsonFile(FilePath)
	var resultTrees []Tree
	// Channels: worker feeds trees to the workers, receive carries back the
	// trees that passed the filter.
	worker := make(chan Tree)
	receive := make(chan Tree)
	for i := 0; i < ThreadsCount; i++ {
		go Execute(worker, receive)
	}
	// Collector goroutine: gathers filtered trees. `done` is closed once the
	// collector has fully drained `receive`, so main can safely read
	// resultTrees afterwards. (Previously main read the slice right after
	// wg.Wait() while this goroutine could still be appending — a data race
	// that could drop the last results.)
	done := make(chan struct{})
	go func(chanel <-chan Tree) {
		for element := range chanel {
			resultTrees = append(resultTrees, element)
		}
		close(done)
	}(receive)
	// Feed all trees to the workers.
	for _, tree := range trees {
		wg.Add(1)
		worker <- tree
	}
	// All workers called wg.Done only after their send on `receive`
	// completed, so after Wait no further sends can happen and main (the
	// coordinating side) may close the result channel.
	wg.Wait()
	close(receive)
	<-done
	WriteResultToFile(ResultPath, resultTrees, trees)
}
// Execute consumes trees from chanel, computes FindPrimeNumber for each and
// forwards trees whose value passes the FilterValue threshold on chanel2,
// calling wg.Done after every tree.
//
// Fix: the previous `defer close(chanel2)` was wrong — chanel2 is shared by
// every worker goroutine, and the first worker to exit would have closed it
// while the others were still sending (send on / close of a closed channel
// panics). Closing the result channel is the coordinator's responsibility.
func Execute(chanel <-chan Tree, chanel2 chan<- Tree) {
	for element := range chanel {
		tree := element
		var value = FindPrimeNumber(tree)
		if value <= FilterValue {
			tree.Result = value
			chanel2 <- tree
		}
		wg.Done()
	}
}
// FindPrimeNumber derives a work amount n from the tree's fields
// (int(height) * age * len(type) / 2) and returns the n-th prime number by
// trial division (or 1 when n is zero, since the search never advances).
func FindPrimeNumber(tree Tree) int {
	target := (int(tree.Height) * tree.Age * len(tree.Type)) / 2
	found := 0
	candidate := 2
	for found < target {
		isPrime := true
		for d := 2; d*d <= candidate; d++ {
			if candidate%d == 0 {
				isPrime = false
				break
			}
		}
		if isPrime {
			found++
		}
		candidate++
	}
	// candidate has been advanced one past the last number examined.
	return candidate - 1
}
// WriteResultToFile writes the original trees followed by the filtered
// trees (with their computed Result) to resultPath as fixed-width tables.
// Any error is printed to stdout and aborts the write.
//
// Fixes: the file handle was leaked on every early return (Close was only
// called on the success path), and the error from the first WriteString was
// assigned but never checked.
func WriteResultToFile(resultPath string, resTrees []Tree, primTrees []Tree) {
	file, err := os.Create(resultPath)
	if err != nil {
		fmt.Println(err)
		return
	}
	// Close on every exit path; report a close error if one occurs.
	defer func() {
		if cerr := file.Close(); cerr != nil {
			fmt.Println(cerr)
		}
	}()
	if _, err := file.WriteString("Pradiniai duomenys:\n"); err != nil {
		fmt.Println(err)
		return
	}
	for _, tree := range primTrees {
		s := fmt.Sprintf("| %-10s | %10d | %10.2f |", tree.Type, tree.Age, tree.Height)
		if _, err := fmt.Fprintln(file, s); err != nil {
			fmt.Println(err)
			return
		}
	}
	if _, err := file.WriteString("\nSurušiuoti duomenys:\n"); err != nil {
		fmt.Println(err)
		return
	}
	for _, tree := range resTrees {
		s := fmt.Sprintf("| %-10s | %10d | %10.2f | %10d |", tree.Type, tree.Age, tree.Height, tree.Result)
		if _, err := fmt.Fprintln(file, s); err != nil {
			fmt.Println(err)
			return
		}
	}
	fmt.Println("file written successfully")
}
// ReadJsonFile reads the JSON document at filePath and returns the trees it
// contains. Read or parse failures are reported on stdout and yield a nil
// slice (previously both errors were silently discarded, making a missing
// or malformed file indistinguishable from an empty one).
func ReadJsonFile(filePath string) []Tree {
	data, err := ioutil.ReadFile(filePath)
	if err != nil {
		fmt.Println(err)
		return nil
	}
	trees := Trees{}
	if err := json.Unmarshal(data, &trees); err != nil {
		fmt.Println(err)
		return nil
	}
	return trees.Trees
}
|
package main
import (
"fmt"
"github.com/gorilla/mux"
"github.com/namsral/flag"
"log"
"net/http"
)
func serveHome(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/" {
http.Error(w, "Not found", http.StatusNotFound)
return
}
if r.Method != "GET" {
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
return
}
http.ServeFile(w, r, "static/index.html")
}
// serveCreate handles POST /create: it asks the hotel for a fresh room and
// redirects the client (301) to that room's page. Non-POST requests get
// 405; a failed room creation redirects to /todo.
func serveCreate(w http.ResponseWriter, r *http.Request, hotel *Hotel) {
	if r.Method != "POST" {
		http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		return
	}
	room, err := hotel.createRoom()
	if err != nil {
		log.Println("create failed!")
		http.Redirect(w, r, "/todo", 301)
		return
	}
	http.Redirect(w, r, fmt.Sprintf("/r/%v", room.name), 301)
}
// roomHandler serves the generic room page; the {room} path variable is not
// inspected here (the concrete room is resolved on the websocket endpoint).
// The hotel parameter is unused but kept for signature symmetry with
// serveCreate.
func roomHandler(w http.ResponseWriter, r *http.Request, hotel *Hotel) {
	http.ServeFile(w, r, "static/room.html")
}
// main wires up the HTTP routes (home page, room creation, room pages, a
// "secure" entry point and the websocket endpoint) and serves them on the
// host/port supplied via flags or the environment.
func main() {
	var host, port string
	flag.StringVar(&host, "host", "", "http host")
	flag.StringVar(&port, "port", "8080", "http port")
	flag.Parse()

	hotel := newHotel()
	router := mux.NewRouter()
	router.HandleFunc("/", serveHome).Methods("GET")
	router.HandleFunc("/create", func(w http.ResponseWriter, r *http.Request) {
		serveCreate(w, r, hotel)
	}).Methods("POST")
	// Plain rooms and the "secure" /x entry point share one handler.
	serveRoom := func(w http.ResponseWriter, r *http.Request) {
		roomHandler(w, r, hotel)
	}
	router.HandleFunc("/r/{room}", serveRoom).Methods("GET")
	router.HandleFunc("/x", serveRoom).Methods("GET")
	router.HandleFunc("/ws/{room}", hotel.serveHotel)

	http.Handle("/", router)
	http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("./static"))))

	addr := fmt.Sprintf("%s:%s", host, port)
	log.Printf("listening on %s:%s\n", host, port)
	if err := http.ListenAndServe(addr, nil); err != nil {
		log.Fatal("ListenAndServe: ", err)
	}
}
|
package models
import (
"fmt"
"github.com/astaxie/beego"
"github.com/astaxie/beego/orm"
_ "github.com/lib/pq"
)
// init registers the Postgres driver with beego's ORM and opens the Cloud
// Controller database under the alias "appdb", using credentials taken from
// the beego app configuration (ccdbname/ccuser/ccpasswd/cchost/ccport).
// NOTE(review): the errors returned by RegisterDriver/RegisterDataBase are
// ignored, so a bad configuration only surfaces on the first query —
// confirm this is intended.
func init() {
	orm.RegisterDriver("postgres", orm.DRPostgres)
	//orm.RegisterDataBase("default", "postgres", "ccadmin:c1oudc0w@tcp(192.168.1.178:5524)/ccdb?charset=utf8")
	ccdbname := beego.AppConfig.String("ccdbname")
	ccuser := beego.AppConfig.String("ccuser")
	ccpasswd := beego.AppConfig.String("ccpasswd")
	cchost := beego.AppConfig.String("cchost")
	ccport := beego.AppConfig.String("ccport")
	// libpq-style DSN; TLS is disabled (sslmode=disable).
	conn := "user="+ccuser+" password="+ccpasswd+" host="+cchost+" port="+ccport+" dbname="+ccdbname+" sslmode=disable"
	orm.RegisterDataBase("appdb","postgres",conn)
}
// GetAllAppRoutes queries the CC database for every application route
// (host + "." + domain) and returns a pointer to the result rows, or nil
// when the query fails (the error is printed to stdout).
func GetAllAppRoutes() *[]orm.Params {
	o := orm.NewOrm()
	o.Using("appdb") // switch from the "default" alias to the CC database
	var maps []orm.Params
	_, err := o.Raw("select r.host||'.'||d.name route from routes r,domains d where r.domain_id = d.id").Values(&maps)
	if err != nil {
		fmt.Println(err.Error())
		return nil
	}
	return &maps
}
// RouteInApp reports whether uri matches any route registered in the CC
// database. When the database query fails it returns true together with an
// error, so callers fail "open" rather than treating the route as absent.
func RouteInApp(uri string) (bool, error) {
	m := GetAllAppRoutes()
	if m == nil {
		return true, fmt.Errorf("%s", "查询cc的数据库失败")
	}
	// Iterate the rows directly instead of the previous
	// `for k, _ := range` plus re-indexing (redundant blank identifier and
	// a second map lookup per row).
	for _, row := range *m {
		if row["route"] == uri {
			return true, nil
		}
	}
	return false, nil
}
|
package handlers
import (
"MovieDatabase/entities"
"MovieDatabase/repo"
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"net/http"
)
// Service abstracts the movie business layer the HTTP handlers delegate to:
// CRUD operations over movies keyed by a string id. Handlers branch on the
// textual err.Error() values the implementation returns (e.g.
// "movie already exists", "movie not found").
type Service interface {
	AddMovie(m entities.Movie) error
	ViewAll() (repo.DataBase, error)
	FindById(id string) (*entities.Movie, error)
	DeleteMovie(id string) error
	UpdateMovie(id string, m entities.Movie) error
}

// MovieHandler bundles the HTTP handlers with the Service they delegate to.
type MovieHandler struct {
	Serv Service
}

// NewMovieHandler returns a MovieHandler backed by the given Service.
func NewMovieHandler(s Service) MovieHandler {
	return MovieHandler{
		Serv: s,
	}
}
// PostNewMovie decodes a Movie from the request body and stores it via the
// service. Responds 400 on a malformed payload or a known validation error,
// 500 on any other service error, and 201 on success.
func (mov MovieHandler) PostNewMovie(w http.ResponseWriter, r *http.Request) {
	mv := entities.Movie{}
	if err := json.NewDecoder(r.Body).Decode(&mv); err != nil {
		// Previously the decode error was only printed and a zero-valued
		// movie was stored anyway; reject the request instead.
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if err := mov.Serv.AddMovie(mv); err != nil {
		switch err.Error() {
		case "movie already exists", "invalid rating":
			http.Error(w, err.Error(), http.StatusBadRequest)
		default:
			// Unknown failures used to fall through to a 201.
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusCreated)
}
// GetMovies returns the whole movie database as indented JSON with status
// 200. Fixes: the old code kept executing after http.Error (producing
// "superfluous WriteHeader" calls over the error status) and answered a
// plain GET with 201 Created.
func (mov MovieHandler) GetMovies(w http.ResponseWriter, r *http.Request) {
	movDb, err := mov.Serv.ViewAll()
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	movieDb, err := json.MarshalIndent(movDb, "", " ")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write(movieDb)
}
// GetById looks up one movie by the {Id} path variable and returns it as
// indented JSON. 404 when the movie is missing, 500 on any other service
// error (previously such errors fell through and marshaled a nil movie),
// 200 with the movie body on success.
func (mov MovieHandler) GetById(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	id := vars["Id"]
	movById, err := mov.Serv.FindById(id)
	if err != nil {
		switch err.Error() {
		case "movie not found":
			http.Error(w, err.Error(), http.StatusNotFound)
		default:
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}
	movie, err := json.MarshalIndent(movById, "", " ")
	if err != nil {
		// The missing `return` here used to let the 200 below clobber the 400.
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write(movie)
}
// DeleteMov deletes the movie identified by the {Id} path variable.
// 404 when it does not exist, 500 on any other service error (previously
// unknown errors still produced a 200), 200 on success.
func (mov MovieHandler) DeleteMov(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	id := vars["Id"]
	if err := mov.Serv.DeleteMovie(id); err != nil {
		switch err.Error() {
		case "failed to delete movie - does not exist":
			http.Error(w, err.Error(), http.StatusNotFound)
		default:
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
}
// UpdateMov replaces the movie identified by {Id} with the decoded request
// body. 400 on a malformed payload or a service error, 200 on success.
// Fixes: the decode error was previously only printed (the update ran with
// a zero movie), and the missing `return` after http.Error let the 200
// below override the error status.
func (mov MovieHandler) UpdateMov(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	id := vars["Id"]
	mv := entities.Movie{}
	if err := json.NewDecoder(r.Body).Decode(&mv); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if err := mov.Serv.UpdateMovie(id, mv); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
}
|
// Copyright (c) 2018 Benjamin Borbe All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package version_test
import (
"context"
"errors"
mocksmocks "github.com/Shopify/sarama/mocks"
"github.com/bborbe/kafka-dockerhub-version-collector/avro"
"github.com/bborbe/kafka-dockerhub-version-collector/version"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/seibert-media/go-kafka/mocks"
)
// Ginkgo suite for version.Sender: exercises Send against a mocked sarama
// SyncProducer and a mocked schema registry.
var _ = Describe("Version Sender", func() {
	var sender version.Sender
	var producer *mocksmocks.SyncProducer
	var topic string
	var schemaRegistry *mocks.SchemaRegistry
	BeforeEach(func() {
		// Fresh mocks for every spec so producer expectations cannot leak
		// between tests.
		var t GinkgoTestReporter
		producer = mocksmocks.NewSyncProducer(t, nil)
		topic = "my-topic"
		schemaRegistry = &mocks.SchemaRegistry{}
		sender = version.NewSender(
			producer,
			schemaRegistry,
			topic,
		)
	})
	It("send until channel is closed", func() {
		// A closed, empty channel makes Send return immediately and cleanly.
		versions := make(chan avro.ApplicationVersionAvailable)
		close(versions)
		err := sender.Send(context.Background(), versions)
		Expect(err).NotTo(HaveOccurred())
	})
	It("send until channel is closed", func() {
		// An already-cancelled context also ends Send without an error.
		ctx, cancel := context.WithCancel(context.Background())
		cancel()
		versions := make(chan avro.ApplicationVersionAvailable)
		defer close(versions)
		err := sender.Send(ctx, versions)
		Expect(err).NotTo(HaveOccurred())
	})
	It("send version to producer", func() {
		// Each queued version should result in exactly one produced message.
		counter := 0
		producer.ExpectSendMessageWithCheckerFunctionAndSucceed(func(val []byte) error {
			counter++
			return nil
		})
		versions := make(chan avro.ApplicationVersionAvailable, 2)
		versions <- *avro.NewApplicationVersionAvailable()
		close(versions)
		err := sender.Send(context.Background(), versions)
		Expect(err).To(BeNil())
		Expect(counter).To(Equal(1))
	})
	It("returns error if get schemaId fails", func() {
		// Schema registry lookup failure must be propagated by Send.
		schemaRegistry.SchemaIdReturns(0, errors.New("banana"))
		versions := make(chan avro.ApplicationVersionAvailable, 2)
		versions <- *avro.NewApplicationVersionAvailable()
		close(versions)
		err := sender.Send(context.Background(), versions)
		Expect(err).To(HaveOccurred())
	})
	It("returns error if send message fails", func() {
		// Producer failure must be propagated by Send.
		producer.ExpectSendMessageAndFail(errors.New("banana"))
		versions := make(chan avro.ApplicationVersionAvailable, 2)
		versions <- *avro.NewApplicationVersionAvailable()
		close(versions)
		err := sender.Send(context.Background(), versions)
		Expect(err).To(HaveOccurred())
	})
})
|
package push_translator
import "ms/sun/shared/x"
// m is a process-wide counter used as a monotonically increasing push id.
// NOTE(review): it is read and incremented without synchronization, so
// concurrent callers of ChatPushToPbChat race on it — confirm this is only
// used single-threaded or guard it.
var m = 1

// ChatPushToPbChat converts an internal PushChat into the wire-level
// PB_Push, stamping LastPushId with the current counter value (which is
// incremented on every call) and LastChatPushId with the target user id.
func ChatPushToPbChat(pc *x.PushChat) x.PB_Push {
	pb := x.PB_Push{
		LastPushId:     int64(m),
		LastChatPushId: int64(pc.ToUserId),
	}
	m++
	return pb
}
|
// +build integration
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"bytes"
"syscall"
"github.com/minishift/minishift/pkg/minikube/constants"
instanceState "github.com/minishift/minishift/pkg/minishift/config"
)
// MinishiftRunner invokes the minishift binary at CommandPath; CommandArgs
// is a space-separated argument string appended to `start`.
type MinishiftRunner struct {
	CommandPath string
	CommandArgs string
}

// OcRunner invokes the `oc` client binary located at CommandPath.
type OcRunner struct {
	CommandPath string
}
// runCommand executes the binary at commandPath with the space-separated
// arguments in command and returns its stdout, stderr and exit code. When
// the process cannot be started at all, exit code 1 is returned and the Go
// error text is placed in stdErr (unless the process wrote something there).
func runCommand(command string, commandPath string) (stdOut string, stdErr string, exitCode int) {
	absPath, _ := filepath.Abs(commandPath)
	args := strings.Split(command, " ")
	cmd := exec.Command(absPath, args...)
	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	runErr := cmd.Run()
	stdOut, stdErr = stdout.String(), stderr.String()
	switch e := runErr.(type) {
	case nil:
		// Process ran and exited 0..n; read the code from its wait status.
		exitCode = cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
	case *exec.ExitError:
		exitCode = e.Sys().(syscall.WaitStatus).ExitStatus()
	default:
		if stdErr == "" {
			stdErr = runErr.Error()
		}
		exitCode = 1 // unable to get error code
	}
	return
}
// RunCommand runs the given minishift sub-command (plus arguments) and
// returns its stdout, stderr and exit code.
func (m *MinishiftRunner) RunCommand(command string) (stdOut string, stdErr string, exitCode int) {
	stdOut, stdErr, exitCode = runCommand(command, m.CommandPath)
	return
}

// Start launches the minishift VM with the runner's configured extra args.
func (m *MinishiftRunner) Start() {
	m.RunCommand(fmt.Sprintf("start %s", m.CommandArgs))
}

// EnsureRunning starts the VM when it is not already running and then
// re-checks (via CheckStatus) that it reports "Running".
func (m *MinishiftRunner) EnsureRunning() {
	if m.GetStatus() != "Running" {
		m.Start()
	}
	m.CheckStatus("Running")
}

// IsRunning reports whether `minishift status` currently returns "Running".
func (m *MinishiftRunner) IsRunning() bool {
	return m.GetStatus() == "Running"
}

// GetOcRunner returns an OcRunner for the running cluster, or nil when the
// VM is not running (the oc binary path is only known for a live instance).
func (m *MinishiftRunner) GetOcRunner() *OcRunner {
	if m.IsRunning() {
		return NewOcRunner()
	}
	return nil
}

// EnsureDeleted deletes the VM and verifies it reports "Does Not Exist".
func (m *MinishiftRunner) EnsureDeleted() {
	m.RunCommand("delete")
	m.CheckStatus("Does Not Exist")
}
// SetEnvFromEnvCmdOutput parses the output of `minishift docker-env` —
// lines of the form `export KEY="VALUE"` — and applies each variable to the
// current process environment. It returns an error when no export line was
// found at all.
//
// Fixes over the previous version: strings.HasPrefix was called with its
// arguments swapped (HasPrefix("export ", line)), so the prefix was never
// detected; and the fmt.Sscanf condition was inverted (err != nil treated
// as success) while using a format (`export %s="%s"`) that cannot actually
// split KEY="VALUE" pairs — so variables were only "set" from failed parses.
func (m *MinishiftRunner) SetEnvFromEnvCmdOutput(dockerEnvVars string) error {
	seenEnvVar := false
	for _, line := range strings.Split(dockerEnvVars, "\n") {
		fmt.Println(line)
		if !strings.HasPrefix(line, "export ") {
			continue
		}
		assignment := strings.TrimPrefix(line, "export ")
		parts := strings.SplitN(assignment, "=", 2)
		if len(parts) != 2 {
			continue
		}
		envKey := strings.TrimSpace(parts[0])
		// Strip the surrounding double quotes docker-env emits.
		envVal := strings.Trim(strings.TrimSpace(parts[1]), "\"")
		seenEnvVar = true
		fmt.Println(fmt.Sprintf("%s=%s", envKey, envVal))
		os.Setenv(envKey, envVal)
	}
	if !seenEnvVar {
		return fmt.Errorf("Error: No environment variables were found in docker-env command output: %s", dockerEnvVars)
	}
	return nil
}
// GetStatus returns the trimmed output of `minishift status`
// (e.g. "Running" or "Does Not Exist").
func (m *MinishiftRunner) GetStatus() string {
	cmdOut, _, _ := m.RunCommand("status")
	return strings.Trim(cmdOut, " \n")
}

// CheckStatus reports whether the current status equals desired.
func (m *MinishiftRunner) CheckStatus(desired string) bool {
	return m.GetStatus() == desired
}

// NewOcRunner loads the default machine's instance config to discover the
// cached oc binary path and returns a runner bound to it.
// NOTE(review): the error from NewInstanceConfig is ignored; a missing or
// broken config yields a runner with an empty CommandPath — confirm callers
// tolerate that.
func NewOcRunner() *OcRunner {
	jsonDataPath := filepath.Join(os.Getenv(constants.MiniShiftHomeEnv), "machines", constants.MachineName+".json")
	instanceState.Config, _ = instanceState.NewInstanceConfig(jsonDataPath)
	p := instanceState.Config.OcPath
	return &OcRunner{CommandPath: p}
}

// RunCommandParseOutput is intended to run an oc command and unmarshal its
// output into outputObj; it is a stub that currently always returns nil.
func (k *OcRunner) RunCommandParseOutput(args []string, outputObj interface{}) error {
	// TODO implement (HF)
	return nil
}

// RunCommand runs the given oc sub-command and returns its stdout, stderr
// and exit code.
func (k *OcRunner) RunCommand(command string) (stdOut string, stdErr string, exitCode int) {
	stdOut, stdErr, exitCode = runCommand(command, k.CommandPath)
	return
}
|
// Package main implements a client for rate-limiter service.
package main
import (
"context"
"log"
"time"
pb "github.com/sam09/rate-limiter/token-bucket"
"google.golang.org/grpc"
)
// Demo parameters for the token-bucket client: the gRPC server address and
// the bucket configuration to create (capacity 1000, refilled with 1000
// tokens every hour — refillTime is in seconds).
const (
	address      = "localhost:50051"
	bucketName   = "test-bucket"
	maxAmount    = 1000
	refillTime   = 60 * 60
	refillAmount = 1000
)
// main connects to the rate-limiter service, creates a demo bucket, and
// then exercises the API by adding and consuming one token.
func main() {
	// Set up a connection to the server.
	conn, err := grpc.Dial(address, grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatalf("did not connect: %v", err)
	}
	defer conn.Close()
	c := pb.NewTokenBucketClient(conn)
	// Contact the server and print out its response.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	// Fix: RefillTime was previously populated from refillAmount (a
	// copy-paste slip that also left the refillTime constant unused),
	// misconfiguring the bucket's refill interval.
	r1, err := c.CreateBucket(ctx, &pb.CreateBucketRequest{Name: bucketName, MaxAmount: maxAmount,
		RefillTime: refillTime, RefillAmount: refillAmount})
	if err != nil {
		log.Fatalf("could not create bucket: %v", err)
	}
	log.Printf("Test Bucket created: %s", r1.GetBucketName())
	_, err = c.AddToken(ctx, &pb.AddTokenRequest{BucketName: bucketName})
	if err != nil {
		log.Fatalf("Could not add token: %v", err)
	}
	r2, err := c.ConsumeToken(ctx, &pb.ConsumeTokenRequest{BucketName: bucketName})
	if err != nil {
		log.Fatalf("Could not consume token: %v", err)
	}
	log.Printf("Greeting: %s", r2.GetToken())
}
|
package common
import (
"errors"
)
// Lasercfg is the path to the laser configuration file, relative to the
// process working directory.
const (
	Lasercfg = "config/laser.cfg"
)

// NotImplementedException is returned by functions that are still stubs.
// NOTE(review): Go convention would name this ErrNotImplemented and drop
// the trailing period from the message; renaming would break existing
// callers, so only flagging it here.
var (
	NotImplementedException = errors.New("this function not implemented.")
)
|
package er
import (
"encoding/json"
"fmt"
"hlf"
"runtime"
"strconv"
)
//Err error data: an error carrying a numeric code (whose bits encode an
//importance level, see Importance), the call stack captured at Throw time,
//a JSON-encoded info payload, and an optional link to the next error in an
//error stack (see Push/Pop).
type Err struct {
	code       int32
	callStack  []string // formatted stack frames, innermost first
	stackDepth int      // total number of frames available at Throw time
	info       string   // JSON-serialized EInfo payload
	next       *Err     // next error in the stack (ordering maintained by Push)
}
// Error implements the error interface; safe to call on a nil receiver
// (returns the sentinel string "No Error").
func (me *Err) Error() string {
	if me == nil {
		return "No Error"
	}
	return fmt.Sprintf("Error Code: 0x%x, Error Info: %v", me.code, me.info)
}

//Code retrieve code; 0 for a nil receiver.
func (me *Err) Code() int32 {
	if me == nil {
		return 0
	}
	return me.code
}

//Importance retrieve error importance: the code masked with E_IMPORTANCE.
//Returns IMPT_NONE for a nil receiver.
func (me *Err) Importance() int32 {
	if me == nil {
		return IMPT_NONE
	}
	return me.code & E_IMPORTANCE
}
//DumpCallStack generate a call stack integrated string: renders at most lv
//captured frames joined with " by:\n", and when frames were truncated it
//appends a "...N more" note. Safe to call on a nil receiver (returns "").
func (me *Err) DumpCallStack(lv int) string {
	if me == nil {
		return ""
	}
	if lv > len(me.callStack) {
		lv = len(me.callStack)
	}
	shown := me.callStack[:lv]
	out := "Call Stack:\n"
	for i, frame := range shown {
		if i > 0 {
			out += " by:\n"
		}
		out += frame
	}
	if len(shown) < me.stackDepth {
		out += "by:\n"
		out += " ..." + strconv.Itoa(me.stackDepth-len(shown)) + " more"
	}
	return out
}
//EInfo map for info entries attached to an error at Throw time; it is
//JSON-marshaled into Err.info.
type EInfo map[string]interface{}

// _maxCallStackFrames caps how many program counters Throw collects.
var _maxCallStackFrames = 100
//Throw init an Err: captures the current call stack (up to
//_maxCallStackFrames frames, skipping the Throw frame itself), JSON-encodes
//the info payload and wraps everything into a new *Err.
//
//Fix: when runtime.Callers returned 0 the old code left `frames` nil and
//then called frames.Next(), panicking; when it returned 1 the empty PC
//slice produced one garbage zero-valued frame. Both cases are now guarded.
func Throw(code int32, info EInfo) *Err {
	errinfo, _ := json.Marshal(info)
	pc := make([]uintptr, _maxCallStackFrames)
	n := runtime.Callers(1, pc)
	callstack := make([]string, 0)
	if n > 1 {
		// Skip the first PC (this function) and walk the remaining frames.
		frames := runtime.CallersFrames(pc[1:n])
		for {
			frame, next := frames.Next()
			framefootprint := fmt.Sprintf(" [%v(), line %v] called, in [%v]", frame.Function, frame.Line, frame.File)
			callstack = append(callstack, framefootprint)
			if !next {
				break
			}
		}
	}
	return &Err{
		code:       code,
		callStack:  callstack,
		stackDepth: n - 1,
		info:       string(errinfo),
	}
}
//To log error: routes the error to the given logger by importance — threats
//to error.log, remarkable errors to remarkable.log, everything else to
//exception.log. Returns the receiver for chaining; nil-safe.
//
//Fix: importance was previously extracted with `me.code | E_IMPORTANCE`
//(bitwise OR), which saturates the importance bits and therefore routed
//every error down the highest-severity path; use the masking logic already
//implemented by Importance() (which applies `&`) instead.
func (me *Err) To(logger hlf.Logger) *Err {
	if me == nil {
		return me
	}
	importance := me.Importance()
	if importance >= IMPT_THREAT {
		logger.Err(me.Error())
		logger.To("error.log").Err(me.Error() + ", " + me.DumpCallStack(10))
		return me
	}
	if importance >= IMPT_REMARKABLE {
		logger.Ntf(me.Error())
		logger.To("remarkable.log").Ntf(me.Error() + ", " + me.DumpCallStack(10))
		return me
	}
	logger.Inf(me.Error())
	logger.To("exception.log").Inf(me.Error() + ", " + me.DumpCallStack(10))
	return me
}
//Push support errors stack, add one error to the top.
//The stack is kept ordered by Importance (descending): top is inserted
//after every existing entry whose importance is strictly greater. Pushing
//nil is a no-op that returns the receiver unchanged.
//NOTE(review): the function always returns `top` (prev.next after the
//insertion), not the head of the list — when top is inserted mid-list the
//higher-importance entries before it are unreachable from the returned
//value. Confirm callers rely on this, otherwise the dummy head's .next
//should be returned instead.
func (me *Err) Push(top *Err) *Err {
	if top == nil {
		return me
	}
	// Dummy head node so inserting before the current first element needs
	// no special case.
	prev := &Err{
		next: me,
	}
	for prev.next != nil && prev.next.Importance() > top.Importance() {
		prev = prev.next
	}
	top.next = prev.next
	prev.next = top
	return prev.next
}
//Pop support errors stack, remove the top error and return the remainder.
//Now nil-safe for consistency with every other *Err method (the previous
//version dereferenced a nil receiver and panicked).
func (me *Err) Pop() *Err {
	if me == nil {
		return nil
	}
	return me.next
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
betapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/cloudfunctions/beta/cloudfunctions_beta_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/cloudfunctions/beta"
)
// FunctionServer implements the gRPC interface for Function.
// (Generated DCL adapter code; fields are intentionally empty.)
type FunctionServer struct{}

// ProtoToFunctionHttpsTriggerSecurityLevelEnum converts a FunctionHttpsTriggerSecurityLevelEnum enum from its proto representation.
// The proto zero value means "unset" and maps to nil; otherwise the proto
// enum name is looked up and its type-name prefix stripped.
func ProtoToCloudfunctionsBetaFunctionHttpsTriggerSecurityLevelEnum(e betapb.CloudfunctionsBetaFunctionHttpsTriggerSecurityLevelEnum) *beta.FunctionHttpsTriggerSecurityLevelEnum {
	if e == 0 {
		return nil
	}
	if n, ok := betapb.CloudfunctionsBetaFunctionHttpsTriggerSecurityLevelEnum_name[int32(e)]; ok {
		e := beta.FunctionHttpsTriggerSecurityLevelEnum(n[len("CloudfunctionsBetaFunctionHttpsTriggerSecurityLevelEnum"):])
		return &e
	}
	return nil
}

// ProtoToFunctionStatusEnum converts a FunctionStatusEnum enum from its proto representation.
// Same unset-to-nil / prefix-stripping pattern as above.
func ProtoToCloudfunctionsBetaFunctionStatusEnum(e betapb.CloudfunctionsBetaFunctionStatusEnum) *beta.FunctionStatusEnum {
	if e == 0 {
		return nil
	}
	if n, ok := betapb.CloudfunctionsBetaFunctionStatusEnum_name[int32(e)]; ok {
		e := beta.FunctionStatusEnum(n[len("CloudfunctionsBetaFunctionStatusEnum"):])
		return &e
	}
	return nil
}

// ProtoToFunctionVPCConnectorEgressSettingsEnum converts a FunctionVPCConnectorEgressSettingsEnum enum from its proto representation.
func ProtoToCloudfunctionsBetaFunctionVPCConnectorEgressSettingsEnum(e betapb.CloudfunctionsBetaFunctionVPCConnectorEgressSettingsEnum) *beta.FunctionVPCConnectorEgressSettingsEnum {
	if e == 0 {
		return nil
	}
	if n, ok := betapb.CloudfunctionsBetaFunctionVPCConnectorEgressSettingsEnum_name[int32(e)]; ok {
		e := beta.FunctionVPCConnectorEgressSettingsEnum(n[len("CloudfunctionsBetaFunctionVPCConnectorEgressSettingsEnum"):])
		return &e
	}
	return nil
}

// ProtoToFunctionIngressSettingsEnum converts a FunctionIngressSettingsEnum enum from its proto representation.
func ProtoToCloudfunctionsBetaFunctionIngressSettingsEnum(e betapb.CloudfunctionsBetaFunctionIngressSettingsEnum) *beta.FunctionIngressSettingsEnum {
	if e == 0 {
		return nil
	}
	if n, ok := betapb.CloudfunctionsBetaFunctionIngressSettingsEnum_name[int32(e)]; ok {
		e := beta.FunctionIngressSettingsEnum(n[len("CloudfunctionsBetaFunctionIngressSettingsEnum"):])
		return &e
	}
	return nil
}
// ProtoToFunctionSourceRepository converts a FunctionSourceRepository object from its proto representation.
// A nil proto message maps to a nil object.
func ProtoToCloudfunctionsBetaFunctionSourceRepository(p *betapb.CloudfunctionsBetaFunctionSourceRepository) *beta.FunctionSourceRepository {
	if p == nil {
		return nil
	}
	obj := &beta.FunctionSourceRepository{
		Url:         dcl.StringOrNil(p.GetUrl()),
		DeployedUrl: dcl.StringOrNil(p.GetDeployedUrl()),
	}
	return obj
}

// ProtoToFunctionHttpsTrigger converts a FunctionHttpsTrigger object from its proto representation.
func ProtoToCloudfunctionsBetaFunctionHttpsTrigger(p *betapb.CloudfunctionsBetaFunctionHttpsTrigger) *beta.FunctionHttpsTrigger {
	if p == nil {
		return nil
	}
	obj := &beta.FunctionHttpsTrigger{
		Url:           dcl.StringOrNil(p.GetUrl()),
		SecurityLevel: ProtoToCloudfunctionsBetaFunctionHttpsTriggerSecurityLevelEnum(p.GetSecurityLevel()),
	}
	return obj
}

// ProtoToFunctionEventTrigger converts a FunctionEventTrigger object from its proto representation.
func ProtoToCloudfunctionsBetaFunctionEventTrigger(p *betapb.CloudfunctionsBetaFunctionEventTrigger) *beta.FunctionEventTrigger {
	if p == nil {
		return nil
	}
	obj := &beta.FunctionEventTrigger{
		EventType:     dcl.StringOrNil(p.GetEventType()),
		Resource:      dcl.StringOrNil(p.GetResource()),
		Service:       dcl.StringOrNil(p.GetService()),
		FailurePolicy: dcl.Bool(p.GetFailurePolicy()),
	}
	return obj
}
// ProtoToFunction converts a Function resource from its proto representation.
// Scalar fields go through dcl.StringOrNil/Int64OrNil (empty/zero → nil);
// nested messages and enums use the dedicated converters above.
func ProtoToFunction(p *betapb.CloudfunctionsBetaFunction) *beta.Function {
	obj := &beta.Function{
		Name:                       dcl.StringOrNil(p.GetName()),
		Description:                dcl.StringOrNil(p.GetDescription()),
		SourceArchiveUrl:           dcl.StringOrNil(p.GetSourceArchiveUrl()),
		SourceRepository:           ProtoToCloudfunctionsBetaFunctionSourceRepository(p.GetSourceRepository()),
		HttpsTrigger:               ProtoToCloudfunctionsBetaFunctionHttpsTrigger(p.GetHttpsTrigger()),
		EventTrigger:               ProtoToCloudfunctionsBetaFunctionEventTrigger(p.GetEventTrigger()),
		Status:                     ProtoToCloudfunctionsBetaFunctionStatusEnum(p.GetStatus()),
		EntryPoint:                 dcl.StringOrNil(p.GetEntryPoint()),
		Runtime:                    dcl.StringOrNil(p.GetRuntime()),
		Timeout:                    dcl.StringOrNil(p.GetTimeout()),
		AvailableMemoryMb:          dcl.Int64OrNil(p.GetAvailableMemoryMb()),
		ServiceAccountEmail:        dcl.StringOrNil(p.GetServiceAccountEmail()),
		UpdateTime:                 dcl.StringOrNil(p.GetUpdateTime()),
		VersionId:                  dcl.Int64OrNil(p.GetVersionId()),
		MaxInstances:               dcl.Int64OrNil(p.GetMaxInstances()),
		VPCConnector:               dcl.StringOrNil(p.GetVpcConnector()),
		VPCConnectorEgressSettings: ProtoToCloudfunctionsBetaFunctionVPCConnectorEgressSettingsEnum(p.GetVpcConnectorEgressSettings()),
		IngressSettings:            ProtoToCloudfunctionsBetaFunctionIngressSettingsEnum(p.GetIngressSettings()),
		Region:                     dcl.StringOrNil(p.GetRegion()),
		Project:                    dcl.StringOrNil(p.GetProject()),
	}
	return obj
}
// CloudfunctionsBetaFunctionHttpsTriggerSecurityLevelEnumToProto converts a
// FunctionHttpsTriggerSecurityLevelEnum enum to its proto representation.
// Unknown or nil values map to the proto zero value (unspecified).
func CloudfunctionsBetaFunctionHttpsTriggerSecurityLevelEnumToProto(e *beta.FunctionHttpsTriggerSecurityLevelEnum) betapb.CloudfunctionsBetaFunctionHttpsTriggerSecurityLevelEnum {
	result := betapb.CloudfunctionsBetaFunctionHttpsTriggerSecurityLevelEnum(0)
	if e != nil {
		if v, ok := betapb.CloudfunctionsBetaFunctionHttpsTriggerSecurityLevelEnum_value["FunctionHttpsTriggerSecurityLevelEnum"+string(*e)]; ok {
			result = betapb.CloudfunctionsBetaFunctionHttpsTriggerSecurityLevelEnum(v)
		}
	}
	return result
}
// CloudfunctionsBetaFunctionStatusEnumToProto converts a FunctionStatusEnum
// enum to its proto representation. Unknown or nil values map to the proto
// zero value (unspecified).
func CloudfunctionsBetaFunctionStatusEnumToProto(e *beta.FunctionStatusEnum) betapb.CloudfunctionsBetaFunctionStatusEnum {
	result := betapb.CloudfunctionsBetaFunctionStatusEnum(0)
	if e != nil {
		if v, ok := betapb.CloudfunctionsBetaFunctionStatusEnum_value["FunctionStatusEnum"+string(*e)]; ok {
			result = betapb.CloudfunctionsBetaFunctionStatusEnum(v)
		}
	}
	return result
}
// CloudfunctionsBetaFunctionVPCConnectorEgressSettingsEnumToProto converts a
// FunctionVPCConnectorEgressSettingsEnum enum to its proto representation.
// Unknown or nil values map to the proto zero value (unspecified).
func CloudfunctionsBetaFunctionVPCConnectorEgressSettingsEnumToProto(e *beta.FunctionVPCConnectorEgressSettingsEnum) betapb.CloudfunctionsBetaFunctionVPCConnectorEgressSettingsEnum {
	result := betapb.CloudfunctionsBetaFunctionVPCConnectorEgressSettingsEnum(0)
	if e != nil {
		if v, ok := betapb.CloudfunctionsBetaFunctionVPCConnectorEgressSettingsEnum_value["FunctionVPCConnectorEgressSettingsEnum"+string(*e)]; ok {
			result = betapb.CloudfunctionsBetaFunctionVPCConnectorEgressSettingsEnum(v)
		}
	}
	return result
}
// CloudfunctionsBetaFunctionIngressSettingsEnumToProto converts a
// FunctionIngressSettingsEnum enum to its proto representation. Unknown or
// nil values map to the proto zero value (unspecified).
func CloudfunctionsBetaFunctionIngressSettingsEnumToProto(e *beta.FunctionIngressSettingsEnum) betapb.CloudfunctionsBetaFunctionIngressSettingsEnum {
	result := betapb.CloudfunctionsBetaFunctionIngressSettingsEnum(0)
	if e != nil {
		if v, ok := betapb.CloudfunctionsBetaFunctionIngressSettingsEnum_value["FunctionIngressSettingsEnum"+string(*e)]; ok {
			result = betapb.CloudfunctionsBetaFunctionIngressSettingsEnum(v)
		}
	}
	return result
}
// CloudfunctionsBetaFunctionSourceRepositoryToProto converts a
// FunctionSourceRepository object to its proto representation; nil maps to nil.
func CloudfunctionsBetaFunctionSourceRepositoryToProto(o *beta.FunctionSourceRepository) *betapb.CloudfunctionsBetaFunctionSourceRepository {
	if o == nil {
		return nil
	}
	out := &betapb.CloudfunctionsBetaFunctionSourceRepository{}
	out.SetUrl(dcl.ValueOrEmptyString(o.Url))
	out.SetDeployedUrl(dcl.ValueOrEmptyString(o.DeployedUrl))
	return out
}
// CloudfunctionsBetaFunctionHttpsTriggerToProto converts a FunctionHttpsTrigger
// object to its proto representation; nil maps to nil.
func CloudfunctionsBetaFunctionHttpsTriggerToProto(o *beta.FunctionHttpsTrigger) *betapb.CloudfunctionsBetaFunctionHttpsTrigger {
	if o == nil {
		return nil
	}
	out := &betapb.CloudfunctionsBetaFunctionHttpsTrigger{}
	out.SetUrl(dcl.ValueOrEmptyString(o.Url))
	out.SetSecurityLevel(CloudfunctionsBetaFunctionHttpsTriggerSecurityLevelEnumToProto(o.SecurityLevel))
	return out
}
// CloudfunctionsBetaFunctionEventTriggerToProto converts a FunctionEventTrigger
// object to its proto representation; nil maps to nil.
func CloudfunctionsBetaFunctionEventTriggerToProto(o *beta.FunctionEventTrigger) *betapb.CloudfunctionsBetaFunctionEventTrigger {
	if o == nil {
		return nil
	}
	out := &betapb.CloudfunctionsBetaFunctionEventTrigger{}
	out.SetEventType(dcl.ValueOrEmptyString(o.EventType))
	out.SetResource(dcl.ValueOrEmptyString(o.Resource))
	out.SetService(dcl.ValueOrEmptyString(o.Service))
	out.SetFailurePolicy(dcl.ValueOrEmptyBool(o.FailurePolicy))
	return out
}
// FunctionToProto converts a Function resource to its proto representation,
// copying scalar fields, nested objects, enums, and both string maps.
func FunctionToProto(resource *beta.Function) *betapb.CloudfunctionsBetaFunction {
	proto := &betapb.CloudfunctionsBetaFunction{}
	proto.SetName(dcl.ValueOrEmptyString(resource.Name))
	proto.SetDescription(dcl.ValueOrEmptyString(resource.Description))
	proto.SetSourceArchiveUrl(dcl.ValueOrEmptyString(resource.SourceArchiveUrl))
	proto.SetSourceRepository(CloudfunctionsBetaFunctionSourceRepositoryToProto(resource.SourceRepository))
	proto.SetHttpsTrigger(CloudfunctionsBetaFunctionHttpsTriggerToProto(resource.HttpsTrigger))
	proto.SetEventTrigger(CloudfunctionsBetaFunctionEventTriggerToProto(resource.EventTrigger))
	proto.SetStatus(CloudfunctionsBetaFunctionStatusEnumToProto(resource.Status))
	proto.SetEntryPoint(dcl.ValueOrEmptyString(resource.EntryPoint))
	proto.SetRuntime(dcl.ValueOrEmptyString(resource.Runtime))
	proto.SetTimeout(dcl.ValueOrEmptyString(resource.Timeout))
	proto.SetAvailableMemoryMb(dcl.ValueOrEmptyInt64(resource.AvailableMemoryMb))
	proto.SetServiceAccountEmail(dcl.ValueOrEmptyString(resource.ServiceAccountEmail))
	proto.SetUpdateTime(dcl.ValueOrEmptyString(resource.UpdateTime))
	proto.SetVersionId(dcl.ValueOrEmptyInt64(resource.VersionId))
	proto.SetMaxInstances(dcl.ValueOrEmptyInt64(resource.MaxInstances))
	proto.SetVpcConnector(dcl.ValueOrEmptyString(resource.VPCConnector))
	proto.SetVpcConnectorEgressSettings(CloudfunctionsBetaFunctionVPCConnectorEgressSettingsEnumToProto(resource.VPCConnectorEgressSettings))
	proto.SetIngressSettings(CloudfunctionsBetaFunctionIngressSettingsEnumToProto(resource.IngressSettings))
	proto.SetRegion(dcl.ValueOrEmptyString(resource.Region))
	proto.SetProject(dcl.ValueOrEmptyString(resource.Project))
	// Copy the string maps so the proto does not alias the resource's maps.
	labels := make(map[string]string, len(resource.Labels))
	for key, value := range resource.Labels {
		labels[key] = value
	}
	proto.SetLabels(labels)
	envVars := make(map[string]string, len(resource.EnvironmentVariables))
	for key, value := range resource.EnvironmentVariables {
		envVars[key] = value
	}
	proto.SetEnvironmentVariables(envVars)
	return proto
}
// applyFunction handles the gRPC request by passing it to the underlying
// Function Apply() method and converting the result back to proto form.
func (s *FunctionServer) applyFunction(ctx context.Context, c *beta.Client, request *betapb.ApplyCloudfunctionsBetaFunctionRequest) (*betapb.CloudfunctionsBetaFunction, error) {
	resource := ProtoToFunction(request.GetResource())
	applied, err := c.ApplyFunction(ctx, resource)
	if err != nil {
		return nil, err
	}
	return FunctionToProto(applied), nil
}
// ApplyCloudfunctionsBetaFunction handles the gRPC request by building a
// client from the request's service account file and delegating to
// applyFunction.
func (s *FunctionServer) ApplyCloudfunctionsBetaFunction(ctx context.Context, request *betapb.ApplyCloudfunctionsBetaFunctionRequest) (*betapb.CloudfunctionsBetaFunction, error) {
	client, err := createConfigFunction(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyFunction(ctx, client, request)
}
// DeleteCloudfunctionsBetaFunction handles the gRPC request by passing it to
// the underlying Function Delete() method. An empty proto is returned either
// way; the error reports the outcome.
func (s *FunctionServer) DeleteCloudfunctionsBetaFunction(ctx context.Context, request *betapb.DeleteCloudfunctionsBetaFunctionRequest) (*emptypb.Empty, error) {
	client, err := createConfigFunction(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	deleteErr := client.DeleteFunction(ctx, ProtoToFunction(request.GetResource()))
	return &emptypb.Empty{}, deleteErr
}
// ListCloudfunctionsBetaFunction handles the gRPC request by passing it to
// the underlying FunctionList() method and converting each item to proto.
func (s *FunctionServer) ListCloudfunctionsBetaFunction(ctx context.Context, request *betapb.ListCloudfunctionsBetaFunctionRequest) (*betapb.ListCloudfunctionsBetaFunctionResponse, error) {
	client, err := createConfigFunction(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := client.ListFunction(ctx, request.GetProject(), request.GetRegion())
	if err != nil {
		return nil, err
	}
	var protos []*betapb.CloudfunctionsBetaFunction
	for _, item := range resources.Items {
		protos = append(protos, FunctionToProto(item))
	}
	response := &betapb.ListCloudfunctionsBetaFunctionResponse{}
	response.SetItems(protos)
	return response, nil
}
// createConfigFunction builds a DCL beta client authenticated with the
// credentials in the given service account file. ctx is currently unused but
// kept for interface symmetry with the handlers that call this.
//
// Fix: parameter renamed from snake_case (service_account_file) to Go's
// MixedCaps convention; redundant parentheses removed from the return types.
func createConfigFunction(ctx context.Context, serviceAccountFile string) (*beta.Client, error) {
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return beta.NewClient(conf), nil
}
|
package general
import "context"
// BaseService provides default implementations of endpoints shared by the
// services in this package (health check and version reporting). Embed it to
// inherit those handlers.
type BaseService struct {
}
// HealthCheck reports liveness; it always succeeds with an empty response and
// ignores both the context and the request.
func (s *BaseService) HealthCheck(_ context.Context, _ *HealthCheckRequest) (*HealthCheckResponse, error) {
	return &HealthCheckResponse{}, nil
}
// Version returns the package-level Version value wrapped in a response.
func (s *BaseService) Version(ctx context.Context, req *VersionRequest) (*VersionResponse, error) {
	resp := &VersionResponse{Version: Version}
	return resp, nil
}
|
/*
Given two int values, return their sum. Unless the two values are the same, then return double their sum.
*/
package main
import (
"fmt"
)
// sum_double returns the sum of x and y, doubled when the two values are
// equal. (Name kept snake_case for compatibility with existing callers.)
func sum_double(x int, y int) int {
	sum := x + y
	if x != y {
		return sum
	}
	return 2 * sum
}
// main runs three spot checks against sum_double and prints "OK" only when
// all of them pass.
func main() {
	checks := []struct{ x, y, want int }{
		{1, 2, 3},
		{3, 2, 5},
		{2, 2, 8},
	}
	passed := 0
	for _, c := range checks {
		if sum_double(c.x, c.y) == c.want {
			passed++
		}
	}
	if passed == 3 {
		fmt.Println("OK")
	} else {
		fmt.Println("NOT OK")
	}
}
|
package main
import (
"testing"
"lib"
"bytes"
"time"
)
// TestCountDown verifies CountDown's output and its interaction with the
// Sleeper dependency using two spies.
func TestCountDown(t *testing.T) {
	t.Run("write countdown and sleep 4 times", func(t *testing.T) {
		buffer := &bytes.Buffer{}
		spySleeper := &SpySleeper{}
		CountDown(buffer, spySleeper)
		// Raw string: the countdown lines must start at column 0 exactly.
		want := `3
2
1
Go!`
		lib.AssertEqual(t, buffer.String(), want)
		// One sleep before each of the four written lines.
		lib.AssertEqualIntegers(t, spySleeper.Calls, 4)
	})
	t.Run("write countdown and sleep in order", func(t *testing.T) {
		// The spy serves as both writer and sleeper so it can record the
		// interleaving of operations.
		countDownOperationsSpy := &CountDownOperationsSpy{}
		CountDown(countDownOperationsSpy, countDownOperationsSpy)
		// Expected ordering: sleep precedes every write.
		want := []string{
			sleep,
			write,
			sleep,
			write,
			sleep,
			write,
			sleep,
			write,
		}
		lib.AssertEqualStringSlices(t, countDownOperationsSpy.Calls, want)
	})
}
// TestConfigurableSleeper checks that ConfigurableSleeper.Sleep forwards its
// configured duration to the injected sleep function (captured by SpyTime).
func TestConfigurableSleeper(t *testing.T) {
	sleepTime := 5 * time.Second
	spyTime := &SpyTime{}
	// SpyTime.Sleep records the duration instead of actually sleeping.
	sleeper := ConfigurableSleeper{sleepTime, spyTime.Sleep}
	sleeper.Sleep()
	lib.AssertEqual(t, spyTime.durationSlept, sleepTime)
}
|
package domain
import (
"context"
)
// Merchant is a merchant entity exposed over JSON.
type Merchant struct {
	ID int64 `json:"id"`     // identifier; presumably DB primary key — TODO confirm
	Name string `json:"name"` // display name
}

// MerchantGroup expresses a parent/child relationship between two merchants.
type MerchantGroup struct {
	ParentMerchantID int64 `json:"parentMerchantId"`
	ChildMerchantID int64 `json:"childMerchantId"`
}
// MerchantUsecase is the application-layer contract for merchant operations.
type MerchantUsecase interface {
	// Store persists a merchant.
	Store(ctx context.Context, m *Merchant) error
	// SetChild records mg's child merchant under its parent merchant.
	SetChild(ctx context.Context, mg *MerchantGroup) error
}

// MerchantRepository is the persistence-layer contract backing MerchantUsecase.
type MerchantRepository interface {
	Store(ctx context.Context, m *Merchant) error
	// InitSetting initializes settings for a merchant; exact semantics live in
	// the implementation — NOTE(review): confirm against the concrete repo.
	InitSetting(ctx context.Context, m *Merchant) error
	SetChild(ctx context.Context, mg *MerchantGroup) error
	// IsAuthorizedParent reports whether the parent in mg may claim the child.
	IsAuthorizedParent(ctx context.Context, mg *MerchantGroup) (bool, error)
}
|
package pomodoro
import (
"strings"
"testing"
"time"
)
const (
	// layout is the time format string shared by these tests.
	//
	// NOTE(review): this is not a standard Go reference layout. Go layouts are
	// written against "Mon Jan 2 15:04:05 2006"; here "01" (the month
	// placeholder) appears where the day of month ("02") and the seconds
	// ("05") would normally go, so formatted values repeat the month in those
	// positions. The tests' expected strings were written to match this
	// layout, so confirm intent before changing it.
	layout string = "Jan 01 2006 at 15:04:01"
)
// TestNewPomodoro checks that a freshly created pomodoro starts out active.
func TestNewPomodoro(t *testing.T) {
	p := NewPomodoro()
	if !p.Active {
		t.Fail()
	}
	t.Log("\n", p.Active)
}
// TestGetCurrentTime logs the formatted current time. It contains no
// assertion, so it can never fail; it only exercises the code path.
func TestGetCurrentTime(t *testing.T) {
	ti := GetCurrentTime().Format(layout)
	t.Log("\n", ti)
}
// TestPomodoroTimer verifies that Timer returns "now plus one second",
// comparing the two instants at the precision of the shared layout.
//
// Fix: corrected the misspelled success log message ("Succesufully").
func TestPomodoroTimer(t *testing.T) {
	tn := time.Now().Local()
	ti := Timer(1 * time.Second)
	if ti.Format(layout) != tn.Add(1*time.Second).Format(layout) {
		t.Fail()
		t.Log("\nExpected:", tn, "\nReceived:", ti)
	}
	t.Log("Successfully added one second")
}
// TestFormatDate checks FormatDate's string rendering for a fixed date.
//
// NOTE(review): the local variable named "time" shadows the time package for
// the rest of this function; the expected string also carries a ".000"
// millisecond suffix that the package-level layout does not produce, so
// FormatDate apparently uses its own layout — confirm.
func TestFormatDate(t *testing.T) {
	date, time := FormatDate(2015, time.January, 1, 0, 0)
	t.Log("\nDate string:", date, "\ntime.Time format:", time)
	if date != "Jan 01 2015 at 00:00:01.000" {
		t.Fail()
	}
}
// TestNewDefaultPomodoro verifies that a new pomodoro has non-empty start and
// end timestamps and is active.
func TestNewDefaultPomodoro(t *testing.T) {
	p := NewPomodoro()
	start := p.Start.Format(layout)
	end := p.End.Format(layout)
	// Both dates must be present and the pomodoro must be active.
	if start == "" || end == "" || !p.Active {
		t.Fail()
	}
	t.Log("\n", p.Active, "\n", p.Start, "\n", p.End)
}
// TestFormatOutput checks FormatOutput against a pomodoro pinned to a fixed
// start date with the default duration applied.
//
// NOTE(review): the loop indexes expected[i] using got's length, so it would
// panic if FormatOutput ever returned more than three entries; the length
// comparison that would catch that runs only after the loop.
func TestFormatOutput(t *testing.T) {
	n := NewPomodoro()
	n.Active = true
	_, n.Start = FormatDate(2015, time.January, 1, 0, 0)
	SetPomodoroDuration(n)
	expected := []string{"active", "Jan 01 2015 at 00:00:01.000", "Jan 01 2015 at 00:25:01.000"}
	got := FormatOutput(n)
	for i := 0; i < len(got); i++ {
		if strings.Compare(expected[i], got[i]) != 0 {
			t.Fail()
			t.Log(strings.Compare(expected[i], got[i]))
		}
		t.Log("\nExpected:", expected[i], "\nReceived:", got[i])
	}
	// Length check kept as a safety net even though the comment below calls
	// it redundant.
	// Dont test this anymore, since it's effectively useless
	if len(expected) != len(got) {
		t.Fail()
		t.Log("\n Expected:", len(expected), "but got:", len(got))
	}
}
|
package nnw
import (
"math"
"math/rand"
)
// Gauss returns e^(-x^2) for a randomly drawn x.
//
// NOTE(review): x is float64(rand.Int()), i.e. a non-negative integer up to
// 2^63-1, so -x*x is astronomically negative and math.Pow(math.E, -x*x)
// underflows to 0 for essentially every draw — this function effectively
// always returns 0. The intent was probably a small random input such as
// rand.Float64() (or rand.NormFloat64() for gaussian weights); confirm
// before changing, since callers may rely on the current output.
func Gauss() float64 {
	x := float64(rand.Int())
	w := math.Pow(math.E, - x * x)
	return w
}
// Random returns a pseudo-random weight uniformly distributed in [0, 1).
func Random() float64 {
	return rand.Float64()
}
// Sigmoid returns the logistic sigmoid 1 / (1 + e^-x), mapping any real x
// into (0, 1) and saturating to exactly 0 / 1 for very large |x|.
//
// Fix: e^-x is computed with math.Exp(-x) instead of math.Pow(math.E, -x) —
// the idiomatic, faster, and more accurate form for base-e exponentials.
func Sigmoid(x float64) float64 {
	return 1 / (1 + math.Exp(-x))
}
// LeRU is the rectified linear unit activation: max(0, x). (The conventional
// spelling is ReLU; the exported name is kept for compatibility.)
func LeRU(x float64) float64 {
	return math.Max(0, x)
}
// DeSigmoid is the inverse of the logistic sigmoid (the logit): given
// y = sigmoid(x) it recovers x = -ln((1-y)/y). Meaningful for y in (0, 1).
func DeSigmoid(y float64) float64 {
	return -math.Log((1 - y) / y)
}
// DeLeRU inverts LeRU on its positive range: positive values map to
// themselves, everything else to 0.
func DeLeRU(y float64) float64 {
	if y > 0 {
		return y
	}
	return 0
}
// SigmoidDerivative is the derivative of the sigmoid expressed in terms of
// its output y: sigma'(x) = y * (1 - y).
func SigmoidDerivative(y float64) float64 {
	return y * (1 - y)
}
// LeRUDerivative returns the derivative of LeRU expressed in terms of its
// output y.
//
// NOTE(review): it returns 1 unconditionally, which is only correct on the
// positive branch; for y == 0 (i.e. input <= 0) the true derivative is 0.
// Confirm whether callers only ever pass activated (positive) outputs.
func LeRUDerivative(y float64) float64 {
	return 1
}
// Sum returns the arithmetic sum of num; a nil or empty slice sums to 0.
func Sum(num []float64) float64 {
	var total float64
	for i := range num {
		total += num[i]
	}
	return total
}
package main
import (
"github.com/stretchr/testify/assert"
"strings"
"testing"
)
// TestParse_Help runs each subcommand with --help and checks that usage text
// was written to the provided writer, logging the output for inspection.
func TestParse_Help(t *testing.T) {
	for _, cmd := range []string{"bagel --help", "bagel tag --help", "bagel divvy --help"} {
		var stdout strings.Builder
		_, _, _ = Parse(cmd, &stdout, nil)
		assert.NotEmpty(t, stdout.String())
		t.Log("\n" + stdout.String())
	}
}
// TestParse_Tag exercises the "tag" subcommand through every argument shape:
// bare, with a tag, with the -c (create) flag, with users, and with a quoted
// multi-word user.
func TestParse_Tag(t *testing.T) {
	// Bare "tag": no flag, no tag, no users.
	cli, context, err := Parse("bagel tag", nil, nil)
	assert.Nil(t, err, err)
	assert.Equal(t, "tag", context.Command())
	assert.False(t, cli.Tag.Create)
	assert.Equal(t, "", cli.Tag.Tag)
	assert.Equal(t, 0, len(cli.Tag.Users))
	// Positional tag argument.
	cli, context, err = Parse("bagel tag frontend", nil, nil)
	assert.Nil(t, err, err)
	assert.Equal(t, "tag <tag>", context.Command())
	assert.False(t, cli.Tag.Create)
	assert.Equal(t, "frontend", cli.Tag.Tag)
	assert.Equal(t, 0, len(cli.Tag.Users))
	// -c flag sets Create while still capturing the tag.
	cli, context, err = Parse("bagel tag -c frontend", nil, nil)
	assert.Nil(t, err, err)
	assert.Equal(t, "tag <tag>", context.Command())
	assert.True(t, cli.Tag.Create)
	assert.Equal(t, "frontend", cli.Tag.Tag)
	assert.Equal(t, 0, len(cli.Tag.Users))
	// Tag plus a single user.
	cli, context, err = Parse("bagel tag backend megan", nil, nil)
	assert.Nil(t, err, err)
	assert.Equal(t, "tag <tag> <users>", context.Command())
	assert.False(t, cli.Tag.Create)
	assert.Equal(t, "backend", cli.Tag.Tag)
	assert.Equal(t, []string{"megan"}, cli.Tag.Users)
	// Quoted argument should stay one user ("kevin chan").
	cli, context, err = Parse("bagel tag backend megan \"kevin chan\"", nil, nil)
	assert.Nil(t, err, err)
	assert.Equal(t, "tag <tag> <users>", context.Command())
	assert.False(t, cli.Tag.Create)
	assert.Equal(t, "backend", cli.Tag.Tag)
	assert.Equal(t, []string{"megan", "kevin chan"}, cli.Tag.Users)
}
// TestParse_Divvy exercises the "divvy" subcommand: default group size,
// positional tag, and the --size override.
func TestParse_Divvy(t *testing.T) {
	// Bare "divvy": default Size of 2.
	cli, context, err := Parse("bagel divvy", nil, nil)
	assert.Nil(t, err, err)
	assert.Equal(t, "divvy", context.Command())
	assert.Equal(t, 2, cli.Divvy.Size)
	// Positional tag keeps the default size.
	cli, context, err = Parse("bagel divvy all", nil, nil)
	assert.Nil(t, err, err)
	assert.Equal(t, "divvy <tag>", context.Command())
	assert.Equal(t, 2, cli.Divvy.Size)
	assert.Equal(t, "all", cli.Divvy.Tag)
	// --size flag overrides the default.
	cli, context, err = Parse("bagel divvy --size 3 all", nil, nil)
	assert.Nil(t, err, err)
	assert.Equal(t, "divvy <tag>", context.Command())
	assert.Equal(t, 3, cli.Divvy.Size)
}
|
package handler
import (
"fmt"
"net/http"
"github.com/teejays/clog"
"github.com/teejays/n-factor-vault/backend/library/go-api"
"github.com/teejays/n-factor-vault/backend/library/id"
"github.com/teejays/n-factor-vault/backend/src/auth"
"github.com/teejays/n-factor-vault/backend/src/vault"
)
// init is intentionally empty.
//
// NOTE(review): an empty init adds nothing at runtime and could be removed;
// it may be a leftover registration hook. Confirm before deleting.
func init() {
}
// HandleCreateVault creates a new vault for the authenticated user
func HandleCreateVault(w http.ResponseWriter, r *http.Request) {
var req vault.CreateVaultRequest
err := api.UnmarshalJSONFromRequest(r, &req)
if err != nil {
api.WriteError(w, http.StatusBadRequest, err, false, nil)
return
}
// Populate the UserID field of req using the authenticated userID
u, err := auth.GetUserFromContext(r.Context())
if err != nil {
api.WriteError(w, http.StatusInternalServerError, err, true, nil)
return
}
req.AdminUserID = u.ID
// Attempt login and get the token
v, err := vault.CreateVault(r.Context(), req)
if err != nil {
api.WriteError(w, http.StatusBadRequest, err, false, nil)
return
}
api.WriteResponse(w, http.StatusCreated, v)
}
// HandleGetVaults (GET) returns the vaults that the authenticated user is a part of
func HandleGetVaults(w http.ResponseWriter, r *http.Request) {
// Populate the UserID field of req using the authenticated userID
u, err := auth.GetUserFromContext(r.Context())
if err != nil {
api.WriteError(w, http.StatusInternalServerError, err, true, nil)
return
}
// Attempt login and get the token
vaults, err := vault.GetVaultsByUser(r.Context(), u.ID)
if err != nil {
api.WriteError(w, http.StatusInternalServerError, err, true, nil)
return
}
clog.Debugf("%s: HandleGetVaults(): returning:\n%+v", "Vault Handler", vaults)
api.WriteResponse(w, http.StatusOK, vaults)
}
// HandleAddVaultUser is the HTTP handler for adding a new user to a vault
func HandleAddVaultUser(w http.ResponseWriter, r *http.Request) {
// In the HTTP request body, we only expect the userID of the user
// to be added. The vaultID of the vault will be in the URL
// Get the content of the request
var req vault.AddUserToVaultRequest
err := api.UnmarshalJSONFromRequest(r, &req)
if err != nil {
api.WriteError(w, http.StatusBadRequest, err, false, nil)
return
}
if req.UserID.IsEmpty() {
api.WriteError(w, http.StatusBadRequest, fmt.Errorf("empty user_id"), false, nil)
return
}
// Get the vaultID from URL params
vaultID, err := api.GetMuxParamStr(r, "vault_id")
if err != nil {
api.WriteError(w, http.StatusBadRequest, err, false, nil)
return
}
req.VaultID, err = id.StrToID(vaultID)
if err != nil {
api.WriteError(w, http.StatusBadRequest, err, false, nil)
return
}
// Call the vault package function to add user to the vault
v, err := vault.AddUserToVault(r.Context(), req)
if err != nil {
api.WriteError(w, http.StatusInternalServerError, err, true, nil)
return
}
api.WriteResponse(w, http.StatusOK, v)
}
|
package pki
import (
"crypto/x509"
"crypto/x509/pkix"
)
// GenerateCSR builds an x509 certificate request template for the given
// domains: the first domain becomes the subject common name and all domains
// (including the first) are listed as DNS SANs.
//
// Fix: an empty or nil domain list now yields a template with no common name
// and no SANs instead of panicking on domains[0].
func GenerateCSR(domains []string) *x509.CertificateRequest {
	template := x509.CertificateRequest{
		DNSNames: domains,
	}
	if len(domains) > 0 {
		template.Subject = pkix.Name{
			CommonName: domains[0],
		}
	}
	return &template
}
|
package lbctrl
import (
"context"
"fmt"
"github.com/zdnscloud/elb-controller/driver"
"github.com/zdnscloud/gok8s/client"
corev1 "k8s.io/api/core/v1"
)
const (
	// ZcloudLBVIPAnnotationKey is the Service annotation carrying the load
	// balancer virtual IP; its presence opts a LoadBalancer Service into
	// handling (see isServiceNeedHandle).
	ZcloudLBVIPAnnotationKey = "lb.zcloud.cn/vip"
	// ZcloudLBMethodAnnotationKey selects the balancing method: "lc", "hash",
	// or anything else for round-robin (see getLBConfigMethod).
	ZcloudLBMethodAnnotationKey = "lb.zcloud.cn/method"
)
// genLBConfig translates a k8s Service plus its Endpoints into the
// driver-level load balancer configuration: one driver.Service per service
// port, all sharing the same backend host list.
//
// Fix: the backend host list does not depend on the port, so it is computed
// once instead of once per port (getServiceNodesIP walks the endpoint
// subsets and the node IP map on every call).
func genLBConfig(svc *corev1.Service, ep *corev1.Endpoints, clusterName string, nodeIpMap map[string]string) driver.Config {
	result := driver.Config{
		K8sCluster:   clusterName,
		K8sNamespace: ep.Namespace,
		K8sService:   ep.Name,
		Services:     []driver.Service{},
		VIP:          svc.Annotations[ZcloudLBVIPAnnotationKey],
		Method:       getLBConfigMethod(svc),
	}
	backendHosts := getServiceNodesIP(nodeIpMap, ep)
	for _, port := range svc.Spec.Ports {
		result.Services = append(result.Services, driver.Service{
			Port:         port.Port,
			BackendPort:  port.NodePort,
			BackendHosts: backendHosts,
			Protocol:     getLBConfigProtocol(port.Protocol),
		})
	}
	return result
}
// getServiceNodesIP resolves the set of node IPs backing the first endpoint
// subset, covering both ready and not-ready addresses. Returns nil when the
// endpoints have no subsets.
//
// Fix: EndpointAddress.NodeName is a *string and may be unset; the previous
// unconditional dereference would panic on such addresses. Nil node names
// are now skipped.
func getServiceNodesIP(nodeIpMap map[string]string, ep *corev1.Endpoints) []string {
	if len(ep.Subsets) == 0 {
		return nil
	}
	nodes := make(map[string]bool)
	for _, addr := range ep.Subsets[0].Addresses {
		if addr.NodeName != nil {
			nodes[*addr.NodeName] = true
		}
	}
	for _, addr := range ep.Subsets[0].NotReadyAddresses {
		if addr.NodeName != nil {
			nodes[*addr.NodeName] = true
		}
	}
	ips := make([]string, 0)
	for key := range nodes {
		ips = append(ips, nodeIpMap[key])
	}
	return ips
}
// getLBConfigProtocol maps a k8s service protocol to the driver protocol.
// Anything other than UDP (including SCTP) is treated as TCP.
func getLBConfigProtocol(p corev1.Protocol) driver.Protocol {
	switch p {
	case corev1.ProtocolUDP:
		return driver.ProtocolUDP
	default:
		return driver.ProtocolTCP
	}
}
// getNodeIPMap builds a node-name -> internal-IP map for every node in the
// cluster. If a node reports several internal IPs, the last one listed wins.
func getNodeIPMap(c client.Client) (map[string]string, error) {
	nodeList := &corev1.NodeList{}
	if err := c.List(context.TODO(), &client.ListOptions{}, nodeList); err != nil {
		return nil, err
	}
	ipByNode := make(map[string]string)
	for _, node := range nodeList.Items {
		for _, address := range node.Status.Addresses {
			if address.Type == corev1.NodeInternalIP {
				ipByNode[node.Name] = address.Address
			}
		}
	}
	return ipByNode, nil
}
// getLBConfigMethod reads the balancing-method annotation off the Service:
// "lc" selects least-connections, "hash" selects hash, and anything else
// (including a missing annotation) falls back to round-robin.
func getLBConfigMethod(svc *corev1.Service) driver.LoadBalanceMethod {
	method := svc.Annotations[ZcloudLBMethodAnnotationKey]
	if method == "lc" {
		return driver.LBMethodLeastConnections
	}
	if method == "hash" {
		return driver.LBMethodHash
	}
	return driver.LBMethodRoundRobin
}
// isServiceNeedHandle reports whether the Service should be managed by this
// controller: it must be of type LoadBalancer AND carry the VIP annotation.
func isServiceNeedHandle(svc *corev1.Service) bool {
	_, hasVIP := svc.Annotations[ZcloudLBVIPAnnotationKey]
	return hasVIP && isLoadBalancerService(svc)
}
// isLoadBalancerService reports whether the Service is of type LoadBalancer.
func isLoadBalancerService(svc *corev1.Service) bool {
	return svc.Spec.Type == corev1.ServiceTypeLoadBalancer
}
// genObjNamespacedName renders a namespace/name pair as
// "{namespace:<ns>, name:<name>}" for logging.
func genObjNamespacedName(namespace, name string) string {
	return fmt.Sprint("{namespace:", namespace, ", name:", name, "}")
}
|
/*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2019 Red Hat, Inc.
*
*/
package tests
import (
"fmt"
"os"
"os/exec"
"strings"
"time"
. "github.com/onsi/gomega"
coreapi "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
const (
	// TestBridgeName is the name of the bridge created on a node under test.
	TestBridgeName = "br_test"
	// TestPodBridgeName is a second bridge name; presumably the one requested
	// as a pod resource — TODO confirm against the tests that use it.
	TestPodBridgeName = "br_podtest"
	// TestPodName is the name of the test pod looked up in "default" by
	// CheckPodStatus.
	TestPodName = "bridge-marker-test"
)

// evaluate is a predicate over a Pod, used by CheckPodStatus to decide when
// polling may stop.
type evaluate func(*v1.Pod) bool
// RunOnNode executes a shell command on a cluster node by docker-exec'ing
// into the provider container ("<KUBEVIRT_PROVIDER>-<node>") and running its
// ssh.sh helper. It returns the command's combined output with the first two
// lines (SSH connection banner) stripped, plus the exec error, if any.
//
// Panics if the KUBEVIRT_PROVIDER environment variable is not set.
func RunOnNode(node string, command string) (string, error) {
	provider, ok := os.LookupEnv("KUBEVIRT_PROVIDER")
	if !ok {
		panic("KUBEVIRT_PROVIDER environment variable must be specified")
	}
	out, err := exec.Command("docker", "exec", provider+"-"+node, "ssh.sh", command).CombinedOutput()
	outString := string(out)
	outLines := strings.Split(outString, "\n")
	// first two lines of output indicate that connection was successful
	outStripped := outLines[2:]
	outStrippedString := strings.Join(outStripped, "\n")
	return outStrippedString, err
}
// GenerateResourceName builds the extended-resource name the bridge marker
// exposes for a bridge, e.g. "bridge.network.kubevirt.io/br_test".
func GenerateResourceName(bridgeName string) coreapi.ResourceName {
	return coreapi.ResourceName("bridge.network.kubevirt.io/" + bridgeName)
}
// getAllSchedulableNodes lists the cluster's nodes.
//
// NOTE(review): despite the name, no schedulability filter is applied —
// metav1.ListOptions{} is empty, so unschedulable/tainted nodes are returned
// too. Confirm whether a field or label selector was intended.
func getAllSchedulableNodes(clientset *kubernetes.Clientset) (*coreapi.NodeList, error) {
	nodes, err := clientset.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("failed to list compute nodes: %v", err)
	}
	return nodes, nil
}
// AddBridgeOnSchedulableNode creates the named bridge on the first node
// returned by getAllSchedulableNodes and returns that node's name.
func AddBridgeOnSchedulableNode(clientset *kubernetes.Clientset, bridgename string) (string, error) {
	nodes, err := getAllSchedulableNodes(clientset)
	if err != nil {
		return "", err
	}
	if len(nodes.Items) == 0 {
		return "", fmt.Errorf("no schedulable nodes found")
	}
	nodeName := nodes.Items[0].Name
	return nodeName, AddBridgeOnNode(nodeName, bridgename)
}
// AddBridgeOnNode creates the named bridge on the node and brings it up,
// stopping at the first failing command and wrapping its output in the error.
func AddBridgeOnNode(node, bridgename string) error {
	commands := []string{
		fmt.Sprintf("sudo ip link add %s type bridge", bridgename),
		fmt.Sprintf("sudo ip link set %s up", bridgename),
	}
	for _, cmd := range commands {
		if out, err := RunOnNode(node, cmd); err != nil {
			return fmt.Errorf("%v: %s", err, out)
		}
	}
	return nil
}
// RemoveBridgeFromNode deletes the named bridge on the node, wrapping the
// command output into the error on failure.
func RemoveBridgeFromNode(node, bridgename string) error {
	if out, err := RunOnNode(node, fmt.Sprintf("sudo ip link del %s", bridgename)); err != nil {
		return fmt.Errorf("%v: %s", err, out)
	}
	return nil
}
// PodSpec builds a minimal single-container Pod manifest: a centos image
// that sleeps forever, with the given resource list applied as both limits
// and requests (this is how the bridge extended resource is requested).
func PodSpec(name string, resourceRequirements v1.ResourceList) *v1.Pod {
	req := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:  name,
					Image: "centos",
					Resources: v1.ResourceRequirements{
						Limits:   resourceRequirements,
						Requests: resourceRequirements,
					},
					// Keep the container alive indefinitely.
					Command: []string{"/bin/bash", "-c", "sleep INF"},
				},
			},
		},
	}
	return req
}
// CheckPodStatus polls the test pod in the "default" namespace every 5
// seconds until the evaluate predicate returns true or the deadline passes
// (Gomega's Eventually fails the spec on timeout).
//
// NOTE(review): timeout is already a time.Duration yet it is multiplied by
// time.Second, so callers are expected to pass a bare count (e.g. 120)
// rather than 120*time.Second — confirm against call sites before changing.
func CheckPodStatus(clientset *kubernetes.Clientset, timeout time.Duration, evaluate evaluate) {
	Eventually(func() bool {
		pod, err := clientset.CoreV1().Pods("default").Get(TestPodName, metav1.GetOptions{})
		Expect(err).ToNot(HaveOccurred())
		return evaluate(pod)
	}, timeout*time.Second, 5*time.Second).Should(Equal(true))
}
|
package c15_pkcs7_validation
import (
"bytes"
"testing"
)
// TestValidInput checks that correctly PKCS#7-padded input is stripped back
// to the raw plaintext with no error.
func TestValidInput(t *testing.T) {
	input := []byte("ICE ICE BABY\x04\x04\x04\x04")
	want := []byte("ICE ICE BABY")
	got, err := Validation(input)
	if err != nil || !bytes.Equal(got, want) {
		t.Errorf("Incorrect result. Expected: (%s, nil), got: (%s, %s)\n", want, got, err)
	}
}
// TestInvalidInput1 checks that a padding byte larger than the actual pad
// run (0x05 repeated four times) is rejected with ValidationError and an
// empty result.
func TestInvalidInput1(t *testing.T) {
	input := []byte("ICE ICE BABY\x05\x05\x05\x05")
	got, err := Validation(input)
	if err != ValidationError || !bytes.Equal(got, []byte{}) {
		t.Errorf("Incorrect result. Expected: ('', %s), got: (%s, %s)\n", ValidationError, got, err)
	}
}
// TestInvalidInput2 checks that non-uniform padding bytes (\x01\x02\x03\x04)
// are rejected with ValidationError and an empty result.
func TestInvalidInput2(t *testing.T) {
	input := []byte("ICE ICE BABY\x01\x02\x03\x04")
	got, err := Validation(input)
	if err != ValidationError || !bytes.Equal(got, []byte{}) {
		t.Errorf("Incorrect result. Expected: ('', %s), got: (%s, %s)\n", ValidationError, got, err)
	}
}
|
/*
交易市场订单参数
返回所有系统支持的交易市场的参数信息,包括交易费,最小下单量,价格精度等。
http://data.gate.io/api2/1/marketinfo
*/
package main
import (
"github.com/buger/jsonparser"
"errors"
"strconv"
)
// ApiMarketInfo wraps the gate.io "marketinfo" endpoint: per-pair trading
// parameters (fees, minimum order size, price precision).
type ApiMarketInfo struct {
	Api
	Result bool `json:"result,string"` // endpoint success flag (sent as a string)
	Pairs []ApiMarketInfoPair `json:"pairs"`
}

// ApiMarketInfoPair couples a pair symbol with its trading parameters.
type ApiMarketInfoPair struct {
	Pair string
	Info ApiMarketInfoPairInfo
}

// ApiMarketInfoPairInfo holds the trading parameters for one pair.
type ApiMarketInfoPairInfo struct {
	Decimal int64 `json:"decimal_places"`   // price precision (decimal places)
	MinAmount float64 `json:"min_amount"`   // minimum order amount
	Fee float64 `json:"fee"`                // trading fee
}
// Init wires the endpoint metadata (description and "marketinfo" URI) and
// the Postgres handle into the receiver, returning it for call chaining.
func (api *ApiMarketInfo) Init(pg *Postgres) (*ApiMarketInfo) {
	// desc is a runtime value ("market order parameters"), kept verbatim.
	api.desc = "交易市场订单参数"
	api.uri = "marketinfo"
	api.pg = pg
	return api
}
// Request fetches the raw marketinfo payload via the embedded Api's HTTP GET.
func (api *ApiMarketInfo) Request() ([]byte, error) {
	return api.httpGet(api.uri)
}
// Parser decodes the marketinfo response body: it checks the string-encoded
// "result" flag, then walks the "pairs" array, where each element is an
// object keyed by the pair symbol with decimal_places / min_amount / fee
// fields. Parsed pairs are stored on api.Pairs.
//
// NOTE(review): the err1 returned from the ObjectEach callback only aborts
// that object's iteration — ObjectEach's own return value is discarded, so a
// pair that fails to parse is silently skipped rather than failing Parser.
// Confirm whether that best-effort behavior is intended.
func (api *ApiMarketInfo) Parser(body []byte) (error) {
	result, err := jsonparser.GetString(body, "result")
	if err != nil {
		return err
	}
	api.Result, err = strconv.ParseBool(result)
	if err != nil {
		return err
	}
	if !api.Result {
		// Runtime error text ("endpoint reported failure"), kept verbatim.
		return errors.New("接口返回失败")
	}
	var pairs []ApiMarketInfoPair
	_, err =jsonparser.ArrayEach(body, func(value []byte, dataType jsonparser.ValueType, offset int, err error) {
		jsonparser.ObjectEach(value, func(key []byte, value []byte, dataType jsonparser.ValueType, offset int) error {
			var err1 error
			var pair ApiMarketInfoPair
			pair.Pair = string(key)
			pair.Info.Decimal, err1 = jsonparser.GetInt(value, "decimal_places")
			if err1 != nil {
				return err1
			}
			pair.Info.MinAmount, err1 = jsonparser.GetFloat(value, "min_amount")
			if err1 != nil {
				return err1
			}
			pair.Info.Fee, err1 = jsonparser.GetFloat(value, "fee")
			if err1 != nil {
				return err1
			}
			pairs = append(pairs, pair)
			return nil
		})
	}, "pairs")
	if err != nil {
		return err
	}
	api.Pairs = pairs
	return nil
}
func (api *ApiMarketInfo) Save() (error) {
for _, pair := range api.Pairs {
sql := "UPDATE bs_pairs SET precision = $1, min_amount = $2, fee = $3 WHERE pair = $4"
_, err := api.pg.Exec(sql, pair.Info.Decimal, pair.Info.MinAmount, pair.Info.Fee, pair.Pair)
if err != nil {
return err
}
}
return nil
} |
package main
import (
"./ant"
)
// main runs the full benchmark suite from the local ant package.
func main() {
	ant.BenchAll()
}
package main
import (
"bufio"
"os"
"strconv"
"github.com/muesli/termenv"
)
/**
* ReadInts
*
* @desc: read in lines from stdin and convert to ints
*
* @return: array of ints
*
* this functions expects actual ints coming in
* as is a typical puzzle input in AoC
*
* @usage: ./exe < input-file
**/
// ReadInts reads lines from stdin and returns them converted to ints, one
// per line, as is typical for Advent of Code puzzle input (./exe < file).
//
// NOTE(review): the Atoi error is discarded, so a non-numeric line silently
// contributes 0 to the result; scan.Err() is also never checked, so a read
// failure looks like end-of-input. Acceptable per the header comment's
// "expects actual ints" contract, but confirm for untrusted input.
func ReadInts() []int {
	var n []int
	scan := bufio.NewScanner(os.Stdin)
	for scan.Scan() {
		line, _ := strconv.Atoi(scan.Text())
		n = append(n, line)
	}
	return n
}
/**
* ColorStr
*
* @desc: colorize `s` with `hex` color.
*
* @param: `s` string to colorize.
* @param: `hex` hex color
*
* @return: `s` colored with `hex`.
**/
// ColorStr returns s as a termenv Style with its foreground set to the given
// hex color, using the terminal's detected color profile.
func ColorStr(s string, hex string) termenv.Style {
	p := termenv.ColorProfile()
	return termenv.String(s).Foreground(p.Color(hex))
}
|
package version
import "fmt"
// Build metadata, expected to be injected at build time (e.g. -ldflags -X).
var (
	// OLMVersion indicates what version of OLM the binary belongs to.
	OLMVersion string
	// GitCommit indicates which git commit the binary was built from.
	GitCommit string
)

// String returns a pretty, multi-line rendering of OLMVersion and GitCommit.
func String() string {
	return fmt.Sprintf("OLM version: %s\ngit commit: %s\n", OLMVersion, GitCommit)
}

// Full returns the compact "<version>-<commit>" form.
func Full() string {
	return OLMVersion + "-" + GitCommit
}
|
package main
import (
"log"
"./gui"
"./util"
"./constants"
"github.com/andlabs/ui"
_ "github.com/andlabs/ui/winmanifest"
)
// main bootstraps the desktop app: ensures the app data directory exists,
// then hands control to the andlabs/ui main loop with the GUI builder.
func main() {
	log.Printf("Starting %s %s", constants.APP_NAME, constants.APP_VERSION)
	log.Println("Making app dir.")
	util.CreateDirIfNotExist(constants.APP_DIR)
	log.Println("Making Application GUI.")
	// Blocks until the UI exits; SetUpUI constructs all windows/widgets.
	ui.Main(gui.SetUpUI)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.