text stringlengths 11 4.05M |
|---|
package routes
import (
	"crypto/subtle"
	"encoding/json"
	"net/http"
	"strconv"

	"github.com/cjburchell/go-uatu"
	"github.com/cjburchell/reefstatus-commands/settings"
	"github.com/cjburchell/reefstatus-common/communication"
	"github.com/gorilla/mux"
)
// session is the communication session shared by all command handlers;
// it is assigned once in SetupCommandRoute.
var session communication.Session

// SetupCommandRoute registers the /command sub-router on r.
// Every route is POST-only and protected by tokenMiddleware, which
// requires an "APIKEY <token>" Authorization header.
func SetupCommandRoute(r *mux.Router, s communication.Session) {
	session = s
	commandRoute := r.PathPrefix("/command").Subrouter()
	commandRoute.Use(tokenMiddleware)
	commandRoute.HandleFunc("/feedpause", handleFeedPasue).Methods("POST")
	commandRoute.HandleFunc("/thunderstorm", handleThunderstorm).Methods("POST")
	commandRoute.HandleFunc("/resetReminder/{Index}", handleResetReminder).Methods("POST")
	commandRoute.HandleFunc("/maintenance/{Index}", handleMaintenance).Methods("POST")
	commandRoute.HandleFunc("/clearlevelalarm/{ID}", handleClearLevelAlarm).Methods("POST")
	commandRoute.HandleFunc("/startwaterchange/{ID}", handleStartWaterChange).Methods("POST")
}
// tokenMiddleware rejects any request whose Authorization header does not
// exactly match "APIKEY <settings.DataServiceToken>", replying 401.
func tokenMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(response http.ResponseWriter, request *http.Request) {
		auth := request.Header.Get("Authorization")
		expected := "APIKEY " + settings.DataServiceToken
		// Constant-time comparison avoids leaking the token length/content
		// through response timing (the original used a plain !=).
		if subtle.ConstantTimeCompare([]byte(auth), []byte(expected)) != 1 {
			response.WriteHeader(http.StatusUnauthorized)
			return
		}
		next.ServeHTTP(response, request)
	})
}
// handleFeedPasue handles POST /command/feedpause. The request body is a
// JSON boolean that enables or disables feed pause.
func handleFeedPasue(w http.ResponseWriter, r *http.Request) {
	log.Printf("handleFeedPasue %s", r.URL.String())
	// Decode the body directly from the stream. The original read into a
	// nil []byte, which always yields zero bytes, so `enable` was silently
	// false regardless of the payload.
	var enable bool
	if err := json.NewDecoder(r.Body).Decode(&enable); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	communication.FeedPause(session, enable)
	reply, _ := json.Marshal(true)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusAccepted)
	w.Write(reply)
}
// handleThunderstorm handles POST /command/thunderstorm. The request body
// is a JSON integer giving the storm duration.
func handleThunderstorm(w http.ResponseWriter, r *http.Request) {
	log.Printf("handleThunderstorm %s", r.URL.String())
	// Decode the body directly; the previous read into a nil slice always
	// produced an empty body, so `duration` was silently 0.
	var duration int
	if err := json.NewDecoder(r.Body).Decode(&duration); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	communication.Thunderstorm(session, duration)
	reply, _ := json.Marshal(true)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusAccepted)
	w.Write(reply)
}
// handleResetReminder handles POST /command/resetReminder/{Index} and
// resets the reminder at the given numeric index.
func handleResetReminder(w http.ResponseWriter, r *http.Request) {
	log.Printf("handleResetReminder %s", r.URL.String())
	index, err := strconv.Atoi(mux.Vars(r)["Index"])
	if err != nil {
		// A non-numeric index used to be silently treated as 0.
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	communication.ResetReminder(session, index)
	reply, _ := json.Marshal(true)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusAccepted)
	w.Write(reply)
}
// handleMaintenance handles POST /command/maintenance/{Index}. The body is
// a JSON boolean enabling or disabling maintenance mode for that index.
func handleMaintenance(w http.ResponseWriter, r *http.Request) {
	log.Printf("handleMaintenance %s", r.URL.String())
	index, err := strconv.Atoi(mux.Vars(r)["Index"])
	if err != nil {
		// A non-numeric index used to be silently treated as 0.
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	// Decode the body directly; reading into a nil slice always yielded an
	// empty body, so `enable` was silently false.
	var enable bool
	if err := json.NewDecoder(r.Body).Decode(&enable); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	communication.Maintenance(session, index, enable)
	reply, _ := json.Marshal(true)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusAccepted)
	w.Write(reply)
}
// handleClearLevelAlarm handles POST /command/clearlevelalarm/{ID} and
// clears the level alarm for the given ID.
func handleClearLevelAlarm(w http.ResponseWriter, r *http.Request) {
	log.Printf("handleClearLevelAlarm %s", r.URL.String())
	// A missing route variable simply yields "", so the comma-ok form is
	// unnecessary here.
	id := mux.Vars(r)["ID"]
	communication.ClearLevelAlarm(session, id)
	reply, _ := json.Marshal(true)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusAccepted)
	w.Write(reply)
}
// handleStartWaterChange handles POST /command/startwaterchange/{ID}.
func handleStartWaterChange(w http.ResponseWriter, r *http.Request) {
	// The log line previously said "handleThunderstorm" — a copy-paste slip
	// that made these requests indistinguishable in the logs.
	log.Printf("handleStartWaterChange %s", r.URL.String())
	id := mux.Vars(r)["ID"]
	communication.WaterChange(session, id)
	reply, _ := json.Marshal(true)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusAccepted)
	w.Write(reply)
}
|
package main
import (
"bufio"
"fmt"
"log"
"net"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
"pixivic/pixiv"
"pixivic/pixiv/strategy"
"golang.org/x/net/proxy"
)
// main wires up a SOCKS5-proxied HTTP client, loads the download cache and
// cookie, then runs the Pixiv crawl until it finishes or the user types "q".
func main() {
	log.SetFlags(log.Ldate | log.Ltime | log.Lshortfile)
	countdown := sync.WaitGroup{}
	done := make(chan bool)
	memo := make(map[string]bool)
	// NOTE(review): the SOCKS5 setup error is ignored; a bad proxy address
	// would only surface on the first HTTP request.
	dialer, _ := proxy.SOCKS5("tcp", "127.0.0.1:7890",
		nil, &net.Dialer{
			Timeout:   30 * time.Second,
			KeepAlive: 30 * time.Second})
	trans := &http.Transport{
		Dial: dialer.Dial,
	}
	client := &http.Client{
		Transport: trans,
		Timeout:   time.Second * 30, // per-request timeout
	}
	// Load the Pixiv cookie (read from cookie.txt by getCookie).
	cookie := getCookie()
	p := &pixiv.Pixiv{
		GoroutinePool: make(chan struct{}, 50),          // bounds the number of worker goroutines
		PicChan:       make(chan *pixiv.PicDetail, 200), // channel carrying picture details/ids
		RequestPool:   make(chan struct{}, 50),          // caps concurrent requests inside DoRequest
		Client:        client,                           // proxied HTTP client
		Cookie:        cookie,
		CountDown:     &countdown, // barrier used for a clean shutdown
		Memo:          memo,       // cache to avoid downloading duplicates
		Done:          done,       // signalled to tell other goroutines to stop
		CrawlStrategy: strategy.KeywordStrategy,
		Mutex:         &sync.Mutex{},
	}
	// Seed the cache with ids of previously downloaded images.
	getOldImg(memo)
	fmt.Println("具体操作详见博客: https://www.vergessen.top/article/v/9942142761049736")
	fmt.Println("默认输入关键字爬取关键字对应的收藏数大于1000的图片")
	input := bufio.NewScanner(os.Stdin)
	var inputCtx string
	if input.Scan() {
		inputCtx = strings.ToLower(input.Text())
	}
	if initPixiv(p, inputCtx) {
		// Watch stdin: typing "q" asks the crawl to stop after draining
		// the work already submitted.
		go func() {
			for {
				if input.Scan() {
					scan := strings.ToLower(input.Text())
					if scan == "q" {
						fmt.Println("停止进程中, 程序将在执行完已提交任务后退出...")
						done <- true
						p.PicChan <- &pixiv.PicDetail{}
						break
					}
				}
			}
		}()
		// Start producing picture URLs using the selected strategy.
		p.GetUrls()
		// Start the picture download workers.
		p.CrawUrl()
		// Wait for every started task to finish.
		countdown.Wait()
	} else {
		fmt.Println("输入参数有误!")
	}
	fmt.Println()
	fmt.Println("进程已停止, 按回车退出程序...")
	fmt.Println()
	input.Scan()
}
// initPixiv parses the user's input line and configures p.
//
// Grammar: "<keyword>|all [-b<bookmarks>] [-t<type>] [-s<strategy>]".
// The first token is the search keyword ("all" clears it); remaining
// tokens are two-character flags with the value attached. Returns false
// when the input cannot be parsed.
func initPixiv(p *pixiv.Pixiv, inputCtx string) bool {
	inputCtx = strings.Trim(strings.ToLower(inputCtx), " ")
	keywords := strings.Split(inputCtx, " ")
	if len(keywords) == 0 {
		return false
	}
	if keywords[0] == "all" {
		keywords[0] = ""
	}
	p.KeyWord = keywords[0]
	p.Bookmarks = 1000 // default: only pictures with >1000 bookmarks
	p.PicType = "wh"
	for _, keyword := range keywords[1:] {
		// Guard: tokens shorter than the two-character flag prefix used to
		// panic on keyword[:2]; treat them as invalid input instead.
		if len(keyword) < 2 {
			return false
		}
		switch keyword[:2] {
		case "-b":
			bookmarks, err := strconv.Atoi(keyword[2:])
			if err != nil {
				return false
			}
			p.Bookmarks = bookmarks
		case "-t":
			p.PicType = keyword[2:]
		case "-s":
			switch keyword[2:] {
			case "keyword":
				p.CrawlStrategy = strategy.KeywordStrategy
				fmt.Println("即将根据搜索关键字爬取图片")
			case "related":
				p.CrawlStrategy = strategy.PicIdStrategy
				fmt.Println("即将根据图片ID爬取相关图片")
			case "author":
				// The author strategy requires a numeric author id as the
				// first token.
				if _, err := strconv.Atoi(keywords[0]); err != nil {
					return false
				}
				p.CrawlStrategy = strategy.AuthorStrategy
				fmt.Println("即将根据作者ID爬取该作者的所有图片")
			default:
				p.CrawlStrategy = strategy.KeywordStrategy
				fmt.Println("即将根据搜索关键字爬取图片")
			}
		}
	}
	return true
}
// 获取之前下载的缓存的函数, images/memos 缓存了曾经所有下载过的图片的id,以空格分隔
func getOldImg(memo map[string]bool) {
os.MkdirAll("images",0644)
memoFile, _ := os.OpenFile("images/memos",
os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
reader := bufio.NewReader(memoFile)
for {
s, e := reader.ReadString(byte(' '))
if e != nil {
break
}
memo[strings.Split(s," ")[0]] = true
}
memoFile.Close()
}
func getCookie() string {
cookieFile, _ := os.OpenFile("cookie.txt",
os.O_RDWR|os.O_CREATE|os.O_APPEND, 0644)
reader := bufio.NewReader(cookieFile)
line, _, _ := reader.ReadLine()
cookie := fmt.Sprintf("%s", line)
return cookie[:len(cookie)-1]
}
|
package handlers
import (
"github.com/KyleWS/blog-api/api-server/models"
"github.com/KyleWS/blog-api/api-server/sessions"
)
// ReqCtx bundles the stores a request handler needs: the MongoDB-backed
// post store and the in-memory session store.
type ReqCtx struct {
	PostStore    *models.MongoStore
	SessionStore *sessions.MemStore
}
|
package scsprotov1
import (
"github.com/abiosoft/semaphore"
"github.com/eclipse/paho.mqtt.golang"
"github.com/op/go-logging"
)
// NewSeismoCloudProtocolV1 builds a V1 protocol handler bound to the given
// MQTT client. It publishes an empty retained message on "server" (clearing
// any previously retained payload) and subscribes to "server" at QoS 2.
//
// NOTE(review): each message is handled synchronously inside the subscribe
// callback, so the semaphore acquire/release around handleMessage bounds
// nothing unless handleMessage was meant to run in a goroutine — confirm
// whether a `go` statement was intended here.
func NewSeismoCloudProtocolV1(maxconcurrent int, mqttc mqtt.Client, log *logging.Logger, callbacks V1Callbacks) SeismoCloudProtocolV1 {
	ret := &scsv1{
		callbacks,
		log,
		mqttc,
		semaphore.New(maxconcurrent),
	}
	mqttc.Publish("server", 2, true, []byte{}).Wait()
	mqttc.Subscribe("server", 2, func(client mqtt.Client, message mqtt.Message) {
		if len(message.Payload()) == 0 {
			// Empty payload (e.g. the retained-clear above): ignore.
			return
		}
		if len(message.Payload()) < 3 {
			log.Warning("Message too short")
			return
		}
		cmd, deviceid, payload := decodeMessage(message.Payload())
		ret.concurrentRoutinesPool.Acquire()
		ret.handleMessage(cmd, deviceid, payload)
		ret.concurrentRoutinesPool.Release()
	})
	return ret
}
|
package main
import (
"fmt"
"github.com/spf13/viper"
)
// main loads ./config.ini via viper and prints a few sample values.
func main() {
	viper.SetConfigName("config")
	viper.SetConfigType("ini")
	viper.AddConfigPath(".")
	err := viper.ReadInConfig()
	if err != nil {
		panic(err)
	}
	// Expected output: someValue, general@gmail.com, dave.
	keys := []string{"app.global1", "emails.general", "employees.manager"}
	for _, key := range keys {
		fmt.Println(viper.Get(key))
	}
}
|
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package executor
import (
"context"
"strings"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/config"
"github.com/pingcap/tidb/executor/internal/exec"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/tidb-binlog/node"
"github.com/pingcap/tidb/util/chunk"
)
// ChangeExec represents a change executor.
// It embeds the parsed CHANGE statement (node type, node id, target state)
// and applies it in Next.
type ChangeExec struct {
	exec.BaseExecutor
	*ast.ChangeStmt
}
// Next implements the Executor Next interface.
// It looks up the binlog node identified by e.NodeID (node kind taken from
// e.NodeType) in the etcd-backed registry and writes e.State onto it.
func (e *ChangeExec) Next(ctx context.Context, _ *chunk.Chunk) error {
	kind := strings.ToLower(e.NodeType)
	urls := config.GetGlobalConfig().Path
	registry, needToClose, err := getOrCreateBinlogRegistry(urls)
	if err != nil {
		return err
	}
	if needToClose {
		// Only close registries created here, not shared ones.
		defer func() {
			_ = registry.Close()
		}()
	}
	nodes, _, err := registry.Nodes(ctx, node.NodePrefix[kind])
	if err != nil {
		return err
	}
	state := e.State
	nodeID := e.NodeID
	for _, n := range nodes {
		if n.NodeID != nodeID {
			continue
		}
		switch state {
		// Only these states are legal to write back to the registry.
		case node.Online, node.Pausing, node.Paused, node.Closing, node.Offline:
			n.State = state
			return registry.UpdateNode(ctx, node.NodePrefix[kind], n)
		default:
			return errors.Errorf("state %s is illegal", state)
		}
	}
	return errors.NotFoundf("node %s, id %s from etcd %s", kind, nodeID, urls)
}
|
package main
import (
"flag"
"log"
)
// name is populated from the -name flag (registered in init).
var name string

// init registers the -name flag before flag.Parse runs in main.
func init() {
	flag.StringVar(&name, "name", "Kean", "your wonderful name")
}

// age uses the pointer-returning flag API, in contrast to the *Var form
// used for name above.
var age = flag.Int("age", 0, "your graceful age")

// main parses the command line flags and prints a greeting.
func main() {
	flag.Parse()
	log.Printf("Hello %s (%d years), Welcome to the command line world", name, *age)
}
package main
import "fmt"
// person is a basic value type holding a name and age.
type person struct {
	fname string
	lname string
	age   int
}

// secretAgent embeds person, so the person fields are promoted.
type secretAgent struct {
	person
	hasLicenceGun bool
}

// employee is only used by the commented-out demo in main.
type employee struct {
	Id   int
	name string
}

// human is satisfied by any type with a speak method.
type human interface {
	speak()
}

// speak implements human for secretAgent, overriding the promoted
// person.speak.
// NOTE(review): the text still says "the person speak" and the extra
// spaces inside Println produce doubled spacing — this looks like a
// copy-paste leftover; confirm the intended output before changing it.
func (s secretAgent) speak() {
	fmt.Println("I am ", s.fname, s.lname, " - the person speak")
}

// speak implements human for person.
func (p person) speak() {
	fmt.Println("I am", p.fname, p.lname, "- the person speak")
}

// bar accepts any human and makes it speak (dynamic dispatch).
func bar(h human) {
	h.speak()
}
// main demonstrates embedding and interface dispatch: a secretAgent and a
// person are printed and then asked to speak through the human interface.
func main() {
	agent := secretAgent{
		person:        person{fname: "James", lname: "Bond", age: 32},
		hasLicenceGun: true,
	}
	fmt.Println(agent)

	joker := person{fname: "Joker", lname: "", age: 35}
	fmt.Println(joker)

	// person and secretAgent each satisfy human with their own speak.
	bar(joker)
	bar(agent)
}
|
// Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kv
import (
"testing"
"github.com/pingcap/tipb/go-tipb"
"github.com/stretchr/testify/assert"
)
// TestVersion checks that Version.Cmp orders versions like integer
// comparison and that MinVersion sorts before MaxVersion.
func TestVersion(t *testing.T) {
	le := NewVersion(42).Cmp(NewVersion(43))
	gt := NewVersion(42).Cmp(NewVersion(41))
	eq := NewVersion(42).Cmp(NewVersion(42))
	assert.True(t, le < 0)
	assert.True(t, gt > 0)
	assert.True(t, eq == 0)
	assert.True(t, MinVersion.Cmp(MaxVersion) < 0)
}
// TestMppVersion checks string→MppVersion parsing for each known value and
// that the newest MPP version is 2. Both "unspecified" and "-1" map to
// MppVersionUnspecified.
func TestMppVersion(t *testing.T) {
	assert.Equal(t, int64(2), GetNewestMppVersion().ToInt64())
	{
		v, ok := ToMppVersion("unspecified")
		assert.True(t, ok)
		assert.Equal(t, v, MppVersionUnspecified)
	}
	{
		v, ok := ToMppVersion("-1")
		assert.True(t, ok)
		assert.Equal(t, v, MppVersionUnspecified)
	}
	{
		v, ok := ToMppVersion("0")
		assert.True(t, ok)
		assert.Equal(t, v, MppVersionV0)
	}
	{
		v, ok := ToMppVersion("1")
		assert.True(t, ok)
		assert.Equal(t, v, MppVersionV1)
	}
	{
		v, ok := ToMppVersion("2")
		assert.True(t, ok)
		assert.Equal(t, v, MppVersionV2)
	}
}
// TestExchangeCompressionMode round-trips every compression mode through
// Name()/ToExchangeCompressionMode and checks the recommended default
// (FAST) and its tipb mapping.
func TestExchangeCompressionMode(t *testing.T) {
	assert.Equal(t, "UNSPECIFIED", ExchangeCompressionModeUnspecified.Name())
	{
		a, ok := ToExchangeCompressionMode("UNSPECIFIED")
		assert.Equal(t, a, ExchangeCompressionModeUnspecified)
		assert.True(t, ok)
	}
	assert.Equal(t, "NONE", ExchangeCompressionModeNONE.Name())
	{
		a, ok := ToExchangeCompressionMode("NONE")
		assert.Equal(t, a, ExchangeCompressionModeNONE)
		assert.True(t, ok)
	}
	assert.Equal(t, "FAST", ExchangeCompressionModeFast.Name())
	{
		a, ok := ToExchangeCompressionMode("FAST")
		assert.Equal(t, a, ExchangeCompressionModeFast)
		assert.True(t, ok)
	}
	assert.Equal(t, "HIGH_COMPRESSION", ExchangeCompressionModeHC.Name())
	{
		a, ok := ToExchangeCompressionMode("HIGH_COMPRESSION")
		assert.Equal(t, a, ExchangeCompressionModeHC)
		assert.True(t, ok)
	}
	// default `FAST`
	assert.Equal(t, ExchangeCompressionModeFast, RecommendedExchangeCompressionMode)
	assert.Equal(t, tipb.CompressionMode_FAST, RecommendedExchangeCompressionMode.ToTipbCompressionMode())
}
|
package jwt
import (
"MI/models"
"MI/models/req"
"MI/pkg/cache"
"MI/pkg/logger"
"MI/pkg/setting"
"context"
"github.com/dgrijalva/jwt-go"
"github.com/go-redis/redis/v8"
"time"
)
// Claims is the JWT payload for a logged-in user.
// NOTE(review): "NikeName" looks like a typo for "NickName", but the field
// and its JSON tag are part of the token format — renaming would break
// existing tokens and callers.
type Claims struct {
	Id       uint   `json:"id"`
	NikeName string `json:"nike_name"`
	RealName string `json:"real_name"`
	Mobile   string `json:"mobile"`
	jwt.StandardClaims
}
// JwtKey is the HMAC signing key, taken from application settings.
var JwtKey = []byte(setting.JwtConf.Key)

// GenerateToken creates an HS256-signed JWT for user that expires at
// expTime.
func GenerateToken(user models.Users, expTime time.Time) (string, error) {
	tokenClaim := jwt.NewWithClaims(jwt.SigningMethodHS256, Claims{
		Id:       user.Id,
		NikeName: user.NikeName,
		RealName: user.RealName,
		Mobile:   user.Mobile,
		StandardClaims: jwt.StandardClaims{
			ExpiresAt: expTime.Unix(),
			Subject:   "go-mi",
		},
	})
	return tokenClaim.SignedString(JwtKey)
}
// ParseToken validates token against JwtKey and returns its Claims.
// On any failure (bad signature, expired, wrong claims type) the error is
// logged and returned with nil claims.
func ParseToken(token string) (*Claims, error) {
	tokenClaims, err := jwt.ParseWithClaims(token, &Claims{}, func(token *jwt.Token) (interface{}, error) {
		return JwtKey, nil
	})
	if tokenClaims != nil {
		if claims, ok := tokenClaims.Claims.(*Claims); ok && tokenClaims.Valid {
			return claims, nil
		}
	}
	logger.Logger.Error("解析jwt出错 : ", err)
	return nil, err
}
// AddBlack records a token in the cache blacklist for 24 hours.
// NOTE(review): the cache key is "black-token"+key+value with no
// separator, so distinct (key, value) pairs could collide; kept as-is
// because existing entries already use this format.
func AddBlack(key, value string) error {
	key = "black-token" + key + value
	return cache.Set(context.Background(), key, value, 3600*24*time.Second)
}
// IsBlackExist reports whether token is blacklisted for key.
// It returns false only when the cache reports the key missing
// (redis.Nil); any other cache error makes the token count as blacklisted
// — effectively failing closed.
func IsBlackExist(key, token string) bool {
	key = "black-token" + key + token
	val, err := cache.Get(context.Background(), key)
	if err == redis.Nil && val != token {
		return false
	}
	return true
}
// EmailClaims is the JWT payload used for e-mail operation tokens.
type EmailClaims struct {
	UserID        int    `json:"user_id"`
	Email         string `json:"email"`
	OperationType int    `json:"operation_type"`
	jwt.StandardClaims
}
// GenerateEmailToken creates an HS256-signed token carrying the e-mail
// operation request; it expires 15 minutes after creation.
func GenerateEmailToken(req req.EmailReq) (string, error) {
	nowTime := time.Now()
	expireTime := nowTime.Add(15 * time.Minute)
	claims := EmailClaims{
		req.UserID,
		req.Email,
		req.OperationType,
		jwt.StandardClaims{
			ExpiresAt: expireTime.Unix(),
			Issuer:    "go-mi",
		},
	}
	tokenClaims := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
	token, err := tokenClaims.SignedString(JwtKey)
	return token, err
}
// ParseEmailToken validates an e-mail token against JwtKey and returns its
// EmailClaims; failures are logged and returned with nil claims.
func ParseEmailToken(token string) (*EmailClaims, error) {
	tokenClaims, err := jwt.ParseWithClaims(token, &EmailClaims{}, func(token *jwt.Token) (interface{}, error) {
		return JwtKey, nil
	})
	if tokenClaims != nil {
		if claims, ok := tokenClaims.Claims.(*EmailClaims); ok && tokenClaims.Valid {
			return claims, nil
		}
	}
	logger.Logger.Error("解析jwt出错 : ", err)
	return nil, err
}
package logging
import (
"fmt"
"github.com/apache/arrow/go/v8/arrow"
"github.com/apache/arrow/go/v8/arrow/array"
"github.com/apache/arrow/go/v8/arrow/memory"
"github.com/feast-dev/feast/go/protos/feast/types"
gotypes "github.com/feast-dev/feast/go/types"
)
// MemoryBuffer accumulates feature-serving log rows and periodically
// compacts them into Arrow records matching arrowSchema.
type MemoryBuffer struct {
	logs        []*Log                // uncompacted rows
	schema      *FeatureServiceSchema // logical feature-service schema
	arrowSchema *arrow.Schema         // Arrow schema derived from schema
	records     []arrow.Record        // compacted, not-yet-flushed records
}
// Reserved column names added to every log record, plus the number of
// buffered rows at which logs are compacted into one Arrow record.
const (
	LOG_TIMESTAMP_FIELD  = "__log_timestamp"
	LOG_DATE_FIELD       = "__log_date"
	LOG_REQUEST_ID_FIELD = "__request_id"
	RECORD_SIZE          = 1000
)
// NewMemoryBuffer creates an empty buffer for the given feature-service
// schema. It fails when the schema cannot be mapped to Arrow types.
func NewMemoryBuffer(schema *FeatureServiceSchema) (*MemoryBuffer, error) {
	arrowSchema, err := getArrowSchema(schema)
	if err != nil {
		return nil, err
	}
	return &MemoryBuffer{
		logs:        make([]*Log, 0),
		records:     make([]arrow.Record, 0),
		schema:      schema,
		arrowSchema: arrowSchema,
	}, nil
}
// writeBatch compacts any pending rows and flushes all buffered Arrow
// records to sink; on success the record buffer is cleared.
// NOTE(review): the flushed records are dropped without Release(); confirm
// the sink takes ownership, otherwise the Arrow buffers leak.
func (b *MemoryBuffer) writeBatch(sink LogSink) error {
	if len(b.logs) > 0 {
		err := b.Compact()
		if err != nil {
			return err
		}
	}
	if len(b.records) == 0 {
		// Nothing to flush.
		return nil
	}
	err := sink.Write(b.records)
	if err != nil {
		return err
	}
	b.records = b.records[:0]
	return nil
}
// Append adds one log row to the buffer, compacting the buffered rows into
// an Arrow record once RECORD_SIZE rows have accumulated.
func (b *MemoryBuffer) Append(log *Log) error {
	b.logs = append(b.logs, log)
	if len(b.logs) != RECORD_SIZE {
		return nil
	}
	return b.Compact()
}
// Compact converts the buffered rows into one Arrow record, stores it in
// b.records and resets the row buffer (keeping its capacity).
func (b *MemoryBuffer) Compact() error {
	record, err := b.convertToArrowRecord()
	if err != nil {
		return err
	}
	b.records = append(b.records, record)
	b.logs = b.logs[:0]
	return nil
}
// getArrowSchema maps a FeatureServiceSchema to an Arrow schema: one
// column per join key, request parameter and feature, plus a per-feature
// __timestamp (seconds) and __status (int32) column, and the reserved
// log timestamp/date/request-id columns.
func getArrowSchema(schema *FeatureServiceSchema) (*arrow.Schema, error) {
	fields := make([]arrow.Field, 0)
	for _, joinKey := range schema.JoinKeys {
		arrowType, err := gotypes.ValueTypeEnumToArrowType(schema.JoinKeysTypes[joinKey])
		if err != nil {
			return nil, err
		}
		fields = append(fields, arrow.Field{Name: joinKey, Type: arrowType})
	}
	for _, requestParam := range schema.RequestData {
		arrowType, err := gotypes.ValueTypeEnumToArrowType(schema.RequestDataTypes[requestParam])
		if err != nil {
			return nil, err
		}
		fields = append(fields, arrow.Field{Name: requestParam, Type: arrowType})
	}
	for _, featureName := range schema.Features {
		arrowType, err := gotypes.ValueTypeEnumToArrowType(schema.FeaturesTypes[featureName])
		if err != nil {
			return nil, err
		}
		fields = append(fields, arrow.Field{Name: featureName, Type: arrowType})
		fields = append(fields, arrow.Field{
			Name: fmt.Sprintf("%s__timestamp", featureName),
			Type: arrow.FixedWidthTypes.Timestamp_s})
		fields = append(fields, arrow.Field{
			Name: fmt.Sprintf("%s__status", featureName),
			Type: arrow.PrimitiveTypes.Int32})
	}
	fields = append(fields, arrow.Field{Name: LOG_TIMESTAMP_FIELD, Type: arrow.FixedWidthTypes.Timestamp_us})
	fields = append(fields, arrow.Field{Name: LOG_DATE_FIELD, Type: arrow.FixedWidthTypes.Date32})
	fields = append(fields, arrow.Field{Name: LOG_REQUEST_ID_FIELD, Type: arrow.BinaryTypes.String})
	return arrow.NewSchema(fields, nil), nil
}
// convertToArrowRecord converts the buffered rows (b.logs) into a single
// columnar Arrow record following b.arrowSchema. Proto-valued columns are
// gathered per column name first and copied into the builders in one pass;
// timestamp/status/log columns are appended directly while iterating rows.
func (b *MemoryBuffer) convertToArrowRecord() (arrow.Record, error) {
	arrowMemory := memory.NewCgoArrowAllocator()
	numRows := len(b.logs)
	columns := make(map[string][]*types.Value)
	fieldNameToIdx := make(map[string]int)
	for idx, field := range b.arrowSchema.Fields() {
		fieldNameToIdx[field.Name] = idx
	}
	builder := array.NewRecordBuilder(arrowMemory, b.arrowSchema)
	defer builder.Release()
	builder.Reserve(numRows)
	for rowIdx, logRow := range b.logs {
		// Join keys, request data and feature values are positional in the
		// log row, aligned with the schema's ordered name slices.
		for colIdx, joinKey := range b.schema.JoinKeys {
			if _, ok := columns[joinKey]; !ok {
				columns[joinKey] = make([]*types.Value, numRows)
			}
			columns[joinKey][rowIdx] = logRow.EntityValue[colIdx]
		}
		for colIdx, requestParam := range b.schema.RequestData {
			if _, ok := columns[requestParam]; !ok {
				columns[requestParam] = make([]*types.Value, numRows)
			}
			columns[requestParam][rowIdx] = logRow.RequestData[colIdx]
		}
		for colIdx, featureName := range b.schema.Features {
			if _, ok := columns[featureName]; !ok {
				columns[featureName] = make([]*types.Value, numRows)
			}
			columns[featureName][rowIdx] = logRow.FeatureValues[colIdx]
			// Per-feature event timestamp (seconds) and status columns are
			// appended directly; builder.Reserve(numRows) above pre-sizes
			// the builders for the UnsafeAppend calls.
			timestamp := arrow.Timestamp(logRow.EventTimestamps[colIdx].GetSeconds())
			timestampFieldIdx := fieldNameToIdx[fmt.Sprintf("%s__timestamp", featureName)]
			statusFieldIdx := fieldNameToIdx[fmt.Sprintf("%s__status", featureName)]
			builder.Field(timestampFieldIdx).(*array.TimestampBuilder).UnsafeAppend(timestamp)
			builder.Field(statusFieldIdx).(*array.Int32Builder).UnsafeAppend(int32(logRow.FeatureStatuses[colIdx]))
		}
		logTimestamp := arrow.Timestamp(logRow.LogTimestamp.UnixMicro())
		logDate := arrow.Date32FromTime(logRow.LogTimestamp)
		builder.Field(fieldNameToIdx[LOG_TIMESTAMP_FIELD]).(*array.TimestampBuilder).UnsafeAppend(logTimestamp)
		builder.Field(fieldNameToIdx[LOG_DATE_FIELD]).(*array.Date32Builder).UnsafeAppend(logDate)
		builder.Field(fieldNameToIdx[LOG_REQUEST_ID_FIELD]).(*array.StringBuilder).Append(logRow.RequestId)
	}
	for columnName, protoArray := range columns {
		fieldIdx := fieldNameToIdx[columnName]
		err := gotypes.CopyProtoValuesToArrowArray(builder.Field(fieldIdx), protoArray)
		if err != nil {
			return nil, err
		}
	}
	return builder.NewRecord(), nil
}
|
// 就是一个进位加法,要学会如何组织代码
package main
import (
"fmt"
)
// ListNode is a singly linked list node holding one decimal digit.
type ListNode struct {
	Val  int
	Next *ListNode
}

// addTwoNumber adds two non-negative numbers whose digits are stored in
// reverse order, one digit per node, and returns the sum in the same
// reversed-digit format (LeetCode 2).
func addTwoNumber(l1 *ListNode, l2 *ListNode) *ListNode {
	head := &ListNode{}
	node := head
	carry := 0
	for {
		var digit int
		digit, carry = add(l1, l2, carry)
		node.Val = digit
		l1, l2 = next(l1), next(l2)
		if l1 == nil && l2 == nil {
			break
		}
		node.Next = &ListNode{}
		node = node.Next
	}
	// A carry left over after both inputs are exhausted becomes a final
	// leading digit.
	if carry == 1 {
		node.Next = &ListNode{Val: carry}
	}
	return head
}

// next returns l's successor, treating nil as an exhausted list.
func next(l *ListNode) *ListNode {
	if l == nil {
		return nil
	}
	return l.Next
}

// add sums the current digits of l1 and l2 plus the incoming carry,
// returning the resulting digit (0-9) and the outgoing carry (0 or 1).
func add(l1 *ListNode, l2 *ListNode, preCarry int) (sum, carry int) {
	sum = preCarry
	if l1 != nil {
		sum += l1.Val
	}
	if l2 != nil {
		sum += l2.Val
	}
	if sum > 9 {
		sum -= 10
		carry = 1
	}
	return
}
// test_2 builds the lists for 342 and 465 (digits stored in reverse),
// adds them and prints the digits of 807 in reverse ("708").
func test_2() {
	l11 := &ListNode{Val: 2}
	l12 := &ListNode{Val: 4}
	l13 := &ListNode{Val: 3}
	l11.Next = l12
	l12.Next = l13
	l21 := &ListNode{Val: 5}
	l22 := &ListNode{Val: 6}
	l23 := &ListNode{Val: 4}
	l21.Next = l22
	l22.Next = l23
	result := addTwoNumber(l11, l21)
	// The original `for { if cur != nil { ... } }` never exited once the
	// list was exhausted, spinning forever after printing the digits.
	for cur := result; cur != nil; cur = cur.Next {
		fmt.Print(cur.Val)
	}
}
|
package model
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
)
// filename is the config path set by Load and reused by save.
var filename = ""

// data_by_id and data_by_prefix index the same *Entry values two ways.
var data_by_id = map[int]*Entry{}
var data_by_prefix = map[string]*Entry{}

// next_id is the next id handed out by Set.
var next_id = 0
// Entry is one configuration record, keyed both by Id and by Prefix.
// Type selects which of the Type* payloads applies.
type Entry struct {
	Id          int         `json:"id"`
	Prefix      string      `json:"prefix"`
	Type        string      `json:"type"`
	TypeCustom  TypeCustom  `json:"type_custom"`
	TypeStatics TypeStatics `json:"type_statics"`
	TypeProxy   TypeProxy   `json:"type_proxy"`
	TypeScript  TypeScript  `json:"type_script"`
}

// TypeCustom describes a fixed response: status code, headers and body.
type TypeCustom struct {
	StatusCode      int     `json:"status_code"`
	ResponseHeaders Headers `json:"response_headers"`
	Body            string  `json:"body"`
}

// TypeStatics describes a directory with response headers.
type TypeStatics struct {
	ResponseHeaders Headers `json:"response_headers"`
	Directory       string  `json:"directory"`
}

// TypeProxy describes an upstream URL with header sets and TLS material.
type TypeProxy struct {
	Url             string  `json:"url"`
	ResponseHeaders Headers `json:"response_headers"`
	ProxyHeaders    Headers `json:"proxy_headers"`
	Key             string  `json:"key"`
	Cert            string  `json:"cert"`
}

// TypeScript holds script code.
type TypeScript struct {
	Code string `json:"code"`
}

// Headers is an ordered list of header pairs.
type Headers []Header

// Header is a single key/value pair.
type Header struct {
	Key   string `json:"key"`
	Value string `json:"value"`
}
// All returns the live id-indexed entry map (not a copy).
func All() map[int]*Entry {
	return data_by_id
}
// GetById returns the entry with the given id, or nil when it is unknown.
func GetById(id int) *Entry {
	// Indexing a map of pointers yields nil for a missing key, which is
	// exactly the documented "not found" result.
	return data_by_id[id]
}
// GetByPrefix returns the entry registered under prefix, or nil when no
// such entry exists.
func GetByPrefix(prefix string) *Entry {
	// The zero value of map[string]*Entry lookup is already nil.
	return data_by_prefix[prefix]
}
// Set inserts or updates item, keyed by its Prefix. An existing entry with
// the same prefix keeps its id; otherwise the next sequential id is
// assigned. nil header slices are normalized to empty ones (so they JSON-
// encode as [] rather than null) and the store is persisted to disk.
func Set(item *Entry) {
	// Check if exists
	if old_item, exist := data_by_prefix[item.Prefix]; exist {
		delete(data_by_prefix, old_item.Prefix)
		item.Id = old_item.Id
	} else {
		item.Id = next_id
		next_id++
	}
	data_by_id[item.Id] = item
	data_by_prefix[item.Prefix] = item
	if nil == item.TypeCustom.ResponseHeaders {
		item.TypeCustom.ResponseHeaders = Headers{}
	}
	if nil == item.TypeProxy.ResponseHeaders {
		item.TypeProxy.ResponseHeaders = Headers{}
	}
	if nil == item.TypeProxy.ProxyHeaders {
		item.TypeProxy.ProxyHeaders = Headers{}
	}
	if nil == item.TypeStatics.ResponseHeaders {
		item.TypeStatics.ResponseHeaders = Headers{}
	}
	save()
}
// Unset removes item from both indexes and persists the change.
func Unset(item *Entry) {
	delete(data_by_prefix, item.Prefix)
	delete(data_by_id, item.Id)
	save()
}
// Load reads the JSON config file f (an array of entries) and replays each
// entry through Set, rebuilding both indexes (Set also rewrites the file).
// A missing or invalid file leaves the store empty and only prints a
// notice.
func Load(f string) {
	filename = f
	d, err := ioutil.ReadFile(f)
	if nil != err {
		fmt.Println("Unable to read config file, don't worry, ReProxy is running, just configure it at /reproxy/")
		return
	}
	items := []*Entry{}
	err = json.Unmarshal(d, &items)
	if nil != err {
		fmt.Println("Config file is supposed to be a JSON")
		return
	}
	for _, item := range items {
		Set(item)
	}
}
// save writes all entries to filename as a JSON array, overwriting the
// previous contents. Errors are reported on stdout and otherwise ignored.
// NOTE(review): map iteration order makes the on-disk entry order
// nondeterministic between runs.
func save() {
	fp, err := os.Create(filename)
	if err != nil {
		fmt.Printf("Unable to create %v. Err: %v.\n", filename, err)
		return
	}
	defer fp.Close()
	data := []interface{}{}
	for _, item := range All() {
		data = append(data, item)
	}
	encoder := json.NewEncoder(fp)
	if err = encoder.Encode(data); err != nil {
		fmt.Printf("Unable to encode Json file. Err: %v.\n", err)
		return
	}
}
|
package fileWatcher
import "testing"
// Test_isValidDirPath table-tests IsValidDirPath: per the table, existing
// directories ("/", ".") are valid while empty and non-existent paths are
// not.
func Test_isValidDirPath(t *testing.T) {
	tests := []struct {
		name string
		path string
		want bool
	}{
		{"empty path", "", false},
		{"just a slash", "/", true},
		{"fake path", "/thing", false},
		{"dot", ".", true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := IsValidDirPath(tt.path); got != tt.want {
				t.Errorf("isValidDirPath() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package oidc
// AuthenticationMethodsReferences holds AMR information.
type AuthenticationMethodsReferences struct {
	UsernameAndPassword  bool // knowledge factor (see FactorKnowledge)
	TOTP                 bool // possession factor, browser channel
	Duo                  bool // possession factor, service channel
	WebAuthn             bool // possession factor, browser channel
	WebAuthnUserPresence bool // marshals as AMRUserPresence
	WebAuthnUserVerified bool // marshals as AMRPersonalIdentificationNumber
}
// FactorKnowledge returns true if a "something you know" factor of authentication was used.
func (r AuthenticationMethodsReferences) FactorKnowledge() bool {
	return r.UsernameAndPassword
}

// FactorPossession returns true if a "something you have" factor of authentication was used.
func (r AuthenticationMethodsReferences) FactorPossession() bool {
	return r.TOTP || r.WebAuthn || r.Duo
}

// MultiFactorAuthentication returns true if multiple factors were used,
// i.e. both a knowledge factor and a possession factor.
func (r AuthenticationMethodsReferences) MultiFactorAuthentication() bool {
	return r.FactorKnowledge() && r.FactorPossession()
}

// ChannelBrowser returns true if a browser was used to authenticate.
func (r AuthenticationMethodsReferences) ChannelBrowser() bool {
	return r.UsernameAndPassword || r.TOTP || r.WebAuthn
}

// ChannelService returns true if a non-browser service was used to authenticate.
func (r AuthenticationMethodsReferences) ChannelService() bool {
	return r.Duo
}

// MultiChannelAuthentication returns true if the user used more than one
// channel (browser and service) to authenticate.
func (r AuthenticationMethodsReferences) MultiChannelAuthentication() bool {
	return r.ChannelBrowser() && r.ChannelService()
}
// MarshalRFC8176 returns the AMR claim slice of strings in the RFC8176 format.
// https://datatracker.ietf.org/doc/html/rfc8176
func (r AuthenticationMethodsReferences) MarshalRFC8176() []string {
	// Each flag maps to its RFC 8176 value; the table preserves the
	// original emission order.
	entries := []struct {
		set   bool
		value string
	}{
		{r.UsernameAndPassword, AMRPasswordBasedAuthentication},
		{r.TOTP, AMROneTimePassword},
		{r.Duo, AMRShortMessageService},
		{r.WebAuthn, AMRHardwareSecuredKey},
		{r.WebAuthnUserPresence, AMRUserPresence},
		{r.WebAuthnUserVerified, AMRPersonalIdentificationNumber},
		{r.MultiFactorAuthentication(), AMRMultiFactorAuthentication},
		{r.MultiChannelAuthentication(), AMRMultiChannelAuthentication},
	}

	var amr []string
	for _, e := range entries {
		if e.set {
			amr = append(amr, e.value)
		}
	}
	return amr
}
|
package frida_go
import (
"github.com/a97077088/frida-go/cfrida"
"unsafe"
)
// FileMonitor wraps a frida file monitor object; create one with
// FileMonitor_Create.
type FileMonitor struct {
	CObj
}

// Free releases the underlying GObject reference. The monitor must not be
// used afterwards.
func (f *FileMonitor) Free() {
	cfrida.G_object_unref(f.instance)
}
// Enable starts file monitoring synchronously, returning any frida error.
func (f *FileMonitor) Enable() error {
	return cfrida.Frida_file_monitor_enable_sync(f.instance, 0)
}
// Disable stops file monitoring synchronously, returning any frida error.
func (f *FileMonitor) Disable() error {
	return cfrida.Frida_file_monitor_disable_sync(f.instance, 0)
}
// FileMonitor_Create creates a new FileMonitor watching path and registers
// a finalizer that releases the underlying frida object.
func FileMonitor_Create(path string) *FileMonitor {
	dl := new(FileMonitor)
	dl.instance = cfrida.Frida_file_monitor_new(path)
	dl.ptr = unsafe.Pointer(dl.instance)
	setFinalizer(dl, (*FileMonitor).Free)
	return dl
}
|
package osutils
import (
	"bytes"
	"fmt"
	"net/http"
	"net/url"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
)
// ShellExecute runs s through "/bin/bash -c" and returns its stdout.
// On failure the captured stderr is included in the returned error.
func ShellExecute(s string) (string, error) {
	cmd := exec.Command("/bin/bash", "-c", s)
	var cout, cerr bytes.Buffer
	cmd.Stdout = &cout
	cmd.Stderr = &cerr
	if err := cmd.Run(); err != nil {
		// The original captured stderr but discarded it; surface it so
		// callers can see why the command failed.
		if msg := strings.TrimSpace(cerr.String()); msg != "" {
			return "", fmt.Errorf("%s: %w", msg, err)
		}
		return "", err
	}
	return cout.String(), nil
}
// GetExeName returns the base name of the running executable, or the
// empty string when the executable path cannot be resolved.
func GetExeName() string {
	ex, err := os.Executable()
	if err != nil {
		return ""
	}
	return filepath.Base(ex)
}
// GetHttpProxyConfig returns the http_proxy environment variable. The
// error is currently always nil; the signature leaves room for other
// configuration sources.
func GetHttpProxyConfig() (string, error) {
	return os.Getenv("http_proxy"), nil
}
// GetHttpProxy returns a proxy selector suitable for http.Transport.Proxy,
// built from the http_proxy environment variable; it returns nil when no
// proxy is configured.
func GetHttpProxy() func(*http.Request) (*url.URL, error) {
	proxy, err := GetHttpProxyConfig()
	if err != nil {
		// Currently unreachable: GetHttpProxyConfig never returns an error.
		return nil
	}
	proxy = strings.TrimSpace(proxy)
	if "" == proxy {
		return nil
	}
	return func(_ *http.Request) (*url.URL, error) {
		return url.Parse(proxy)
	}
}
func FileSize(path string) (int64, error) {
fileInfo, err := os.Stat(path)
if err != nil {
return 0, err
}
return fileInfo.Size(), nil
}
|
package logic
import (
"github.com/aegoroff/dirstat/scan"
"github.com/aegoroff/godatastruct/rbtree"
"path/filepath"
)
// treeCreator inserts scanned files under target into a red-black tree,
// skipping names rejected by filter.
type treeCreator struct {
	tree   rbtree.RbTree
	target string
	filter Filter
}
// newTreeCreator builds a treeCreator over a fresh red-black tree.
func newTreeCreator(target string, filter Filter) *treeCreator {
	return &treeCreator{
		tree:   rbtree.New(),
		target: target,
		filter: filter,
	}
}
// Handle consumes one scan event. Events without file information are
// ignored, as are files whose base name is rejected by the filter; the
// rest are inserted into the tree.
func (t *treeCreator) Handle(evt *scan.Event) {
	if evt.File == nil {
		return
	}
	if t.filter.Skip(filepath.Base(evt.File.Path)) {
		return
	}
	n := newFile(t.target, evt.File.Path)
	t.tree.Insert(n)
}
|
package main
import (
"fmt"
"math"
"github.com/jackytck/projecteuler/tools"
)
// solve returns, in lowest terms, the largest fraction r/s < 3/7 with
// denominator s <= limit (Project Euler 71).
func solve(limit int) (int, int) {
	// largest r/s smaller than a/b
	var r, s int
	a, b := 3, 7
	// For each denominator q, the largest numerator p with p/q < a/b is
	// floor((a*q-1)/b); keep the candidate that maximizes p/q.
	for q := 2; q <= limit; q++ {
		f := float64(a*q-1) / float64(b)
		p := int(math.Floor(f))
		// p/q > r/s  <=>  p*s > q*r (all values positive); r==0 means
		// "no candidate yet".
		if r == 0 || p*s > q*r {
			r, s = p, q
		}
	}
	return tools.SimplifyFraction(r, s)
}
// main prints the answer for the small example (d <= 8) and for the real
// limit of one million.
func main() {
	fmt.Println(solve(8))
	fmt.Println(solve(1000000))
}
// By listing the set of reduced proper fractions for d ≤ 1,000,000 in ascending
// order of size, find the numerator of the fraction immediately to the left of
// 3/7.
// Note:
// Instead of looping over p and q, one could loop over q only, then infer p.
|
// fmover moves files with a certain extension (or extensions)
// from a source directory to a destination directory
package main
import (
// "errors"
"io/ioutil"
"log"
"os"
"path/filepath"
// "strings"
)
type file struct {
dirpath string
name string
}
func (f *file) fullPath() string {
return f.dirpath + f.name
}
//func dirify(path string) string {}
//filterDirTwo returns file structs for just the files in a
// given directory that matches extension ext
func filterDir(dirpath, ext string) ([]file, error) {
var result []file
files, err := ioutil.ReadDir(dirpath)
if err != nil {
return nil, err
}
for _, f := range files {
if f.IsDir() {
log.Printf("Skipping %s: is a directory", f.Name())
}
e := filepath.Ext(f.Name())
if err != nil {
log.Println(err)
}
if e == ext {
result = append(result, file{dirpath, f.Name()})
}
}
return result, nil
}
// move moves the given files into newdirpath, keeping each file's base
// name. It stops at and returns the first rename error.
func move(files []file, newdirpath string) error {
	for _, f := range files {
		// filepath.Join supplies the separator; the old plain
		// concatenation required newdirpath to end in "/".
		path := filepath.Join(newdirpath, f.name)
		if err := os.Rename(
			f.fullPath(),
			path,
		); err != nil {
			return err
		}
	}
	return nil
}
// getArgs returns (source dir, destination dir, extension) from the
// command line, terminating the process when the argument count is wrong.
func getArgs() (string, string, string) {
	if len(os.Args) != 4 {
		log.Fatal("Error parsing command line arguments.\nRequires three args: SOURCE_DIRECTORY DESTINATION_DIRECTORY EXTENSION")
	}
	return os.Args[1], os.Args[2], os.Args[3]
}
// main wires the tool together: read the three positional arguments,
// collect matching files from the source directory, then move them into the
// destination directory. Any failure is fatal.
func main() {
	// first arg = source dir, second = destination dir, third = extension
	src, dst, ext := getArgs()
	matched, err := filterDir(src, ext)
	if err != nil {
		log.Fatal(err)
	}
	if err := move(matched, dst); err != nil {
		log.Fatal(err)
	}
}
|
package main
import (
"context"
"flag"
"fmt"
"log"
"time"
"github.com/ka2n/masminer/machine"
"github.com/ka2n/masminer/machine/asic"
)
// config holds the command-line options for the inspect tool.
type config struct {
	ip       string        // target miner IP address (required)
	hostname string        // optional hostname hint used to determine the hardware kind
	timeout  time.Duration // dial and per-request timeout
}
// parseFlags reads the command line into a config value.
func parseFlags() config {
	var cfg config
	flag.StringVar(&cfg.ip, "ip", "", "Target IP Address(required)")
	flag.StringVar(&cfg.hostname, "host", "", "Hostname to determine what kind of hardware")
	flag.DurationVar(&cfg.timeout, "timeout", time.Second*10, "timeout")
	flag.Parse()
	return cfg
}

// main dials the ASIC identified on the command line and dumps its rig
// info and current stats.
func main() {
	cfg := parseFlags()
	log.SetPrefix("[inspect] ")

	ctx, cancel := context.WithTimeout(context.Background(), cfg.timeout)
	defer cancel()

	rig := machine.RemoteRig{IPAddr: cfg.ip, Hostname: cfg.hostname}
	client, err := asic.DialTimeout(rig, cfg.timeout)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	info, err := client.RigInfo(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%#v\n", info)

	stat, err := client.RigStat(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%#v\n", stat)
}
|
package main
import (
"errors"
"fmt"
"github.com/Unknwon/goconfig"
"net/smtp"
"strings"
"time"
)
// mailini holds the SMTP settings used to deliver notification mail.
type mailini struct {
	user string // sender account, also used as the envelope From address
	passwd string // password for SMTP PLAIN auth
	smtpaddress string // SMTP server host
	maillist string // comma-separated recipient addresses
	smtpport int // SMTP server port
}
// newmailini reads the [mail] section of the given config file and returns
// the resulting mail settings. Each missing required key is reported via
// checkerr; smtpport falls back to 25 when absent.
func newmailini(g *goconfig.ConfigFile) *mailini {
	m := &mailini{}
	get := func(key string) string {
		v, err := g.GetValue("mail", key)
		checkerr(err)
		return v
	}
	m.maillist = get("receive")
	m.user = get("mailuser")
	m.passwd = get("mailpasswd")
	m.smtpaddress = get("smtpaddress")
	m.smtpport = g.MustInt("mail", "smtpport", 25)
	return m
}
// sendmail formats content as an HTML mail and sends it to every address in
// m.maillist, giving up if SMTP does not complete within 10 seconds.
//
// Bug fix: the error channel is now buffered and is no longer closed via
// defer. Previously it was unbuffered and closed on return, so when the
// timeout fired first the sending goroutine would later panic with
// "send on closed channel".
func sendmail(m *mailini, content string) error {
	// Build the RFC 822-style headers plus the HTML body (the redundant
	// double fmt.Sprintf wrapper was removed; output is identical).
	sub := fmt.Sprintf("To: %s\r\nFrom: %s<%s>\r\nSubject: %s\r\nContent-Type: text/html; Charset=UTF-8\r\n\r\n%s", m.maillist, "邮箱别名", m.user, "标题", content)
	mailList := strings.Split(m.maillist, ",")
	auth := smtp.PlainAuth("", m.user, m.passwd, m.smtpaddress)
	addr := fmt.Sprintf("%s:%d", m.smtpaddress, m.smtpport)
	// Buffered so the goroutine can always deliver its result and exit,
	// even after we have stopped listening because of the timeout.
	errchan := make(chan error, 1)
	go func() {
		errchan <- smtp.SendMail(addr, auth, m.user, mailList, []byte(sub))
	}()
	select {
	case err := <-errchan:
		return err
	case <-time.After(time.Second * 10):
		return errors.New("send mail time out more than 10's")
	}
}
|
package encrypt
import (
"github.com/bitmaelum/bitmaelum-suite/pkg/bmcrypto"
"github.com/stretchr/testify/assert"
"io/ioutil"
"testing"
)
func TestEncrypt(t *testing.T) {
data, _ := ioutil.ReadFile("../../testdata/pubkey.rsa")
pubKey, _ := bmcrypto.NewPubKey(string(data))
data, _ = ioutil.ReadFile("../../testdata/privkey.rsa")
privKey, _ := bmcrypto.NewPrivKey(string(data))
cipher, err := Encrypt(*pubKey, []byte("foobar"))
assert.Nil(t, err)
assert.NotEqual(t, []byte("foobar"), cipher)
plaintext, err := Decrypt(*privKey, cipher)
assert.Nil(t, err)
assert.Equal(t, []byte("foobar"), plaintext)
}
|
package getspot
import (
"net/http"
"github.com/doniacld/outdoorsight/internal/endpointdef"
"github.com/doniacld/outdoorsight/internal/endpoints"
"github.com/doniacld/outdoorsight/internal/spot"
)
// GetSpotMeta holds the endpoint information: operation name, URL template
// (with the spot name as a path parameter), HTTP method and the expected
// success status code.
var GetSpotMeta = endpointdef.New(
	"getSpotDetails",
	"/spots/{"+endpoints.ParamSpotName+"}",
	http.MethodGet,
	http.StatusOK,
)

// GetSpotRequest is the request structure; SpotName names the spot to look up.
type GetSpotRequest struct {
	SpotName string `json:"spotName"`
}

// GetSpotResponse holds the response structure (the spot's full details).
type GetSpotResponse spot.Details
|
// Global logger
package logger
import (
"io"
"log"
)
// Logger fans formatted messages out to four severity levels, each backed
// by its own prefixed *log.Logger writing to the same destination.
type Logger struct {
	infoLogger    *log.Logger
	debugLogger   *log.Logger
	warningLogger *log.Logger
	errorLogger   *log.Logger
}

// NewLogger returns a Logger whose four severity loggers all write to
// writer, tagged with date, time and the caller's file:line.
//
// Bug fix: the warning prefix was misspelled "WARNGNG: "; it now reads
// "WARNING: ".
func NewLogger(writer io.Writer) (logger *Logger) {
	const flags = log.Ldate | log.Ltime | log.Lshortfile
	return &Logger{
		infoLogger:    log.New(writer, "INFO: ", flags),
		debugLogger:   log.New(writer, "DEBUG: ", flags),
		warningLogger: log.New(writer, "WARNING: ", flags),
		errorLogger:   log.New(writer, "ERROR: ", flags),
	}
}
// Info logs a Printf-style message at INFO level.
func (logger *Logger) Info(format string, v ...interface{}) {
	logger.infoLogger.Printf(format, v...)
}
// Debug logs a Printf-style message at DEBUG level.
func (logger *Logger) Debug(format string, v ...interface{}) {
	logger.debugLogger.Printf(format, v...)
}
// Warn logs a Printf-style message at WARNING level.
func (logger *Logger) Warn(format string, v ...interface{}) {
	logger.warningLogger.Printf(format, v...)
}
// Error logs a Printf-style message at ERROR level.
func (logger *Logger) Error(format string, v ...interface{}) {
	logger.errorLogger.Printf(format, v...)
}
|
package main
import (
"fmt"
"ms/sun/shared/dbs"
"ms/sun/shared/x"
"sync/atomic"
"time"
)
// main hammers the PostCdb updater with concurrent updates to gauge
// database throughput: 99 goroutines (98 spawned plus the main goroutine)
// run the update loop forever while a reporter goroutine prints the shared
// counter once per second.
func main() {
	x.LogTableSqlReq.PostCdb = false
	var count int64
	// fn issues one update per millisecond forever, logging every tenth
	// result.
	fn := func() {
		for {
			// Bug fix: take the post-increment value locally; the shared
			// counter used to be re-read without synchronization for both
			// UserId() and the %10 check, which is a data race under -race.
			n := atomic.AddInt64(&count, 1)
			rows, err := x.NewPostCdb_Updater().UserId(int(n)).PostId_Eq(1526682757615011071).Update(dbs.DB_PG)
			if n%10 == 0 {
				fmt.Println(rows, err, "*")
			}
			time.Sleep(time.Millisecond)
		}
	}
	// Once-a-second progress reporter (bug fix: reads the counter
	// atomically instead of racing with the writers).
	go func() {
		for {
			time.Sleep(time.Second)
			fmt.Println(atomic.LoadInt64(&count))
		}
	}()
	// Improvement: the original listed 98 literal `go fn()` statements;
	// a loop spawns the same number of workers.
	for j := 0; j < 98; j++ {
		go fn()
	}
	fn()
}
|
package keypairs
import (
"bytes"
"context"
"encoding/json"
"net/http"
"strings"
"github.com/selectel/go-selvpcclient/selvpcclient"
)
const resourceURL = "keypairs"
// List gets a list of keypairs in the current domain.
func List(ctx context.Context, client *selvpcclient.ServiceClient) ([]*Keypair, *selvpcclient.ResponseResult, error) {
	url := strings.Join([]string{client.Endpoint, resourceURL}, "/")
	responseResult, err := client.DoRequest(ctx, http.MethodGet, url, nil)
	if err != nil {
		// Transport-level failure: there is no response to hand back.
		return nil, nil, err
	}
	if responseResult.Err != nil {
		// API-level error: return the response so callers can inspect it.
		return nil, responseResult, responseResult.Err
	}

	// Extract keypairs from the response body.
	var result struct {
		Keypairs []*Keypair `json:"keypairs"`
	}
	err = responseResult.ExtractResult(&result)
	if err != nil {
		return nil, responseResult, err
	}

	return result.Keypairs, responseResult, nil
}
// Create requests a creation of the keypair with the specified options.
// Note the response carries the created keypair(s) as a list under the
// singular "keypair" key.
func Create(ctx context.Context, client *selvpcclient.ServiceClient, createOpts KeypairOpts) ([]*Keypair, *selvpcclient.ResponseResult, error) {
	// Nest create opts into additional body, as the API expects
	// {"keypair": {...}}.
	type nestedCreateOpts struct {
		Keypair KeypairOpts `json:"keypair"`
	}
	var createKeypairOpts = nestedCreateOpts{
		Keypair: createOpts,
	}
	requestBody, err := json.Marshal(&createKeypairOpts)
	if err != nil {
		return nil, nil, err
	}

	url := strings.Join([]string{client.Endpoint, resourceURL}, "/")
	responseResult, err := client.DoRequest(ctx, http.MethodPost, url, bytes.NewReader(requestBody))
	if err != nil {
		// Transport-level failure: no response to hand back.
		return nil, nil, err
	}
	if responseResult.Err != nil {
		return nil, responseResult, responseResult.Err
	}

	// Extract a keypair from the response body.
	var result struct {
		Keypair []*Keypair `json:"keypair"`
	}
	err = responseResult.ExtractResult(&result)
	if err != nil {
		return nil, responseResult, err
	}

	return result.Keypair, responseResult, nil
}
// Delete deletes a single keypair by its name and user ID. The response is
// returned alongside any transport- or API-level error.
func Delete(ctx context.Context, client *selvpcclient.ServiceClient, name, userID string) (*selvpcclient.ResponseResult, error) {
	url := strings.Join([]string{client.Endpoint, resourceURL, name, "users", userID}, "/")
	responseResult, err := client.DoRequest(ctx, http.MethodDelete, url, nil)
	if err != nil {
		return nil, err
	}
	// responseResult.Err is nil on success, so it doubles as the returned
	// error value.
	return responseResult, responseResult.Err
}
|
package helpers
import (
"fmt"
"time"
)
// COUNT_TIME is the reporting interval in seconds.
const COUNT_TIME = 10

// StartTimePrinter launches a goroutine that prints the elapsed time every
// COUNT_TIME seconds.
//
// Bug fix: the quit channel used to be a local variable that nothing could
// ever close, so the ticker and its goroutine leaked for the life of the
// process. The channel is now returned; close it to stop the printer.
// Existing callers that ignore the return value still compile and behave
// exactly as before.
func StartTimePrinter() chan<- struct{} {
	quit := make(chan struct{})
	go func() {
		ticker := time.NewTicker(time.Duration(COUNT_TIME) * time.Second)
		defer ticker.Stop()
		count := 0
		for {
			select {
			case <-ticker.C:
				count++
				// Same bytes as the original Print/Print/Println chain.
				fmt.Printf("%d seconds have passed\n", count*COUNT_TIME)
			case <-quit:
				return
			}
		}
	}()
	return quit
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package querywatch_test
import (
"context"
"testing"
"time"
mysql "github.com/pingcap/tidb/errno"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/testkit"
"github.com/stretchr/testify/require"
)
// TestQueryWatch exercises QUERY WATCH end to end: error cases, adding
// watches by SQL text / SQL digest / plan digest for the default and a
// custom resource group, visibility through mysql.tidb_runaway_watch and
// information_schema.runaway_watches, quarantine enforcement, and removal.
// The checks are order-dependent: watch rows are asserted by insertion id.
func TestQueryWatch(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("create table t1(a int)")
	tk.MustExec("insert into t1 values(1)")
	tk.MustExec("create table t2(a int)")
	tk.MustExec("insert into t2 values(1)")
	tk.MustExec("create table t3(a int)")
	tk.MustExec("insert into t3 values(1)")
	// Adding a watch requires runaway config on the group, and the group
	// must exist.
	_, err := tk.Exec("query watch add sql text exact to 'select * from test.t1'")
	require.ErrorContains(t, err, "must set runaway config for resource group `default`")
	_, err = tk.Exec("query watch add resource group rg2 action DRYRUN sql text exact to 'select * from test.t1'")
	require.ErrorContains(t, err, "the group rg2 does not exist")
	tk.MustExec("alter resource group default QUERY_LIMIT=(EXEC_ELAPSED='50ms' ACTION=DRYRUN)")
	tk.MustExec("query watch add sql text exact to 'select * from test.t1'")
	tk.MustExec("QUERY WATCH ADD ACTION COOLDOWN SQL TEXT EXACT TO 'select * from test.t2'")
	// Watch rows are flushed asynchronously, so poll until visible.
	tryInterval := time.Millisecond * 200
	maxWaitDuration := time.Second * 5
	tk.EventuallyMustQueryAndCheck("select SQL_NO_CACHE resource_group_name, watch_text, action, watch from mysql.tidb_runaway_watch", nil,
		testkit.Rows("default select * from test.t1 1 1", "default select * from test.t2 2 1"), maxWaitDuration, tryInterval)
	tk.MustExec("create resource group rg1 RU_PER_SEC=1000 QUERY_LIMIT=(EXEC_ELAPSED='50ms' ACTION=KILL)")
	tk.MustExec("query watch add resource group rg1 sql text exact to 'select * from test.t1'")
	tk.MustExec("query watch add resource group rg1 sql text similar to 'select * from test.t2'")
	ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege)
	tk.MustExecWithContext(ctx, "query watch add resource group rg1 sql text plan to 'select * from test.t3'")
	tk.MustExec("query watch add action KILL SQL DIGEST '4ea0618129ffc6a7effbc0eff4bbcb41a7f5d4c53a6fa0b2e9be81c7010915b0'")
	tk.MustExec("query watch add action KILL PLAN DIGEST 'd08bc323a934c39dc41948b0a073725be3398479b6fa4f6dd1db2a9b115f7f57'")
	tk.EventuallyMustQueryAndCheck("select SQL_NO_CACHE resource_group_name, watch_text, action, watch from mysql.tidb_runaway_watch order by id", nil,
		testkit.Rows("default select * from test.t1 1 1",
			"default select * from test.t2 2 1",
			"rg1 select * from test.t1 3 1",
			"rg1 02576c15e1f35a8aa3eb7e3b1f977c9f9f9921a22421b3e9f42bad5ab632b4f6 3 2",
			"rg1 d08bc323a934c39dc41948b0a073725be3398479b6fa4f6dd1db2a9b115f7f57 3 3",
			"default 4ea0618129ffc6a7effbc0eff4bbcb41a7f5d4c53a6fa0b2e9be81c7010915b0 3 2",
			"default d08bc323a934c39dc41948b0a073725be3398479b6fa4f6dd1db2a9b115f7f57 3 3",
		), maxWaitDuration, tryInterval)
	// Re-adding a similar-text watch for the default group replaces the
	// digest row's action (moves it to the end of the id order).
	tk.MustExec("query watch add action COOLDOWN sql text similar to 'select * from test.t1'")
	tk.EventuallyMustQueryAndCheck("select SQL_NO_CACHE resource_group_name, watch_text, action, watch from mysql.tidb_runaway_watch order by id", nil,
		testkit.Rows("default select * from test.t1 1 1",
			"default select * from test.t2 2 1",
			"rg1 select * from test.t1 3 1",
			"rg1 02576c15e1f35a8aa3eb7e3b1f977c9f9f9921a22421b3e9f42bad5ab632b4f6 3 2",
			"rg1 d08bc323a934c39dc41948b0a073725be3398479b6fa4f6dd1db2a9b115f7f57 3 3",
			"default d08bc323a934c39dc41948b0a073725be3398479b6fa4f6dd1db2a9b115f7f57 3 3",
			"default 4ea0618129ffc6a7effbc0eff4bbcb41a7f5d4c53a6fa0b2e9be81c7010915b0 2 2",
		), maxWaitDuration, tryInterval)
	// Same state through the human-readable information_schema view.
	tk.EventuallyMustQueryAndCheck("select SQL_NO_CACHE resource_group_name, watch_text, action, watch from information_schema.runaway_watches order by id", nil,
		testkit.Rows("default select * from test.t1 DryRun Exact",
			"default select * from test.t2 CoolDown Exact",
			"rg1 select * from test.t1 Kill Exact",
			"rg1 02576c15e1f35a8aa3eb7e3b1f977c9f9f9921a22421b3e9f42bad5ab632b4f6 Kill Similar",
			"rg1 d08bc323a934c39dc41948b0a073725be3398479b6fa4f6dd1db2a9b115f7f57 Kill Plan",
			"default d08bc323a934c39dc41948b0a073725be3398479b6fa4f6dd1db2a9b115f7f57 Kill Plan",
			"default 4ea0618129ffc6a7effbc0eff4bbcb41a7f5d4c53a6fa0b2e9be81c7010915b0 CoolDown Similar",
		), maxWaitDuration, tryInterval)
	// Quarantine enforcement for watched statements.
	tk.MustGetErrCode("select * from t3", mysql.ErrResourceGroupQueryRunawayQuarantine)
	tk.MustQuery("select * from t2").Check(testkit.Rows("1"))
	tk.MustQuery("select /*+ resource_group(rg1) */ * from t1").Check(testkit.Rows("1"))
	tk.MustExec("SET RESOURCE GROUP rg1")
	// hit and schema will affect sql digest
	tk.MustGetErrCode("select * from test.t2", mysql.ErrResourceGroupQueryRunawayQuarantine)
	tk.MustGetErrCode("select /*+ resource_group(rg1) */ * from t3", mysql.ErrResourceGroupQueryRunawayQuarantine)
	// Clearing and restoring the group's QUERY_LIMIT leaves the watch rows
	// in place.
	tk.MustExec("alter resource group rg1 RU_PER_SEC=1000 QUERY_LIMIT=()")
	tk.EventuallyMustQueryAndCheck("select SQL_NO_CACHE resource_group_name, watch_text, action, watch from information_schema.runaway_watches order by id", nil,
		testkit.Rows("default select * from test.t1 DryRun Exact",
			"default select * from test.t2 CoolDown Exact",
			"rg1 select * from test.t1 Kill Exact",
			"rg1 02576c15e1f35a8aa3eb7e3b1f977c9f9f9921a22421b3e9f42bad5ab632b4f6 Kill Similar",
			"rg1 d08bc323a934c39dc41948b0a073725be3398479b6fa4f6dd1db2a9b115f7f57 Kill Plan",
			"default d08bc323a934c39dc41948b0a073725be3398479b6fa4f6dd1db2a9b115f7f57 Kill Plan",
			"default 4ea0618129ffc6a7effbc0eff4bbcb41a7f5d4c53a6fa0b2e9be81c7010915b0 CoolDown Similar",
		), maxWaitDuration, tryInterval)
	tk.MustExec("alter resource group rg1 RU_PER_SEC=1000 QUERY_LIMIT=(EXEC_ELAPSED='50ms' ACTION=KILL)")
	tk.EventuallyMustQueryAndCheck("select SQL_NO_CACHE resource_group_name, watch_text, action, watch from information_schema.runaway_watches order by id", nil,
		testkit.Rows("default select * from test.t1 DryRun Exact",
			"default select * from test.t2 CoolDown Exact",
			"rg1 select * from test.t1 Kill Exact",
			"rg1 02576c15e1f35a8aa3eb7e3b1f977c9f9f9921a22421b3e9f42bad5ab632b4f6 Kill Similar",
			"rg1 d08bc323a934c39dc41948b0a073725be3398479b6fa4f6dd1db2a9b115f7f57 Kill Plan",
			"default d08bc323a934c39dc41948b0a073725be3398479b6fa4f6dd1db2a9b115f7f57 Kill Plan",
			"default 4ea0618129ffc6a7effbc0eff4bbcb41a7f5d4c53a6fa0b2e9be81c7010915b0 CoolDown Similar",
		), maxWaitDuration, tryInterval)
	// test remove
	rs, err := tk.Exec("query watch remove 1")
	require.NoError(t, err)
	require.Nil(t, rs)
	time.Sleep(1 * time.Second)
	// The removed watch (id 1, DRYRUN on t1) no longer shields t1; the
	// rg1 KILL watch now applies.
	tk.MustGetErrCode("select * from test.t1", mysql.ErrResourceGroupQueryRunawayQuarantine)
}
|
package main
import(
"database/sql"
_ "github.com/lib/pq"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/log"
"github.com/davegardnerisme/phonegeocode"
)
var (
	// countCountryCode maps a country code to the number of phone numbers
	// counted for it in the current scrape. NOTE(review): package-level
	// state mutated from Collect without locking — confirm this collector
	// is never scraped concurrently.
	countCountryCode = make(map[string]int)
)
// StringConverter converts a raw []uint8 database column value to a string.
//
// Simplification: []uint8 is []byte in Go, so a direct conversion replaces
// the previous element-by-element copy loop (identical output).
func StringConverter(a []uint8) string {
	return string(a)
}
// ParsePhoneNum resolves the country of the given phone number and bumps
// that country's counter; unparseable numbers are logged and skipped.
func ParsePhoneNum(s string) {
	country, err := phonegeocode.New().Country(s)
	if err != nil {
		// Bug fix: the previous call passed extra arguments to Infof with
		// no format verbs (a go vet error producing "%!(EXTRA ...)" noise).
		log.Infof("can't parse phone number %q: %v", s, err)
		return
	}
	countCountryCode[country]++
}
// collector scrapes one PostgreSQL database: on each Collect it runs
// `request` against `database` on `target` (authenticating with
// login/passwd) and exposes a per-country count of the returned phone
// numbers.
type collector struct {
	target string // host[:port] of the PostgreSQL server
	database string // database name
	login string // database user
	passwd string // database password
	request string // SQL query returning one phone number per row
}
// Describe implements prometheus.Collector. A throwaway descriptor is sent
// because the real metrics are created dynamically in Collect.
func (c collector) Describe(ch chan<- *prometheus.Desc) {
	ch <- prometheus.NewDesc("dummy", "dummy", nil, nil)
}
// Collect implements prometheus.Collector. Each scrape re-queries the
// database, recounts phone numbers per country, and emits one gauge per
// country seen so far.
func (c collector) Collect(ch chan<- prometheus.Metric) {
	// Zero the previous scrape's counts (keys are kept, so countries that
	// disappear keep reporting 0, as before).
	for country := range countCountryCode {
		countCountryCode[country] = 0
	}
	conn := "postgres://" + c.login + ":" + c.passwd + "@" + c.target + "/" + c.database + "?sslmode=disable"
	db, err := sql.Open("postgres", conn)
	if err != nil {
		log.Infof("Error scraping target %s: %s", c.target, err)
		return
	}
	// Bug fix: close the pool even when the query below fails; the defer
	// used to be registered only after a successful Query, leaking the
	// connection on the error return path.
	defer db.Close()
	r, err := db.Query(c.request)
	if err != nil {
		log.Infof("Error : %s", err)
		return
	}
	defer r.Close()
	for r.Next() {
		var str string
		if err = r.Scan(&str); err != nil {
			log.Fatal(err)
		}
		ParsePhoneNum(str)
	}
	if err := r.Err(); err != nil {
		log.Fatal(err)
	}
	for country, val := range countCountryCode {
		ch <- prometheus.MustNewConstMetric(
			prometheus.NewDesc("count_user_per_country", "Count User Per Country", []string{"country"}, nil),
			prometheus.GaugeValue,
			float64(val),
			country)
	}
}
|
package config
import (
"errors"
"fmt"
"log"
"github.com/debarshibasak/go-k3s/k3sclient"
"github.com/debarshibasak/go-kubeadmclient/kubeadmclient"
"github.com/debarshibasak/kubestrike/v1alpha1/engine"
"github.com/debarshibasak/kubestrike/v1alpha1/provider"
"github.com/debarshibasak/machina"
"github.com/ghodss/yaml"
)
// CreateCluster is the parsed create-cluster operation: at most one machine
// provider (multipass or baremetal) and at most one orchestration engine
// (kubeadm or k3s) may be configured. The unexported-by-tag fields are
// filled in later: OrchestrationEngine during Parse, the node lists by the
// provider (via Get in Run).
type CreateCluster struct {
	Base
	Multipass *provider.MultipassCreateCluster `yaml:"multipass" json:"multipass"`
	BareMetal *provider.Baremetal `yaml:"baremetal" json:"baremetal"`
	OrchestrationEngine engine.Orchestrator `yaml:"-" json:"-"`
	KubeadmEngine *engine.KubeadmEngine `yaml:"kubeadm" json:"kubeadm"`
	K3sEngine *engine.K3SEngine `yaml:"k3s" json:"k3s"`
	WorkerNodes []*machina.Node `yaml:"-" json:"-"`
	MasterNodes []*machina.Node `yaml:"-" json:"-"`
	HAProxy *machina.Node `yaml:"-" json:"-"`
}
// Parse unmarshals the raw document into a CreateCluster operation,
// enforces that at most one provider and one orchestration engine are
// configured, and records which engine will be used.
func (c *CreateCluster) Parse(config []byte) (ClusterOperation, error) {
	var parsed CreateCluster
	if err := yaml.Unmarshal(config, &parsed); err != nil {
		return nil, errors.New("error while parsing inner configuration")
	}
	// Bug fix: the provider-exclusivity check used to read c.Multipass /
	// c.BareMetal from the receiver (which is not the object just parsed),
	// so a document configuring both providers slipped through here.
	if parsed.Multipass != nil && parsed.BareMetal != nil {
		return nil, errors.New("only 1 provider is allowed (options are multipass and baremetal)")
	}
	if parsed.KubeadmEngine != nil && parsed.K3sEngine != nil {
		return nil, errors.New("only 1 orchestration engine is allowed")
	}
	if parsed.KubeadmEngine != nil {
		parsed.OrchestrationEngine = parsed.KubeadmEngine
	}
	if parsed.K3sEngine != nil {
		parsed.OrchestrationEngine = parsed.K3sEngine
	}
	return &parsed, nil
}
// getOrchestrator completes the configured engine with the resolved
// master/worker/haproxy nodes and returns it, or nil when no supported
// engine is configured.
func (c *CreateCluster) getOrchestrator() engine.Orchestrator {
	// Binding the type switch variable removes the duplicated assertions
	// the original performed inside each case.
	switch orch := c.OrchestrationEngine.(type) {
	case *engine.KubeadmEngine:
		var masters []*kubeadmclient.MasterNode
		for _, m := range c.MasterNodes {
			masters = append(masters, kubeadmclient.NewMasterNode(m.GetUsername(), m.GetIP(), m.GetPrivateKey()))
		}
		var workers []*kubeadmclient.WorkerNode
		for _, w := range c.WorkerNodes {
			workers = append(workers, kubeadmclient.NewWorkerNode(w.GetUsername(), w.GetIP(), w.GetPrivateKey()))
		}
		var haproxy *kubeadmclient.HaProxyNode
		if c.HAProxy != nil {
			haproxy = kubeadmclient.NewHaProxyNode(c.HAProxy.GetUsername(), c.HAProxy.GetIP(), c.HAProxy.GetPrivateKey())
		}
		orch.HAProxy = haproxy
		orch.ClusterName = c.ClusterName
		orch.Masters = masters
		orch.Workers = workers
		return orch
	case *engine.K3SEngine:
		var masters []*k3sclient.Master
		for _, m := range c.MasterNodes {
			masters = append(masters, k3sclient.NewMaster(m.GetUsername(), m.GetIP(), m.GetPrivateKey()))
		}
		var workers []*k3sclient.Worker
		for _, w := range c.WorkerNodes {
			workers = append(workers, k3sclient.NewWorker(w.GetUsername(), w.GetIP(), w.GetPrivateKey()))
		}
		var haproxy *k3sclient.HAProxy
		if c.HAProxy != nil {
			haproxy = k3sclient.NewHAProxy(c.HAProxy.GetUsername(), c.HAProxy.GetIP(), c.HAProxy.GetPrivateKey())
		}
		orch.HAProxy = haproxy
		orch.ClusterName = c.ClusterName
		orch.Masters = masters
		orch.Workers = workers
		return orch
	default:
		return nil
	}
}
// Run resolves the provider machines for the cluster (via Get), builds the
// configured orchestration engine and asks it to create the cluster.
// NOTE(review): the verbose parameter is currently unused — confirm whether
// it should be forwarded to the engine.
func (c *CreateCluster) Run(verbose bool) error {
	log.Println("[kubestrike] provider found - " + c.Provider)
	// Populate master/worker/haproxy nodes from the configured provider.
	err := Get(c)
	if err != nil {
		return err
	}

	log.Println("\n[kubestrike] creating cluster...")

	orchestrator := c.getOrchestrator()
	if orchestrator == nil {
		return errors.New("could not determine the orchestration engine")
	}

	err = orchestrator.CreateCluster()
	if err != nil {
		return err
	}

	fmt.Println("")
	log.Println("[kubestrike] You can access the cluster now")
	fmt.Println("")
	return nil
}
// Validate checks the parsed create-cluster operation: the cluster must be
// named, the document kind must match, and at most one provider may be set.
func (c *CreateCluster) Validate() error {
	switch {
	case c.ClusterName == "":
		return errClusterNameIsEmpty
	case c.Kind != CreateClusterKind:
		return errKind
	case c.Multipass != nil && c.BareMetal != nil:
		return errors.New("only one provider is allowed")
	default:
		return nil
	}
}
|
package proto
import "KServer/library/utils"
// proto2 is the shared protobuf codec used by the helpers in this package.
var proto2 utils.Protobuf

// NewIMessage encodes an internal Message frame (routing ids plus payload)
// and returns the resulting wire bytes.
func NewIMessage(id uint32, msgId uint32, clientId string, serverId string, data []byte) []byte {
	return proto2.Encode(&Message{Id: id, MsgId: msgId, ClientId: clientId, ServerId: serverId, Data: data})
}
|
// Copyright 2021 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/util/plancodec"
)
// IsTiFlashContained returns whether the plan contains TiFlash related executors.
// tiFlashPushDown reports that some table reader reads from TiFlash;
// tiFlashExchangePushDown additionally reports that the reader's table plan
// is an ExchangeSender. Both named results are set as side effects of the
// recursive walk below.
func IsTiFlashContained(plan Plan) (tiFlashPushDown, tiFlashExchangePushDown bool) {
	if plan == nil {
		return
	}
	var tiflashProcess func(p Plan)
	tiflashProcess = func(p Plan) {
		// For EXPLAIN, inspect the explained target plan instead.
		if exp, isExplain := p.(*Explain); isExplain {
			p = exp.TargetPlan
			if p == nil {
				return
			}
		}
		pp, isPhysical := p.(PhysicalPlan)
		if !isPhysical {
			return
		}
		if tableReader, ok := pp.(*PhysicalTableReader); ok {
			tiFlashPushDown = tableReader.StoreType == kv.TiFlash
			if tiFlashPushDown && tableReader.GetTablePlan().TP() == plancodec.TypeExchangeSender {
				tiFlashExchangePushDown = true
			}
			return
		}
		// Recurse into children, stopping early once a TiFlash reader is
		// found.
		for _, child := range pp.Children() {
			tiflashProcess(child)
			if tiFlashPushDown {
				return
			}
		}
	}
	tiflashProcess(plan)
	return
}
|
// Cookie Library
package cookie
import (
"bytes"
"encoding/gob"
"fmt"
"net/http"
"strconv"
"time"
)
// Cookie is a builder around a single HTTP cookie: read it from the
// request, stage value/attribute changes, then Save.
type Cookie interface {
	// Raw returns the underlying *http.Cookie.
	Raw() *http.Cookie
	// Exist reports whether the cookie was present in the user request.
	Exist() bool
	// Scan copies the cookie value into ptr.
	//
	// Supports string, any builtin int type, any builtin uint type, any
	// builtin float type and bool without deserialization.
	// Supports other data types with deserialization (gob).
	//
	// If you have used Crypto to set the cookie, call Crypto first.
	Scan(ptr interface{}) (err error)
	// Value sets the cookie's value.
	// Supports string, any builtin int type, any builtin uint type, any
	// builtin float type and bool without serialization.
	// Supports other data types with serialization (gob).
	Value(v interface{})
	// Path sets the cookie's Path attribute.
	Path(path string)
	// Domain sets the cookie's Domain attribute.
	Domain(domain string)
	// Expire returns the expiry builder for this cookie.
	Expire() Expire
	// MaxAge sets the cookie's Max-Age attribute.
	MaxAge(age int)
	// Delete marks the cookie for deletion.
	Delete()
	// Secure toggles the Secure flag.
	Secure()
	// HttpOnly toggles the HttpOnly flag.
	HttpOnly()
	// Crypto returns the cryptography tools; set Value first.
	Crypto() Crypto
	// DontHonorDnt makes Save ignore the request's DNT signal.
	DontHonorDnt()
	// DontHonorUser makes Save ignore the user's opt-out signal.
	DontHonorUser()
	// Save writes the cookie to the given response writer(s)/request(s).
	Save(toResOrReq ...interface{})
}
// Init attempts to get the named cookie from the user request; if the
// cookie is absent it returns a fresh, not-yet-sent cookie with that name.
//
// Panics if r is nil.
//
// Improvement: removed the `return nil` that followed the panic — it was
// unreachable (flagged by go vet).
func Init(r *http.Request, name string) Cookie {
	if r == nil {
		panic(fmt.Errorf("Cookie: 'r' cannot be nil value!"))
	}
	cookie_, err := r.Cookie(name)
	if err != nil {
		// Not present in the request: start a brand-new cookie.
		return &cookie{
			r:      r,
			cookie: &http.Cookie{Name: name},
		}
	}
	c := &cookie{
		r:      r,
		cookie: cookie_,
		exist:  true,
	}
	// Best effort: also keep the decoded raw payload for gob-encoded
	// values; a decode failure just means the value was stored as text.
	value, err := decode(cookie_.Value)
	if err == nil {
		c.value = value
	}
	return c
}
// cookie is the default Cookie implementation: it wraps the *http.Cookie
// being read/built plus the request-scoped state Save consults.
type cookie struct {
	r *http.Request // originating request (used for DNT / opt-out checks)
	cookie *http.Cookie // the cookie being read or built
	exist bool // true when the cookie came from the request
	expire *expire // staged expiry configuration, applied on Save
	crypto *crypto // staged crypto configuration, applied on Save
	dontHonorDnt bool // save even if the request signals DNT
	dontHonorUser bool // save even if the user opted out
	value []byte // gob-encoded payload for non-primitive values
}
// Raw returns the underlying *http.Cookie.
func (c *cookie) Raw() *http.Cookie {
	return c.cookie
}

// Exist reports whether the cookie was present in the user request.
func (c *cookie) Exist() bool {
	return c.exist
}
// Scan copies the cookie's value into ptr.
//
// Pointer-to-primitive targets are parsed directly: integer kinds from
// base-16 text (matching Value's hex encoding), floats via ParseFloat,
// bools via ParseBool, strings verbatim. Any other pointer type is
// gob-decoded from the raw payload captured at Init time.
//
// If you have used Crypto to set the cookie, call Crypto first.
func (c *cookie) Scan(ptr interface{}) (err error) {
	switch ptr := ptr.(type) {
	case *string:
		*ptr = c.cookie.Value
	case *int:
		var v int64
		v, err = strconv.ParseInt(c.cookie.Value, 16, 64)
		*ptr = int(v)
	case *int8:
		var v int64
		v, err = strconv.ParseInt(c.cookie.Value, 16, 8)
		*ptr = int8(v)
	case *int16:
		var v int64
		v, err = strconv.ParseInt(c.cookie.Value, 16, 16)
		*ptr = int16(v)
	case *int32:
		var v int64
		v, err = strconv.ParseInt(c.cookie.Value, 16, 32)
		*ptr = int32(v)
	case *int64:
		*ptr, err = strconv.ParseInt(c.cookie.Value, 16, 64)
	case *uint:
		var v uint64
		v, err = strconv.ParseUint(c.cookie.Value, 16, 64)
		*ptr = uint(v)
	case *uint8:
		var v uint64
		v, err = strconv.ParseUint(c.cookie.Value, 16, 8)
		*ptr = uint8(v)
	case *uint16:
		var v uint64
		v, err = strconv.ParseUint(c.cookie.Value, 16, 16)
		*ptr = uint16(v)
	case *uint32:
		var v uint64
		v, err = strconv.ParseUint(c.cookie.Value, 16, 32)
		*ptr = uint32(v)
	case *uint64:
		*ptr, err = strconv.ParseUint(c.cookie.Value, 16, 64)
	case *float32:
		var v float64
		v, err = strconv.ParseFloat(c.cookie.Value, 32)
		*ptr = float32(v)
	case *float64:
		*ptr, err = strconv.ParseFloat(c.cookie.Value, 64)
	case *bool:
		*ptr, err = strconv.ParseBool(c.cookie.Value)
	default:
		// Non-primitive target: gob-decode the payload decoded at Init.
		dec := gob.NewDecoder(bytes.NewReader(c.value))
		err = dec.Decode(ptr)
	}
	return
}
// Value sets the cookie's payload. Strings are stored as-is; integer kinds
// are stored as base-16 text; floats use 'e' notation; bools use
// "true"/"false". Anything else is gob-encoded and then wrapped via
// encode() for transport.
//
// Bug fix: the float32 case passed bitSize 64 to strconv.FormatFloat,
// formatting the exact float64 widening and producing needlessly long
// strings (e.g. "1.0000000149011612e-01" for float32(0.1)). It now passes
// 32, matching Scan's ParseFloat(..., 32); round-tripping is unaffected.
func (c *cookie) Value(v interface{}) {
	// Make sure it's clean: drop any previously staged value/crypto/expiry.
	c.cookie.Value = ""
	c.value = nil
	c.crypto = nil
	c.expire = nil
	switch v := v.(type) {
	case string:
		c.cookie.Value = v
	case int:
		c.cookie.Value = strconv.FormatInt(int64(v), 16)
	case int8:
		c.cookie.Value = strconv.FormatInt(int64(v), 16)
	case int16:
		c.cookie.Value = strconv.FormatInt(int64(v), 16)
	case int32:
		c.cookie.Value = strconv.FormatInt(int64(v), 16)
	case int64:
		c.cookie.Value = strconv.FormatInt(v, 16)
	case uint:
		c.cookie.Value = strconv.FormatUint(uint64(v), 16)
	case uint8:
		c.cookie.Value = strconv.FormatUint(uint64(v), 16)
	case uint16:
		c.cookie.Value = strconv.FormatUint(uint64(v), 16)
	case uint32:
		c.cookie.Value = strconv.FormatUint(uint64(v), 16)
	case uint64:
		c.cookie.Value = strconv.FormatUint(v, 16)
	case float32:
		c.cookie.Value = strconv.FormatFloat(float64(v), 'e', -1, 32)
	case float64:
		c.cookie.Value = strconv.FormatFloat(v, 'e', -1, 64)
	case bool:
		c.cookie.Value = strconv.FormatBool(v)
	default:
		// Gob-encode arbitrary values; on encode failure the cookie value
		// is simply left empty.
		buf := &bytes.Buffer{}
		defer buf.Reset()
		enc := gob.NewEncoder(buf)
		if err := enc.Encode(v); err != nil {
			return
		}
		c.value = buf.Bytes()
		c.cookie.Value = encode(c.value)
	}
}
// Path sets the cookie's Path attribute.
func (c *cookie) Path(path string) {
	c.cookie.Path = path
}

// Domain sets the cookie's Domain attribute.
func (c *cookie) Domain(domain string) {
	c.cookie.Domain = domain
}

// Expire lazily creates and returns the expiry builder, anchored at the
// current time; it is applied when Save runs.
func (c *cookie) Expire() Expire {
	if c.expire == nil {
		c.expire = &expire{
			cookie: c,
			t: time.Now(),
		}
	}
	return c.expire
}

// MaxAge sets the cookie's Max-Age attribute (seconds).
func (c *cookie) MaxAge(age int) {
	c.cookie.MaxAge = age
}

// Delete marks the cookie for removal: a placeholder value (old Internet
// Explorer mishandled empty values) plus a negative Max-Age.
func (c *cookie) Delete() {
	c.Value("--DELETE-ME--") // Bloody Internet Explorer
	c.MaxAge(-1)
}

// Secure toggles (not sets) the Secure flag.
func (c *cookie) Secure() {
	c.cookie.Secure = !c.cookie.Secure
}

// HttpOnly toggles (not sets) the HttpOnly flag.
func (c *cookie) HttpOnly() {
	c.cookie.HttpOnly = !c.cookie.HttpOnly
}

// Crypto lazily creates and returns the cryptography helper bound to this
// cookie; it is applied when Save runs.
func (c *cookie) Crypto() Crypto {
	if c.crypto == nil {
		c.crypto = &crypto{
			r: c.r,
			cookie: c,
		}
	}
	return c.crypto
}

// DontHonorDnt makes Save ignore the request's DNT signal.
func (c *cookie) DontHonorDnt() {
	c.dontHonorDnt = true
}

// DontHonorUser makes Save ignore the user's opt-out signal.
func (c *cookie) DontHonorUser() {
	c.dontHonorUser = true
}
// Save writes the cookie out. Pass any mix of http.ResponseWriter (the
// Set-Cookie header goes to the first writer supplied) and *http.Request
// (a copy of the cookie is attached to each request).
//
// Unless overridden via DontHonorDnt/DontHonorUser, the cookie is NOT
// written to the response when the request signals DNT or the user has
// opted out.
func (c *cookie) Save(toResOrReq ...interface{}) {
	var w http.ResponseWriter
	for _, rw := range toResOrReq {
		switch rw := rw.(type) {
		case http.ResponseWriter:
			if w == nil {
				w = rw
			}
		case *http.Request:
			// Attach a copy so later mutations don't affect the request.
			cookie := *c.cookie
			rw.AddCookie(&cookie)
		}
	}
	if w == nil {
		// No response writer supplied: nothing to set a header on.
		return
	}
	// Privacy gate: the goto chain skips exactly the checks the caller
	// asked us not to honor.
	if c.dontHonorDnt && c.dontHonorUser {
		goto set_cookie
	} else if c.dontHonorDnt {
		goto user
	}
	if HasDnt(c.r) {
		// Don't save to response
		return
	}
user:
	if UserHasOptOut(c.r) {
		// Don't save to response
		return
	}
set_cookie:
	// Apply any staged expiry/crypto transforms, then emit the header.
	if c.expire != nil {
		c.expire.save()
	}
	if c.crypto != nil {
		c.crypto.save()
	}
	http.SetCookie(w, c.cookie)
}
|
// +build ignore
package main
import (
"flag"
"fmt"
"log"
"sync/atomic"
"time"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/dynamodb"
)
// Command-line knobs for the PutItem benchmark.
var (
	idPrefix = flag.String("prefix", "", "id prefix")
	table = flag.String("table", "test_table", "table name")
	total = flag.Int("t", 100000, "total count to be inserted")
	gophers = flag.Int("g", 8, "gophers count")
	reset = flag.Bool("reset", false, "reset table")
	endpoint = flag.String("endpoint", "http://localhost:8000", "endpoint")
	region = flag.String("region", "us-west-2", "region")
)
// main drives a PutItem load test against DynamoDB(-local): optionally
// drops and recreates the table, then inserts items with at most *gophers
// concurrent writers and prints timing stats.
//
// NOTE(review): `i % (*total/100)` divides by zero when -t < 100.
// NOTE(review): the loop condition `i <= *total` issues *total+1 puts
// while TPI divides by *total — confirm intended.
// NOTE(review): the throttle channel is never drained at the end, so up to
// *gophers PutItem calls may still be in flight when the stats print.
func main() {
	flag.Parse()
	log.Println("Total:", *total)
	log.Println("Table:", *table)
	// NOTE(review): session.New is deprecated in favor of
	// session.NewSession, which surfaces the configuration error.
	db := dynamodb.New(session.New(
		aws.NewConfig().
			WithEndpoint(*endpoint).
			WithRegion(*region).
			WithCredentials(credentials.NewEnvCredentials()),
	))
	if *reset {
		// Delete the table only if it already exists, then recreate it
		// with a simple string HASH key "id".
		if resp, err := db.ListTables(&dynamodb.ListTablesInput{}); err != nil {
			panic(err)
		} else {
			for _, name := range resp.TableNames {
				if *name == *table {
					log.Println("DeleteTable")
					if _, err := db.DeleteTable(&dynamodb.DeleteTableInput{
						TableName: aws.String(*table),
					}); err != nil {
						panic(err)
					}
					break
				}
			}
		}
		log.Println("CreateTable")
		if _, err := db.CreateTable(&dynamodb.CreateTableInput{
			TableName: aws.String(*table),
			AttributeDefinitions: []*dynamodb.AttributeDefinition{
				// &dynamodb.AttributeDefinition{
				// AttributeName: aws.String("bench_area"),
				// AttributeType: aws.String("S"),
				// },
				&dynamodb.AttributeDefinition{
					AttributeName: aws.String("id"),
					AttributeType: aws.String("S"),
				},
				// &dynamodb.AttributeDefinition{
				// AttributeName: aws.String("email"),
				// AttributeType: aws.String("S"),
				// },
			},
			KeySchema: []*dynamodb.KeySchemaElement{
				// &dynamodb.KeySchemaElement{
				// AttributeName: aws.String("bench_area"),
				// KeyType: aws.String("HASH"),
				// },
				// &dynamodb.KeySchemaElement{
				// AttributeName: aws.String("id"),
				// KeyType: aws.String("RANGE"),
				// },
				&dynamodb.KeySchemaElement{
					AttributeName: aws.String("id"),
					KeyType: aws.String("HASH"),
				},
			},
			ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
				ReadCapacityUnits: aws.Int64(1),
				WriteCapacityUnits: aws.Int64(1),
			},
			// GlobalSecondaryIndexes: []*dynamodb.GlobalSecondaryIndex{
			// &dynamodb.GlobalSecondaryIndex{
			// IndexName: aws.String("email-index"),
			// KeySchema: []*dynamodb.KeySchemaElement{
			// &dynamodb.KeySchemaElement{
			// AttributeName: aws.String("email"),
			// KeyType: aws.String("HASH"),
			// },
			// },
			// Projection: &dynamodb.Projection{
			// // NonKeyAttributes: []*string{aws.String("email")},
			// ProjectionType: aws.String("KEYS_ONLY"),
			// },
			// ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
			// ReadCapacityUnits: aws.Int64(1),
			// WriteCapacityUnits: aws.Int64(1),
			// },
			// },
			// },
		}); err != nil {
			panic(err)
		}
	}
	// throttle bounds concurrency: each send claims a slot, each finished
	// goroutine releases one.
	throttle := make(chan int, *gophers)
	start := time.Now()
	var totalDuration int64
	for i := 0; i <= *total; i++ {
		throttle <- i
		if i%(*total/100) == 0 {
			fmt.Printf("\r%s Generated %d%%", time.Now().Format("2006-01-02 15:04:05"), i/(*total/100))
		}
		go func(i int) {
			defer func() { <-throttle }()
			// Accumulate this goroutine's wall time into totalDuration.
			defer func(start time.Time) { atomic.AddInt64(&totalDuration, int64(time.Now().Sub(start))) }(time.Now())
			if _, err := db.PutItem(&dynamodb.PutItemInput{
				TableName: aws.String(*table),
				Item: map[string]*dynamodb.AttributeValue{
					"bench_area": &dynamodb.AttributeValue{S: aws.String("KingsLanding")},
					"id": &dynamodb.AttributeValue{S: aws.String(fmt.Sprintf("%s%d", *idPrefix, i))},
					"email": &dynamodb.AttributeValue{S: aws.String(fmt.Sprintf("test-%d@test.com", i))},
					"first_name": &dynamodb.AttributeValue{S: aws.String(fmt.Sprintf("Laurence-%d", i))},
					"last_name": &dynamodb.AttributeValue{S: aws.String(fmt.Sprintf("Tester-%d", i))},
					"age": &dynamodb.AttributeValue{N: aws.String("25")},
				},
				// ConditionExpression: aws.String("id <> :id"),
				// ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
				// ":id": &dynamodb.AttributeValue{S: aws.String("10")},
				// },
			}); err != nil {
				fmt.Println(err)
			}
		}(i)
	}
	fmt.Println("")
	log.Println("Took:", time.Now().Sub(start))
	log.Println("Total Duration:", time.Duration(totalDuration))
	log.Println("TPI:", time.Duration(totalDuration/int64(*total)))
}
|
package main
import "fmt"
// Combining maps and slices
// main demonstrates two compositions of maps and slices: a slice whose
// elements are maps, and a map whose values are slices.
func main() {
	// A slice of maps. Each element starts out as a nil map.
	mapSlice := make([]map[int]string, 10, 10)
	// Writing through a nil map element would panic:
	//mapSlice[0][100] = "a"
	mapSlice[0] = make(map[int]string, 1)
	mapSlice[0][10] = "深圳"
	fmt.Println(mapSlice)
	// A map whose values are int slices.
	sliceMap := make(map[string][]int, 10)
	sliceMap["北京"] = []int{1, 2, 3, 4, 5}
	fmt.Println(sliceMap)
}
|
package main
import (
"crypto/rand"
"encoding/json"
"fmt"
"io/ioutil"
"strings"
dc "github.com/samalba/dockerclient"
"net/http"
)
// randString returns a random string of length n drawn from the
// characters 0-9A-Z, using crypto/rand as the entropy source.
//
// Note: indexing with b % len(alphanum) has a slight modulo bias;
// acceptable here since the result is only used as a device serial.
func randString(n int) string {
	const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
	bytes := make([]byte, n)
	if _, err := rand.Read(bytes); err != nil {
		// No usable entropy; abort like the rest of this program does
		// on unrecoverable errors. (The original ignored this error.)
		panic(err)
	}
	// The original also printed each selected byte (debug leftover);
	// that noise has been removed.
	for i, b := range bytes {
		bytes[i] = alphanum[b%byte(len(alphanum))]
	}
	return string(bytes)
}
// main lists the docker containers reachable over the local socket and
// then serves a small HTTP API for starting and stopping simulated
// device containers on port 8080.
func main() {
	fmt.Println("Devices runner")
	// Init the client over the local docker unix socket.
	docker, err := dc.NewDockerClient("unix:///var/run/docker.sock", nil)
	if err != nil {
		panic(err)
	}
	// Startup sanity check: list all containers (running and stopped).
	containers, err := docker.ListContainers(true)
	if err != nil {
		panic(err)
	}
	for _, c := range containers {
		fmt.Println("container: ", c.Names, " status: ", c.Status)
	}
	// Collection endpoint and per-device endpoint.
	http.HandleFunc("/device", func(w http.ResponseWriter, r *http.Request) { devicesHandler(w, r, docker) })
	http.HandleFunc("/device/", func(w http.ResponseWriter, r *http.Request) { deviceHandler(w, r, docker) })
	// ListenAndServe only returns on failure; surface that instead of
	// silently exiting with status 0 (the original dropped the error).
	if err := http.ListenAndServe(":8080", nil); err != nil {
		panic(err)
	}
}
// deviceHandler handles per-device requests routed under "/device/".
// Only DELETE is supported: it stops and removes the container backing
// the device whose serial follows the route prefix.
func deviceHandler(w http.ResponseWriter, r *http.Request, docker *dc.DockerClient) {
	path := r.URL.Path
	switch r.Method {
	case "DELETE":
		fmt.Println("DELETE")
		// Strip the route prefix; what remains is the device serial.
		// (The original sliced by len("/devices"), which only worked
		// because it happens to equal len("/device/").)
		deviceName := path[len("/device/"):]
		fmt.Println("deletion of", deviceName, "requested")
		err := stopAndDeleteDevice(deviceName, docker)
		if err != nil {
			fmt.Println("Error deleting the container:", err)
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte(err.Error()))
		} else {
			w.WriteHeader(http.StatusNoContent)
		}
	}
}
// devicesHandler handles the "/device" collection endpoint:
//
//	POST   - create and start a new simulated device container
//	GET    - list device containers as a JSON array of serials
//	DELETE - stop and remove a device (serial taken from the path)
func devicesHandler(w http.ResponseWriter, r *http.Request, docker *dc.DockerClient) {
	path := r.URL.Path
	switch r.Method {
	case "POST":
		fmt.Println("POST")
		// JSON payload describing the requested device.
		var deviceRq struct {
			Type   string
			SN     string
			Server string
		}
		defer r.Body.Close()
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			panic(err)
		}
		err = json.Unmarshal(body, &deviceRq)
		if err != nil {
			// The status line must be written before the body; the
			// original wrote the body first, which locked in 200 OK
			// instead of 400.
			w.WriteHeader(http.StatusBadRequest)
			w.Write([]byte(err.Error()))
			return
		}
		// Auto-generate a serial number when the caller omitted one.
		if deviceRq.SN == "" {
			deviceRq.SN = randString(6)
		}
		fmt.Println("Start the device", deviceRq.Type, "with S/N", deviceRq.SN, "Versus server", deviceRq.Server)
		err = startDevice(deviceRq.Type, deviceRq.SN, deviceRq.Server, docker)
		if err != nil {
			fmt.Println("Error starting the container:", err)
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte(err.Error()))
		} else {
			w.WriteHeader(http.StatusCreated)
			w.Write([]byte(deviceRq.SN))
		}
	case "GET":
		fmt.Println("GET")
		devices := listDevices(docker)
		body, err := json.Marshal(&devices)
		if err != nil {
			panic(err)
		}
		w.Write(body)
	case "DELETE":
		fmt.Println("DELETE")
		deviceName := path[len("/device"):]
		fmt.Println("deletion of", deviceName, "requested")
		err := stopAndDeleteDevice(deviceName, docker)
		if err != nil {
			fmt.Println("Error deleting the container:", err)
			w.WriteHeader(http.StatusInternalServerError)
			w.Write([]byte(err.Error()))
		} else {
			w.WriteHeader(http.StatusNoContent)
		}
	}
	fmt.Println("start device API: ", path)
}
// startDevice creates and starts a docker container for a simulated
// device of the given type. The image is expected to be named
// "device-<type>"; the serial and target server are passed through
// environment variables, and the container is named "device<sn>".
func startDevice(devType string, sn string, server string, docker *dc.DockerClient) error {
	cfg := dc.ContainerConfig{
		Image:        "device-" + devType,
		Env:          []string{"DEVICE_SN=" + sn, "DEVICE_SERVER=" + server},
		Tty:          true,
		AttachStdout: true,
		AttachStdin:  false,
		AttachStderr: true,
	}
	id, err := docker.CreateContainer(&cfg, "device"+sn)
	if err != nil {
		return err
	}
	hostCfg := dc.HostConfig{}
	return docker.StartContainer(id, &hostCfg)
}
// stopAndDeleteDevice kills and removes the container backing the
// device with the given serial number, returning the first error
// encountered.
func stopAndDeleteDevice(deviceSn string, docker *dc.DockerClient) error {
	container := "device" + deviceSn
	if err := docker.KillContainer(container); err != nil {
		return err
	}
	// The original assigned this error and then returned nil
	// unconditionally, silently dropping removal failures.
	return docker.RemoveContainer(container)
}
// listDevices returns the serial numbers of all running containers
// whose (joined) name starts with "/device".
func listDevices(docker *dc.DockerClient) []string {
	containers, err := docker.ListContainers(false)
	if err != nil {
		panic(err)
	}
	const prefix = "/device"
	res := make([]string, 0, 10)
	for _, c := range containers {
		joined := arrayToStr(c.Names)
		if strings.HasPrefix(joined, prefix) {
			// Keep only the serial portion after the prefix.
			res = append(res, joined[len(prefix):])
		}
	}
	return res
}
// arrayToStr concatenates all strings in src into a single string with
// no separator.
func arrayToStr(src []string) string {
	// strings.Join performs one allocation instead of the quadratic
	// += loop the original used.
	return strings.Join(src, "")
}
|
package binance
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strconv"
)
// Money represents a price quote as a floating-point amount.
type Money float64

// Request performs market-data queries against the Binance HTTP API.
type Request struct{}
// CurrentPrice fetches the latest price for the given symbol
// (e.g. "BTCUSDT") from the Binance market-data endpoint and returns it
// as Money. On failure it returns 0 and an error that wraps the
// underlying cause.
func (*Request) CurrentPrice(currency string) (cost Money, err error) {
	//About Binance API: https://binance-docs.github.io/apidocs/
	params := url.Values{}
	params.Set("symbol", currency)
	r, err := http.Get(MarketEndpoint + params.Encode())
	if err != nil {
		// Wrap the cause with %w so callers can inspect it via
		// errors.Is/Unwrap (the original discarded it entirely).
		return 0, fmt.Errorf("failed market connect: %w", err)
	}
	defer r.Body.Close()
	respBody, err := ioutil.ReadAll(r.Body)
	if err != nil {
		return 0, fmt.Errorf("incorrect market response: %w", err)
	}
	// Binance replies with {"symbol": "...", "price": "..."}; the price
	// is a decimal string.
	type BinanceResponse struct {
		Symbol string
		Price  string
	}
	respData := new(BinanceResponse)
	if err = json.Unmarshal(respBody, respData); err != nil {
		return 0, fmt.Errorf("incorrect market response: %w", err)
	}
	priceFloat, err := strconv.ParseFloat(respData.Price, 64)
	if err != nil {
		return 0, fmt.Errorf("incorrect market response: %w", err)
	}
	return Money(priceFloat), nil
}
|
package sync_test
import (
"fmt"
"testing"
"time"
"github.com/ikascrew/core/sync"
)
// TestTenGroup verifies that a group limited to 10 concurrent workers
// runs 100 tasks of ~100ms each in roughly 10 serialized batches,
// i.e. total wall time between 1s (exclusive) and 1.5s (inclusive).
func TestTenGroup(t *testing.T) {
	begin := time.Now()
	group := sync.NewGroup(10)
	for n := 0; n < 100; n++ {
		group.Add()
		go func(idx int) {
			defer group.Done()
			fmt.Println(idx, time.Now())
			time.Sleep(time.Millisecond * 100)
		}(n)
	}
	group.Wait()
	finish := time.Now()
	if errs := group.Errors(); errs != nil {
		t.Error("wait want not error")
	}
	elapsed := finish.Sub(begin)
	if elapsed.Milliseconds() <= int64(1000) {
		t.Error("error 1 second")
	}
	if elapsed.Milliseconds() > int64(1500) {
		t.Error("error 1.5 second")
	}
}
// TestZeroGroup verifies that a group with no concurrency limit (0)
// runs all 100 ~100ms tasks in parallel, finishing in well under half
// a second of wall time.
func TestZeroGroup(t *testing.T) {
	start := time.Now()
	wait := sync.NewGroup(0)
	for i := 0; i < 100; i++ {
		wait.Add()
		go func(idx int) {
			defer wait.Done()
			fmt.Println(idx, time.Now())
			time.Sleep(time.Millisecond * 100)
		}(i)
	}
	wait.Wait()
	end := time.Now()
	if errs := wait.Errors(); errs != nil {
		t.Error("wait want not error")
	}
	diff := end.Sub(start)
	if diff.Milliseconds() > int64(500) {
		// Message fixed: the bound checked here is 0.5s, but the
		// original message claimed "1 second".
		t.Error("error 0.5 second")
	}
}
|
package logic
import (
tpns "Open_IM/internal/push/sdk/tpns-server-sdk-go/go"
"Open_IM/internal/push/sdk/tpns-server-sdk-go/go/auth"
"Open_IM/internal/push/sdk/tpns-server-sdk-go/go/common"
"Open_IM/internal/push/sdk/tpns-server-sdk-go/go/req"
"Open_IM/pkg/common/config"
)
// badgeType is the badge value sent in the APNs payload.
// NOTE(review): -2 appears to be a TPNS-specific sentinel (commonly
// "leave badge unchanged") — confirm against the TPNS badge docs.
var badgeType = -2

// iosAcceptId holds the TPNS credentials used to sign iOS push requests.
var iosAcceptId = auth.Auther{AccessID: config.Config.Push.Tpns.Ios.AccessID, SecretKey: config.Config.Push.Tpns.Ios.SecretKey}
// IOSAccountListPush sends an APNs push with the given title, content
// and custom JSON payload to the listed accounts via TPNS. Errors while
// building the request are silently dropped, matching the original
// behavior.
func IOSAccountListPush(accounts []string, title, content, jsonCustomContent string) {
	message := tpns.Message{
		Title:   title,
		Content: content,
		IOS: &tpns.IOSParams{
			Aps: &tpns.Aps{
				BadgeType: &badgeType,
				Sound:     "default",
				Category:  "INVITE_CATEGORY",
			},
			CustomContent: jsonCustomContent,
			//CustomContent: `"{"key\":\"value\"}"`,
		},
	}
	pushReq, reqBody, err := req.NewListAccountPush(accounts, message)
	if err != nil {
		return
	}
	iosAcceptId.Auth(pushReq, auth.UseSignAuthored, iosAcceptId, reqBody)
	common.PushAndGetResult(pushReq)
}
|
package docs
import (
"net/http"
"github.com/harriklein/pBE/pBEServer/app"
)
// Init initializes the endpoint by registering the documentation
// handler for every /docs route variant.
func Init() {
	// region DOCUMENTATION HANDLER ----------------------------
	handler := NewDocHandler()
	// Same GET handler for the parameterized, trailing-slash and bare
	// forms of the docs route, registered in that order.
	for _, pattern := range []string{"/docs/{file}", "/docs/", "/docs"} {
		app.SrvMux.HandleFunc(pattern, handler.Get).Methods(http.MethodGet)
	}
	// endregion -----------------------------------------------
}
|
package command
import (
"regexp"
"github.com/jclem/graphsh/types"
)
// On scopes the query with an inline fragment for a concrete type
type On struct {
	// concreteType is the type name captured from the "on" command's
	// optional argument; empty when no argument was given.
	concreteType string
}
// onTest matches "on" optionally followed by a single space and a type
// name of letters, digits, underscores or hyphens.
var onTest = regexp.MustCompile("^on(?: ([a-zA-Z0-9_-]+))?$")
// testOn parses input as an "on" command. It returns (nil, nil) when
// the input is not an "on" command at all, and otherwise an *On
// carrying the captured type name (possibly empty).
func testOn(input string) (Command, error) {
	m := onTest.FindStringSubmatch(input)
	if m == nil {
		return nil, nil
	}
	return &On{concreteType: m[1]}, nil
}
// Execute implements the Command interface
// by assigning this command's captured concrete type to the session's
// current query.
func (o On) Execute(s types.Session) error {
	s.CurrentQuery().ConcreteType = o.concreteType
	return nil
}
|
package filter
import (
"context"
"fmt"
"time"
"github.com/mingo-chen/wheel-minirpc/core"
)
// filter1: func(ctx, req, next) (rsp, error)
// filter2: func(ctx, req, next) (rsp, error)
// filter3: func(ctx, req, next) (rsp, error)
// rsp, err := handler(ctx, req)
// AccessFilter is a filter that logs each RPC access: it prints the
// request before invoking the next handler in the chain, then prints
// the response, error and elapsed time afterwards.
func AccessFilter(ctx context.Context, req interface{}, chain core.HandlerFunc) (rsp interface{}, err error) {
	begin := time.Now()
	fmt.Printf("server hand start, req:%+v\n", req)
	rsp, err = chain(ctx, req)
	elapsed := time.Since(begin)
	// TODO: replace with the real logging component.
	fmt.Printf("server hand done, cost[%d ms], req:%+v, rsp:%+v, err:%+v\n", elapsed.Milliseconds(), req, rsp, err)
	return rsp, err
}
|
package cache
import (
"math/rand"
"sync"
"testing"
"time"
)
// BenchmarkLRU_Rand measures LRU throughput under a uniformly random
// workload: a pre-generated trace of keys in [0, 32768) is consumed
// alternating Set and Get on a size-8192 cache, and the hit/miss ratio
// is reported at the end.
func BenchmarkLRU_Rand(b *testing.B) {
	c := New(LRU, WithSize(8192))
	// Pre-generate the key trace so RNG cost stays outside the timed loop.
	trace := make([]int64, b.N*2)
	for i := 0; i < b.N*2; i++ {
		trace[i] = rand.Int63() % 32768
	}
	b.ResetTimer()
	var hit, miss int
	// Even indices write, odd indices read.
	for i := 0; i < 2*b.N; i++ {
		if i%2 == 0 {
			c.Set(trace[i], trace[i], 0)
		} else {
			_, ok := c.Get(trace[i])
			if ok {
				hit++
			} else {
				miss++
			}
		}
	}
	b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}
// BenchmarkLRU_Freq measures LRU performance under a skewed workload:
// half the trace is drawn from a hot range [0, 16384) and half from
// [0, 32768). The first b.N keys are written, then read back, and the
// hit/miss ratio is reported.
func BenchmarkLRU_Freq(b *testing.B) {
	c := New(LRU, WithSize(8192))
	// Pre-generate the key trace so RNG cost stays outside the timed loop.
	trace := make([]int64, b.N*2)
	for i := 0; i < b.N*2; i++ {
		if i%2 == 0 {
			trace[i] = rand.Int63() % 16384
		} else {
			trace[i] = rand.Int63() % 32768
		}
	}
	b.ResetTimer()
	// Populate with the first half of the trace, then measure re-reads.
	for i := 0; i < b.N; i++ {
		c.Set(trace[i], trace[i], 0)
	}
	var hit, miss int
	for i := 0; i < b.N; i++ {
		_, ok := c.Get(trace[i])
		if ok {
			hit++
		} else {
			miss++
		}
	}
	b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
}
// TestLRU exercises the core LRU contract on a size-128 cache:
// capacity-bounded eviction of the oldest entries, oldest-first key
// ordering from Keys(), deletion via Del, recent-ness refresh via Get,
// and Purge emptying the cache.
func TestLRU(t *testing.T) {
	c := New(LRU, WithSize(128))
	// Insert 256 keys into a 128-slot cache: 0..127 must be evicted.
	for i := 0; i < 256; i++ {
		c.Set(i, i, 0)
	}
	if c.Len() != 128 {
		t.Fatalf("bad len: %v", c.Len())
	}
	// Keys() is expected to iterate oldest-first, i.e. 128..255 here.
	for i, k := range c.Keys() {
		if v, ok := c.Get(k); !ok || v != k || v != i+128 {
			t.Fatalf("bad key: %v", k)
		}
	}
	for i := 0; i < 128; i++ {
		_, ok := c.Get(i)
		if ok {
			t.Fatal("should be evicted")
		}
	}
	for i := 128; i < 256; i++ {
		_, ok := c.Get(i)
		if !ok {
			t.Fatal("should not be evicted")
		}
	}
	// Explicit deletion removes entries immediately.
	for i := 128; i < 192; i++ {
		c.Del(i)
		_, ok := c.Get(i)
		if ok {
			t.Fatal("should be deleted")
		}
	}
	c.Get(192) // expect 192 to be last key in l.Keys()
	// Remaining keys are 193..255 followed by the refreshed 192.
	for i, k := range c.Keys() {
		if (i < 63 && k != i+193) || (i == 63 && k != 192) {
			t.Fatalf("out of order key: %v", k)
		}
	}
	// Purge empties the cache entirely.
	c.Purge()
	if c.Len() != 0 {
		t.Fatalf("bad len: %v", c.Len())
	}
	if _, ok := c.Get(200); ok {
		t.Fatal("should contain nothing")
	}
}
// TestLRUContains checks that Contain reports membership without
// refreshing recent-ness: after filling a size-2 cache with 1 and 2,
// inserting 3 must still evict 1 even though Contain(1) was called.
func TestLRUContains(t *testing.T) {
	cache := New(LRU, WithSize(2))
	cache.Set(1, 1, 0)
	cache.Set(2, 2, 0)
	if !cache.Contain(1) {
		t.Fatal("1 should be contained")
	}
	cache.Set(3, 3, 0)
	if cache.Contain(1) {
		t.Fatal("Contains should not have updated recent-ness of 1")
	}
}
// TestLRUPeek checks that Peek returns a value without refreshing its
// recent-ness: key 1 remains the eviction victim when 3 is inserted.
func TestLRUPeek(t *testing.T) {
	cache := New(LRU, WithSize(2))
	cache.Set(1, 1, 0)
	cache.Set(2, 2, 0)
	v, ok := cache.Peek(1)
	if !ok || v != 1 {
		t.Errorf("1 should be set to 1: %v, %v", v, ok)
	}
	cache.Set(3, 3, 0)
	if cache.Contain(1) {
		t.Errorf("should not have updated recent-ness of 1")
	}
}
// TestLRUTimeout checks per-entry TTL behavior: an entry set with a
// 100ms TTL expires after the sleep (shrinking the cache), while the
// TTL-less entry survives a subsequent insertion.
func TestLRUTimeout(t *testing.T) {
	c := New(LRU, WithSize(2))
	c.Set(1, 1, 0)
	c.Set(2, 2, time.Millisecond*100)
	if !c.Contain(1) || !c.Contain(2) {
		t.Fatal("1 and 2 should be contained")
	}
	// Wait well past entry 2's TTL.
	time.Sleep(time.Millisecond * 200)
	_, ok := c.Get(2)
	if ok {
		// Typo fixed: "should ge expired" -> "should be expired".
		t.Fatal("2 should be expired")
	}
	if c.Len() != 1 {
		t.Fatalf("bad len: %v", c.Len())
	}
	c.Set(3, 3, 0)
	if !c.Contain(1) {
		// NOTE(review): this message mentions key 2 but the check is on
		// key 1 — likely copy-paste; confirm intent before renaming.
		t.Fatal("Contains should not have updated recent-ness of 2")
	}
}
// TestLRULoad checks that GetOrLoad deduplicates concurrent loads of
// the same key: 100 goroutines requesting key 1 must trigger exactly
// one call to the loader function.
func TestLRULoad(t *testing.T) {
	count := 0
	fn := func(key interface{}) (interface{}, time.Duration, error) {
		count++
		return 1, 0, nil
	}
	c := New(LRU, WithSize(2))
	wg := sync.WaitGroup{}
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			// Deferred so a failing assertion can't leave Wait hanging.
			defer wg.Done()
			v, err := c.GetOrLoad(1, fn)
			if err != nil || v.(int) != 1 {
				// t.Fatal must not be called from a goroutine other than
				// the one running the test (it would only stop this
				// goroutine); report with t.Error and return instead.
				t.Error("1 should be contained")
				return
			}
		}()
	}
	wg.Wait()
	if count != 1 {
		t.Fatal("should be loaded only once")
	}
}
|
package logging
import (
"github.com/atymkiv/echo_frame_learning/blog/cmd/api/auth"
"github.com/atymkiv/echo_frame_learning/blog/model"
"github.com/labstack/echo"
)
// New creates new auth logging service
// that wraps the given auth.Service so its calls can be intercepted.
func New(svc auth.Service) *LogService {
	return &LogService{
		Service: svc,
	}
}
// LogService represents auth logging service
type LogService struct {
	// Embedded auth.Service provides the underlying behavior; any
	// method not overridden here is promoted unchanged.
	auth.Service
}
// Authenticate logging
// NOTE(review): this wrapper currently performs no logging — it only
// delegates to the embedded service. Either add the log call here or
// drop the method (embedding already promotes Authenticate).
func (ls *LogService) Authenticate(c echo.Context, user, password string) (resp *blog.AuthToken, err error) {
	return ls.Service.Authenticate(c, user, password)
}
|
package main
import (
"github.com/codegangsta/cli"
)
// NewConnLimitCommand returns the "connlimit" CLI command with "add",
// "update" and "rm" subcommands for managing per-location connection
// limits.
func NewConnLimitCommand() cli.Command {
	// Flags shared by "add" and "update". The struct literals are
	// positional for this cli version: {Name, Value, Usage}.
	addFlags := []cli.Flag{
		cli.StringFlag{"id", "", "connection limit id, autogenerated if omitted"},
		cli.StringFlag{"host", "", "location's host"},
		cli.StringFlag{"loc", "", "location"},
		cli.StringFlag{"var", "client.ip", "variable to limit against, e.g. client.ip, request.host or request.header.X-Header"},
		cli.IntFlag{"connections", 1, "amount of simultaneous connections to allow per variable"},
	}
	return cli.Command{
		Name:  "connlimit",
		Usage: "Operations with connection limits",
		Subcommands: []cli.Command{
			{
				Name:   "add",
				Usage:  "Add a new connection limit to a location",
				Flags:  addFlags,
				Action: addConnLimitAction,
			},
			{
				Name:   "update",
				Usage:  "Update existing connection limit",
				Flags:  addFlags,
				Action: updateConnLimitAction,
			},
			{
				Name:  "rm",
				Usage: "Delete connection limit from location",
				// "rm" only needs the identifying flags, not var/connections.
				Flags: []cli.Flag{
					cli.StringFlag{"id", "", "connection limit id"},
					cli.StringFlag{"host", "", "location's host"},
					cli.StringFlag{"loc", "", "location"},
				},
				Action: deleteConnLimitAction,
			},
		},
	}
}
// addConnLimitAction creates a new connection limit from CLI flags and
// prints the API call status.
// NOTE(review): "connections" is declared as an IntFlag but read with
// c.String here — verify against the client.AddConnLimit signature;
// c.Int("connections") looks intended.
func addConnLimitAction(c *cli.Context) {
	printStatus(
		client(c).AddConnLimit(
			c.String("host"),
			c.String("loc"),
			c.String("id"),
			c.String("var"),
			c.String("connections")))
}
// updateConnLimitAction updates an existing connection limit from CLI
// flags and prints the API call status.
// NOTE(review): "connections" is declared as an IntFlag but read with
// c.String here — same concern as in addConnLimitAction.
func updateConnLimitAction(c *cli.Context) {
	printStatus(
		client(c).UpdateConnLimit(
			c.String("host"),
			c.String("loc"),
			c.String("id"),
			c.String("var"),
			c.String("connections")))
}
// deleteConnLimitAction removes the connection limit identified by
// host, location and id, printing the API call status.
func deleteConnLimitAction(c *cli.Context) {
	// Fixed copy-paste bug: the connlimit "rm" command previously
	// called DeleteRateLimit, deleting a rate limit instead of a
	// connection limit.
	printStatus(client(c).DeleteConnLimit(
		c.String("host"),
		c.String("loc"),
		c.String("id")))
}
|
package cli_test
import (
"fmt"
"strings"
"testing"
"github.com/gogo/protobuf/proto"
"github.com/spf13/cobra"
"github.com/stretchr/testify/suite"
tmcli "github.com/tendermint/tendermint/libs/cli"
"github.com/cosmos/cosmos-sdk/client/flags"
"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1"
clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli"
testnet "github.com/cosmos/cosmos-sdk/testutil/network"
sdk "github.com/cosmos/cosmos-sdk/types"
namecli "github.com/provenance-io/provenance/x/name/client/cli"
nametypes "github.com/provenance-io/provenance/x/name/types"
"github.com/provenance-io/provenance/testutil"
)
// IntegrationTestSuite runs the x/name CLI commands against a
// single-validator test network.
type IntegrationTestSuite struct {
	suite.Suite
	cfg     testnet.Config
	testnet *testnet.Network
	// accountAddr/accountKey identify the account that owns the genesis
	// name bindings created in SetupSuite.
	accountAddr sdk.AccAddress
	accountKey  *secp256k1.PrivKey
}
// SetupSuite creates a deterministic test account, seeds the name
// module genesis with two bindings ("attribute" and
// "example.attribute") owned by that account, then starts a
// single-validator network and waits for the first block.
func (s *IntegrationTestSuite) SetupSuite() {
	// Deterministic key so the bound addresses are stable across runs.
	s.accountKey = secp256k1.GenPrivKeyFromSecret([]byte("acc2"))
	addr, err := sdk.AccAddressFromHex(s.accountKey.PubKey().Address().String())
	s.Require().NoError(err)
	s.accountAddr = addr
	s.T().Log("setting up integration test suite")
	cfg := testutil.DefaultTestNetworkConfig()
	genesisState := cfg.GenesisState
	cfg.NumValidators = 1
	// Name module parameters exercised by the query tests below.
	var nameData nametypes.GenesisState
	nameData.Params.AllowUnrestrictedNames = true
	nameData.Params.MaxNameLevels = 2
	nameData.Params.MaxSegmentLength = 32
	nameData.Params.MinSegmentLength = 1
	nameData.Bindings = append(nameData.Bindings, nametypes.NewNameRecord("attribute", s.accountAddr, false))
	nameData.Bindings = append(nameData.Bindings, nametypes.NewNameRecord("example.attribute", s.accountAddr, false))
	nameDataBz, err := cfg.Codec.MarshalJSON(&nameData)
	s.Require().NoError(err)
	genesisState[nametypes.ModuleName] = nameDataBz
	cfg.GenesisState = genesisState
	s.cfg = cfg
	s.testnet = testnet.New(s.T(), cfg)
	_, err = s.testnet.WaitForHeight(1)
	s.Require().NoError(err)
}
// TearDownSuite stops the validator network once all tests finish.
func (s *IntegrationTestSuite) TearDownSuite() {
	s.T().Log("tearing down integration test suite")
	s.testnet.Cleanup()
}
// TestGetNameParamsCmd verifies the name module params query renders
// the genesis parameters in both JSON and text (YAML) form.
func (s *IntegrationTestSuite) TestGetNameParamsCmd() {
	cases := []struct {
		name     string
		args     []string
		expected string
	}{
		{
			name:     "json output",
			args:     []string{fmt.Sprintf("--%s=json", tmcli.OutputFlag)},
			expected: "{\"max_segment_length\":32,\"min_segment_length\":1,\"max_name_levels\":2,\"allow_unrestricted_names\":true}",
		},
		{
			name: "text output",
			args: []string{fmt.Sprintf("--%s=text", tmcli.OutputFlag)},
			expected: `allow_unrestricted_names: true
max_name_levels: 2
max_segment_length: 32
min_segment_length: 1`,
		},
	}
	for _, tc := range cases {
		tc := tc
		s.Run(tc.name, func() {
			clientCtx := s.testnet.Validators[0].ClientCtx
			out, err := clitestutil.ExecTestCLICmd(clientCtx, namecli.QueryParamsCmd(), tc.args)
			s.Require().NoError(err)
			s.Require().Equal(tc.expected, strings.TrimSpace(out.String()))
		})
	}
}
// TestResolveNameCommand verifies name resolution output in JSON and
// text form, including the empty result for an unbound name.
func (s *IntegrationTestSuite) TestResolveNameCommand() {
	cases := []struct {
		name     string
		args     []string
		expected string
	}{
		{
			name:     "query name, json output",
			args:     []string{"attribute", fmt.Sprintf("--%s=json", tmcli.OutputFlag)},
			expected: fmt.Sprintf("{\"address\":\"%s\"}", s.accountAddr.String()),
		},
		{
			name:     "query name, text output",
			args:     []string{"attribute", fmt.Sprintf("--%s=text", tmcli.OutputFlag)},
			expected: fmt.Sprintf("address: %s", s.accountAddr.String()),
		},
		{
			name:     "query name that does not exist, text output",
			args:     []string{"doesnotexist", fmt.Sprintf("--%s=text", tmcli.OutputFlag)},
			expected: "",
		},
	}
	for _, tc := range cases {
		tc := tc
		s.Run(tc.name, func() {
			clientCtx := s.testnet.Validators[0].ClientCtx
			out, err := clitestutil.ExecTestCLICmd(clientCtx, namecli.ResolveNameCommand(), tc.args)
			s.Require().NoError(err)
			s.Require().Equal(tc.expected, strings.TrimSpace(out.String()))
		})
	}
}
// TestReverseLookupCommand verifies address-to-names lookup output in
// both JSON and text form, including the empty result for an address
// holding no bindings.
func (s *IntegrationTestSuite) TestReverseLookupCommand() {
	// A fresh account with no bindings, for the "not found" cases.
	accountKey := secp256k1.GenPrivKeyFromSecret([]byte("nobindinginthisaccount"))
	addr, _ := sdk.AccAddressFromHex(accountKey.PubKey().Address().String())
	testCases := []struct {
		name           string
		args           []string
		expectedOutput string
	}{
		{
			"query name, json output",
			[]string{s.accountAddr.String(), fmt.Sprintf("--%s=json", tmcli.OutputFlag)},
			"{\"name\":[\"example.attribute\",\"attribute\"],\"pagination\":{\"next_key\":null,\"total\":\"0\"}}",
		},
		{
			"query name, text output",
			[]string{s.accountAddr.String(), fmt.Sprintf("--%s=text", tmcli.OutputFlag)},
			"name:\n- example.attribute\n- attribute\npagination:\n  next_key: null\n  total: \"0\"",
		},
		{
			"query name that does not exist, text output",
			[]string{addr.String(), fmt.Sprintf("--%s=text", tmcli.OutputFlag)},
			"name: []\npagination:\n  next_key: null\n  total: \"0\"",
		},
		{
			"query name that does not exist, json output",
			[]string{addr.String(), fmt.Sprintf("--%s=json", tmcli.OutputFlag)},
			"{\"name\":[],\"pagination\":{\"next_key\":null,\"total\":\"0\"}}",
		},
	}
	for _, tc := range testCases {
		tc := tc
		s.Run(tc.name, func() {
			cmd := namecli.ReverseLookupCommand()
			clientCtx := s.testnet.Validators[0].ClientCtx
			out, err := clitestutil.ExecTestCLICmd(clientCtx, cmd, tc.args)
			s.Require().NoError(err)
			s.Require().Equal(tc.expectedOutput, strings.TrimSpace(out.String()))
		})
	}
}
// TestGetBindNameCommand submits bind-name transactions and checks the
// resulting tx response codes (0 = success, nonzero = module error).
func (s *IntegrationTestSuite) TestGetBindNameCommand() {
	testCases := []struct {
		name         string
		cmd          *cobra.Command
		args         []string
		expectErr    bool
		respType     proto.Message
		expectedCode uint32
	}{
		{
			"should bind name to root name",
			namecli.GetBindNameCmd(),
			[]string{"bindnew", s.testnet.Validators[0].Address.String(), "attribute",
				fmt.Sprintf("--%s=%s", flags.FlagFrom, s.testnet.Validators[0].Address.String()),
				fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
				fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
				fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
			},
			false, &sdk.TxResponse{}, 0,
		},
		{
			// Empty root name is rejected by the module (code 1).
			"should fail to bind name to empty root name",
			namecli.GetBindNameCmd(),
			[]string{"bindnew", s.testnet.Validators[0].Address.String(), "",
				fmt.Sprintf("--%s=%s", flags.FlagFrom, s.testnet.Validators[0].Address.String()),
				fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
				fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
				fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
			},
			false, &sdk.TxResponse{}, 1,
		},
		{
			// NOTE(review): "does exist" in this case name likely means
			// "does not exist" — the root name "dne" is unbound.
			"should fail to bind name to root name that does exist",
			namecli.GetBindNameCmd(),
			[]string{"bindnew", s.testnet.Validators[0].Address.String(), "dne",
				fmt.Sprintf("--%s=%s", flags.FlagFrom, s.testnet.Validators[0].Address.String()),
				fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
				fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
				fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
			},
			false, &sdk.TxResponse{}, 18,
		},
	}
	for _, tc := range testCases {
		tc := tc
		s.Run(tc.name, func() {
			clientCtx := s.testnet.Validators[0].ClientCtx
			out, err := clitestutil.ExecTestCLICmd(clientCtx, tc.cmd, tc.args)
			if tc.expectErr {
				s.Require().Error(err)
			} else {
				s.Require().NoError(err)
				s.Require().NoError(clientCtx.JSONMarshaler.UnmarshalJSON(out.Bytes(), tc.respType), out.String())
				txResp := tc.respType.(*sdk.TxResponse)
				s.Require().Equal(tc.expectedCode, txResp.Code)
			}
		})
	}
}
// TestGetDeleteNameCmd binds a throwaway name and then exercises the
// delete-name command: successful deletion, deletion of an unbound
// name, and deletion by a non-owner, checking tx response codes.
func (s *IntegrationTestSuite) TestGetDeleteNameCmd() {
	testCases := []struct {
		name         string
		cmd          *cobra.Command
		args         []string
		expectErr    bool
		respType     proto.Message
		expectedCode uint32
	}{
		{
			// Setup step: create the binding the next case deletes.
			"bind name for deletion",
			namecli.GetBindNameCmd(),
			[]string{"todelete", s.testnet.Validators[0].Address.String(), "attribute",
				fmt.Sprintf("--%s=%s", flags.FlagFrom, s.testnet.Validators[0].Address.String()),
				fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
				fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
				fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
			},
			false, &sdk.TxResponse{}, 0,
		},
		{
			"should delete name",
			namecli.GetDeleteNameCmd(),
			[]string{"todelete.attribute",
				fmt.Sprintf("--%s=%s", flags.FlagFrom, s.testnet.Validators[0].Address.String()),
				fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
				fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
				fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
			},
			false, &sdk.TxResponse{}, 0,
		},
		{
			// NOTE(review): "does exist" likely means "does not exist".
			"should fail to delete name that does exist",
			namecli.GetDeleteNameCmd(),
			[]string{"dne",
				fmt.Sprintf("--%s=%s", flags.FlagFrom, s.testnet.Validators[0].Address.String()),
				fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
				fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
				fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
			},
			false, &sdk.TxResponse{}, 18,
		},
		{
			// "example.attribute" belongs to s.accountAddr, not the validator.
			"should fail to delete name, not authorized",
			namecli.GetDeleteNameCmd(),
			[]string{"example.attribute",
				fmt.Sprintf("--%s=%s", flags.FlagFrom, s.testnet.Validators[0].Address.String()),
				fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation),
				fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastBlock),
				fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(s.cfg.BondDenom, sdk.NewInt(10))).String()),
			},
			false, &sdk.TxResponse{}, 4,
		},
	}
	for _, tc := range testCases {
		tc := tc
		s.Run(tc.name, func() {
			clientCtx := s.testnet.Validators[0].ClientCtx
			out, err := clitestutil.ExecTestCLICmd(clientCtx, tc.cmd, tc.args)
			if tc.expectErr {
				s.Require().Error(err)
			} else {
				s.Require().NoError(err)
				s.Require().NoError(clientCtx.JSONMarshaler.UnmarshalJSON(out.Bytes(), tc.respType), out.String())
				txResp := tc.respType.(*sdk.TxResponse)
				s.Require().Equal(tc.expectedCode, txResp.Code)
			}
		})
	}
}
// TestIntegrationTestSuite is the "go test" entry point for the suite.
func TestIntegrationTestSuite(t *testing.T) {
	suite.Run(t, new(IntegrationTestSuite))
}
|
package main
import (
"fmt"
"github.com/pkg/errors"
"io/ioutil"
"net/http"
"regexp"
"time"
)
// getEncryptionKey scrapes the public encryption key from a GitHub
// Actions step log. It fetches the log endpoint for the given job step,
// retrying up to 5 times with linearly increasing backoff while the log
// 404s (logs appear asynchronously), then extracts the value following
// the "ACTIONS2AWS PUBKEY: " marker.
func (api *Api) getEncryptionKey(repo, commitSha string, jobId, stepIdx int) (string, error) {
	count := 0
attempt:
	jobLogsUrl := fmt.Sprintf("https://github.com/%s/commit/%s/checks/%d/logs/%d", repo, commitSha, jobId, stepIdx)
	req, err := http.NewRequest("GET", jobLogsUrl, nil)
	if err != nil {
		return "", errors.WithStack(err)
	}
	// The logs endpoint requires an XHR request from a logged-in session.
	req.Header.Set("X-Requested-With", "XMLHttpRequest")
	req.AddCookie(&http.Cookie{Name: "user_session", Value: api.userSession})
	jobLogsResp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", errors.WithStack(err)
	}
	if jobLogsResp.StatusCode == 404 && count < 5 {
		// Close this attempt's body before retrying; the original
		// leaked one connection per retry.
		jobLogsResp.Body.Close()
		time.Sleep(time.Duration(count) * time.Second)
		count++
		goto attempt
	}
	jobLogBytes, err := ioutil.ReadAll(jobLogsResp.Body)
	jobLogsResp.Body.Close()
	if err != nil {
		return "", errors.WithStack(err)
	}
	regex := regexp.MustCompile(`ACTIONS2AWS PUBKEY: (\S+)`)
	matches := regex.FindStringSubmatch(string(jobLogBytes))
	if len(matches) < 2 {
		// The original indexed matches[1] unconditionally, panicking
		// whenever the marker was absent from the log.
		return "", errors.Errorf("pubkey marker not found in job log %s", jobLogsUrl)
	}
	return matches[1], nil
}
// getJobs fetches the jobs list for a workflow run from the GitHub API
// and returns the raw response body.
func (api *Api) getJobs(repo, runId string) ([]byte, error) {
	url := fmt.Sprintf("https://api.github.com/repos/%s/actions/runs/%s/jobs", repo, runId)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		// The original discarded this error with req, _ := ...
		return nil, errors.WithStack(err)
	}
	req.Header.Set("Authorization", "token "+api.githubToken)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	// Close the body so the transport can reuse the connection
	// (previously leaked).
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	return body, errors.WithStack(err)
}
// getRun fetches a single workflow run object from the GitHub API and
// returns the raw response body.
func (api *Api) getRun(repo, runId string) ([]byte, error) {
	url := fmt.Sprintf("https://api.github.com/repos/%s/actions/runs/%s", repo, runId)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		// The original discarded this error with req, _ := ...
		return nil, errors.WithStack(err)
	}
	req.Header.Set("Authorization", "token "+api.githubToken)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, errors.WithStack(err)
	}
	// Close the body so the transport can reuse the connection
	// (previously leaked).
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	return body, errors.WithStack(err)
}
// runResponse mirrors the GitHub API workflow-run object returned by
// GET /repos/{repo}/actions/runs/{run_id}. The JSON tags match the API
// field names.
type runResponse struct {
	ID             int           `json:"id"`
	NodeID         string        `json:"node_id"`
	HeadBranch     string        `json:"head_branch"`
	HeadSha        string        `json:"head_sha"`
	RunNumber      int           `json:"run_number"`
	Event          string        `json:"event"`
	Status         string        `json:"status"`
	Conclusion     string        `json:"conclusion"`
	WorkflowID     int           `json:"workflow_id"`
	URL            string        `json:"url"`
	HTMLURL        string        `json:"html_url"`
	PullRequests   []interface{} `json:"pull_requests"`
	CreatedAt      time.Time     `json:"created_at"`
	UpdatedAt      time.Time     `json:"updated_at"`
	JobsURL        string        `json:"jobs_url"`
	LogsURL        string        `json:"logs_url"`
	CheckSuiteURL  string        `json:"check_suite_url"`
	ArtifactsURL   string        `json:"artifacts_url"`
	CancelURL      string        `json:"cancel_url"`
	RerunURL       string        `json:"rerun_url"`
	WorkflowURL    string        `json:"workflow_url"`
	// Only the repository IDs are decoded from the nested objects.
	Repository struct {
		ID int `json:"id"`
	} `json:"repository"`
	HeadRepository struct {
		ID int `json:"id"`
	} `json:"head_repository"`
}
// jobsResponse mirrors the top-level GitHub API response for a run's
// jobs listing (GET .../actions/runs/{run_id}/jobs).
type jobsResponse struct {
	Jobs []Job `json:"jobs"`
}
// Job mirrors a single GitHub Actions job object, including its step
// list. The JSON tags match the API field names.
type Job struct {
	ID          int       `json:"id"`
	RunID       int       `json:"run_id"`
	RunURL      string    `json:"run_url"`
	NodeID      string    `json:"node_id"`
	HeadSha     string    `json:"head_sha"`
	URL         string    `json:"url"`
	HTMLURL     string    `json:"html_url"`
	Status      string    `json:"status"`
	Conclusion  string    `json:"conclusion"`
	StartedAt   time.Time `json:"started_at"`
	CompletedAt time.Time `json:"completed_at"`
	Name        string    `json:"name"`
	// Steps are decoded inline; Number is the 1-based step index used
	// when fetching per-step logs.
	Steps []struct {
		Name        string    `json:"name"`
		Status      string    `json:"status"`
		Conclusion  string    `json:"conclusion"`
		Number      int       `json:"number"`
		StartedAt   time.Time `json:"started_at"`
		CompletedAt time.Time `json:"completed_at"`
	} `json:"steps"`
	CheckRunURL string `json:"check_run_url"`
}
|
/*
Copyright 2015 Fastly Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"database/sql"
"log"
)
// listPrincipals returns the names of all principals, provided the
// requesting principal holds the "principal_manage" ACL.
func listPrincipals(db *sql.DB, principal string) (principals []string, err error) {
	if err = CheckAclNonHierarchical(db, principal, "principal_manage"); err != nil {
		return
	}
	rows, err := db.Query("SELECT name FROM principals")
	if err != nil {
		// Return the error instead of log.Fatal: killing the whole
		// process from a query helper is too drastic, and the signature
		// already propagates errors (the Fatal also made the original
		// "return" unreachable).
		return
	}
	// Release the result set on every exit path (previously leaked).
	defer rows.Close()
	for rows.Next() {
		// Renamed from "principal", which shadowed the parameter.
		var name string
		if err = rows.Scan(&name); err != nil {
			return
		}
		principals = append(principals, name)
	}
	// Surface any iteration error to the caller.
	err = rows.Err()
	return
}
// createPrincipal inserts a new principal row with the given SSH key
// and provisioned flag, provided the requesting principal holds the
// "principal_manage" ACL.
func createPrincipal(db *sql.DB, principal, newPrincipal, SSHKey string, provisioned bool) (err error) {
	if err = CheckAclNonHierarchical(db, principal, "principal_manage"); err != nil {
		return
	}
	// Return the insert error rather than log.Fatal-ing the process;
	// the signature already propagates it to the caller.
	_, err = db.Exec("INSERT INTO principals(name, ssh_key, provisioned) VALUES ($1, $2, $3)", newPrincipal, SSHKey, provisioned)
	return err
}
// deletePrincipal removes the named principal row, provided the
// requesting principal holds the "principal_manage" ACL.
func deletePrincipal(db *sql.DB, principal, deletePrincipal string) (err error) {
	if err = CheckAclNonHierarchical(db, principal, "principal_manage"); err != nil {
		return
	}
	// Return the delete error rather than log.Fatal-ing the process;
	// the signature already propagates it to the caller.
	_, err = db.Exec("DELETE FROM principals WHERE name = $1", deletePrincipal)
	return err
}
|
package api
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/brigadecore/brigade-foundations/retries"
"github.com/brigadecore/brigade/v2/apiserver/internal/meta"
myk8s "github.com/brigadecore/brigade/v2/internal/kubernetes"
"github.com/pkg/errors"
)
// LogsSelector represents useful criteria for selecting logs to be streamed
// from any container belonging to some Worker OR any container belonging to
// Jobs spawned by that Worker.
//
// The zero value selects the Worker's own default container.
type LogsSelector struct {
	// Job specifies, by name, a Job spawned by some Worker. If not specified, log
	// streaming operations presume logs are desired for the Worker itself.
	Job string
	// Container specifies, by name, a container belonging to some Worker or, if
	// Job is specified, that Job. If not specified, log streaming operations
	// presume logs are desired from a container having the same name as the
	// selected Worker or Job.
	Container string
}
// LogStreamOptions represents useful options for streaming logs from some
// container of a Worker or Job.
type LogStreamOptions struct {
	// Follow indicates whether the stream should conclude after the last
	// available line of logs has been sent to the client (false) or remain open
	// until closed by the client (true), continuing to send new lines as they
	// become available. The zero value (false) is the non-following mode.
	Follow bool `json:"follow"`
}
// LogEntry represents one line of output from an OCI container.
type LogEntry struct {
	// Time is the time the line was written.
	Time *time.Time `json:"time,omitempty" bson:"time,omitempty"`
	// Message is a single line of log output from an OCI container.
	// Note the asymmetric tags: serialized as "message" in JSON but
	// stored under "log" in BSON.
	Message string `json:"message,omitempty" bson:"log,omitempty"`
}
// MarshalJSON amends LogEntry instances with type metadata so that clients do
// not need to be concerned with the tedium of doing so.
func (l LogEntry) MarshalJSON() ([]byte, error) {
	// Alias shares LogEntry's fields but not its methods, preventing this
	// method from recursing when the wrapper below is marshaled.
	type Alias LogEntry
	wrapper := struct {
		meta.TypeMeta `json:",inline"`
		Alias         `json:",inline"`
	}{
		TypeMeta: meta.TypeMeta{
			APIVersion: meta.APIVersion,
			Kind:       "LogEntry",
		},
		Alias: Alias(l),
	}
	return json.Marshal(wrapper)
}
// LogsService is the specialized interface for accessing logs. It's
// decoupled from underlying technology choices (e.g. data store, message bus,
// etc.) to keep business logic reusable and consistent while the underlying
// tech stack remains free to change.
type LogsService interface {
	// Stream returns a channel over which logs for an Event's Worker, or using
	// the LogsSelector parameter, a Job spawned by that Worker (or specific
	// container thereof), are streamed. If the specified Event, Job, or Container
	// thereof does not exist, implementations MUST return a *meta.ErrNotFound
	// error. Whether the stream stays open after the last available line is
	// controlled by opts.Follow.
	Stream(
		ctx context.Context,
		eventID string,
		selector LogsSelector,
		opts LogStreamOptions,
	) (<-chan LogEntry, error)
}
// logsService is the default LogsService implementation.
type logsService struct {
	// authorize checks principal-level role assignments (worker/event-creator
	// access in Stream).
	authorize AuthorizeFn
	// projectAuthorize checks project-scoped role assignments.
	projectAuthorize ProjectAuthorizeFn
	projectsStore    ProjectsStore
	eventsStore      EventsStore
	// warmLogsStore serves logs while the underlying pods still exist;
	// coolLogsStore is the fallback archive consulted when they don't
	// (see the fallback at the end of Stream).
	warmLogsStore LogsStore
	coolLogsStore LogsStore
}
// NewLogsService returns a specialized interface for accessing logs, wired
// to the supplied authorization functions and stores.
func NewLogsService(
	authorize AuthorizeFn,
	projectAuthorize ProjectAuthorizeFn,
	projectsStore ProjectsStore,
	eventsStore EventsStore,
	warmLogsStore LogsStore,
	coolLogsStore LogsStore,
) LogsService {
	svc := logsService{
		authorize:        authorize,
		projectAuthorize: projectAuthorize,
		projectsStore:    projectsStore,
		eventsStore:      eventsStore,
		warmLogsStore:    warmLogsStore,
		coolLogsStore:    coolLogsStore,
	}
	return &svc
}
// Stream implements LogsService.Stream: it resolves selector defaults,
// authorizes the caller, validates that the requested container exists,
// waits for the target Worker/Job to start, then streams from the warm
// store with a fallback to the cool store.
// nolint: gocyclo
func (l *logsService) Stream(
	ctx context.Context,
	eventID string,
	selector LogsSelector,
	opts LogStreamOptions,
) (<-chan LogEntry, error) {
	// Set defaults on the selector
	if selector.Job == "" { // If a job isn't specified, then we want worker logs
		if selector.Container == "" {
			// If a container isn't specified, we want the one named "worker"
			selector.Container = myk8s.LabelKeyWorker
		}
	} else { // A job was specified, so we want job logs
		if selector.Container == "" {
			// If a container isn't specified, we want the primary container's logs.
			// The primary container has the same name as the job itself.
			selector.Container = selector.Job
		}
	}
	event, err := l.eventsStore.Get(ctx, eventID)
	if err != nil {
		return nil,
			errors.Wrapf(err, "error retrieving event %q from store", eventID)
	}
	// Throughout the service layer, we typically only require RoleReader() to
	// authorize read-only operations of any kind. In the case of logs, however,
	// there's just too much possibility of secrets bleeding into the logs, not
	// due to any fault of Brigade's but because of some end-user misstep. So, out
	// of an abundance of caution, we raise the bar a little on this one read-only
	// operation and require the principal to be a project user in order to stream
	// logs.
	err = l.projectAuthorize(ctx, event.ProjectID, RoleProjectUser)
	if err != nil {
		// We also permit access by the event's worker
		err = l.authorize(ctx, RoleWorker, event.ID)
	}
	if err != nil {
		// We also permit access by the creator of the event. This enables smarter
		// gateways to send logs "upstream" if appropriate.
		err = l.authorize(ctx, RoleEventCreator, event.Source)
	}
	if err != nil {
		return nil, err
	}
	var containerFound bool
	if selector.Job == "" {
		// If we're here, we want worker logs. The only valid worker containers
		// are "worker" itself and, when the worker uses git, "vcs".
		if selector.Container != myk8s.LabelKeyWorker &&
			!(selector.Container == "vcs" && event.Worker.Spec.Git != nil) {
			return nil, &meta.ErrNotFound{
				Type: "WorkerContainer",
				ID:   selector.Container,
			}
		}
		containerFound = true
	} else {
		// If we're here, we want logs from a specific job. Make sure that job
		// exists.
		job, ok := event.Worker.Job(selector.Job)
		if !ok {
			return nil, &meta.ErrNotFound{
				Type: JobKind,
				ID:   selector.Job,
			}
		}
		// And make sure the container exists.
		if selector.Container == job.Name {
			// If the container name matches the job name, it's a request for logs
			// from the primary container. That always exists.
			containerFound = true
		} else if selector.Container == "vcs" {
			// vcs is a valid container name IF at least one of the job's containers
			// use source from git.
			containerFound = job.Spec.PrimaryContainer.SourceMountPath != ""
			if !containerFound {
				// If we get to here, the primary container didn't use source, so check
				// if any of the sidecars do.
				for _, containerSpec := range job.Spec.SidecarContainers {
					if containerSpec.SourceMountPath != "" {
						containerFound = true
						break
					}
				}
			}
		} else {
			// If we get to here, the container name didn't match the job name (which
			// is also the name of the primary container) and it wasn't "vcs" either.
			// Just loop through the sidecars to see if such a container exists.
			for containerName := range job.Spec.SidecarContainers {
				if containerName == selector.Container {
					containerFound = true
					break
				}
			}
		}
		if !containerFound {
			return nil, &meta.ErrNotFound{
				Type: "JobContainer",
				ID:   selector.Container,
			}
		}
		// Check to see if we need to look up logs via a specific event ID,
		// as job may be cached and carried over on a retry event
		if job.Status != nil && job.Status.LogsEventID != "" {
			event, err = l.eventsStore.Get(ctx, job.Status.LogsEventID)
			if err != nil {
				if _, ok := err.(*meta.ErrNotFound); ok {
					return nil,
						&meta.ErrNotFound{
							Reason: fmt.Sprintf(
								"Unable to retrieve logs for job %q: the "+
									"original logs inherited by this job no longer exist.",
								job.Name,
							),
						}
				}
				return nil,
					errors.Wrapf(
						err,
						"error retrieving logs for job %q",
						job.Name,
					)
			}
		}
	}
	// Make sure the project exists
	project, err := l.projectsStore.Get(ctx, event.ProjectID)
	if err != nil {
		return nil,
			errors.Wrapf(
				err,
				"error retrieving project %q from store",
				event.ProjectID,
			)
	}
	// Wait for the target Worker or Job to move past PENDING and STARTING phases
	if err = retries.ManageRetries(
		ctx,
		"waiting for worker or job to move past PENDING and STARTING phases",
		50, // A generous number of retries. Let the client hang up if they want.
		20*time.Second,
		func() (bool, error) {
			if event, err = l.eventsStore.Get(ctx, event.ID); err != nil {
				return false, errors.Wrapf(
					err,
					"error retrieving event %q from store",
					event.ID,
				)
			}
			if selector.Job == "" { // Worker...
				// If the Event's Worker's phase is PENDING or STARTING, then retry.
				// Otherwise, exit the retry loop.
				return event.Worker.Status.Phase == WorkerPhasePending ||
					event.Worker.Status.Phase == WorkerPhaseStarting, nil
			}
			// Else Job...
			// If the Job's phase is PENDING or STARTING, then retry.
			// Otherwise, exit the retry loop.
			// NOTE(review): job.Status is dereferenced here without the nil check
			// applied earlier — confirm Status is always set on a re-fetched job.
			job, _ := event.Worker.Job(selector.Job)
			return job.Status.Phase == JobPhasePending ||
				job.Status.Phase == JobPhaseStarting, nil
		},
	); err != nil {
		return nil, err
	}
	logCh, err := l.warmLogsStore.StreamLogs(ctx, project, event, selector, opts)
	if err != nil {
		// If the issue is simply that the warmLogsStore couldn't find the logs
		// (realistically, this is because the underlying pod no longer exists),
		// then fall back to the coolLogsStore.
		if _, ok := errors.Cause(err).(*meta.ErrNotFound); ok {
			logCh, err =
				l.coolLogsStore.StreamLogs(ctx, project, event, selector, opts)
		}
	}
	return logCh, err
}
// LogsStore is an interface for components that implement Log persistence
// concerns.
type LogsStore interface {
	// StreamLogs returns a channel over which logs for an Event's Worker, or
	// using the LogsSelector parameter, a Job spawned by that Worker (or
	// specific container thereof), are streamed. If the specified Event, Job,
	// or Container thereof does not exist, implementations MUST return a
	// *meta.ErrNotFound error.
	StreamLogs(
		ctx context.Context,
		project Project,
		event Event,
		selector LogsSelector,
		opts LogStreamOptions,
	) (<-chan LogEntry, error)
}
// CoolLogsStore is an interface for components that implement "cool" Log
// persistence concerns. These log store types are intended to act as
// long-term storehouses for worker and job logs after they have reached a
// terminal state. Thus, log deletion methods are prudent for managing
// the size of the underlying store.
type CoolLogsStore interface {
	LogsStore
	// DeleteEventLogs deletes all logs associated with the provided event.
	DeleteEventLogs(ctx context.Context, id string) error
	// DeleteProjectLogs deletes all logs associated with the provided project.
	DeleteProjectLogs(ctx context.Context, id string) error
}
|
// Print the letters A-Z
package main
import "fmt"
// main prints the 26 uppercase Latin letters, one per line, together with
// their zero-based index (e.g. "0===A").
func main() {
	var letters [26]byte
	for i := range letters {
		letters[i] = byte('A' + i)
	}
	for i, c := range letters {
		fmt.Printf("%d===%c\n", i, c)
	}
}
|
package proxy
import (
"encoding/json"
"dudu/commons/log"
"dudu/models"
"dudu/modules/collector"
_ "dudu/modules/collector/collect"
)
// Parser decodes batches of collected metric values, reporting per-entry
// parse failures through its logger.
type Parser struct {
	logger log.Logger
}
// NewParser constructs a Parser that reports parse warnings through logger.
func NewParser(logger log.Logger) *Parser {
	p := new(Parser)
	p.logger = logger
	return p
}
// Parser unmarshals metric.Value into a batch of CollectResult entries and
// decodes each clean entry's raw value. Entries that already carry an error
// are passed through untouched; entries whose raw value fails to decode are
// logged and dropped from the returned slice.
func (p *Parser) Parser(metric *models.MetricValue) (successParseCollectResults []*models.CollectResult, err error) {
	decoded := make([]*models.CollectResult, 0, 100)
	if err = json.Unmarshal(metric.Value, &decoded); err != nil {
		return
	}
	successParseCollectResults = make([]*models.CollectResult, 0, len(decoded))
	for _, entry := range decoded {
		if entry.Err == "" {
			if parseErr := p.parser(entry); parseErr != nil {
				p.logger.Warnf("parse %s err:%s", entry.Metric, parseErr.Error())
				continue
			}
		}
		successParseCollectResults = append(successParseCollectResults, entry)
	}
	return
}
// parser decodes result.Value with the decoder registered for result.Metric
// and stores the decoded value on result.RelValue.
func (p *Parser) parser(result *models.CollectResult) error {
	relValue, err := collector.UnmarshalResult(result.Metric, result.Value)
	result.RelValue = relValue
	return err
}
|
package main
/*
// Using a decimal point
var num1 float32 = 0.1
var num2 float32 = .35
var num3 float32 = 132.73287
// Using exponent (scientific) notation
var num4 float32 = 1e7
var num5 float64 = .12345E+2
var num6 float64 = 5.32521e-10
*/
import "fmt"
// main demonstrates binary floating-point error accumulation: 0.1 has no
// exact binary representation, so subtracting it ten times from 10.0 does
// not yield exactly 9.0 and the equality check below takes the "different"
// branch.
func main() {
	var a float64 = 10.0
	for i := 0; i < 10; i++ {
		a = a - 0.1
	}
	fmt.Println(a)
	// Exact == comparison on floats is the point of this demo; real code
	// should compare within a tolerance instead.
	if a == 9.0 {
		fmt.Println("결과값과 예상값이 같다.")
	} else {
		fmt.Println("결과값과 예상값이 다르다.")
	}
}
|
package main
import (
"fmt"
"net/http"
"github.com/PacktPublishing/Go-Programming-Cookbook-Second-Edition/chapter8/validation"
)
// main wires the validation handler at "/" and serves HTTP on :3333.
//
// Fix: the original called panic(err) unconditionally after ListenAndServe;
// although ListenAndServe always returns a non-nil error in practice, the
// panic is now explicitly guarded by an error check.
func main() {
	c := validation.New()
	http.HandleFunc("/", c.Process)
	fmt.Println("Listening on port :3333")
	if err := http.ListenAndServe(":3333", nil); err != nil {
		panic(err)
	}
}
|
package main
import (
"flag"
"io/ioutil"
"log"
"github.com/debarshibasak/kubestrike/v1alpha1/config"
)
// main parses CLI flags and either runs or validates a kubestrike
// configuration file.
//
// Fixes over the original: the trailing "else if" condition was always true
// once reached and is now a plain default case; the copy-pasted flag
// descriptions ("install operation"/"uninstall operation" on unrelated
// flags) are corrected; and the "strictInstalltion" local is renamed.
func main() {
	configuration := flag.String("config", "", "location of configuration")
	run := flag.Bool("run", false, "run the operation described by the configuration")
	validate := flag.Bool("validate", false, "validate the configuration without running it")
	strictInstallation := flag.Bool("use-strict", false, "enable strict installation mode")
	verbose := flag.Bool("verbose", false, "enable verbose output")
	flag.Parse()
	log.Println("[kubestrike] started")
	switch {
	case *run && *configuration != "":
		// Run mode: load, parse, validate, then execute the operation.
		configRaw, err := ioutil.ReadFile(*configuration)
		if err != nil {
			log.Fatal(err)
		}
		clusterOperation, err := config.NewParser(*strictInstallation).Parse(configRaw)
		if err != nil {
			log.Fatal(err)
		}
		if err := clusterOperation.Validate(); err != nil {
			log.Fatal(err)
		}
		if err := clusterOperation.Run(*verbose); err != nil {
			log.Fatal(err)
		}
	case *validate && *configuration != "":
		// Validate-only mode: load, parse, and validate without running.
		configRaw, err := ioutil.ReadFile(*configuration)
		if err != nil {
			log.Fatal(err)
		}
		clusterOperation, err := config.NewParser(*strictInstallation).Parse(configRaw)
		if err != nil {
			log.Fatal(err)
		}
		if err := clusterOperation.Validate(); err != nil {
			log.Fatal(err)
		}
		log.Println("[kubestrike] valid configuration")
	default:
		log.Fatal("[kubestrike] no configuration or execution instruction set")
	}
}
|
/* Copyright (c) 2016 Jason Ish
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package postgres
import (
"bufio"
"fmt"
"github.com/jasonish/evebox/log"
"github.com/pkg/errors"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"syscall"
)
// GetVersion runs "postgres --version" and parses the reported version
// string into a PostgresVersion.
//
// Fix: the original piped stdout manually and discarded the error from
// command.Wait(), so a failing postgres binary could go unnoticed;
// exec.Command.Output both captures stdout and surfaces that error.
func GetVersion() (*PostgresVersion, error) {
	output, err := exec.Command("postgres", "--version").Output()
	if err != nil {
		return nil, err
	}
	return ParseVersion(string(output))
}
// PostgresManager supervises a locally spawned PostgreSQL server process
// rooted at a version-specific data directory.
type PostgresManager struct {
	// directory is the version-qualified data directory (see NewPostgresManager).
	directory string
	// command is the running "postgres" process, nil until Start.
	command *exec.Cmd
	// running flips to true once pipeReader sees the server's ready banner.
	running bool
	// onReady receives a single value when the server reports readiness.
	onReady chan bool
}
// NewPostgresManager builds a manager whose data directory is
// <directory>/pgdata<major.minor>, derived from the installed postgres
// version.
//
// Fix: the local variable was named "path", shadowing the imported "path"
// package on subsequent lines; it is renamed to dataDir.
func NewPostgresManager(directory string) (*PostgresManager, error) {
	version, err := GetVersion()
	if err != nil {
		return nil, err
	}
	absDataDirectory, err := filepath.Abs(directory)
	if err != nil {
		return nil, err
	}
	dataDir := path.Join(absDataDirectory,
		fmt.Sprintf("pgdata%s", version.MajorMinor))
	return &PostgresManager{
		directory: dataDir,
	}, nil
}
// pipeReader consumes a child-process output pipe line by line, echoing each
// line to the log under logPrefix. The first line containing "database
// system is ready" flips p.running and signals p.onReady so Start can
// unblock.
//
// Fixes: callers pass a nil pipe when StdoutPipe/StderrPipe fails, which
// previously caused a nil-interface panic on the first read — nil is now a
// no-op. strings.Contains replaces the strings.Index(...) > -1 comparison.
func (p *PostgresManager) pipeReader(pipe io.ReadCloser, logPrefix string) error {
	if pipe == nil {
		return nil
	}
	reader := bufio.NewReader(pipe)
	for {
		line, err := reader.ReadBytes('\n')
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		if !p.running && strings.Contains(string(line), "database system is ready") {
			p.running = true
			p.onReady <- true
		}
		log.Info("%s: %s", logPrefix, strings.TrimSpace(string(line)))
	}
	return nil
}
// IsInitialized reports whether the data directory already exists on disk,
// which is how a previously initialized cluster is detected.
func (p *PostgresManager) IsInitialized() bool {
	_, err := os.Stat(p.directory)
	return err == nil
}
// Init creates a fresh PostgreSQL data directory with initdb, briefly starts
// the server, and creates the application database before stopping it again.
//
// Fix: a NewPgDatabase failure previously executed "return nil", silently
// swallowing the connection error; it is now wrapped and returned.
func (p *PostgresManager) Init() error {
	command := exec.Command("initdb",
		"-D", p.directory,
		fmt.Sprintf("--username=%s", PGUSER),
		"--encoding=UTF8")
	stdout, err := command.StdoutPipe()
	if err != nil {
		log.Error("Failed to open initdb stdout, will not be logged.")
		stdout = nil
	}
	stderr, err := command.StderrPipe()
	if err != nil {
		log.Error("Failed to open initdb stderr, will not be logged.")
		stderr = nil
	}
	err = command.Start()
	if err != nil {
		log.Error("Failed to start initdb: %v", err)
		return err
	}
	go p.pipeReader(stdout, "initdb stdout")
	go p.pipeReader(stderr, "initdb stderr")
	if err := command.Wait(); err != nil {
		return err
	}
	// Start the server temporarily so the application database can be created.
	if err := p.Start(); err != nil {
		return errors.Wrap(err, "failed to start")
	}
	defer p.StopFast()
	pgConfig := PgConfig{
		User:     PGUSER,
		Password: PGPASS,
		Database: "postgres",
		Host:     p.directory,
	}
	db, err := NewPgDatabase(pgConfig)
	if err != nil {
		return errors.Wrap(err, "failed to connect to database")
	}
	defer db.Close()
	_, err = db.Exec(fmt.Sprintf("create database %s", PGDATABASE))
	if err != nil {
		return errors.Wrap(err, "failed to execute create database command")
	}
	return nil
}
// Start launches the postgres server process against the data directory and
// blocks until pipeReader observes the "database system is ready" banner.
//
// Fix: the local variable holding the absolute data directory was named
// "path", shadowing the imported "path" package; renamed to dataDir.
func (p *PostgresManager) Start() error {
	if p.running {
		return errors.New("already running")
	}
	// Get the absolute path of the data directory.
	dataDir, err := filepath.Abs(p.directory)
	if err != nil {
		return err
	}
	log.Info("Using postgres data directory %s", dataDir)
	// -k dataDir also places the Unix socket inside the data directory.
	p.command = exec.Command("postgres",
		"-D", dataDir,
		"-c", "log_destination=stderr",
		"-c", "logging_collector=off",
		"-c", "listen_addresses=127.0.0.1",
		"-c", "constraint_exclusion=on",
		"-k", dataDir)
	stdout, err := p.command.StdoutPipe()
	if err != nil {
		log.Error("Failed to open postgres stdout, will not be logged.")
		stdout = nil
	}
	stderr, err := p.command.StderrPipe()
	if err != nil {
		log.Error("Failed to open postgres stderr, will not be logged.")
		stderr = nil
	}
	err = p.command.Start()
	if err != nil {
		log.Error("Failed to start postgres: %v", err)
		return err
	}
	p.onReady = make(chan bool)
	go p.pipeReader(stdout, "postgres stdout")
	go p.pipeReader(stderr, "postgres stderr")
	log.Info("Waiting for PostgreSQL to be running...")
	<-p.onReady
	return nil
}
// stop signals the running postgres process and waits for it to exit.
// Signal/Wait errors are deliberately ignored (best-effort shutdown); a nil
// command (never started) is a no-op.
func (p *PostgresManager) stop(sig syscall.Signal) {
	if p.command == nil {
		return
	}
	p.command.Process.Signal(sig)
	p.command.Wait()
	p.running = false
}

// StopSmart requests a "smart" shutdown (SIGTERM): waits for sessions to end.
func (p *PostgresManager) StopSmart() {
	p.stop(syscall.SIGTERM)
}

// StopFast requests a "fast" shutdown (SIGINT): aborts active transactions.
func (p *PostgresManager) StopFast() {
	p.stop(syscall.SIGINT)
}

// StopImmediate requests an "immediate" shutdown (SIGQUIT): no clean stop.
func (p *PostgresManager) StopImmediate() {
	p.stop(syscall.SIGQUIT)
}
|
package sese
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01400103 is the XML document wrapper for a sese.014.001.03
// (PortfolioTransferCancellationRequestV03) message.
type Document01400103 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:sese.014.001.03 Document"`
	// Message is the payload, serialized under the PrtflTrfCxlReq element.
	Message *PortfolioTransferCancellationRequestV03 `xml:"PrtflTrfCxlReq"`
}

// AddMessage allocates the message payload and returns it for population.
func (d *Document01400103) AddMessage() *PortfolioTransferCancellationRequestV03 {
	d.Message = new(PortfolioTransferCancellationRequestV03)
	return d.Message
}
// Scope
// An instructing party, eg, a (new) plan manager (Transferee), sends the PortfolioTransferCancellationRequest message to the executing party, eg, a (old) plan manager (Transferor), to request the cancellation of a previously sent PortfolioTransferInstruction.
// Usage
// The PortfolioTransferCancellationRequest message is used to request the cancellation of an entire PortfolioTransferInstruction message, ie, all the product transfers that it contained. The cancellation request can be specified either by:
// - quoting the transfer references of all the product transfers listed in the PortfolioTransferInstruction message, or,
// - quoting the details of all the product transfers (this includes TransferReference) listed in PortfolioTransferInstruction message.
// The message identification of the PortfolioTransferInstruction may also be quoted in PreviousReference. It is also possible to request the cancellation of PortfolioTransferInstruction by just quoting its message identification in PreviousReference.
type PortfolioTransferCancellationRequestV03 struct {
	// Identifies the message.
	MessageReference *iso20022.MessageIdentification1 `xml:"MsgRef"`
	// Collective reference identifying a set of messages.
	PoolReference *iso20022.AdditionalReference3 `xml:"PoolRef,omitempty"`
	// Reference to a linked message that was previously sent.
	PreviousReference *iso20022.AdditionalReference3 `xml:"PrvsRef,omitempty"`
	// Reference to a linked message that was previously received.
	RelatedReference *iso20022.AdditionalReference3 `xml:"RltdRef,omitempty"`
	// Information related to the transfer instruction to be cancelled.
	// Per the usage notes above, a request carries either these details or
	// CancellationByReference (presumably not both) — see the ISO 20022 spec.
	CancellationByTransferInstructionDetails *iso20022.PEPISATransfer11 `xml:"CxlByTrfInstrDtls,omitempty"`
	// Reference of the transfer instruction to be cancelled.
	CancellationByReference *iso20022.TransferReference3 `xml:"CxlByRef,omitempty"`
}
// AddMessageReference allocates and returns the message identification.
func (p *PortfolioTransferCancellationRequestV03) AddMessageReference() *iso20022.MessageIdentification1 {
	p.MessageReference = new(iso20022.MessageIdentification1)
	return p.MessageReference
}

// AddPoolReference allocates and returns the pool reference.
func (p *PortfolioTransferCancellationRequestV03) AddPoolReference() *iso20022.AdditionalReference3 {
	p.PoolReference = new(iso20022.AdditionalReference3)
	return p.PoolReference
}

// AddPreviousReference allocates and returns the previous-message reference.
func (p *PortfolioTransferCancellationRequestV03) AddPreviousReference() *iso20022.AdditionalReference3 {
	p.PreviousReference = new(iso20022.AdditionalReference3)
	return p.PreviousReference
}

// AddRelatedReference allocates and returns the related-message reference.
func (p *PortfolioTransferCancellationRequestV03) AddRelatedReference() *iso20022.AdditionalReference3 {
	p.RelatedReference = new(iso20022.AdditionalReference3)
	return p.RelatedReference
}

// AddCancellationByTransferInstructionDetails allocates and returns the
// by-details cancellation block.
func (p *PortfolioTransferCancellationRequestV03) AddCancellationByTransferInstructionDetails() *iso20022.PEPISATransfer11 {
	p.CancellationByTransferInstructionDetails = new(iso20022.PEPISATransfer11)
	return p.CancellationByTransferInstructionDetails
}

// AddCancellationByReference allocates and returns the by-reference
// cancellation block.
func (p *PortfolioTransferCancellationRequestV03) AddCancellationByReference() *iso20022.TransferReference3 {
	p.CancellationByReference = new(iso20022.TransferReference3)
	return p.CancellationByReference
}
|
package main
import (
"encoding/binary"
"fmt"
"log"
"os"
"room"
"time"
"github.com/funny/link"
)
// main is a toy chat client: it connects to the chat server on
// 127.0.0.1:55000, registers the user by name, forwards typed messages, and
// concurrently sends a debug heartbeat every second.
//
// Fixes over the original: time.Tick was called inside the heartbeat loop,
// allocating a fresh (never-stopped) ticker on every iteration; the
// heartbeat goroutine also reused the outer "stream" variable, racing with
// the main send loop — it now uses its own local. The goto-based retry
// loops are plain for loops, and the unreachable code after the infinite
// send loop has been removed.
func main() {
	log.SetFlags(log.Lshortfile)
	protocol := link.PacketN(2, binary.LittleEndian)
	client, err := link.Dial("tcp", "127.0.0.1:55000", protocol)
	if err != nil {
		log.Println(err)
		os.Exit(1)
	}
	go client.ReadLoop(func(message []byte) {
		println("message:", string(message))
	})
	user := room.NewUser()
	fmt.Println("Your Name:")
	// Retry until a name is read (or one is already present despite a scan error).
	for {
		if _, err := fmt.Scanf("%s\n", &user.Name); err == nil || user.Name != "" {
			break
		}
		fmt.Println("Please enter your name:")
	}
	user.CmdContent = "reg"
	stream, _ := room.Encode(room.Box{user, "user"})
	client.Send(link.Binary(stream))
	// Heartbeat: one ticker, stopped with the goroutine; local buffer avoids
	// racing with the main loop below.
	go func() {
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()
		for range ticker.C {
			beat, _ := room.Encode(room.Box{"Sending Ticker~~~", "debug"})
			client.Send(link.Binary(beat))
		}
	}()
	for {
		if _, err := fmt.Scanf("%s\n", &user.Msg.Content); err != nil {
			continue
		}
		user.CmdContent = "msg"
		stream, _ = room.Encode(room.Box{user, "user"})
		client.Send(link.Binary(stream))
	}
}
|
package pkg2
// user holds basic identity data. The type itself is unexported, but its
// exported fields are promoted through embedding (see Admin).
type user struct {
	Name  string
	Email string
}

// Admin extends user with an access-rights value.
type Admin struct {
	user // embedded field ~ unexported type
	Rights int
}
|
package main
import (
"fmt"
"math"
)
// 515. 在每个树行中找最大值
// 您需要在二叉树的每一行中找到最大的值。
// https://leetcode-cn.com/problems/find-largest-value-in-each-tree-row/#/description
// main builds the sample tree
//
//	    4
//	   / \
//	  2   7
//	 / \
//	1   3
//
// and prints the per-level maxima produced by both implementations.
func main() {
	left := &TreeNode{Val: 2}
	left.Left = &TreeNode{Val: 1}
	left.Right = &TreeNode{Val: 3}
	tree := &TreeNode{Val: 4, Left: left, Right: &TreeNode{Val: 7}}
	fmt.Println(largestValues(tree))
	fmt.Println(largestValues2(tree)) //best
}
// TreeNode is a binary tree node with integer payload.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}
// 法一:BFS,相当于层序遍历
func largestValues(root *TreeNode) (result []int) {
if root == nil {
return nil
}
queue := []*TreeNode{root}
for len(queue) > 0 {
size := len(queue)
max := math.MinInt64
for i := 0; i < size; i++ {
max = getMax(max, queue[i].Val)
if queue[i].Left != nil {
queue = append(queue, queue[i].Left)
}
if queue[i].Right != nil {
queue = append(queue, queue[i].Right)
}
}
queue = queue[size:]
result = append(result, max)
}
return result
}
// getMax returns the larger of a and b.
func getMax(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// 法二:DFS
// 不需要额外的空间,空间复杂度优于法一
// largestValues2 (approach 2): DFS carrying the current depth, writing
// per-level maxima into the shared result slice. Auxiliary space is just the
// recursion stack, so it beats the BFS variant on space.
func largestValues2(root *TreeNode) (result []int) {
	if root != nil {
		largestValuesDFSHelper(root, 0, &result)
	}
	return result
}
// largestValuesDFSHelper records root.Val into (*result)[level], extending
// the slice with a sentinel minimum on the first visit to each depth, then
// recurses into the children.
func largestValuesDFSHelper(root *TreeNode, level int, result *[]int) {
	if level == len(*result) {
		// First node seen at this depth: make room with a sentinel minimum.
		*result = append(*result, math.MinInt64)
	}
	(*result)[level] = getMax((*result)[level], root.Val)
	for _, child := range []*TreeNode{root.Left, root.Right} {
		if child != nil {
			largestValuesDFSHelper(child, level+1, result)
		}
	}
}
// 法三:也可以使用分治,略
|
package profile
import (
"net/http"
"net/http/httptest"
"testing"
"time"
)
// srv is a shared test server that sleeps briefly before answering 200 so
// connection timings measured by the tests are non-trivial.
var srv = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
	time.Sleep(time.Millisecond * 5)
	w.WriteHeader(http.StatusOK)
}))
// TestObserve verifies that ReportFromResponse captures connection timings on
// a fresh connection and, on a second request over the same client, flags the
// connection as reused with zero connect times.
//
// Fixes: "ConnecttStart" typo in the final error message; the non-idiomatic
// "== false" comparison.
func TestObserve(t *testing.T) {
	c := &http.Client{}
	report := ReportFromResponse(sendRequest(c))
	if report == nil {
		t.Fatal("Report can not be nil")
	}
	if report.ConnectStart.IsZero() || report.ConnectDone.IsZero() {
		t.Errorf("ConnectionStart [%s] or ConnectionDone [%s] is zero",
			report.ConnectStart, report.ConnectDone)
	}
	t.Logf("ConnectTime connect - %s", report.ConnectionTime())
	// Reused connection
	report = ReportFromResponse(sendRequest(c))
	if !report.Reused {
		t.Error("Connection should be reused")
	}
	if !report.ConnectStart.IsZero() && !report.ConnectDone.IsZero() {
		t.Error("ConnectStart and ConnectDone should be zero")
	}
}
// BenchmarkObserve10000 measures the pure overhead of instrumenting a
// request with Observe; the request is never actually sent.
func BenchmarkObserve10000(b *testing.B) {
	for i := 0; i < b.N; i++ {
		r, _ := http.NewRequest(http.MethodGet, "", nil)
		Observe(r)
	}
}
// sendRequest issues an instrumented GET against the shared test server.
// Errors are deliberately ignored (test helper), so callers must tolerate a
// nil response if the request fails.
func sendRequest(c *http.Client) *http.Response {
	req, _ := http.NewRequest(http.MethodGet, srv.URL, nil)
	res, _ := c.Do(Observe(req))
	return res
}
|
package exchanges
import (
"exchanges/bitfinex"
"exchanges/bx"
"fmt"
)
// GetLastestPrice from an exchange's API
func GetLastestPrice(exchange string, pair string) string {
var price float64
switch exchange {
case "bx":
price = bx.GetLastestPrice(pair)
case "bitfinex":
price = bitfinex.GetLastestPrice(pair)
}
if price > 0 {
return fmt.Sprintf("%s %s %f", exchange, pair, price)
}
return fmt.Sprintf("Exchange %s isn't supported", exchange)
}
|
package main
import (
"fmt"
"io/ioutil"
"math"
"os"
"strconv"
"strings"
)
// main is a placeholder entry point; the functions below are stand-alone
// Go syntax examples and are not called from here.
func main() {
	fmt.Println("Hello, World!")
}

// variablesInt: explicitly typed int variables and addition.
func variablesInt() {
	var x int = 5
	var y int = 10
	var sum int = x + y
	fmt.Println(sum)
	// 15
}

// mixTypeVariables: short variable declarations with inferred types.
func mixTypeVariables() {
	x := 2
	y := 4
	sum := x + y
	fmt.Println(sum)
	// 6
}

// condition: if / else if / else chain.
func condition() {
	x := 5
	if x > 5 {
		fmt.Println("more than 5")
	} else if x < 2 {
		fmt.Println("less than 2")
	} else {
		fmt.Println("default")
	}
	// default
}

// initializeValues: zero-valued fixed-size array with one element set.
func initializeValues() {
	var a [5]int
	a[2] = 7
	fmt.Println(a)
	// [0 0 7 0 0]
}

// shortInitialization: array literal.
func shortInitialization() {
	a := [5]int{5, 4, 3, 2, 1}
	fmt.Println(a)
	// [5 4 3 2 1]
}

// appendToSlices: growing a slice with append.
func appendToSlices() {
	a := []int{1, 2, 3, 4}
	a = append(a, 5)
	fmt.Println(a)
	// [1 2 3 4 5]
}

// vertices: map insertion and deletion.
func vertices() {
	vertices := make(map[string]int)
	vertices["dodecagon"] = 12
	vertices["triangle"] = 3
	// NOTE(review): a square has 4 vertices, not 5 — harmless here since the
	// entry is deleted immediately below, but looks like a typo.
	vertices["square"] = 5
	delete(vertices, "square")
	fmt.Println(vertices)
	// map[dodecagon:12 triangle:3]
}
// iterations: classic three-clause for loop.
func iterations() {
	for i := 1; i < 3; i++ {
		fmt.Println(i)
	}
}

// otherIteration: condition-only for loop (while-style).
func otherIteration() {
	i := 0
	for i < 5 {
		fmt.Println(i)
		i++
	}
}

// iterationByRange: range over a slice yields index and value.
func iterationByRange() {
	arr := []string{"a", "b", "c"}
	for index, value := range arr {
		fmt.Println("index", index, "value", value)
	}
}

// iterationByRangeInMap: range over a map yields key and value.
// Note that Go map iteration order is unspecified.
func iterationByRangeInMap() {
	m := make(map[string]string)
	m["a"] = "a"
	m["b"] = "b"
	m["c"] = "c"
	for key, value := range m {
		fmt.Println("key", key, "value", value)
	}
}
// sum prints and returns the sum of x and y.
func sum(x int, y int) int {
	total := x + y
	fmt.Println(total)
	return total
}
// sqrt prints and returns the square root of x.
func sqrt(x float64) float64 {
	root := math.Sqrt(x)
	fmt.Println(root)
	return root
}
// person: a simple struct with unexported fields.
type person struct {
	name string
	age  int
}

// makePerson: struct literal with named fields.
func makePerson() {
	p := person{name: "jack", age: 89}
	fmt.Println(p)
	// {jack 89}
}

// formatPrint: %v prints the value, %T prints its type.
func formatPrint() {
	i := 54
	fmt.Printf("%v, %T\n", i, i)
	// 54, int
}

// multipleDeclareVar: grouped var declaration block.
func multipleDeclareVar() {
	var (
		name string = "Elisabeth"
		age  int    = 77
	)
	fmt.Printf("Name: %v Age: %v\n", name, age)
	// Name: Elisabeth Age: 77
}

// toStringConversion: strconv.Itoa converts int to its decimal string.
func toStringConversion() {
	// to avoid * as a conversion from int to str
	var i int = 42
	var j string = strconv.Itoa(i)
	fmt.Printf("%v\n", j)
	// 42
}

// constant: typed constant declaration.
func constant() {
	const myConst int = 42
	fmt.Printf("%v, %T\n", myConst, myConst)
	// 42, int
}

// iotaExample: iota auto-increments through a const block.
func iotaExample() {
	const (
		a = iota
		b
		c
		d
	)
	fmt.Println(a, b, c, d)
	// 0 1 2 3
}

// matrix: a 3x3 identity matrix built from row literals.
func matrix() {
	var identityMatrix [3][3]int
	identityMatrix[0] = [3]int{1, 0, 0}
	identityMatrix[1] = [3]int{0, 1, 0}
	identityMatrix[2] = [3]int{0, 0, 1}
	fmt.Println(identityMatrix)
	// [[1 0 0] [0 1 0] [0 0 1]]
}

// typeCreation: a function-local named slice type.
func typeCreation() {
	type deck []string
	cards := deck{"Ace of Spades", "Six of Spades"}
	fmt.Println(cards)
}

// type declaration (package-level, used by the methods below)
type deck []string

// print writes each card with its index; a method on the deck type.
func (d deck) print() {
	for i, card := range d {
		fmt.Println(i, card)
	}
}

// printMethodFromTypeFunc: calling a method declared on a named type.
func printMethodFromTypeFunc() {
	cards := deck{"Ace of Spades", "Six of Spades"}
	cards.print()
}

// slices: slicing an array produces a view sharing its backing array.
func slices() {
	cards := [...]string{"Ace of Spades", "Six of Spades", "Two of Spades"}
	firstHand := cards[:3]
	fmt.Println(firstHand)
	// [Ace of Spades Six of Spades Two of Spades]
}

// typeConversion: converting a string to its UTF-8 byte slice.
func typeConversion() {
	// convert string to byte slice
	fmt.Println([]byte("Hello World"))
	// [72 101 108 108 111 32 87 111 114 108 100]
}
// saveToFile writes content to "test.txt" in the working directory with
// permissions 0666.
//
// Fix: the WriteFile error was silently discarded; it is now reported, in
// the same style readFromFile uses.
func saveToFile(content string) {
	if err := ioutil.WriteFile("test.txt", []byte(content), 0666); err != nil {
		fmt.Println("Error:", err)
	}
}
// readFromFile prints the contents of filename, or prints the error and
// terminates the process with status 1 if the file cannot be read.
func readFromFile(filename string) {
	bs, err := ioutil.ReadFile(filename)
	if err != nil {
		fmt.Println("Error:", err)
		// this will close the program
		os.Exit(1)
	}
	fmt.Println("Success:", string(bs))
}
// splitByCharacter demonstrates strings.Split on a comma-separated string.
func splitByCharacter() {
	const content = "this,content,is,comma,separated,values"
	parts := strings.Split(content, ",")
	fmt.Println(parts)
	// [this content is comma separated values]
}
|
package main
import (
"fmt"
"io"
"log"
"net/http"
"os"
)
// main registers the upload handler and serves HTTP on :8083; a serve
// failure is logged before exiting.
func main() {
	http.HandleFunc("/upload", upload)
	err := http.ListenAndServe(":8083", nil)
	if err != nil {
		log.Printf("[uploadsvr]failed to ListenAndServe, error: %v", err)
	}
}
// upload serves the upload form on GET and, on POST, stores the posted file
// under /home/pi/ and re-renders the form with a status message.
//
// Fixes: the ParseMultipartForm error was ignored, and a FormFile error was
// only printed to stdout, leaving the client with an empty 200 response;
// both paths now render an error page like the other failure paths.
func upload(w http.ResponseWriter, r *http.Request) {
	if r.Method == "GET" {
		page := fmt.Sprintf(tpl, "")
		w.Write([]byte(page))
		return
	}
	if err := r.ParseMultipartForm(32 << 20); err != nil {
		page := fmt.Sprintf(tpl, "failed to parse upload request")
		w.Write([]byte(page))
		log.Printf("[uploadsvr]failed to parse multipart form, error: %v", err)
		return
	}
	file, handler, err := r.FormFile("uploadfile")
	if err != nil {
		page := fmt.Sprintf(tpl, "failed to read uploaded file")
		w.Write([]byte(page))
		log.Printf("[uploadsvr]failed to read form file, error: %v", err)
		return
	}
	defer file.Close()
	f, err := os.OpenFile("/home/pi/"+handler.Filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
	if err != nil {
		page := fmt.Sprintf(tpl, "upload "+handler.Filename+" failed")
		w.Write([]byte(page))
		log.Printf("[uploadsvr]failed to create file, error: %v", err)
		return
	}
	defer f.Close()
	if _, err := io.Copy(f, file); err != nil {
		page := fmt.Sprintf(tpl, "upload "+handler.Filename+" failed")
		w.Write([]byte(page))
		log.Printf("[uploadsvr]failed to copy file, error: %v", err)
		return
	}
	page := fmt.Sprintf(tpl, "uploaded "+handler.Filename+" successfully")
	w.Write([]byte(page))
	log.Printf("[uploadsvr]upload %s in success", handler.Filename)
}
// tpl is the HTML page served for both the GET form and POST results; the
// single %v placeholder near the bottom carries the status message.
var tpl = `
<html>
<head>
<title>upload file to pi</title>
</head>
<body>
<form enctype="multipart/form-data" action="/upload" method="post">
<input type="file" name="uploadfile">
<input type="hidden" name="token" value="{...{.}...}">
<br><br>
<input type="submit" value="upload" style="color:white;background-color:steelblue;font-size:15px;">
</form>
</body>
<p style="color:red;font-size:15px;">
<br>
%v
</p>
</html>
`
|
package parsevalidate
import (
"errors"
"math/big"
"strconv"
"strings"
"time"
"github.com/cpusoft/goutil/asn1util"
"github.com/cpusoft/goutil/belogs"
"github.com/cpusoft/goutil/conf"
"github.com/cpusoft/goutil/convert"
"github.com/cpusoft/goutil/fileutil"
"github.com/cpusoft/goutil/hashutil"
"github.com/cpusoft/goutil/jsonutil"
"github.com/cpusoft/goutil/opensslutil"
"github.com/cpusoft/goutil/osutil"
"github.com/cpusoft/goutil/regexputil"
model "rpstir2-model"
openssl "rpstir2-parsevalidate-openssl"
packet "rpstir2-parsevalidate-packet"
)
//Try to store the error in statemode instead of returning err
// ParseValidateMft parses and then validates an RPKI manifest file.
// Per the note above, failures are recorded in stateModel's errors/warnings
// rather than surfaced through the error return, which is deliberately nil
// on every path here.
func ParseValidateMft(certFile string) (mftModel model.MftModel, stateModel model.StateModel, err error) {
	stateModel = model.NewStateModel()
	err = parseMftModel(certFile, &mftModel, &stateModel)
	if err != nil {
		belogs.Error("ParseValidateMft():parseMftModel err:", certFile, err)
		// Error is logged and captured in stateModel; by design nil is returned.
		return mftModel, stateModel, nil
	}
	belogs.Debug("ParseValidateMft(): mftModel:", jsonutil.MarshalJson(mftModel))
	err = validateMftModel(&mftModel, &stateModel)
	if err != nil {
		belogs.Error("ParseValidateMft():validateMftModel err:", certFile, err)
		return mftModel, stateModel, nil
	}
	if len(stateModel.Errors) > 0 || len(stateModel.Warnings) > 0 {
		belogs.Info("ParseValidateMft():stateModel have errors or warnings", certFile, "  stateModel:", jsonutil.MarshalJson(stateModel))
	}
	belogs.Debug("ParseValidateMft():  mftModel.FilePath, mftModel.FileName, mftModel.Ski, mftModel.Aki:",
		mftModel.FilePath, mftModel.FileName, mftModel.Ski, mftModel.Aki)
	return mftModel, stateModel, nil
}
// some parse may return err, will stop
//
// parseMftModel parses certFile (a manifest) into mftModel.
// Primary path is openssl asn1parse output; when that fails it falls back
// to raw ASN.1 packet decoding (parseMftModelByPacket). Recoverable
// problems are appended to stateModel; a non-nil error is returned only
// when parsing cannot continue at all (unreadable/empty file, failure to
// extract or re-parse the embedded EE certificate).
func parseMftModel(certFile string, mftModel *model.MftModel, stateModel *model.StateModel) error {
	belogs.Debug("parseMftModel(): certFile: ", certFile)

	// Reject unreadable or zero-length files before invoking openssl.
	fileLength, err := fileutil.GetFileLength(certFile)
	if err != nil {
		belogs.Error("parseMftModel(): GetFileLength: err: ", err, ": "+certFile)
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Fail to open file",
			Detail: err.Error()}
		stateModel.AddError(&stateMsg)
		return err
	} else if fileLength == 0 {
		belogs.Error("parseMftModel(): GetFileLength, fileLenght is emtpy: " + certFile)
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "File is empty",
			Detail: ""}
		stateModel.AddError(&stateMsg)
		return errors.New("File " + certFile + " is empty")
	}
	mftModel.FilePath, mftModel.FileName = osutil.GetFilePathAndFileName(certFile)

	//https://blog.csdn.net/Zhymax/article/details/7683925
	//openssl asn1parse -in -ard.mft -inform DER
	results, err := opensslutil.GetResultsByOpensslAns1(certFile)
	if err != nil {
		belogs.Error("parseMftModel(): GetResultsByOpensslAns1: err: ", err, ": "+certFile)
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Fail to parse file by openssl",
			Detail: err.Error()}
		stateModel.AddError(&stateMsg)
		return err
	}
	belogs.Debug("parseMftModel(): len(results):", len(results))

	//get file hash
	mftModel.FileHash, err = hashutil.Sha256File(certFile)
	if err != nil {
		belogs.Error("parseMftModel(): Sha256File: err: ", err, ": "+certFile)
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Fail to read file",
			Detail: err.Error()}
		stateModel.AddError(&stateMsg)
		return err
	}

	// get mft hex
	// first HEX DUMP
	/*
	   39:d=4  hl=2 l=  11 prim: OBJECT            :1.2.840.113549.1.9.16.1.26
	   52:d=4  hl=2 l=inf  cons: cont [ 0 ]
	   54:d=5  hl=2 l=inf  cons: OCTET STRING
	   56:d=6  hl=3 l= 137 prim: OCTET STRING      [HEX DUMP]:308186020200CA180F323031383036323831373030
	   32345A180F32303138303632393138303032345A060960864801650304020130533051162C36353736393433633735383262
	   3164656266666261303564363235343034323462633765626363352E63726C032100154269177B0346014642A367DA415F32
	   C2BFE7C4EAD8AED59ACCF8F20220F89C
	*/
	// Openssl parse first; fall back to the raw packet parser if it fails.
	// Note: the openssl failure is still recorded in stateModel even when
	// the fallback succeeds.
	err = openssl.ParseMftModelByOpensslResults(results, mftModel)
	if err != nil {
		belogs.Error("parseMftModel():ParseMftModelByOpensslResults certFile:", certFile, "   err:", err, "  will try parseMftModelByPacket")
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Fail to parse file",
			Detail: err.Error()}
		stateModel.AddError(&stateMsg)
		err = parseMftModelByPacket(certFile, mftModel)
		if err != nil {
			belogs.Error("parseMftModel():parseMftModelByPacket err:", certFile, err)
			stateMsg := model.StateMsg{Stage: "parsevalidate",
				Fail:   "Fail to parse file",
				Detail: err.Error()}
			stateModel.AddError(&stateMsg)
			return err
		}
	}

	// The following parse steps record failures but do not abort, so a
	// partially-parsed model still reaches validation.
	mftModel.EContentType, err = openssl.ParseMftEContentTypeByOpensslResults(results)
	if err != nil {
		belogs.Error("parseMftModel():ParseEContentTypeByOpensslResults certFile:", certFile, "   err:", err)
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Fail to parse file",
			Detail: err.Error()}
		stateModel.AddError(&stateMsg)
	}

	mftModel.SignerInfoModel, err = openssl.ParseSignerInfoModelByOpensslResults(results)
	if err != nil {
		belogs.Error("parseMftModel():ParseSignerInfoModelByOpensslResults certFile:", certFile, "   err:", err)
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Fail to parse file",
			Detail: err.Error()}
		stateModel.AddError(&stateMsg)
	}

	// get cer info in mft: extract the embedded EE certificate into a temp
	// file so it can be re-parsed with `openssl x509`.
	cerFile, fileByte, start, end, err := openssl.ParseByOpensslAns1ToX509(certFile, results)
	if err != nil {
		belogs.Error("parseMftModel():ParseByOpensslAns1ToX509 certFile:", certFile, "   err:", err)
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Fail to parse ee certificate by openssl",
			Detail: err.Error()}
		stateModel.AddError(&stateMsg)
		return err
	}
	defer osutil.CloseAndRemoveFile(cerFile)

	// `results` is reused from here on: it now holds the x509 output for
	// the extracted EE certificate, not the original asn1parse output.
	results, err = opensslutil.GetResultsByOpensslX509(cerFile.Name())
	if err != nil {
		belogs.Error("parseMftModel(): GetResultsByOpensslX509: err: ", err, ": "+cerFile.Name())
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Fail to parse ee certificate by openssl",
			Detail: err.Error()}
		stateModel.AddError(&stateMsg)
		return err
	}
	belogs.Debug("parseMftModel(): len(results):", len(results))

	mftModel.Aki, mftModel.Ski, err = openssl.ParseAkiSkiByOpensslResults(results)
	if err != nil {
		belogs.Error("parseMftByOpenssl(): ParseAiaModelSiaModelByOpensslResults: err: ", err, ": "+certFile)
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Fail to parse file",
			Detail: err.Error()}
		stateModel.AddError(&stateMsg)
	}

	// AIA SIA
	mftModel.AiaModel, mftModel.SiaModel, err = openssl.ParseAiaModelSiaModelByOpensslResults(results)
	if err != nil {
		belogs.Error("parseMftModel(): ParseAiaModelSiaModelByOpensslResults: err: ", err, ": "+certFile)
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Fail to parse file",
			Detail: err.Error()}
		stateModel.AddError(&stateMsg)
	}

	// EE
	mftModel.EeCertModel, err = ParseEeCertModel(cerFile.Name(), fileByte, start, end)
	if err != nil {
		belogs.Error("parseMftModel(): ParseEeCertModel: err: ", err, ": "+certFile)
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Fail to parse file",
			Detail: err.Error()}
		stateModel.AddError(&stateMsg)
	}

	// get IP address in EE: RFC9286, will check ipaddress(should be empty)
	mftModel.EeCertModel.CerIpAddressModel, _, err = openssl.ParseCerIpAddressModelByOpensslResults(results)
	if err != nil {
		belogs.Error("parseMftModel(): ParseCerIpAddressModelByOpensslResults: err: ", err, ": "+certFile)
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Fail to parse file",
			Detail: err.Error()}
		stateModel.AddError(&stateMsg)
	}

	belogs.Debug("parseMftModel(): mftModel:", jsonutil.MarshalJson(mftModel))
	return nil
}
// parseMftModelByPacket is the fallback parser: it decodes certFile as a
// raw ASN.1 packet stream and extracts the manifest OIDs from it.
// The file hash is computed from the raw (pre-decode) bytes.
func parseMftModelByPacket(certFile string, mftModel *model.MftModel) error {
	rawBytes, decodedBytes, err := asn1util.ReadFileAndDecodeBase64(certFile)
	if err != nil {
		belogs.Error("parseMftModelByPacket():ReadFile return err: ", certFile, err)
		return err
	}
	// Hash of the file as stored on disk.
	mftModel.FileHash = hashutil.Sha256(rawBytes)

	// Decode into a packet tree, then flatten it into OID packets.
	oidPackets := &[]packet.OidPacket{}
	packet.TransformPacket(packet.DecodePacket(decodedBytes), oidPackets)
	packet.PrintOidPacket(oidPackets)

	// Pull the manifest fields out of the OID packets.
	if err = packet.ExtractMftOid(oidPackets, certFile, decodedBytes, mftModel); err != nil {
		belogs.Error("parseMftModelByPacket():ExtractMftOid err:", certFile, err)
		return err
	}
	return nil
}
// only validate mft self. in chain check, will check file list;;;;
// https://datatracker.ietf.org/doc/rfc6486/?include_text=1  Manifests for the Resource Public Key Infrastructure (RPKI) 4.4.Manifest Validation;;;;;;;
// roa_validate.c manifestValidate()
// TODO: sqhl.c P2036 updateManifestObjs(): check file and hash in mft to actually files
//
// validateMftModel checks the manifest's own fields (version, manifest
// number, hash algorithm, file list, time window, SKI/AKI, EE resources)
// and records every violation in stateModel. It always returns nil:
// problems are reported through stateModel, never as an error.
//
// Fixes over the previous revision (message text only, conditions unchanged):
//   - the ThisUpdate-vs-EE-NotBefore error said "later" although the
//     condition detects an *earlier* ThisUpdate;
//   - typo "haracters" -> "characters" in the illegal-file-name message.
func validateMftModel(mftModel *model.MftModel, stateModel *model.StateModel) (err error) {
	// The version of the rpkiManifest is 0
	if mftModel.Version != 0 {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Wrong Version number",
			Detail: ""}
		stateModel.AddError(&stateMsg)
	}

	// check mft number ,should >0
	mftNumberByte := []byte(mftModel.MftNumber)
	//Manifest verifiers MUST be able to handle number values up to 20 octets.  Conforming manifest issuers MUST NOT use number values longer than 20 octets.
	if len(mftNumberByte) == 0 {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Manifest Number is zero",
			Detail: ""}
		stateModel.AddWarning(&stateMsg)
	}
	// MftNumber is a hex string, so 20 octets == 40 hex characters.
	if len(mftNumberByte) > 20*2 {
		le := strconv.Itoa(len(mftNumberByte))
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Manifest Number is too long",
			Detail: "Manifest Number length is " + le}
		stateModel.AddWarning(&stateMsg)
	}
	//isHex, err := regexputil.IsHex(mftModel.MftNumber)
	//if !isHex || err != nil {
	_, ok := new(big.Int).SetString(mftModel.MftNumber, 16)
	if !ok {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Manifest Number is not a hexadecimal number",
			Detail: mftModel.MftNumber}
		stateModel.AddError(&stateMsg)
	}
	// Hard limit (error, vs the 40-char warning above).
	if len(mftModel.MftNumber) > 1024 {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Manifest Number is too long",
			Detail: mftModel.MftNumber}
		stateModel.AddError(&stateMsg)
	}

	// check the hash algorithm: must be SHA-256.
	if mftModel.FileHashAlg != "2.16.840.1.101.3.4.2.1" {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "Oid of fileHashAlg is not 2.16.840.1.101.3.4.2.1",
			Detail: ""}
		stateModel.AddError(&stateMsg)
	}

	// check check_mft_filenames will in chain check
	// check legal filename
	// on sync time ,file may not have sync, so only check filename is or not legal
	// not actually check file
	for _, fileHash := range mftModel.FileHashModels {
		fileName := fileHash.File
		check := regexputil.CheckRpkiFileName(fileName)
		if !check {
			stateMsg := model.StateMsg{Stage: "parsevalidate",
				Fail:   "The characters in file name is illegal",
				Detail: "The file is " + fileName}
			stateModel.AddError(&stateMsg)
		}
		hash := fileHash.Hash
		ext := osutil.Ext(fileName)
		// no .mft
		// https://www.iana.org/assignments/rpki/rpki.xhtml
		if ext != ".cer" && ext != ".roa" && ext != ".crl" && ext != ".gbr" &&
			ext != ".asa" && ext != ".sig" {
			stateMsg := model.StateMsg{Stage: "parsevalidate",
				Fail:   "The file in fileList is not one of the types of cer/roa/crl/gbr/asa/sig",
				Detail: "The file is " + fileName}
			stateModel.AddError(&stateMsg)
		}
		// SHA-256 in hex is always 64 characters.
		if len(hash) != 64 {
			stateMsg := model.StateMsg{Stage: "parsevalidate",
				Fail:   "The length of the hash in fileList is not 64",
				Detail: "The illegal hash is " + hash}
			stateModel.AddError(&stateMsg)
		}
	}

	// check duplicate file name: report at most one duplicate pair.
	for i1 := 0; i1 < len(mftModel.FileHashModels); i1++ {
		duplicate := false
		fileHash1 := mftModel.FileHashModels[i1]
		for i2 := i1 + 1; i2 < len(mftModel.FileHashModels); i2++ {
			fileHash2 := mftModel.FileHashModels[i2]
			if fileHash1.File == fileHash2.File {
				stateMsg := model.StateMsg{Stage: "parsevalidate",
					Fail:   "There are duplicate files in fileList",
					Detail: ""}
				stateModel.AddError(&stateMsg)
				duplicate = true
				break
			}
		}
		if duplicate {
			break
		}
	}

	//check time
	now := time.Now()
	if mftModel.ThisUpdate.IsZero() {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "ThisUpdate is empty",
			Detail: ""}
		stateModel.AddError(&stateMsg)
	}
	if mftModel.NextUpdate.IsZero() {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "NextUpdate is empty",
			Detail: ""}
		stateModel.AddError(&stateMsg)
	}
	//thisUpdate precedes nextUpdate.
	if mftModel.ThisUpdate.After(now) {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "ThisUpdate is later than the current time",
			Detail: "The current time is " + convert.Time2StringZone(now) + ", thisUpdate is " + convert.Time2StringZone(mftModel.ThisUpdate)}
		stateModel.AddError(&stateMsg)
	}
	// CAUTION: updateMftByCheckAll() string-matches this exact Fail text
	// ("NextUpdate is earlier than the current time"); do not reword it.
	if mftModel.NextUpdate.Before(now) {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "NextUpdate is earlier than the current time",
			Detail: "The current time is " + convert.Time2StringZone(now) + ", nextUpdate is " + convert.Time2StringZone(mftModel.NextUpdate)}
		stateModel.AddError(&stateMsg)
	}
	if mftModel.ThisUpdate.After(mftModel.NextUpdate) {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "NextUpdate is earlier than ThisUpdate",
			Detail: ""}
		stateModel.AddError(&stateMsg)
	}
	// ThisUpdate must not precede the EE certificate's NotBefore (error),
	// and ideally should equal it (warning when it does not).
	if mftModel.ThisUpdate.Before(mftModel.EeCertModel.NotBefore) {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "ThisUpdate of MFT is earlier than the NotBefore of EE",
			Detail: ""}
		stateModel.AddError(&stateMsg)
	}
	if !mftModel.ThisUpdate.Equal(mftModel.EeCertModel.NotBefore) {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "ThisUpdate of MFT is not equal to the NotBefore of EE",
			Detail: ""}
		stateModel.AddWarning(&stateMsg)
	}
	if mftModel.NextUpdate.After(mftModel.EeCertModel.NotAfter) {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "NextUpdate of MFT is later than the NotAfter of EE",
			Detail: ""}
		stateModel.AddWarning(&stateMsg)
	}
	if !mftModel.NextUpdate.Equal(mftModel.EeCertModel.NotAfter) {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "NextUpdate of MFT is not equal to the NotAfter of EE",
			Detail: ""}
		stateModel.AddWarning(&stateMsg)
	}

	// ski aki
	if len(mftModel.Ski) == 0 {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "SKI is empty",
			Detail: ""}
		stateModel.AddError(&stateMsg)
	}
	// hash is 160bit --> 20Byte --> 40Str
	if len(mftModel.Ski) != 40 {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "SKI length is wrong",
			Detail: ""}
		stateModel.AddError(&stateMsg)
	}
	if len(mftModel.Aki) == 0 {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "AKI is empty",
			Detail: ""}
		stateModel.AddError(&stateMsg)
	}
	// hash is 160bit --> 20Byte --> 40Str
	if len(mftModel.Aki) != 40 {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "AKI length is wrong",
			Detail: ""}
		stateModel.AddError(&stateMsg)
	}

	// rfc9286
	if len(mftModel.EeCertModel.CerIpAddressModel.CerIpAddresses) != 0 {
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail: "sbgp-ipAddrBlock of MFT's EE should be empty",
			Detail: "INRs must use the 'inherit' attribute, the current length is " +
				convert.ToString(len(mftModel.EeCertModel.CerIpAddressModel.CerIpAddresses))}
		stateModel.AddError(&stateMsg)
	}
	//TODO, todo,Manifest's EE certificate has RFC3779 resources that are not marked inherit, in roa_vildate.c P1009

	// These helpers record their findings in stateModel themselves; their
	// error returns are intentionally not propagated (we always return nil).
	err = ValidateEeCertModel(stateModel, &mftModel.EeCertModel)
	err = ValidateSignerInfoModel(stateModel, &mftModel.SignerInfoModel)

	belogs.Debug("validateMftModel():filePath, fileName,stateModel:",
		mftModel.FilePath, mftModel.FileName, jsonutil.MarshalJson(stateModel))
	return nil
}
// updateMftByCheckAll re-checks all manifests for staleness at time `now`
// and persists an updated state for every manifest whose NextUpdate has
// passed. Manifests that already carry the staleness message are skipped.
// Whether staleness is an error or a warning is controlled by the
// policy::allowStaleMft configuration flag.
func updateMftByCheckAll(now time.Time) error {
	// check expire: fetch all manifests whose NextUpdate is before `now`.
	curCertIdStateModels, err := getExpireMftDb(now)
	if err != nil {
		belogs.Error("updateMftByCheckAll(): getExpireMftDb:  err: ", err)
		return err
	}
	belogs.Info("updateMftByCheckAll(): len(curCertIdStateModels):", len(curCertIdStateModels))
	newCertIdStateModels := make([]CertIdStateModel, 0)
	for i := range curCertIdStateModels {
		// if have this error, ignore
		// (string match against the exact Fail text that validateMftModel
		// stores, so the same message is not appended twice)
		belogs.Debug("updateMftByCheckAll(): old curCertIdStateModels[i]:", jsonutil.MarshalJson(curCertIdStateModels[i]))
		if strings.Contains(curCertIdStateModels[i].StateStr, "NextUpdate is earlier than the current time") {
			continue
		}
		// will add error: decode the stored state, append the staleness
		// message, then re-encode for persistence.
		stateModel := model.StateModel{}
		jsonutil.UnmarshalJson(curCertIdStateModels[i].StateStr, &stateModel)
		stateMsg := model.StateMsg{Stage: "parsevalidate",
			Fail:   "NextUpdate is earlier than the current time",
			Detail: "The current time is " + convert.Time2StringZone(now) + ", nextUpdate is " + convert.Time2StringZone(curCertIdStateModels[i].EndTime)}
		// Policy switch: stale manifests may be tolerated (warning) or not (error).
		if conf.Bool("policy::allowStaleMft") {
			stateModel.AddWarning(&stateMsg)
		} else {
			stateModel.AddError(&stateMsg)
		}

		certIdStateModel := CertIdStateModel{
			Id:       curCertIdStateModels[i].Id,
			StateStr: jsonutil.MarshalJson(stateModel),
		}
		newCertIdStateModels = append(newCertIdStateModels, certIdStateModel)
		belogs.Debug("updateMftByCheckAll(): new certIdStateModel:", jsonutil.MarshalJson(certIdStateModel))
	}

	// update db
	err = updateMftStateDb(newCertIdStateModels)
	if err != nil {
		belogs.Error("updateMftByCheckAll(): updateMftStateDb fail: ", len(newCertIdStateModels), err)
		return err
	}
	belogs.Info("updateMftByCheckAll(): ok len(newCertIdStateModels):", len(newCertIdStateModels))
	return nil
}
|
package impl
import (
"github.com/gomodule/redigo/redis"
"time"
)
//var (
// server string = "127.0.0.1:6379"
// n uint = 100000
// fp float64 = 0.01
//)
// redis连接池
//var pool *redis.Pool
// PoolInit builds a Redis connection pool for the given server address
// ("host:port"), keeping up to 3 idle connections with a 240s idle timeout.
func PoolInit(server string) *redis.Pool {
	dial := func() (redis.Conn, error) {
		conn, err := redis.Dial("tcp", server)
		if err != nil {
			return nil, err
		}
		return conn, nil
	}
	return &redis.Pool{
		MaxIdle:     3,
		IdleTimeout: 240 * time.Second,
		Dial:        dial,
	}
}
/*
// 测试使用,测试时按分钟切换
func GetVersion() int {
return time.Now().Minute()%2
}
*/
// GetVersion returns the active BloomFilter version (0 or 1), switching
// each calendar month: odd-numbered months yield 1, even-numbered yield 0.
func GetVersion() int {
	// time.Month is already numbered 1..12, so the previous hand-written
	// month-name lookup table was unnecessary; the modulo result is identical.
	return int(time.Now().Month()) % 2
}
// SetFunc stores value under key using a connection taken from the pool.
func SetFunc(pool *redis.Pool, key string, value []byte) error {
	conn := pool.Get()
	defer conn.Close()
	// SET replies with a simple string ("OK"); only the error matters here.
	_, err := redis.String(conn.Do("SET", key, value))
	return err
}
// GetFunc fetches the value stored under key, returning it as raw bytes.
func GetFunc(pool *redis.Pool, key string) ([]byte, error) {
	conn := pool.Get()
	defer conn.Close()
	data, err := redis.Bytes(conn.Do("GET", key))
	return data, err
}
|
package main
import "fmt"
import "net/http"
// indexHandler serves a small static HTML snippet on every request.
// The raw-string literal is written out verbatim, including its newlines.
func indexHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, `<h1>En header</h1>
<p>og en linje </p>
<p>og en linje til </p>
`)
}
// main registers the index handler and serves HTTP on port 7000.
func main() {
	http.HandleFunc("/", indexHandler)
	// ListenAndServe always returns a non-nil error when it exits;
	// previously that error was silently discarded.
	if err := http.ListenAndServe(":7000", nil); err != nil {
		fmt.Println(err)
	}
}
|
/*
* @lc app=leetcode.cn id=139 lang=golang
*
* [139] 单词拆分
*/
package solution
// @lc code=start
// wordBreak139 reports whether s can be segmented into a space-separated
// sequence of one or more words from wordDict (LeetCode 139).
//
// Dynamic programming: dp[i] is true when the prefix s[:i] can be
// segmented. O(len(s)^2) substring lookups in the worst case.
func wordBreak139(s string, wordDict []string) bool {
	// Pre-size the set to avoid rehashing while filling it.
	wordDictMap := make(map[string]bool, len(wordDict))
	for _, word := range wordDict {
		wordDictMap[word] = true
	}
	// "dp[i] = true" means the result would be true while s[:i] as the input of wordBreak
	dp := make([]bool, len(s)+1)
	dp[0] = true
	for i := 1; i <= len(s); i++ {
		for j := 0; j < i; j++ {
			if dp[j] && wordDictMap[s[j:i]] {
				dp[i] = true
				// Once s[:i] is known segmentable there is no need to try
				// further split points (previously the loop kept scanning).
				break
			}
		}
	}
	return dp[len(s)]
}
// @lc code=end
|
package routes
import (
"net/http"
"github.com/gin-gonic/gin"
controllers "go-pg-gin/controllers"
)
// Routes registers all HTTP endpoints on the given gin engine:
// a welcome page at "/" and the team CRUD endpoints under /api/team.
func Routes(router *gin.Engine) {
	router.GET("/", welcome)
	router.GET("/api/team", controllers.GetAllPlayers)
	router.POST("/api/team", controllers.CreatePlayer)
	router.GET("/api/team/:Id", controllers.GetSinglePlayer)

	// Disabled routes kept for reference; note gin path params use
	// ":name", not "{name}", so the first two would need fixing if revived.
	// router.PUT("/api/team/{id}", controllers.UpdatePlayer)
	// router.DELETE("/api/deleteplayer/{id}", controllers.DeletePlayer)
	// router.GET("/todos", controllers.GetAllTodos)
	// router.POST("/todo", controllers.CreateTodo)
	// router.GET("/todo/:todoId", controllers.GetSingleTodo)
	// router.PUT("/todo/:todoId", controllers.EditTodo)
	// router.DELETE("/todo/:todoId", controllers.DeleteTodo)
}
// welcome handles GET / and replies with a static JSON greeting.
func welcome(c *gin.Context) {
	c.JSON(http.StatusOK, gin.H{
		"status":  200,
		"message": "Welcome To API",
	})
	// The trailing bare `return` was redundant at the end of the function
	// (flagged by `go vet`/staticcheck S1023) and has been removed.
}
|
package storage
import (
"context"
"encoding/json"
"errors"
"strings"
)
// StorageEntry is one key/value record. Value is kept as raw JSON so the
// caller decides when (or whether) to decode it.
type StorageEntry struct {
	Key string
	Value json.RawMessage
}

// Storage is the minimal key/value backend contract implemented by the
// file- and cache-backed stores below.
type Storage interface {
	Get(ctx context.Context, key string) (*StorageEntry, error)
	Put(ctx context.Context, e StorageEntry) error
	Delete(ctx context.Context, key string) error
	List(ctx context.Context, prefix string) ([]string, error)
}

// StorageCreator builds a Storage from a string configuration map.
type StorageCreator func(conf map[string]string) (Storage, error)

// StorageOptions maps backend names to their constructors.
var StorageOptions = map[string]StorageCreator{
	"file":      NewFileStorage,
	"bigcache":  NewBigCacheStorage,
	"cacheable": NewCacheableStorageWithConf,
}

// CacheableStorageOptions lists the backends usable as the inner store of
// the "cacheable" wrapper (the wrapper itself is deliberately excluded).
var CacheableStorageOptions = map[string]StorageCreator{
	"file":     NewFileStorage,
	"bigcache": NewBigCacheStorage,
}
// ValidatePath rejects any path containing "..", preventing references to
// parent directories. Note this is a plain substring test, so names such
// as "a..b" are rejected as well.
func ValidatePath(path string) error {
	if !strings.Contains(path, "..") {
		return nil
	}
	return errors.New("path cannot reference parents")
}
|
package id
import (
"errors"
"github.com/emersion/go-imap"
"github.com/emersion/go-imap/client"
)
// Client is an ID client.
// It wraps a go-imap client and adds the RFC 2971 ID command.
type Client struct {
	// c is the underlying IMAP connection all commands are issued on.
	c *client.Client
}

// NewClient creates a new client.
func NewClient(c *client.Client) *Client {
	return &Client{c: c}
}

// SupportID checks if the server supports the ID extension.
// It consults the capability list advertised by the server.
func (c *Client) SupportID() (bool, error) {
	return c.c.Support(Capability)
}
// ID sends an ID command to the server and returns the server's ID.
//
// It fails fast when the connection is not in a connected state. The
// returned serverID comes from the untagged ID response; err reflects a
// transport failure or a non-OK tagged status from the server.
func (c *Client) ID(clientID ID) (serverID ID, err error) {
	// The current state must be a subset of ConnectedState before commands
	// can be issued.
	if state := c.c.State(); imap.ConnectedState&state != state {
		// Error strings are lowercase per Go convention (staticcheck ST1005);
		// previously this read "Not connected".
		return nil, errors.New("not connected")
	}

	var cmd imap.Commander = &Command{ID: clientID}

	res := &Response{}
	status, err := c.c.Execute(cmd, res)
	if err != nil {
		return
	}
	if err = status.Err(); err != nil {
		return
	}

	serverID = res.ID
	return
}
|
package models
// Function is a domain model record.
// NOTE(review): no fields are defined yet — placeholder type; confirm the
// intended schema before use.
type Function struct {
}

// FunctionStore is the persistence access point for Function values.
// NOTE(review): no methods are defined yet — placeholder type.
type FunctionStore struct {}
|
/*
Proxy is a Minetest proxy server
supporting multiple concurrent connections.
Usage:
proxy dial:port listen:port
where dial:port is the server address
and listen:port is the address to listen on.
*/
package main
import (
"errors"
"fmt"
"log"
"net"
"os"
"github.com/anon55555/mt"
)
// main validates the command-line arguments, listens for Minetest client
// connections on os.Args[2], and for each accepted client dials the
// upstream server at os.Args[1], relaying packets in both directions.
func main() {
	if len(os.Args) != 3 {
		fmt.Fprintln(os.Stderr, "usage: proxy dial:port listen:port")
		os.Exit(1)
	}

	// Upstream server address (each client gets its own UDP connection to it).
	srvaddr, err := net.ResolveUDPAddr("udp", os.Args[1])
	if err != nil {
		log.Fatal(err)
	}

	// Local UDP socket the proxy accepts clients on.
	lc, err := net.ListenPacket("udp", os.Args[2])
	if err != nil {
		log.Fatal(err)
	}
	defer lc.Close()

	l := mt.Listen(lc)
	// Accept loop: one pair of relay goroutines per client; errors on a
	// single client never stop the listener.
	for {
		clt, err := l.Accept()
		if err != nil {
			log.Print(err)
			continue
		}
		log.Print(clt.RemoteAddr().String() + " connected")

		conn, err := net.DialUDP("udp", nil, srvaddr)
		if err != nil {
			log.Print(err)
			continue
		}
		srv := mt.Connect(conn)

		// Relay in both directions concurrently; each goroutine closes its
		// destination when its source disconnects.
		go proxy(clt, srv)
		go proxy(srv, clt)
	}
}
// proxy relays packets from src to dest until src disconnects, then closes
// dest so the opposite relay direction terminates as well.
func proxy(src, dest mt.Peer) {
	// Log prefix identifying the source peer.
	s := fmt.Sprint(src.ID(), " (", src.RemoteAddr(), "): ")

	for {
		pkt, err := src.Recv()
		if err != nil {
			// net.ErrClosed marks a real disconnect; WhyClosed distinguishes
			// a reasoned close from a plain drop.
			if errors.Is(err, net.ErrClosed) {
				if err := src.WhyClosed(); err != nil {
					log.Print(s, "disconnected: ", err)
				} else {
					log.Print(s, "disconnected")
				}
				break
			}
			// Transient receive error: log and keep relaying.
			log.Print(s, err)
			continue
		}
		if _, err := dest.Send(pkt); err != nil {
			log.Print(err)
		}
	}

	dest.Close()
}
|
package model
import (
"log"
)
// Group is the GORM model for the "groups" table.
type Group struct {
	BaseModel
	// Name is required on input (binding:"required").
	Name string `gorm:"column:name;not null" binding:"required" json:"name"`
	// Description is nullable, hence the pointer.
	Description *string `gorm:"column:description" json:"description"`
	// UserId is the owning user; defaults to 1 at the database level.
	UserId int `gorm:"column:user_id;default:1" json:"user_id"`
}

// GroupSimple is a trimmed projection (id + name) used for list endpoints.
type GroupSimple struct {
	ID int64 `json:"id"`
	Name string `json:"name"`
}
// Detail loads the group with the given id into the receiver and returns it.
//
// The previous version ran First into a throwaway &Group{} and then
// re-scanned via Scan(&group) (a **Group); querying directly into the
// receiver is equivalent and drops the redundant second scan.
func (group *Group) Detail(id int) (*Group, error) {
	err := db.Where("id = ?", id).First(group).Error
	return group, err
}
// List returns one page of groups ordered by most recently updated, plus
// the total row count for pagination.
func (group *Group) List(page, pagesize int) (groups []*Group, count int, err error) {
	// Pagination wraps the query with LIMIT/OFFSET derived from page/pagesize.
	err = Pagination(db.Order("updated_at desc, id desc"), page, pagesize).Find(&groups).Error
	if err != nil {
		return
	}
	// Separate unpaginated count for the client's pager.
	err = db.Model(&group).Count(&count).Error
	if err != nil {
		return
	}
	return
}
// ListAll returns every group as an id+name pair, without pagination.
func (group *Group) ListAll() (groups []*GroupSimple, err error) {
	err = db.Table("groups").Select("id, name").Scan(&groups).Error
	return
}
// Save inserts the group and returns the generated ID.
// NOTE(review): log.Panicln panics, so on a create failure this function
// never reaches the `return` below — confirm the panic is intended rather
// than returning the error.
func (group *Group) Save() (id int64, err error) {
	err = db.Create(group).Error
	if err != nil {
		log.Panicln(" save group error", err.Error())
		return
	}
	id = int64(group.ID)
	return
}
// Update persists all changed fields of the receiver.
// NOTE(review): log.Panicln panics on failure, so err is never actually
// returned non-nil — confirm this is intended.
func (group *Group) Update() (err error) {
	err = db.Model(&group).Update(group).Error
	if err != nil {
		log.Panicln(" update group error", err.Error())
	}
	return
}
// Delete removes the group with the given id.
// NOTE(review): the id parameter is float64 (likely because it comes from
// decoded JSON) while Detail takes an int — confirm whether this asymmetry
// is deliberate. log.Panicln panics on failure.
func (group *Group) Delete(id float64) (err error) {
	err = db.Where("id = ?", id).Delete(&group).Error
	if err != nil {
		log.Panicln(" delete group error", err.Error())
	}
	return
}
// Deletes removes all groups whose ids are in the given list.
// NOTE(review): log.Panicln panics on failure, so err is never returned
// non-nil — confirm this is intended.
func (group *Group) Deletes(ids []interface{}) (err error) {
	err = db.Where("id IN (?)", ids).Delete(&group).Error
	if err != nil {
		log.Panicln("list delete group error", err.Error())
		return
	}
	return
}
|
package main
import (
"net/http"
yaml "gopkg.in/yaml.v3"
)
// PathURL is just a structure to represent key value pair
// (one redirect rule: request Path -> destination URL), as read from YAML.
type PathURL struct {
	Path string `yaml:"path"`
	URL string `yaml:"url"`
}
// MapHandler returns a handler that redirects (302 Found) any request
// whose path appears in pathToURLs; all other requests go to fallback.
func MapHandler(pathToURLs map[string]string, fallback http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		dest, found := pathToURLs[r.URL.Path]
		if !found {
			fallback.ServeHTTP(w, r)
			return
		}
		http.Redirect(w, r, dest, http.StatusFound)
	}
}
// YamlHandler parses yamlContent into path->URL redirect rules and returns
// a handler backed by MapHandler; YAML parse failures are returned as-is.
func YamlHandler(yamlContent []byte, fallback http.Handler) (http.HandlerFunc, error) {
	pathURLs, err := yamlParser(yamlContent)
	if err != nil {
		return nil, err
	}
	return MapHandler(arrayToMapConvertor(pathURLs), fallback), nil
}
// yamlParser decodes a YAML list of {path, url} entries into PathURL values.
func yamlParser(data []byte) ([]PathURL, error) {
	var pathURLs []PathURL
	if err := yaml.Unmarshal(data, &pathURLs); err != nil {
		return nil, err
	}
	return pathURLs, nil
}
// arrayToMapConvertor flattens the PathURL slice into a path->URL lookup map.
func arrayToMapConvertor(pathURLs []PathURL) map[string]string {
	out := make(map[string]string, len(pathURLs))
	for i := range pathURLs {
		out[pathURLs[i].Path] = pathURLs[i].URL
	}
	return out
}
|
package models
import (
"bytes"
"database/sql"
"git.hoogi.eu/snafu/go-blog/logger"
"strings"
"time"
)
// SQLiteCategoryDatasource providing an implementation of CategoryDatasourceService for SQLite
type SQLiteCategoryDatasource struct {
	// SQLConn is the shared database handle all queries run on.
	SQLConn *sql.DB
}
// Create inserts a new category row (stamped with the current time) and
// returns the generated row ID.
func (rdb *SQLiteCategoryDatasource) Create(c *Category) (int, error) {
	result, err := rdb.SQLConn.Exec(
		"INSERT INTO category (name, slug, last_modified, user_id) VALUES (?, ?, ?, ?)",
		c.Name, c.Slug, time.Now(), c.Author.ID)
	if err != nil {
		return 0, err
	}
	insertID, err := result.LastInsertId()
	if err != nil {
		return 0, err
	}
	return int(insertID), nil
}
// List returns categories (with their authors) matching the filter
// criteria, most recently modified first.
//
// Bug fix: the CategoriesWithoutArticles branch filtered on
// "a.categorie_id", a column name inconsistent with the join condition
// ("a.category_id") used two lines above, so that query could never run.
func (rdb *SQLiteCategoryDatasource) List(fc FilterCriteria) ([]Category, error) {
	var args []interface{}

	var stmt strings.Builder
	stmt.WriteString("SELECT DISTINCT c.id, c.name, c.slug, c.last_modified, ")
	stmt.WriteString("u.id, u.display_name, u.username, u.email, u.is_admin ")
	stmt.WriteString("FROM category as c ")
	stmt.WriteString("INNER JOIN user as u ")
	stmt.WriteString("ON c.user_id = u.id ")

	if fc == CategoriesWithPublishedArticles {
		stmt.WriteString("INNER JOIN article as a ")
		stmt.WriteString("ON c.id = a.category_id ")
		stmt.WriteString("WHERE a.published = true ")
	} else if fc == CategoriesWithoutArticles {
		stmt.WriteString("LEFT JOIN article as a ")
		stmt.WriteString("ON c.id = a.category_id ")
		stmt.WriteString("WHERE a.category_id IS NULL ")
	}

	stmt.WriteString("ORDER BY c.last_modified DESC ")

	rows, err := rdb.SQLConn.Query(stmt.String(), args...)
	if err != nil {
		return nil, err
	}

	defer func() {
		if err := rows.Close(); err != nil {
			logger.Log.Error(err)
		}
	}()

	var cs []Category
	for rows.Next() {
		var c Category
		var ru User
		if err := rows.Scan(&c.ID, &c.Name, &c.Slug, &c.LastModified, &ru.ID, &ru.DisplayName, &ru.Username, &ru.Email, &ru.IsAdmin); err != nil {
			return nil, err
		}
		c.Author = &ru
		cs = append(cs, c)
	}

	if err := rows.Err(); err != nil {
		return nil, err
	}

	return cs, nil
}
// Count returns the total number of categories, or -1 on query failure.
// NOTE(review): the fc FilterCriteria parameter is accepted but ignored —
// the count always covers every category. Confirm whether the filtered
// variants should be honored here as they are in List/Get.
func (rdb *SQLiteCategoryDatasource) Count(fc FilterCriteria) (int, error) {
	var total int
	if err := rdb.SQLConn.QueryRow("SELECT count(id) FROM category ").Scan(&total); err != nil {
		return -1, err
	}
	return total, nil
}
// Get returns the category with the given ID (with its author), subject
// to the same filter criteria as List.
//
// Bug fix: the CategoriesWithoutArticles branch filtered on
// "a.categorie_id", inconsistent with the join column "a.category_id".
func (rdb *SQLiteCategoryDatasource) Get(categoryID int, fc FilterCriteria) (*Category, error) {
	var stmt bytes.Buffer
	stmt.WriteString("SELECT c.id, c.name, c.slug, c.last_modified, ")
	stmt.WriteString("u.id, u.display_name, u.username, u.email, u.is_admin ")
	stmt.WriteString("FROM category as c ")
	stmt.WriteString("INNER JOIN user as u ")
	stmt.WriteString("ON u.id = c.user_id ")

	if fc == CategoriesWithPublishedArticles {
		stmt.WriteString("INNER JOIN article as a ")
		stmt.WriteString("ON c.id = a.category_id ")
		stmt.WriteString("WHERE a.published = true ")
		stmt.WriteString("AND c.id=? ")
	} else if fc == CategoriesWithoutArticles {
		stmt.WriteString("LEFT JOIN article as a ")
		stmt.WriteString("ON c.id = a.category_id ")
		stmt.WriteString("WHERE a.category_id IS NULL ")
		stmt.WriteString("AND c.id=? ")
	} else {
		stmt.WriteString("WHERE c.id=? ")
	}

	var c Category
	var ru User
	if err := rdb.SQLConn.QueryRow(stmt.String(), categoryID).Scan(&c.ID, &c.Name, &c.Slug, &c.LastModified, &ru.ID,
		&ru.DisplayName, &ru.Username, &ru.Email, &ru.IsAdmin); err != nil {
		return nil, err
	}
	c.Author = &ru

	return &c, nil
}
// GetBySlug returns the category with the given slug (with its author),
// subject to the same filter criteria as List.
//
// Bug fixes: both filtered branches appended a second "WHERE" clause to a
// statement that already had one, producing invalid SQL — the slug
// condition must be joined with "AND" (as Get does for c.id). The
// CategoriesWithoutArticles branch also referenced the mistyped column
// "a.categorie_id" instead of "a.category_id".
func (rdb *SQLiteCategoryDatasource) GetBySlug(slug string, fc FilterCriteria) (*Category, error) {
	var stmt strings.Builder
	stmt.WriteString("SELECT c.id, c.name, c.slug, c.last_modified, ")
	stmt.WriteString("u.id, u.display_name, u.username, u.email, u.is_admin ")
	stmt.WriteString("FROM category as c ")
	stmt.WriteString("INNER JOIN user as u ")
	stmt.WriteString("ON u.id = c.user_id ")

	if fc == CategoriesWithPublishedArticles {
		stmt.WriteString("INNER JOIN article as a ")
		stmt.WriteString("ON c.id = a.category_id ")
		stmt.WriteString("WHERE a.published = true ")
		stmt.WriteString("AND c.slug=? ")
	} else if fc == CategoriesWithoutArticles {
		stmt.WriteString("LEFT JOIN article as a ")
		stmt.WriteString("ON c.id = a.category_id ")
		stmt.WriteString("WHERE a.category_id IS NULL ")
		stmt.WriteString("AND c.slug=? ")
	} else {
		stmt.WriteString("WHERE c.slug=? ")
	}

	var c Category
	var ru User
	if err := rdb.SQLConn.QueryRow(stmt.String(), slug).Scan(&c.ID, &c.Name, &c.Slug, &c.LastModified, &ru.ID,
		&ru.DisplayName, &ru.Username, &ru.Email, &ru.IsAdmin); err != nil {
		return nil, err
	}
	c.Author = &ru

	return &c, nil
}
// Update rewrites the mutable columns of the given category and stamps
// last_modified with the current time.
func (rdb *SQLiteCategoryDatasource) Update(c *Category) error {
	_, err := rdb.SQLConn.Exec("UPDATE category SET name=?, slug=?, last_modified=?, user_id=? WHERE id=?",
		c.Name, c.Slug, time.Now(), c.Author.ID, c.ID)
	return err
}
// Delete removes the category row with the given ID.
func (rdb *SQLiteCategoryDatasource) Delete(categoryID int) error {
	_, err := rdb.SQLConn.Exec("DELETE FROM category WHERE id=?", categoryID)
	return err
}
|
package main
import (
"fmt"
)
// student demonstrates struct embedding: address and email are embedded
// (anonymous) fields.
type student struct {
	name string
	age int
	address //nested (embedded) struct
	email
}

// address holds a student's location.
type address struct {
	country string
	city string
}

// email holds a student's mail details; note it also declares a `city`
// field, which collides with address.city when accessed via student.
type email struct {
	city string
	e_address string
}
// main builds a student with embedded address/email structs and shows how
// ambiguous promoted fields must be qualified.
func main() {
	stu1 := student{
		name:"LiyaTong",
		age:28,
		address:address{
			country:"china",
			city:"BeiJing",
		},
		email:email{
			city:"Beijing",
			e_address:"jzb0424@163.com",
		},
	}
	fmt.Println(stu1.address.country)
	//fmt.Println(stu1.city)// erroneous example: both email and address declare `city`,
	// so the compiler cannot tell which embedded struct's field is meant
	// When embedded structs declare fields with the same name and type, the
	// access must be qualified with the embedded struct's name.
	fmt.Println(stu1.email.city)
}
|
package openstack
import (
"fmt"
"github.com/gophercloud/gophercloud/openstack/compute/v2/flavors"
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external"
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
"github.com/gophercloud/gophercloud/openstack/networking/v2/networks"
"github.com/gophercloud/utils/openstack/clientconfig"
networkutils "github.com/gophercloud/utils/openstack/networking/v2/networks"
openstackdefaults "github.com/openshift/installer/pkg/types/openstack/defaults"
)
// getCloudNames gets the valid cloud names. These are read from clouds.yaml.
func getCloudNames() ([]string, error) {
	clouds, err := clientconfig.LoadCloudsYAML()
	if err != nil {
		return nil, err
	}

	names := make([]string, 0, len(clouds))
	for name := range clouds {
		names = append(names, name)
	}
	return names, nil
}
// getExternalNetworkNames interrogates OpenStack to get the external network
// names.
func getExternalNetworkNames(cloud string) ([]string, error) {
	conn, err := clientconfig.NewServiceClient("network", openstackdefaults.DefaultClientOpts(cloud))
	if err != nil {
		return nil, err
	}

	// Restrict the listing to networks flagged as external (router:external).
	iTrue := true
	listOpts := external.ListOptsExt{
		ListOptsBuilder: networks.ListOpts{},
		External:        &iTrue,
	}

	allPages, err := networks.List(conn, listOpts).AllPages()
	if err != nil {
		return nil, err
	}

	allNetworks, err := networks.ExtractNetworks(allPages)
	if err != nil {
		return nil, err
	}

	networkNames := make([]string, len(allNetworks))
	for x, network := range allNetworks {
		networkNames[x] = network.Name
	}

	return networkNames, nil
}
// getFlavorNames gets a list of valid flavor names.
// An empty flavor list from the cloud is treated as an error.
func getFlavorNames(cloud string) ([]string, error) {
	conn, err := clientconfig.NewServiceClient("compute", openstackdefaults.DefaultClientOpts(cloud))
	if err != nil {
		return nil, err
	}

	allPages, err := flavors.ListDetail(conn, flavors.ListOpts{}).AllPages()
	if err != nil {
		return nil, err
	}

	allFlavors, err := flavors.ExtractFlavors(allPages)
	if err != nil {
		return nil, err
	}
	if len(allFlavors) == 0 {
		return nil, fmt.Errorf("no OpenStack flavors were found")
	}

	names := make([]string, len(allFlavors))
	for i := range allFlavors {
		names[i] = allFlavors[i].Name
	}
	return names, nil
}
// sortableFloatingIPCollection implements sort.Interface over floating IPs,
// ordering by IP address string, plus helpers used by the survey prompts.
type sortableFloatingIPCollection []floatingips.FloatingIP

// Len implements sort.Interface.
func (fips sortableFloatingIPCollection) Len() int { return len(fips) }

// Less orders by the textual floating-IP address.
func (fips sortableFloatingIPCollection) Less(i, j int) bool {
	return fips[i].FloatingIP < fips[j].FloatingIP
}

// Swap implements sort.Interface.
func (fips sortableFloatingIPCollection) Swap(i, j int) {
	fips[i], fips[j] = fips[j], fips[i]
}

// Names returns all floating-IP address strings, in collection order.
func (fips sortableFloatingIPCollection) Names() []string {
	names := make([]string, len(fips))
	for i := range fips {
		names[i] = fips[i].FloatingIP
	}
	return names
}

// Description returns the free-form description of the entry at index.
func (fips sortableFloatingIPCollection) Description(index int) string {
	return fips[index].Description
}

// Contains reports whether value matches any floating-IP address here.
func (fips sortableFloatingIPCollection) Contains(value string) bool {
	for i := range fips {
		if value == fips[i].FloatingIP {
			return true
		}
	}
	return false
}
// getFloatingIPs returns the unassigned (status DOWN) floating IPs that belong
// to the named floating network. It is an error for none to be available.
func getFloatingIPs(cloud string, floatingNetworkName string) (sortableFloatingIPCollection, error) {
	conn, err := clientconfig.NewServiceClient("network", openstackdefaults.DefaultClientOpts(cloud))
	if err != nil {
		return nil, err
	}

	// floatingips.ListOpts requires an ID so we must get it from the name
	floatingNetworkID, err := networkutils.IDFromName(conn, floatingNetworkName)
	if err != nil {
		return nil, err
	}

	// Only show IPs that belong to the network and are not in use
	listOpts := floatingips.ListOpts{
		FloatingNetworkID: floatingNetworkID,
		Status:            "DOWN",
	}

	allPages, err := floatingips.List(conn, listOpts).AllPages()
	if err != nil {
		return nil, err
	}

	allFloatingIPs, err := floatingips.ExtractFloatingIPs(allPages)
	if err != nil {
		return nil, err
	}

	if len(allFloatingIPs) == 0 {
		return nil, fmt.Errorf("there are no unassigned floating IP addresses available")
	}

	return allFloatingIPs, nil
}
|
package collectors
import (
"bufio"
"encoding/json"
"fmt"
"math"
"os"
"strconv"
"strings"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
)
// SCHEDSTATFILE is the kernel scheduler-statistics file this collector parses.
const SCHEDSTATFILE = `/proc/schedstat`

// SchedstatCollectorConfig holds the fields we read from the JSON configuration.
type SchedstatCollectorConfig struct {
	// ExcludeMetrics lists metric names that should not be emitted.
	ExcludeMetrics []string `json:"exclude_metrics,omitempty"`
}

// SchedstatCollector contains all variables we need during execution and the
// variables defined by metricCollector (name, init, ...).
type SchedstatCollector struct {
	metricCollector
	config        SchedstatCollectorConfig     // the configuration structure
	lastTimestamp time.Time                    // time stamp of last tick, used to derive rates
	meta          map[string]string            // default meta information attached to each metric
	cputags       map[string]map[string]string // per-CPU tags, pre-generated in Init()
	olddata       map[string]map[string]int64  // previous running/waiting counters per CPU, used to compute deltas
}
// Functions to implement MetricCollector interface
// Init(...), Read(...), Close()
// See: metricCollector.go

// Init initializes the schedstat collector. Called once by the collector
// manager. All tags, meta data tags and baseline counter values that do not
// change over the runtime are set here.
func (m *SchedstatCollector) Init(config json.RawMessage) error {
	var err error = nil
	// Always set the name early in Init() to use it in cclog.Component* functions
	m.name = "SchedstatCollector"
	// This is for later use, also call it early
	m.setup()
	// Run in parallel with other collectors: this collector only reads a proc
	// file and does no timing-sensitive measurement itself.
	m.parallel = true
	// Define meta information sent with each metric
	m.meta = map[string]string{"source": m.name, "group": "SCHEDSTAT"}

	// Read in the JSON configuration
	if len(config) > 0 {
		err = json.Unmarshal(config, &m.config)
		if err != nil {
			cclog.ComponentError(m.name, "Error reading config:", err.Error())
			return err
		}
	}

	// Check input file. FIX: a failed open was previously only logged, after
	// which the nil *os.File was used by defer/scanner and panicked; fail
	// initialization instead.
	file, err := os.Open(string(SCHEDSTATFILE))
	if err != nil {
		cclog.ComponentError(m.name, err.Error())
		return err
	}
	defer file.Close()

	// Pre-generate tags and baseline counters for all CPUs.
	num_cpus := 0
	m.cputags = make(map[string]map[string]string)
	m.olddata = make(map[string]map[string]int64)
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		linefields := strings.Fields(scanner.Text())
		// Only per-CPU lines ("cpu0", "cpu1", ...), not the bare "cpu" line.
		// The len guard prevents an index panic on empty lines.
		if len(linefields) > 0 && strings.HasPrefix(linefields[0], "cpu") && strings.Compare(linefields[0], "cpu") != 0 {
			cpustr := strings.TrimLeft(linefields[0], "cpu")
			cpu, _ := strconv.Atoi(cpustr)
			running, _ := strconv.ParseInt(linefields[7], 10, 64)
			waiting, _ := strconv.ParseInt(linefields[8], 10, 64)
			m.cputags[linefields[0]] = map[string]string{"type": "hwthread", "type-id": fmt.Sprintf("%d", cpu)}
			m.olddata[linefields[0]] = map[string]int64{"running": running, "waiting": waiting}
			num_cpus++
		}
	}

	// Save current timestamp so Read() can derive rates from the first tick.
	m.lastTimestamp = time.Now()

	// Set this flag only if everything is initialized properly.
	m.init = true
	return err
}
// ParseProcLine turns one "cpuN" line of /proc/schedstat into a
// "cpu_load_core" metric: it diffs the running/waiting counters (fields 7/8)
// against the values stored from the previous tick, scales by the elapsed
// time and 1e9, updates the stored counters, and emits the sum.
func (m *SchedstatCollector) ParseProcLine(linefields []string, tags map[string]string, output chan lp.CCMetric, now time.Time, tsdelta time.Duration) {
	key := linefields[0]
	curRunning, _ := strconv.ParseInt(linefields[7], 10, 64)
	curWaiting, _ := strconv.ParseInt(linefields[8], 10, 64)

	// Diff against the previous sample and normalize; the stored map is
	// mutated in place so the next tick sees the current counters.
	prev := m.olddata[key]
	loadRunning := float64(curRunning-prev["running"]) / tsdelta.Seconds() / (math.Pow(1000, 3))
	loadWaiting := float64(curWaiting-prev["waiting"]) / tsdelta.Seconds() / (math.Pow(1000, 3))
	prev["running"] = curRunning
	prev["waiting"] = curWaiting

	if y, err := lp.New("cpu_load_core", tags, m.meta, map[string]interface{}{"value": loadRunning + loadWaiting}, now); err == nil {
		// Send it to output channel
		output <- y
	}
}
// Read collects all metrics belonging to the schedstat collector
// and sends them through the output channel to the collector manager.
func (m *SchedstatCollector) Read(interval time.Duration, output chan lp.CCMetric) {
	if !m.init {
		return
	}

	// Timestamps: tsdelta is the time since the previous tick, used to turn
	// counter differences into rates.
	now := time.Now()
	tsdelta := now.Sub(m.lastTimestamp)

	file, err := os.Open(string(SCHEDSTATFILE))
	if err != nil {
		// FIX: the error was previously only logged and the nil file handle
		// was then used by defer/scanner, which panicked; bail out instead.
		cclog.ComponentError(m.name, err.Error())
		return
	}
	defer file.Close()

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		linefields := strings.Fields(scanner.Text())
		// Process only per-CPU "cpuN" lines, matching the filter used in
		// Init(): a bare "cpu" line has no entry in m.cputags/m.olddata.
		// The len guard prevents an index panic on empty lines.
		if len(linefields) > 0 && strings.HasPrefix(linefields[0], "cpu") && strings.Compare(linefields[0], "cpu") != 0 {
			m.ParseProcLine(linefields, m.cputags[linefields[0]], output, now, tsdelta)
		}
	}

	m.lastTimestamp = now
}
// Close metric collector: close network connection, close files, close libraries, ...
// Called once by the collector manager. This collector holds no persistent
// resources, so only the init flag is cleared.
func (m *SchedstatCollector) Close() {
	// Unset flag
	m.init = false
}
|
package idbenchmark_test
const (
	// idbenchmarkKey is the string key for the idbenchmark package.
	// NOTE(review): its consumers are not visible here — confirm usage
	// against callers.
	idbenchmarkKey = "idbenchmark"
)
|
package pool
import (
"time"
)
// worker is a unit of work managed by the pool: it can be started, identified,
// and asked for a status report.
type worker interface {
	Start()
	ID() string
	Info(CalledToRun bool, err error) *WorkerInfo
}

// workerImpl is the default worker implementation.
type workerImpl struct {
	run       func() error     // the task to execute
	id        string           // unique worker identifier
	queuedAt  time.Time        // when the worker was enqueued
	startedAt time.Time        // when Start() began executing the task
	chDone    chan *WorkerInfo // receives the final report when the task ends
}

// Start runs the task synchronously and then reports the outcome (including
// timing information) on chDone.
func (w *workerImpl) Start() {
	w.startedAt = time.Now()
	err := w.run()
	w.chDone <- w.Info(true, err)
}

// ID returns the worker's identifier.
func (w *workerImpl) ID() string {
	return w.id
}

// Info builds a status report. WaitingTime is queue→start; ExecutionTime is
// measured from startedAt to now, so it is only meaningful once Start() ran.
func (w *workerImpl) Info(calledToRun bool, err error) *WorkerInfo {
	return &WorkerInfo{
		ID:            w.id,
		QueuedAt:      w.queuedAt,
		WaitingTime:   w.startedAt.Sub(w.queuedAt),
		ExecutionTime: time.Since(w.startedAt),
		CalledToRun:   calledToRun,
		Err:           err,
	}
}
|
/*
The MIT License (MIT)
Copyright (c) 2014 isaac dawson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// This library is for automatically assigning HTTP form values, a map[string][]string or
// a map[string]string to a pre-defined structure. It also allows you to validate the data
// prior to allowing assignment to occur. If any field is found to fail validation, an
// error is immediately returned and further processing is stopped. Additionally, you may
// supply your own functions by calling Add. For more information and examples see:
//
// https://github.com/wirepair/validator/
package validator
import (
"fmt"
"reflect"
"strconv"
"sync"
)
type TypeError struct {
Value string // description of value that caused the error
Param string // the parameter name
Type reflect.Type // type of Go value it could not be assigned to
}
// Returned when validator is unable to get the proper type from the supplied map of parameters and values.
func (e *TypeError) Error() string {
return "validate: error parsing parameter " + e.Param + " with value " + e.Value + " into Go value of type " + e.Type.String()
}
// RequiredParamError is returned when validator cannot find a required
// parameter in the input.
type RequiredParamError struct {
	Param string // the parameter that is required
	Field string // the field name
}

// Error implements the error interface.
func (r *RequiredParamError) Error() string {
	return fmt.Sprintf("validate: error the required parameter %s is missing from the input for assignment to %s", r.Param, r.Field)
}
type CantSetError struct {
Param string // the parameter that is being attempted to be set
Type reflect.Type // the type of the field.
}
// Returned when validator is unable to set the type
func (c *CantSetError) Error() string {
return "validate: error attempting to set " + c.Param + " with the Go value of type " + c.Type.String()
}
// field is the cached per-struct-field description extracted from struct tags:
// the input parameter name, whether it is optional, and its validators.
type field struct {
	name       string       // Go field name (for error messages)
	param      string       // input parameter name from the tag; empty means "skip"
	tags       string       // raw tag text
	typ        reflect.Type // field type
	optional   bool         // true if the parameter may be absent/empty
	index      int          // field index within the struct
	validators []Validater  // validators to run before assignment
}

// cache guards the type→fields map used to avoid re-parsing struct tags.
type cache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}

var fieldCache cache // for caching field look ups.
// Assign iterates over input map keys and assigns each value to the passed in
// structure (v), alternatively validating the input.
func Assign(params map[string][]string, v interface{}) error {
	fields, err := getFields(v)
	if err == nil {
		err = assign(params, fields, v)
	}
	return err
}
// AssignSingle iterates over input map keys with single string values and
// assigns each to the passed in structure (v), alternatively validating the
// input.
func AssignSingle(params map[string]string, v interface{}) error {
	fields, err := getFields(v)
	if err == nil {
		err = assignSingle(params, fields, v)
	}
	return err
}
// getFields iterates over each field of the structure and gathers directives
// on how to parse, validate and process the value assigned to that field.
// For performance, field lookups are stored in a synchronized cache so the
// struct tags of a given type are only analyzed once.
func getFields(v interface{}) ([]field, error) {
	var err error
	cacheKey := reflect.TypeOf(v)

	// Fast path: cached lookup under the read lock. Reading from a nil map is
	// safe, so the map is created lazily under the WRITE lock below.
	// FIX: the map was previously initialized here while holding only the
	// read lock — a data race / concurrent map write under contention.
	fieldCache.RLock()
	f := fieldCache.m[cacheKey]
	fieldCache.RUnlock()
	if f != nil {
		return f, nil
	}

	st := reflect.TypeOf(v).Elem()
	fields := make([]field, st.NumField())
	for i := 0; i < st.NumField(); i++ {
		f := &field{}
		f.typ = st.Field(i).Type
		f.name = st.Field(i).Name
		f.index = i
		// sets param, optional flags and validators.
		err = setDirectives(st.Field(i).Tag, f)
		if err != nil {
			return nil, err
		}
		fields[i] = *f
	}

	fieldCache.Lock()
	if fieldCache.m == nil {
		fieldCache.m = make(map[reflect.Type][]field, 1)
	}
	fieldCache.m[cacheKey] = fields
	fieldCache.Unlock()
	return fields, nil
}
// assignSingle walks the cached field list, verifies each target field is
// settable, and delegates the conversion/validation to assignField.
func assignSingle(params map[string]string, fields []field, v interface{}) (err error) {
	st := reflect.ValueOf(v).Elem()
	for i := range fields {
		f := &fields[i]
		// Fields without validate markup are skipped entirely.
		if f.param == "" {
			continue
		}
		target := st.Field(f.index)
		if !target.CanSet() {
			return &CantSetError{Param: f.param, Type: target.Type()}
		}
		if err = assignField(params[f.param], f, target); err != nil {
			return err
		}
	}
	return nil
}
// assign validates fields are settable, parameters aren't empty and that fields
// marked optional are only validated when a non-empty value was supplied.
// Slice-typed fields take all values for the parameter; scalar fields take
// only the first value.
func assign(params map[string][]string, fields []field, v interface{}) (err error) {
	st := reflect.ValueOf(v).Elem()
	for _, f := range fields {
		// skip parameters which don't have validate markup
		if f.param == "" {
			continue
		}
		values := params[f.param]
		size := len(values)
		// Missing required parameter is an error; a missing or single-empty
		// optional parameter is silently skipped.
		if size == 0 && f.optional == false {
			return &RequiredParamError{Param: f.param, Field: f.name}
		} else if (size == 0 || size == 1 && values[0] == "") && f.optional == true {
			continue
		}
		settable := st.Field(f.index)
		if !settable.CanSet() {
			return &CantSetError{Param: f.param, Type: settable.Type()}
		}
		if settable.Kind() == reflect.Slice {
			err = assignSlice(values, size, &f, settable)
		} else {
			// only take the first verify & assign value.
			err = assignField(values[0], &f, settable)
		}
		// we got an error assigning a type or array, error out.
		if err != nil {
			return err
		}
	}
	return nil
}
// assignField rejects an empty value for a required field, silently skips an
// empty optional one, and otherwise hands off to verifiedAssign.
func assignField(value string, f *field, settable reflect.Value) error {
	if value != "" {
		return verifiedAssign(value, f, settable)
	}
	if f.optional {
		return nil
	}
	return &RequiredParamError{Param: f.param, Field: f.name}
}
// assignSlice allocates a fresh slice of the field's element type and runs
// verifiedAssign on each input value, stopping at the first failure.
func assignSlice(values []string, size int, f *field, settable reflect.Value) error {
	settable.Set(reflect.MakeSlice(reflect.SliceOf(settable.Type().Elem()), size, size))
	for i, val := range values {
		if err := verifiedAssign(val, f, settable.Index(i)); err != nil {
			return err
		}
	}
	return nil
}
// verifiedAssign converts the input string to the kind of the target field,
// runs the field's validators against the converted value, and assigns it.
// Numeric conversions are checked for overflow against the concrete type.
// FIX: TypeError is now constructed with named fields — the previous
// positional literals (&TypeError{f.param, s, ...}) swapped Value and Param,
// so error messages reported the parameter name as the value and vice versa.
func verifiedAssign(s string, f *field, settable reflect.Value) error {
	switch settable.Kind() {
	case reflect.String:
		for _, validater := range f.validators {
			if err := validater.Validate(f.param, s); err != nil {
				return err
			}
		}
		settable.SetString(s)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		n, err := strconv.ParseInt(s, 10, 64)
		if err != nil || settable.OverflowInt(n) {
			return &TypeError{Value: s, Param: f.param, Type: settable.Type()}
		}
		for _, validater := range f.validators {
			if err := validater.Validate(f.param, n); err != nil {
				return err
			}
		}
		settable.SetInt(n)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		n, err := strconv.ParseUint(s, 10, 64)
		if err != nil || settable.OverflowUint(n) {
			return &TypeError{Value: s, Param: f.param, Type: settable.Type()}
		}
		for _, validater := range f.validators {
			if err := validater.Validate(f.param, n); err != nil {
				return err
			}
		}
		settable.SetUint(n)
	case reflect.Float32, reflect.Float64:
		n, err := strconv.ParseFloat(s, settable.Type().Bits())
		if err != nil || settable.OverflowFloat(n) {
			return &TypeError{Value: s, Param: f.param, Type: settable.Type()}
		}
		for _, validater := range f.validators {
			if err := validater.Validate(f.param, n); err != nil {
				return err
			}
		}
		settable.SetFloat(n)
	case reflect.Bool:
		n, err := strconv.ParseBool(s)
		if err != nil {
			return &TypeError{Value: s, Param: f.param, Type: settable.Type()}
		}
		for _, validater := range f.validators {
			if err := validater.Validate(f.param, n); err != nil {
				return err
			}
		}
		settable.SetBool(n)
	default:
		return fmt.Errorf("validate: error %v is not a supported type for parameter %s.", settable.Type(), f.param)
	}
	return nil
}
|
package vending_machine
import (
"spark_networks_assessment/pkg/repositories/products"
"testing"
)
// mockProductRepo is a hand-rolled test double for products.Repository:
// Check and Purchase return the canned values; Add and List are unused here.
type mockProductRepo struct {
	check    int  // returned by Check (available stock)
	purchase bool // returned by Purchase (whether the purchase succeeded)
}

// Add is not needed by these tests.
func (m *mockProductRepo) Add(product *products.Product) error {
	panic("implement me")
}

// List is not needed by these tests.
func (m *mockProductRepo) List() map[string][]*products.Product {
	panic("implement me")
}

// Check returns the configured stock count regardless of product.
func (m *mockProductRepo) Check(product *products.Product) int {
	return m.check
}

// Purchase returns the configured result regardless of product.
func (m *mockProductRepo) Purchase(product *products.Product) bool {
	return m.purchase
}
// Test_service_Charge is a table-driven test verifying that Charge accepts the
// inserted coins.
func Test_service_Charge(t *testing.T) {
	type fields struct {
		productRepo products.Repository
	}
	type args struct {
		coins []int
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		want   bool
	}{
		{
			name: "charge 10 coins",
			fields: fields{
				productRepo: new(mockProductRepo),
			},
			args: args{
				coins: []int{10},
			},
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := New(tt.fields.productRepo)
			if got := s.Charge(tt.args.coins); got != tt.want {
				t.Errorf("Charge() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_service_Balance is a table-driven test verifying that Balance sums the
// user's inserted coins.
func Test_service_Balance(t *testing.T) {
	type fields struct {
		UserCoins []int
	}
	tests := []struct {
		name   string
		fields fields
		want   int
	}{
		{
			name: "balance 20 coins",
			fields: fields{
				UserCoins: []int{10, 10},
			},
			want: 20,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Construct the service directly to seed the coin state.
			s := &service{
				userCoins: tt.fields.UserCoins,
			}
			if got := s.Balance(); got != tt.want {
				t.Errorf("Balance() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_service_Select is a table-driven test verifying product selection:
// coins are charged first, then Select is expected to succeed when the mock
// repository reports stock and a successful purchase.
func Test_service_Select(t *testing.T) {
	type fields struct {
		coins       []int
		productRepo products.Repository
	}
	type args struct {
		product *products.Product
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		want   bool
	}{
		{
			name: "select SparkPasta 1 available charging 35 coins",
			fields: fields{
				coins: []int{10, 25},
				productRepo: &mockProductRepo{
					check:    1,
					purchase: true,
				},
			},
			args: args{
				product: products.SparkPasta,
			},
			want: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			s := New(tt.fields.productRepo)
			// Seed the balance before selecting.
			if len(tt.fields.coins) > 0 {
				s.Charge(tt.fields.coins)
			}
			if got := s.Select(tt.args.product); got != tt.want {
				t.Errorf("Select() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package main
import "fmt"
// main demonstrates Go pointers: taking an address, reading through a
// pointer, and writing through a pointer.
func main() {
	i, j := 42, 2701

	p := &i         // point p at i
	fmt.Println(*p) // read i through the pointer
	*p = 21         // write i through the pointer
	fmt.Println(i)  // observe i's new value

	p = &j       // repoint p at j
	*p = *p / 37 // divide j through the pointer
	fmt.Println(j)
}
package cal
import "testing"
// TestGetSum is a test case for GetSum: it checks that GetSum(10, 20)
// returns 30.
func TestGetSum(t *testing.T) {
	res := GetSum(10, 20)
	if res != 30 {
		t.Fatalf("TestGetSum(10),实际值%v,期望值%v", res, 30)
	}
	t.Logf("TestGetSum(10)没问题,测试通过")
}
|
package report
import (
"database/sql"
"encoding/json"
"errors"
"fmt"
"github.com/gin-gonic/gin"
_ "github.com/go-sql-driver/mysql"
"github.com/satori/go.uuid"
"github.com/wangfmD/rvs/handles/version"
"github.com/wangfmD/rvs/setting"
"github.com/wangfmD/rvs/sshv"
"log"
"net/http"
"time"
)
// case_info_t mirrors a row of the case_info table and the JSON payload
// exchanged with clients. Note the mixed-case JSON tags match the existing
// wire format and must not be changed.
type case_info_t struct {
	Id         string    `json:"ID"`
	Name       string    `json:"CASE_NAME"`
	ReportPath string    `json:"REPORT_PATH"`
	Type       int       `json:"TYPE"`
	Status     int       `json:"STATUS"`
	StartTime  time.Time `json:"START_TIME"`
	StopTime   time.Time `json:"STOP_TIME"`
	Duration   string    `json:"DURATION"`
	Mid        string    `json:"maddr"` // media server address
	Pid        string    `json:"paddr"` // platform server address
}
// GetApi returns all case_info rows (as produced by query()) serialized to
// JSON under the "caseInfots" key.
func GetApi(c *gin.Context) {
	log.Println(c.Request.URL)
	data := make(map[string]interface{})
	data["caseInfots"] = query()
	d, err := json.Marshal(data)
	CheckErr(err)
	c.String(http.StatusOK, string(d))
}
// UpdateInfo marks a test case as finished: for a JSON request it updates the
// case's STATUS, REPORT_PATH and STOP_TIME in case_info, then answers with a
// JSON status object echoing the case id.
func UpdateInfo(c *gin.Context) {
	var id string
	var status int
	var reportPath string
	var ra int64
	contentType := c.Request.Header.Get("Content-Type")
	if contentType == "application/json" {
		var caseInfo case_info_t
		err := c.BindJSON(&caseInfo)
		// FIX: the bind error was previously dropped.
		CheckErr(err)
		id = caseInfo.Id
		status = caseInfo.Status
		reportPath = caseInfo.ReportPath
		db, err := sql.Open("mysql", "root:123456@tcp(localhost:3306)/casedb?charset=utf8")
		CheckErr(err)
		// Renamed from "sql" — the local previously shadowed the database/sql
		// package.
		updateSQL := "update case_info set STOP_TIME=now(), STATUS=?, REPORT_PATH=? where ID=?;"
		stmt, err := db.Prepare(updateSQL)
		// FIX: check the error before deferring Close; deferring on a nil
		// statement would panic when Prepare fails.
		CheckErr(err)
		defer stmt.Close()
		rs, err := stmt.Exec(status, reportPath, id)
		CheckErr(err)
		ra, err = rs.RowsAffected()
		CheckErr(err)
		db.Close()
	}
	// FIX: id is a string, so %s (the old %d printed a vet-flagged
	// "%!d(string=...)" for it).
	msg := fmt.Sprintf("Update person %s successful %d", id, ra)
	c.JSON(http.StatusOK, gin.H{
		"status": "suc",
		"result": map[string]string{
			"id":  id,
			"msg": msg,
		},
	})
}
// getServerPl matches addr against the platform servers configured in the cfg
// file and returns the entry (user name / password) for that IP.
func getServerPl(addr string) (*setting.ServerOs, error) {
	for _, server := range setting.GetPlatformAddrs() {
		if server.Addr == addr {
			return server, nil
		}
	}
	return nil, errors.New("error address")
}
// getServerMe matches addr against the media servers configured in the cfg
// file and returns the entry (user name / password) for that IP.
func getServerMe(addr string) (*setting.ServerOs, error) {
	for _, server := range setting.GetMediaAddrs() {
		if server.Addr == addr {
			return server, nil
		}
	}
	return nil, errors.New("error address")
}
// sSHSqlAddPlatformvers queries the platform server's version information over
// SSH while a case is executed and stores it in the database. Errors are
// logged but do not abort case execution.
func sSHSqlAddPlatformvers(id, addr string) {
	server, err := getServerPl(addr)
	if err != nil {
		log.Println(err)
	} else {
		versMap, err := sshv.GetVersionMaps(server.Name, server.Pwd, addr)
		if err != nil {
			log.Println(err)
		} else {
			version.SqlAddVersionByExecCase(id, addr, versMap)
		}
	}
	log.Println("exec case: insert platform version to db")
}
// sSHSqlAddMediavers queries the media server's version information over SSH
// while a case is executed and stores it in the database. Errors are logged
// but do not abort case execution.
func sSHSqlAddMediavers(id, addr string) {
	server, err := getServerMe(addr)
	if err != nil {
		log.Println(err)
	} else {
		versMap, err := sshv.GetVersionMaps(server.Name, server.Pwd, addr)
		if err != nil {
			log.Println(err)
		} else {
			version.SqlAddMediaVersionByExecCase(id, addr, versMap)
		}
	}
	log.Println("exec case: insert media version to db")
}
// AddCaseInfo inserts an execution record when a case starts: it generates a
// case id and a report id, kicks off background SSH version collection for
// the platform/media servers (when their addresses are supplied), inserts the
// row into case_info, and answers with the new case id.
func AddCaseInfo(c *gin.Context) {
	var id, reportId, mid, pid string
	var caseName string
	// var reportPath string
	var caseType int
	uuid1 := uuid.NewV4()
	id = uuid1.String()
	contentType := c.Request.Header.Get("Content-Type")
	if contentType == "application/json" {
		var caseinfo case_info_t
		err := c.BindJSON(&caseinfo)
		// err := c.MustBindWith(&ci, binding.JSON)
		// err := binding.JSON.Bind(c.Request, &ci)
		// id = caseinfo.Id
		// reportPath = caseinfo.ReportPath
		caseType = caseinfo.Type
		caseName = caseinfo.Name
		log.Println("caseType:", caseType)
		log.Println("casename:", caseName)
		pid = caseinfo.Pid
		mid = caseinfo.Mid
		log.Println(id)
		log.Println(caseName)
		// log.Println(reportPath)
		log.Println(caseType)
		CheckErr(err)
	}
	uuid2 := uuid.NewV4()
	reportId = uuid2.String()
	log.Println("pid: ", pid)
	// Version collection runs in the background; failures only log.
	if pid != "" {
		go sSHSqlAddPlatformvers(reportId, pid)
	}
	log.Println("mid: ", mid)
	if mid != "" {
		go sSHSqlAddMediavers(reportId, mid)
	}
	db, err := sql.Open("mysql", "root:123456@tcp(localhost:3306)/casedb?charset=utf8")
	CheckErr(err)
	// REPORT_PATH is hard-coded to '/opt'; the report id is stored in
	// EXEC_VERSION.
	sql := "INSERT INTO `case_info` (`ID`, `CASE_NAME`, `REPORT_PATH`,`EXEC_VERSION`, `TYPE`, `STATUS`, `START_TIME`, `STOP_TIME`) VALUES(?,?,'/opt',?,?,0,now(),now())"
	rs, err := db.Exec(sql, id, caseName, reportId, caseType)
	CheckErr(err)
	// NOTE(review): LastInsertId is not meaningful for a string primary key —
	// confirm whether this log line is still wanted.
	i, err := rs.LastInsertId()
	CheckErr(err)
	log.Println(i)
	err = db.Close()
	CheckErr(err)
	c.JSON(http.StatusOK, gin.H{
		"status": "suc",
		"msg":    map[string]string{"id": id},
	})
}
// GetCaseInfo returns all case_info rows, newest first, as structured JSON.
func GetCaseInfo(c *gin.Context) {
	db, err := sql.Open("mysql", "root:123456@tcp(localhost:3306)/casedb?charset=utf8")
	CheckErr(err)
	rows, err := db.Query("select * from case_info order by `START_TIME` DESC")
	CheckErr(err)
	caseInfots := make([]case_info_t, 0)
	for rows.Next() {
		var caseInfot case_info_t
		// NOTE(review): `select *` is scanned into 8 destinations and the
		// Scan error is discarded — if the table's column count or order
		// differs (AddCaseInfo inserts EXEC_VERSION between REPORT_PATH and
		// TYPE), rows silently come back zero-valued. Confirm against the
		// schema.
		rows.Scan(&caseInfot.Id, &caseInfot.Name, &caseInfot.ReportPath, &caseInfot.Type, &caseInfot.Status, &caseInfot.StartTime, &caseInfot.StopTime, &caseInfot.Duration)
		caseInfots = append(caseInfots, caseInfot)
	}
	db.Close()
	log.Println(caseInfots)
	c.JSON(http.StatusOK, gin.H{"caseInfots": caseInfots})
}
// query reads every case_info row, newest first, and returns each row as a
// column-name → string-value map, so the result is schema-agnostic.
func query() []map[string]string {
	db, err := sql.Open("mysql", "root:123456@tcp(localhost:3306)/casedb?charset=utf8")
	CheckErr(err)
	rows, err := db.Query("select * from case_info order by `START_TIME` DESC")
	CheckErr(err)
	columns, _ := rows.Columns()
	// Scan into interface{} cells so any column count/type works.
	scanArgs := make([]interface{}, len(columns))
	values := make([]interface{}, len(columns))
	for i := range values {
		scanArgs[i] = &values[i]
	}
	resarr := []map[string]string{}
	for rows.Next() {
		err = rows.Scan(scanArgs...)
		record := make(map[string]string)
		for i, col := range values {
			// NULL columns are omitted from the record.
			if col != nil {
				record[columns[i]] = string(col.([]byte))
			}
		}
		resarr = append(resarr, record)
	}
	db.Close()
	return resarr
}
// CheckErr logs err when it is non-nil; it never aborts the caller.
func CheckErr(err error) {
	if err == nil {
		return
	}
	log.Println(err)
}
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package broker_test
import (
"github.com/ghodss/yaml"
"github.com/golang/mock/gomock"
osb "github.com/pmorie/go-open-service-broker-client/v2"
osbbroker "github.com/pmorie/osb-broker-lib/pkg/broker"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/kubernetes-sigs/minibroker/pkg/broker"
"github.com/kubernetes-sigs/minibroker/pkg/broker/mocks"
"github.com/kubernetes-sigs/minibroker/pkg/minibroker"
)
//go:generate mockgen -destination=./mocks/mock_broker.go -package=mocks github.com/kubernetes-sigs/minibroker/pkg/broker MinibrokerClient
const (
overrideParamsYaml = `
mariadb:
overrideParams:
mariadb: value
mongodb:
overrideParams:
mongodb: value
mysql:
overrideParams:
mysql: value
postgresql:
overrideParams:
postgresql: value
rabbitmq:
overrideParams:
rabbitmq: value
redis:
overrideParams:
redis: value
`
)
// Exercises broker.Broker.Provision. A mocked MinibrokerClient records the
// expected Provision call; gomock's controller verifies it in AfterEach.
var _ = Describe("Broker", func() {
	var (
		ctrl                 *gomock.Controller
		b                    *broker.Broker
		mbclient             *mocks.MockMinibrokerClient
		provisioningSettings = &broker.ProvisioningSettings{}
		namespace            = "namespace"
	)
	BeforeEach(func() {
		ctrl = gomock.NewController(GinkgoT())
		mbclient = mocks.NewMockMinibrokerClient(ctrl)
	})
	// JustBeforeEach so per-Context BeforeEach blocks can swap
	// provisioningSettings before the broker is constructed.
	JustBeforeEach(func() {
		b = broker.NewBroker(mbclient, namespace, provisioningSettings)
	})
	AfterEach(func() {
		ctrl.Finish()
	})
	Describe("Provision", func() {
		var (
			provisionParams = minibroker.NewProvisionParams(map[string]interface{}{
				"key": "value",
			})
			provisionRequest = &osb.ProvisionRequest{
				ServiceID:  "redis",
				Parameters: provisionParams.Object,
			}
			requestContext = &osbbroker.RequestContext{}
		)
		Context("without default chart values", func() {
			It("passes on unaltered provision params", func() {
				mbclient.EXPECT().
					Provision(gomock.Any(), gomock.Eq("redis"), gomock.Any(), gomock.Eq(namespace), gomock.Any(), gomock.Eq(provisionParams))
				b.Provision(provisionRequest, requestContext)
			})
		})
		Context("with default chart values", func() {
			BeforeEach(func() {
				provisioningSettings = &broker.ProvisioningSettings{}
				err := provisioningSettings.LoadYaml([]byte(overrideParamsYaml))
				Expect(err).ToNot(HaveOccurred())
			})
			It("passes on default chart values", func() {
				// Every supported service should forward its configured
				// override params instead of the request params.
				services := []string{"mariadb", "mongodb", "mysql", "postgresql", "rabbitmq", "redis"}
				for _, service := range services {
					provisionRequest.ServiceID = service
					provisioningSettings, found := provisioningSettings.ForService(service)
					Expect(found).To(BeTrue())
					params := minibroker.NewProvisionParams(provisioningSettings.OverrideParams)
					mbclient.EXPECT().
						Provision(gomock.Any(), gomock.Eq(service), gomock.Any(), gomock.Eq(namespace), gomock.Any(), gomock.Eq(params))
					b.Provision(provisionRequest, requestContext)
				}
			})
		})
	})
})
// Exercises ProvisioningSettings.LoadYaml: known service keys load their
// override params; unknown keys are rejected.
var _ = Describe("OverrideChartParams", func() {
	Describe("LoadYaml", func() {
		var (
			ocp = &broker.ProvisioningSettings{}
		)
		It("Loads valid data", func() {
			yamlStr, _ := yaml.Marshal(map[string]interface{}{
				"rabbitmq": map[string]interface{}{
					"overrideParams": map[string]interface{}{
						"rabbitmqdata": "thevalue",
					},
				},
			})
			err := ocp.LoadYaml(yamlStr)
			Expect(err).ToNot(HaveOccurred())
			p, _ := ocp.ForService("rabbitmq")
			Expect(p.OverrideParams["rabbitmqdata"]).To(Equal("thevalue"))
		})
		It("returns an error on unknown fields", func() {
			yamlStr, _ := yaml.Marshal(map[string]interface{}{
				"unknownservice": map[string]interface{}{
					"overrideParams": map[string]interface{}{
						"key": "value",
					},
				},
			})
			err := ocp.LoadYaml(yamlStr)
			Expect(err).To(HaveOccurred())
		})
	})
})
|
package fs
import (
"fmt"
vmhttp "github.com/vlorc/lua-vm/net/http"
"io"
"net/http"
"os"
"path/filepath"
"strconv"
"time"
)
// HttpFile is a read-only file backed by an HTTP response body.
type HttpFile struct {
	fd     io.ReadCloser // the response body being read
	url    string        // the path used to open the file
	length string        // raw Content-Length header value
	modify string        // raw Last-Modified header value
}

// HttpFileInfo is the os.FileInfo implementation returned by HttpFile.Stat.
type HttpFileInfo struct {
	name   string
	length int64
	modify time.Time
}

// HttpFileFactory creates HttpFiles rooted at a base URL via an HTTP driver.
type HttpFileFactory struct {
	root   string
	driver *vmhttp.HTTPFactory
}
// Open fetches root+file via HTTP GET and returns it as a read-only
// FileDriver. Optional open flags (args) are ignored.
func (f *HttpFileFactory) Open(file string, args ...int) (FileDriver, error) {
	resp, err := f.driver.Get(f.root + file)
	if nil != err {
		return nil, err
	}
	if 200 != resp.StatusCode {
		// FIX: close the body on the error path; it was previously leaked,
		// pinning the underlying connection.
		resp.Body.Close()
		return nil, fmt.Errorf("Can't open http file code: %d", resp.StatusCode)
	}
	return &HttpFile{
		fd:     resp.Body,
		url:    file,
		length: resp.Header.Get("Content-Length"),
		modify: resp.Header.Get("Last-Modified"),
	}, nil
}
// Remove deletes the remote file via HTTP DELETE; any non-200 status is
// reported as an error.
func (f *HttpFileFactory) Remove(file string) error {
	resp, err := f.driver.Delete(f.root + file)
	if nil == err {
		defer resp.Body.Close()
		if 200 != resp.StatusCode {
			err = fmt.Errorf("Can't open http file code: %d", resp.StatusCode)
		}
	}
	return err
}
// Rename is not supported over HTTP.
func (*HttpFileFactory) Rename(src, dst string) error {
	return ErrMethodNotSupport
}
// Exist reports whether the remote file exists by issuing an HTTP HEAD
// request and requiring a 200 response.
func (f *HttpFileFactory) Exist(file string) bool {
	resp, err := f.driver.Head(f.root + file)
	if nil == err {
		defer resp.Body.Close()
		if 200 != resp.StatusCode {
			err = fmt.Errorf("Can't exist http file code: %d", resp.StatusCode)
		}
	}
	// FIX: this previously returned true unconditionally, ignoring both the
	// transport error and the status check above.
	return nil == err
}
// Mkdir is not supported over HTTP.
func (*HttpFileFactory) Mkdir(string, int) error {
	return ErrMethodNotSupport
}
// Walk is not supported over HTTP (no directory listing).
func (*HttpFileFactory) Walk(root string, callback filepath.WalkFunc) error {
	return ErrMethodNotSupport
}
// Write is not supported: HttpFile is read-only.
func (f *HttpFile) Write(b []byte) (int, error) {
	return 0, ErrMethodNotSupport
}

// Read streams bytes from the underlying HTTP response body.
func (f *HttpFile) Read(b []byte) (int, error) {
	return f.fd.Read(b)
}

// Close closes the underlying HTTP response body.
func (f *HttpFile) Close() error {
	return f.fd.Close()
}

// Seek is not supported: the response body is a forward-only stream.
func (f *HttpFile) Seek(offset int64, whence int) (int64, error) {
	return 0, ErrMethodNotSupport
}
// Stat builds an os.FileInfo from the HTTP headers captured at Open time
// (Content-Length and Last-Modified). Missing or malformed headers yield a
// zero size / zero time rather than an error.
func (f *HttpFile) Stat() (os.FileInfo, error) {
	_, filename := filepath.Split(f.url)
	// FIX: parse directly into int64 — strconv.Atoi truncated sizes beyond
	// 2 GiB on platforms with a 32-bit int.
	length, _ := strconv.ParseInt(f.length, 10, 64)
	last, _ := time.Parse(http.TimeFormat, f.modify)
	return &HttpFileInfo{
		name:   filename,
		length: length,
		modify: last,
	}, nil
}
// Name returns the base name derived from the file's URL path.
func (f *HttpFileInfo) Name() string {
	return f.name
}

// Size returns the size parsed from the Content-Length header.
func (f *HttpFileInfo) Size() int64 {
	return f.length
}

// Mode always reports 0: HTTP exposes no permission bits.
func (f *HttpFileInfo) Mode() os.FileMode {
	return 0
}

// ModTime returns the time parsed from the Last-Modified header.
func (f *HttpFileInfo) ModTime() time.Time {
	return f.modify
}

// IsDir always reports false: only regular files are served.
func (f *HttpFileInfo) IsDir() bool {
	return false
}

// Sys returns nil; there is no underlying system representation.
func (f *HttpFileInfo) Sys() interface{} {
	return nil
}
|
package acceptance_test
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
boshlog "github.com/cloudfoundry/bosh-agent/logger"
boshsys "github.com/cloudfoundry/bosh-agent/system"
bmtestutils "github.com/cloudfoundry/bosh-micro-cli/testutils"
)
// End-to-end acceptance test: builds the bosh-micro binary, copies it plus a
// stemcell and CPI release to a remote VM over SSH/SCP, then runs a deploy
// and asserts on the command's stdout.
var _ = Describe("bosh-micro", func() {
	var (
		cmdRunner acceptanceCmdRunner
		testEnv   acceptanceEnvironment
	)
	BeforeSuite(func() {
		localEnv, err := parseEnv()
		Expect(err).NotTo(HaveOccurred())
		logger := boshlog.NewLogger(boshlog.LevelDebug)
		fileSystem := boshsys.NewOsFileSystem(logger)
		testEnv = newRemoteTestEnvironment(
			localEnv.vmUsername,
			localEnv.vmIP,
			localEnv.privateKeyPath,
			fileSystem,
			logger,
		)
		cmdRunner = NewSSHCmdRunner(
			localEnv.vmUsername,
			localEnv.vmIP,
			localEnv.privateKeyPath,
			logger,
		)
		// Cross-compile for the target VM and ship the binary plus artifacts.
		err = bmtestutils.BuildExecutableForArch("linux-amd64")
		Expect(err).NotTo(HaveOccurred())
		boshMicroPath := "./../out/bosh-micro"
		Expect(fileSystem.FileExists(boshMicroPath)).To(BeTrue())
		err = testEnv.Copy("bosh-micro", boshMicroPath)
		Expect(err).NotTo(HaveOccurred())
		err = testEnv.DownloadOrCopy("stemcell", localEnv.stemcellURL)
		Expect(err).NotTo(HaveOccurred())
		err = testEnv.DownloadOrCopy("cpiRelease", localEnv.cpiReleaseURL)
		Expect(err).NotTo(HaveOccurred())
	})
	It("is able to deploy a CPI release with a stemcell", func() {
		manifestPath := "./manifest.yml"
		manifestContents, err := ioutil.ReadFile(manifestPath)
		Expect(err).ToNot(HaveOccurred())
		testEnv.WriteContent("manifest", manifestContents)
		_, _, exitCode, err := cmdRunner.RunCommand(testEnv.Path("bosh-micro"), "deployment", testEnv.Path("manifest"))
		Expect(err).ToNot(HaveOccurred())
		Expect(exitCode).To(Equal(0))
		stdout, _, exitCode, err := cmdRunner.RunCommand(testEnv.Path("bosh-micro"), "deploy", testEnv.Path("cpiRelease"), testEnv.Path("stemcell"))
		Expect(err).ToNot(HaveOccurred())
		Expect(exitCode).To(Equal(0))
		// Spot-check the deploy stages in the command output.
		Expect(stdout).To(ContainSubstring("uploading stemcell"))
		Expect(stdout).To(ContainSubstring("Creating VM from"))
		Expect(stdout).To(ContainSubstring("Waiting for the agent"))
		Expect(stdout).To(ContainSubstring("Applying micro BOSH spec"))
		Expect(stdout).To(ContainSubstring("Starting agent services"))
		Expect(stdout).To(ContainSubstring("Waiting for the director"))
	})
})
// acceptanceEnvironment abstracts the remote machine the acceptance
// tests run against: resolving remote paths and getting files onto it.
type acceptanceEnvironment interface {
	Path(string) string
	Copy(string, string) error
	WriteContent(string, []byte) error
	RemoteDownload(string, string) error
	DownloadOrCopy(string, string) error
}
// remoteTestEnvironment implements acceptanceEnvironment over ssh/scp
// against a single remote VM.
type remoteTestEnvironment struct {
	vmUsername     string
	vmIP           string
	privateKeyPath string
	cmdRunner      boshsys.CmdRunner // runs local scp/ssh processes
	fileSystem     boshsys.FileSystem
}
// newRemoteTestEnvironment builds a remoteTestEnvironment that talks to
// the VM at vmIP as vmUsername, authenticating with privateKeyPath.
func newRemoteTestEnvironment(
	vmUsername string,
	vmIP string,
	privateKeyPath string,
	fileSystem boshsys.FileSystem,
	logger boshlog.Logger,
) remoteTestEnvironment {
	env := remoteTestEnvironment{
		vmUsername:     vmUsername,
		vmIP:           vmIP,
		privateKeyPath: privateKeyPath,
		fileSystem:     fileSystem,
	}
	env.cmdRunner = boshsys.NewExecCmdRunner(logger)
	return env
}
// Path returns the absolute path of name inside the remote user's
// home directory.
func (e remoteTestEnvironment) Path(name string) string {
	home := path.Join("/", "home", e.vmUsername)
	return path.Join(home, name)
}
// Copy uploads the local file at srcPath to the remote environment,
// storing it under destName in the remote home directory.
func (e remoteTestEnvironment) Copy(destName, srcPath string) error {
	if len(srcPath) == 0 {
		return fmt.Errorf("Cannot use an empty file for `%s'", destName)
	}
	target := fmt.Sprintf("%s@%s:%s", e.vmUsername, e.vmIP, e.Path(destName))
	_, _, exitCode, err := e.cmdRunner.RunCommand(
		"scp",
		"-o", "StrictHostKeyChecking=no",
		"-i", e.privateKeyPath,
		srcPath,
		target,
	)
	if exitCode != 0 {
		return fmt.Errorf("scp of `%s' to `%s' failed", srcPath, destName)
	}
	return err
}
// DownloadOrCopy fetches src onto the remote machine: HTTP(S) URLs are
// downloaded remotely, anything else is treated as a local path and
// copied over scp.
func (e remoteTestEnvironment) DownloadOrCopy(destName, src string) error {
	if !strings.HasPrefix(src, "http") {
		return e.Copy(destName, src)
	}
	return e.RemoteDownload(destName, src)
}
// RemoteDownload makes the remote machine fetch srcURL itself (via
// wget over ssh), saving it under destName in the remote home
// directory.
func (e remoteTestEnvironment) RemoteDownload(destName, srcURL string) error {
	if len(srcURL) == 0 {
		return fmt.Errorf("Cannot use an empty file for `%s'", destName)
	}
	remoteHost := fmt.Sprintf("%s@%s", e.vmUsername, e.vmIP)
	remoteCmd := fmt.Sprintf("wget -q -O %s %s", destName, srcURL)
	_, _, exitCode, err := e.cmdRunner.RunCommand(
		"ssh",
		"-o", "StrictHostKeyChecking=no",
		"-i", e.privateKeyPath,
		remoteHost,
		remoteCmd,
	)
	if exitCode != 0 {
		return fmt.Errorf("download of `%s' to `%s' failed", srcURL, destName)
	}
	return err
}
// WriteContent writes contents to a local temp file and then copies
// that file to the remote environment under destName.
func (e remoteTestEnvironment) WriteContent(destName string, contents []byte) error {
	tmpFile, err := e.fileSystem.TempFile("bosh-micro-cli-acceptance")
	if err != nil {
		return err
	}
	// Remove the local temp file whether the copy succeeds or not.
	defer e.fileSystem.RemoveAll(tmpFile.Name())

	_, err = tmpFile.Write(contents)
	if err != nil {
		// Close the handle before returning so the file descriptor is
		// not leaked on the error path (the original leaked it here).
		tmpFile.Close()
		return err
	}
	err = tmpFile.Close()
	if err != nil {
		return err
	}
	return e.Copy(destName, tmpFile.Name())
}
// acceptanceCmdRunner runs an argv-style command and returns stdout,
// stderr, the exit code, and any execution error.
type acceptanceCmdRunner interface {
	RunCommand(...string) (string, string, int, error)
}
// sshCmdRunner implements acceptanceCmdRunner by executing commands on
// a remote VM over ssh.
type sshCmdRunner struct {
	vmUsername     string
	vmIP           string
	privateKeyPath string
	runner         boshsys.CmdRunner // runs the local ssh process
}
// NewSSHCmdRunner returns a runner that executes commands on the
// remote VM as vmUsername@vmIP using the given private key.
func NewSSHCmdRunner(
	vmUsername string,
	vmIP string,
	privateKeyPath string,
	logger boshlog.Logger,
) sshCmdRunner {
	r := sshCmdRunner{
		vmUsername:     vmUsername,
		vmIP:           vmIP,
		privateKeyPath: privateKeyPath,
	}
	r.runner = boshsys.NewExecCmdRunner(logger)
	return r
}
// RunCommand runs the given command remotely via ssh, prefixing it
// with a TMPDIR pointing at the remote user's home directory. It
// returns stdout, stderr, the exit code, and any execution error.
func (r sshCmdRunner) RunCommand(args ...string) (string, string, int, error) {
	parts := make([]string, 0, len(args)+1)
	parts = append(parts, fmt.Sprintf("TMPDIR=/home/%s", r.vmUsername))
	parts = append(parts, args...)
	return r.runner.RunCommand(
		"ssh",
		"-o", "StrictHostKeyChecking=no",
		"-i", r.privateKeyPath,
		fmt.Sprintf("%s@%s", r.vmUsername, r.vmIP),
		strings.Join(parts, " "),
	)
}
// localEnvironment holds the configuration read from the BOSH_MICRO_*
// environment variables that the acceptance suite needs.
type localEnvironment struct {
	vmUsername     string // SSH user on the target VM
	vmIP           string // IP address of the target VM
	privateKeyPath string // path to the SSH private key
	stemcellURL    string // URL or local path of the stemcell
	cpiReleaseURL  string // URL or local path of the CPI release
}

// parseEnv reads the required BOSH_MICRO_* environment variables.
// Each missing variable is reported on stdout (preserving the old
// behavior), and a single error naming all missing variables is
// returned instead of the previous empty-message errors.
func parseEnv() (localEnvironment, error) {
	env := localEnvironment{
		vmUsername:     os.Getenv("BOSH_MICRO_VM_USERNAME"),
		vmIP:           os.Getenv("BOSH_MICRO_VM_IP"),
		privateKeyPath: os.Getenv("BOSH_MICRO_PRIVATE_KEY"),
		stemcellURL:    os.Getenv("BOSH_MICRO_STEMCELL"),
		cpiReleaseURL:  os.Getenv("BOSH_MICRO_CPI_RELEASE"),
	}

	var missing []string
	for _, v := range []struct {
		name  string
		value string
	}{
		{"BOSH_MICRO_VM_USERNAME", env.vmUsername},
		{"BOSH_MICRO_VM_IP", env.vmIP},
		{"BOSH_MICRO_PRIVATE_KEY", env.privateKeyPath},
		{"BOSH_MICRO_STEMCELL", env.stemcellURL},
		{"BOSH_MICRO_CPI_RELEASE", env.cpiReleaseURL},
	} {
		if v.value == "" {
			fmt.Println(v.name + " must be set")
			missing = append(missing, v.name)
		}
	}
	if len(missing) > 0 {
		return env, errors.New("missing environment variables: " + strings.Join(missing, ", "))
	}
	return env, nil
}
|
package priorityqueue
import (
"LimitGo/limit/collection"
"bytes"
"encoding/json"
"fmt"
"reflect"
)
// initCap is the initial capacity of a queue's backing slice.
const initCap = 8

// PriorityQueue is a binary-heap-backed queue ordered by the
// user-supplied precede function: precede(a, b) == true means a has
// higher priority than b.
type PriorityQueue struct {
	elements []*collection.Object
	precede  func(p1 *collection.Object, p2 *collection.Object) bool
}
// Iterator represents the specific iterator of the PriorityQueue.
// cursor is the index of the next element to return; lastRet is the
// index of the last element returned, or -1 if none (or it was
// removed).
type Iterator struct {
	list    *PriorityQueue
	cursor  int
	lastRet int
}
// New creates an empty PriorityQueue ordered by the given precede
// function.
func New(precede func(p1 *collection.Object, p2 *collection.Object) bool) *PriorityQueue {
	q := &PriorityQueue{}
	q.elements = make([]*collection.Object, 0, initCap)
	q.precede = precede
	return q
}
// GetFunc returns the precede function used to order this queue.
func (q *PriorityQueue) GetFunc() func(p1 *collection.Object, p2 *collection.Object) bool {
	return q.precede
}
// Size returns the number of elements in this collection.
func (q *PriorityQueue) Size() int {
	return len(q.elements)
}
// Empty reports whether this collection contains no elements.
func (q *PriorityQueue) Empty() bool {
	return len(q.elements) == 0
}
// GetIterator returns an iterator over the elements in this collection.
// Note: iteration is in backing-array (heap) order, not priority order.
func (q *PriorityQueue) GetIterator() collection.Itr {
	return &Iterator{q, 0, -1}
}
// String returns a string representation of this collection: elements
// in heap order, JSON-encoded, comma-separated and wrapped in braces.
// Elements that fail to marshal are rendered as the literal "nil".
func (q *PriorityQueue) String() string {
	var buf bytes.Buffer
	buf.WriteByte('{')
	it := q.GetIterator()
	for it.HashNext() {
		p := it.Next()
		if buf.Len() > len("{") {
			buf.WriteByte(',')
		}
		// Default to "nil" when the element cannot be marshalled;
		// the original reassigned the marshal error to a dead store.
		s := "nil"
		if b, err := json.Marshal(*p); err == nil {
			s = string(b)
		}
		fmt.Fprint(&buf, s)
	}
	buf.WriteByte('}')
	return buf.String()
}
// Clear removes all of the elements from this collection, releasing
// the previous backing array to the garbage collector.
func (q *PriorityQueue) Clear() bool {
	q.elements = make([]*collection.Object, 0, initCap)
	return true
}
// Contains reports whether some element of the queue is deeply equal
// to the value pointed to by p. A nil p (or a p pointing at a nil
// value) is never contained.
func (q *PriorityQueue) Contains(p *collection.Object) bool {
	if q.checkNil(p) {
		return false
	}
	for i := range q.elements {
		if reflect.DeepEqual(*q.elements[i], *p) {
			return true
		}
	}
	return false
}
// First returns the head of this queue without removing it, or nil if
// the queue is empty.
func (q *PriorityQueue) First() *collection.Object {
	if q.Size() == 0 {
		return nil
	}
	return q.elements[0]
}
// Poll removes and returns the head (highest-priority element) of
// this queue, or nil if the queue is empty.
func (q *PriorityQueue) Poll() *collection.Object {
	if q.Size() == 0 {
		return nil
	}
	result := q.elements[0]
	last := len(q.elements) - 1
	x := q.elements[last]
	// Nil the vacated slot so the backing array does not keep the
	// removed element reachable (the original left it referenced).
	q.elements[last] = nil
	q.elements = q.elements[:last]
	if len(q.elements) != 0 {
		// Re-seat the former tail element from the root downward.
		q.siftDown(0, x)
	}
	return result
}
// Add inserts the given element into the queue at its priority
// position. Nil values are rejected and false is returned for them.
func (q *PriorityQueue) Add(p *collection.Object) bool {
	if q.checkNil(p) {
		return false
	}
	n := len(q.elements)
	if n == 0 {
		q.elements = append(q.elements, p)
		return true
	}
	// Grow by one slot, then bubble the new element up into place.
	q.elements = append(q.elements, nil)
	q.siftUp(n, p)
	return true
}
// checkNil reports whether p is nil or points at a nil value.
func (q *PriorityQueue) checkNil(p *collection.Object) bool {
	if p == nil {
		return true
	}
	return (*p) == nil
}
// checkIndex reports whether index is within the queue's bounds.
func (q *PriorityQueue) checkIndex(index int) bool {
	return index >= 0 && index < q.Size()
}
// HashNext reports whether the iteration has more elements.
// (The name is kept as-is; it is part of the collection.Itr contract.)
func (it *Iterator) HashNext() bool {
	return it.cursor != it.list.Size()
}
// Next returns the next element in the iteration, or nil when the
// iteration is exhausted.
func (it *Iterator) Next() *collection.Object {
	if !it.HashNext() {
		return nil
	}
	it.lastRet = it.cursor
	it.cursor++
	return it.list.elements[it.lastRet]
}
// Remove removes from the underlying collection the last element returned
// by this iterator. It returns the removed element and true, or
// (nil, false) when Next has not been called since the last Remove.
func (it *Iterator) Remove() (*collection.Object, bool) {
	if it.lastRet < 0 {
		return nil, false
	}
	p := it.list.removeAt(it.lastRet)
	// Step the cursor back so the element that moved into the removed
	// slot is not skipped by the next call to Next.
	it.cursor = it.lastRet
	it.lastRet = -1
	return p, true
}
// removeAt removes and returns the element at the given heap index,
// restoring the heap invariant afterwards, or nil when the index is
// out of range. The original implementation shifted the tail of the
// array left, which silently broke the heap ordering for later Polls;
// this version moves the last element into the vacated slot and sifts
// it into place (the approach used by java.util.PriorityQueue).
func (q *PriorityQueue) removeAt(index int) *collection.Object {
	if !q.checkIndex(index) {
		return nil
	}
	p := q.elements[index]
	last := q.Size() - 1
	if index == last {
		// Removing the tail: just truncate (and drop the reference).
		q.elements[last] = nil
		q.elements = q.elements[:last]
		return p
	}
	moved := q.elements[last]
	q.elements[last] = nil
	q.elements = q.elements[:last]
	q.siftDown(index, moved)
	if q.elements[index] == moved {
		// siftDown left it in place; it may need to move up instead.
		q.siftUp(index, moved)
	}
	return p
}
// siftDown inserts x at heap index k, demoting it down the tree while
// it has lower priority (per precede) than its higher-priority child,
// until the heap invariant is restored.
func (q *PriorityQueue) siftDown(k int, x *collection.Object) {
	// Only the first half of the array has children.
	half := q.Size() >> 1
	for k < half {
		// Left child: 2k+1 (<< binds tighter than +, so this is (k<<1)+1).
		child := k << 1 + 1
		p := q.elements[child]
		right := child + 1
		// Prefer the higher-priority of the two children.
		if right < q.Size() && q.precede(q.elements[right], p) {
			child = right
			p = q.elements[child]
		}
		if q.precede(x, p) {
			break
		}
		q.elements[k] = p
		k = child
	}
	q.elements[k] = x
}
// siftUp inserts x at heap index k, promoting it toward the root while
// it has higher priority (per precede) than its parent.
func (q *PriorityQueue) siftUp(k int, x *collection.Object) {
	for k > 0 {
		// Parent of k is (k-1)/2.
		parent := (k - 1) >> 1
		e := q.elements[parent]
		if q.precede(e, x) {
			break
		}
		q.elements[k] = e
		k = parent
	}
	q.elements[k] = x
}
/*
Copyright (c) 2017 Simon Schmidt
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/* Deprecated: Considered cruft right now. */
package indexdb
import "github.com/maxymania/fastnntp-polyglot/buffer"
// IndexableBit is a msgpack-array-encoded record of one primary key
// and its secondary keys.
type IndexableBit struct {
	_msgpack      struct{} `msgpack:",asArray"`
	PrimaryKey    []byte
	SecondaryKeys [][]byte
}
// Prepend returns a new pooled Binary whose content is the byte b
// followed by the contents of buf.
func Prepend(buf []byte, b byte) (r Binary) {
	r = AllocBinary(1 + len(buf))
	dst := r.Bytes()
	dst[0] = b
	copy(dst[1:], buf)
	return r
}
// Binary is a byte window borrowed from the shared buffer pool; data
// is the usable slice into the pooled allocation buf.
type Binary struct {
	buf  *[]byte
	data []byte
}
// Bytes returns the usable contents of the binary.
func (b Binary) Bytes() []byte { return b.data }

// Free returns the underlying allocation to the buffer pool. The
// Binary must not be used after Free.
func (b Binary) Free() {
	buffer.Put(b.buf)
}
// AllocBinary borrows a pooled buffer of at least n bytes and returns
// it as a Binary whose window is exactly n bytes.
func AllocBinary(n int) (b Binary) {
	b.buf = buffer.Get(n)
	b.data = (*b.buf)[:n]
	return
}
// NewBinary returns a Binary backed by a pooled buffer containing a
// copy of data.
func NewBinary(data []byte) (b Binary) {
	n := len(data)
	b.buf = buffer.Get(n)
	b.data = (*b.buf)[:n]
	copy(b.data, data)
	return
}
// NewBinaryStr returns a Binary backed by a pooled buffer containing a
// copy of the string's bytes.
func NewBinaryStr(data string) (b Binary) {
	n := len(data)
	b.buf = buffer.Get(n)
	b.data = (*b.buf)[:n]
	copy(b.data, data)
	return
}
// NewBinaryInplace wraps data directly without using the buffer pool.
// NOTE(review): buf is left nil here, so a later Free will call
// buffer.Put(nil) — confirm the pool tolerates a nil put.
func NewBinaryInplace(data []byte) (b Binary) {
	b.data = data
	return
}
|
package main
import (
"database/sql"
"fmt"
"net/http"
"strconv"
"todos/model"
"github.com/gin-gonic/gin"
)
// Server wires the HTTP handlers to the todo and secret services and
// holds the raw DB handle used by the auth middleware.
type Server struct {
	db            *sql.DB
	todoService   TodoService
	secretService SecretService
}
// FindByID handles GET of a single todo by its numeric path id and
// writes it as JSON. A non-numeric id is now rejected with 400 instead
// of silently querying for id 0 (the Atoi error was ignored before).
func (s *Server) FindByID(c *gin.Context) {
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
			"object":  "error",
			"message": fmt.Sprintf("param: invalid id: %s", err),
		})
		return
	}
	todo, err := s.todoService.FindByID(id)
	if err != nil {
		c.AbortWithStatusJSON(http.StatusInternalServerError, err)
		return
	}
	c.JSON(http.StatusOK, todo)
}
// All handles listing every todo; on a query failure it responds with
// a 500 and a JSON error envelope.
func (s *Server) All(c *gin.Context) {
	todos, err := s.todoService.All()
	if err == nil {
		c.JSON(http.StatusOK, todos)
		return
	}
	c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{
		"object":  "error",
		"message": fmt.Sprintf("db: query error: %s", err),
	})
}
// Create handles creating a todo from the JSON request body: 400 on a
// malformed body, 500 on a persistence failure, 201 with the created
// todo on success.
func (s *Server) Create(c *gin.Context) {
	var todo model.Todo
	if err := c.ShouldBindJSON(&todo); err != nil {
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
			"object":  "error",
			"message": fmt.Sprintf("json: wrong params: %s", err),
		})
		return
	}
	err := s.todoService.Create(&todo)
	if err != nil {
		c.AbortWithStatusJSON(http.StatusInternalServerError, err)
		return
	}
	c.JSON(http.StatusCreated, todo)
}
// Update handles updating a todo's text: the id comes from the URL
// path and the new text from the "todo" field of the JSON body. A
// non-numeric id is now rejected with 400 instead of silently
// updating id 0 (the Atoi error was ignored before).
func (s *Server) Update(c *gin.Context) {
	h := map[string]string{}
	if err := c.ShouldBindJSON(&h); err != nil {
		c.AbortWithStatusJSON(http.StatusBadRequest, err)
		return
	}
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
			"object":  "error",
			"message": fmt.Sprintf("param: invalid id: %s", err),
		})
		return
	}
	todo, err := s.todoService.Update(id, h["todo"])
	if err != nil {
		c.AbortWithStatusJSON(http.StatusInternalServerError, err)
		return
	}
	c.JSON(http.StatusOK, todo)
}
// DeleteByID handles deleting a todo by its numeric path id. A
// non-numeric id is now rejected with 400 instead of silently
// deleting id 0 (the Atoi error was ignored before).
func (s *Server) DeleteByID(c *gin.Context) {
	id, err := strconv.Atoi(c.Param("id"))
	if err != nil {
		c.AbortWithStatusJSON(http.StatusBadRequest, gin.H{
			"object":  "error",
			"message": fmt.Sprintf("param: invalid id: %s", err),
		})
		return
	}
	todos, err := s.todoService.DeleteByID(id)
	if err != nil {
		c.AbortWithStatusJSON(http.StatusInternalServerError, err)
		return
	}
	c.JSON(http.StatusOK, todos)
}
// CreateSecret stores a new secret parsed from the JSON request body:
// 400 on a malformed body, 500 on an insert failure, 201 on success.
func (s *Server) CreateSecret(c *gin.Context) {
	var secret model.Secret
	err := c.ShouldBindJSON(&secret)
	if err != nil {
		c.AbortWithStatusJSON(http.StatusBadRequest, err)
		return
	}
	err = s.secretService.Insert(&secret)
	if err != nil {
		c.AbortWithStatusJSON(http.StatusInternalServerError, err)
		return
	}
	c.JSON(http.StatusCreated, secret)
}
// AuthTodo is a gin middleware that gates routes behind HTTP Basic
// auth: the basic-auth username must exist as a key in the secrets
// table. The basic-auth password is currently ignored.
func (s *Server) AuthTodo(c *gin.Context) {
	user, _, ok := c.Request.BasicAuth()
	if ok {
		// Parameterized query, so the username is not injectable.
		row := s.db.QueryRow("SELECT key FROM secrets WHERE key = $1", user)
		// Scan succeeds only when a matching key row exists.
		if err := row.Scan(&user); err == nil {
			return
		}
	}
	c.AbortWithStatus(http.StatusUnauthorized)
}
|
package node
import (
"github.com/gookit/gcli/v3"
)
// Cmd returns the top-level "node" command. It only prints its own
// help text and delegates real work to its subcommands.
func Cmd() *gcli.Command {
	return &gcli.Command{
		Name: "node",
		// allow color tag and {$cmd} will be replace to 'demo'
		Desc: "Interact with and get information about Nodes",
		Func: func(cmd *gcli.Command, args []string) error {
			cmd.ShowHelp()
			return nil
		},
		Subs: []*gcli.Command{statusCMD()},
	}
}
// statusCMD builds the "node status" subcommand; it currently just
// shows its own help text.
func statusCMD() *gcli.Command {
	status := &gcli.Command{
		Name: "status",
		Desc: "Query information about a node, the default is the current specified node",
	}
	status.Func = func(cmd *gcli.Command, args []string) error {
		cmd.ShowHelp()
		return nil
	}
	return status
}
|
//go:build js
// Package tabsupport offers functionality to add tab support to a textarea element.
package tabsupport
import (
"syscall/js"
"honnef.co/go/js/dom/v2"
)
// Add is a helper that modifies a <textarea>, so that pressing tab key will insert tabs.
func Add(textArea *dom.HTMLTextAreaElement) {
	textArea.AddEventListener("keydown", false, func(event dom.Event) {
		switch ke := event.(*dom.KeyboardEvent); {
		// KeyCode 9 ('\t') with no modifier keys held: a plain Tab press.
		case ke.KeyCode() == '\t' && !ke.CtrlKey() && !ke.AltKey() && !ke.MetaKey() && !ke.ShiftKey(): // Tab.
			event.PreventDefault()
			insertTab(textArea)
		}
	})
}
// KeyDownHandler is a keydown event handler for a <textarea> element.
// It makes it so that pressing tab key will insert tabs.
//
// To use it, first make it available to the JavaScript world, e.g.:
//
//	js.Global().Set("TabSupportKeyDownHandler", jsutil.Wrap(tabsupport.KeyDownHandler))
//
// Then use it as follows in the HTML:
//
//	<textarea onkeydown="TabSupportKeyDownHandler(this, event);"></textarea>
//
// The element must be a *dom.HTMLTextAreaElement; the type assertion
// below panics for any other element kind.
func KeyDownHandler(element dom.HTMLElement, event dom.Event) {
	switch ke := event.(*dom.KeyboardEvent); {
	case ke.KeyCode() == '\t' && !ke.CtrlKey() && !ke.AltKey() && !ke.MetaKey() && !ke.ShiftKey(): // Tab.
		event.PreventDefault()
		insertTab(element.(*dom.HTMLTextAreaElement))
	}
}
// insertTab replaces the current selection of textArea with a single
// tab character and places the caret right after it.
func insertTab(textArea *dom.HTMLTextAreaElement) {
	value, start, end := textArea.Value(), textArea.SelectionStart(), textArea.SelectionEnd()
	// NOTE(review): start/end are DOM selection offsets (UTF-16 code
	// units) while Go string slicing is byte-based; for non-ASCII
	// content these can disagree — confirm inputs are ASCII-safe.
	textArea.SetValue(value[:start] + "\t" + value[end:])
	textArea.SetSelectionStart(start + 1)
	textArea.SetSelectionEnd(start + 1)
	// Trigger "input" event listeners.
	inputEvent := js.Global().Get("CustomEvent").New("input")
	textArea.Underlying().Call("dispatchEvent", inputEvent)
}
|
package entity
import (
"github.com/fatih/structs"
)
// DecryptionLog is a persisted record of a decryption event for a
// given chain and token.
type DecryptionLog struct {
	Id    int64
	Chain string
	Token string
}
// Map converts the struct to a map keyed by field name via structs.Map
// and returns a pointer to it. (The pointer adds little — maps are
// reference types — but the signature is kept for existing callers.)
func (p *DecryptionLog) Map() *map[string]interface{} {
	m := structs.Map(p)
	return &m
}
|
// *** WARNING: this file was generated by Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package apigateway
import (
"context"
"reflect"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// APIKeySource is the string enum of API key source kinds ("HEADER"
// or "AUTHORIZER"); the methods below adapt it to Pulumi's string
// output types. (Generated code — regenerate rather than hand-edit.)
type APIKeySource pulumi.String

const (
	APIKeySourceHEADER     = APIKeySource("HEADER")
	APIKeySourceAUTHORIZER = APIKeySource("AUTHORIZER")
)

func (APIKeySource) ElementType() reflect.Type {
	return reflect.TypeOf((*pulumi.String)(nil)).Elem()
}

func (e APIKeySource) ToStringOutput() pulumi.StringOutput {
	return pulumi.ToOutput(pulumi.String(e)).(pulumi.StringOutput)
}

func (e APIKeySource) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput {
	return pulumi.ToOutputWithContext(ctx, pulumi.String(e)).(pulumi.StringOutput)
}

func (e APIKeySource) ToStringPtrOutput() pulumi.StringPtrOutput {
	return pulumi.String(e).ToStringPtrOutputWithContext(context.Background())
}

func (e APIKeySource) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput {
	return pulumi.String(e).ToStringOutputWithContext(ctx).ToStringPtrOutputWithContext(ctx)
}
// IntegrationConnectionType is the string enum of integration
// connection kinds ("INTERNET" or "VPC_LINK"); the methods below adapt
// it to Pulumi's string output types. (Generated code.)
type IntegrationConnectionType pulumi.String

const (
	IntegrationConnectionTypeINTERNET = IntegrationConnectionType("INTERNET")
	IntegrationConnectionType_VPC_LINK = IntegrationConnectionType("VPC_LINK")
)

func (IntegrationConnectionType) ElementType() reflect.Type {
	return reflect.TypeOf((*pulumi.String)(nil)).Elem()
}

func (e IntegrationConnectionType) ToStringOutput() pulumi.StringOutput {
	return pulumi.ToOutput(pulumi.String(e)).(pulumi.StringOutput)
}

func (e IntegrationConnectionType) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput {
	return pulumi.ToOutputWithContext(ctx, pulumi.String(e)).(pulumi.StringOutput)
}

func (e IntegrationConnectionType) ToStringPtrOutput() pulumi.StringPtrOutput {
	return pulumi.String(e).ToStringPtrOutputWithContext(context.Background())
}

func (e IntegrationConnectionType) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput {
	return pulumi.String(e).ToStringOutputWithContext(ctx).ToStringPtrOutputWithContext(ctx)
}
// IntegrationPassthroughBehavior is the string enum controlling how
// unmapped request content types pass through ("when_no_match",
// "when_no_templates", "never"); the methods below adapt it to
// Pulumi's string output types. (Generated code.)
type IntegrationPassthroughBehavior pulumi.String

const (
	IntegrationPassthroughBehavior_When_no_match = IntegrationPassthroughBehavior("when_no_match")
	IntegrationPassthroughBehavior_When_no_templates = IntegrationPassthroughBehavior("when_no_templates")
	IntegrationPassthroughBehaviorNever = IntegrationPassthroughBehavior("never")
)

func (IntegrationPassthroughBehavior) ElementType() reflect.Type {
	return reflect.TypeOf((*pulumi.String)(nil)).Elem()
}

func (e IntegrationPassthroughBehavior) ToStringOutput() pulumi.StringOutput {
	return pulumi.ToOutput(pulumi.String(e)).(pulumi.StringOutput)
}

func (e IntegrationPassthroughBehavior) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput {
	return pulumi.ToOutputWithContext(ctx, pulumi.String(e)).(pulumi.StringOutput)
}

func (e IntegrationPassthroughBehavior) ToStringPtrOutput() pulumi.StringPtrOutput {
	return pulumi.String(e).ToStringPtrOutputWithContext(context.Background())
}

func (e IntegrationPassthroughBehavior) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput {
	return pulumi.String(e).ToStringOutputWithContext(ctx).ToStringPtrOutputWithContext(ctx)
}
// IntegrationType is the string enum of API Gateway integration kinds
// ("aws", "aws_proxy", "http", "http_proxy", "mock"); the methods
// below adapt it to Pulumi's string output types. (Generated code.)
type IntegrationType pulumi.String

const (
	IntegrationTypeAws = IntegrationType("aws")
	IntegrationType_Aws_proxy = IntegrationType("aws_proxy")
	IntegrationTypeHttp = IntegrationType("http")
	IntegrationType_Http_proxy = IntegrationType("http_proxy")
	IntegrationTypeMock = IntegrationType("mock")
)

func (IntegrationType) ElementType() reflect.Type {
	return reflect.TypeOf((*pulumi.String)(nil)).Elem()
}

func (e IntegrationType) ToStringOutput() pulumi.StringOutput {
	return pulumi.ToOutput(pulumi.String(e)).(pulumi.StringOutput)
}

func (e IntegrationType) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput {
	return pulumi.ToOutputWithContext(ctx, pulumi.String(e)).(pulumi.StringOutput)
}

func (e IntegrationType) ToStringPtrOutput() pulumi.StringPtrOutput {
	return pulumi.String(e).ToStringPtrOutputWithContext(context.Background())
}

func (e IntegrationType) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput {
	return pulumi.String(e).ToStringOutputWithContext(ctx).ToStringPtrOutputWithContext(ctx)
}
// Method is the string enum of HTTP methods accepted by API Gateway
// (including the wildcard "ANY"); the methods below adapt it to
// Pulumi's string output types. (Generated code.)
type Method pulumi.String

const (
	MethodANY = Method("ANY")
	MethodGET = Method("GET")
	MethodPUT = Method("PUT")
	MethodPOST = Method("POST")
	MethodDELETE = Method("DELETE")
	MethodPATCH = Method("PATCH")
	MethodOPTIONS = Method("OPTIONS")
	MethodHEAD = Method("HEAD")
)

func (Method) ElementType() reflect.Type {
	return reflect.TypeOf((*pulumi.String)(nil)).Elem()
}

func (e Method) ToStringOutput() pulumi.StringOutput {
	return pulumi.ToOutput(pulumi.String(e)).(pulumi.StringOutput)
}

func (e Method) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput {
	return pulumi.ToOutputWithContext(ctx, pulumi.String(e)).(pulumi.StringOutput)
}

func (e Method) ToStringPtrOutput() pulumi.StringPtrOutput {
	return pulumi.String(e).ToStringPtrOutputWithContext(context.Background())
}

func (e Method) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput {
	return pulumi.String(e).ToStringOutputWithContext(ctx).ToStringPtrOutputWithContext(ctx)
}
// RequestValidator is the string enum of request validation modes
// ("ALL", "PARAMS_ONLY", "BODY_ONLY"); the methods below adapt it to
// Pulumi's string output types. (Generated code.)
type RequestValidator pulumi.String

const (
	RequestValidatorALL = RequestValidator("ALL")
	RequestValidator_PARAMS_ONLY = RequestValidator("PARAMS_ONLY")
	RequestValidator_BODY_ONLY = RequestValidator("BODY_ONLY")
)

func (RequestValidator) ElementType() reflect.Type {
	return reflect.TypeOf((*pulumi.String)(nil)).Elem()
}

func (e RequestValidator) ToStringOutput() pulumi.StringOutput {
	return pulumi.ToOutput(pulumi.String(e)).(pulumi.StringOutput)
}

func (e RequestValidator) ToStringOutputWithContext(ctx context.Context) pulumi.StringOutput {
	return pulumi.ToOutputWithContext(ctx, pulumi.String(e)).(pulumi.StringOutput)
}

func (e RequestValidator) ToStringPtrOutput() pulumi.StringPtrOutput {
	return pulumi.String(e).ToStringPtrOutputWithContext(context.Background())
}

func (e RequestValidator) ToStringPtrOutputWithContext(ctx context.Context) pulumi.StringPtrOutput {
	return pulumi.String(e).ToStringOutputWithContext(ctx).ToStringPtrOutputWithContext(ctx)
}
|
package lccc_core
import (
"fmt"
"github.com/lemon-cloud-service/lemon-cloud-common/lemon-cloud-common-utils/lccu_log"
"github.com/micro/go-micro/v2/config"
"github.com/micro/go-micro/v2/config/source/etcd"
"sync"
)
// DataSandboxServiceStruct wraps a go-micro config context, backed by
// etcd, that stores this service's sandboxed key/value data.
type DataSandboxServiceStruct struct {
	DataSandboxContext config.Config
}
// Singleton instance and the once guarding its creation.
var dataSandboxServiceInstance *DataSandboxServiceStruct
var dataSandboxServiceOnce sync.Once

// DataSandboxService returns the process-wide singleton instance,
// creating it on first use.
func DataSandboxService() *DataSandboxServiceStruct {
	dataSandboxServiceOnce.Do(func() {
		dataSandboxServiceInstance = &DataSandboxServiceStruct{}
	})
	return dataSandboxServiceInstance
}
// Init creates the config context and loads it from the etcd source
// addressed by the service registry, scoped under
// /<KEY_C_DATA_SANDBOX>/<namespace>/<service-key>.
func (dss *DataSandboxServiceStruct) Init() error {
	ctx, err := config.NewConfig()
	if err != nil {
		return err
	}
	dss.DataSandboxContext = ctx
	prefix := fmt.Sprintf("/%v/%v/%v", KEY_C_DATA_SANDBOX, CoreService().CoreStartParams.ServiceGeneralConfig.Service.Namespace, CoreService().CoreStartParams.ServiceBaseInfo.ServiceKey)
	etcdSource := etcd.NewSource(
		etcd.WithAddress(CoreService().CoreStartParams.ServiceGeneralConfig.GetRegistryUrl()),
		etcd.WithPrefix(prefix),
		etcd.StripPrefix(true))
	return dss.DataSandboxContext.Load(etcdSource)
}
// Get returns the string value stored under dataKey, or "" when the
// key is absent.
func (dss *DataSandboxServiceStruct) Get(dataKey string) string {
	return dss.DataSandboxContext.Get(dataKey).String("")
}
// Set stores newValue under dataKey. Note the argument order of
// config.Set is (value, path...), so this call is correct as written.
func (dss *DataSandboxServiceStruct) Set(dataKey, newValue string) {
	dss.DataSandboxContext.Set(newValue, dataKey)
}
// Watch invokes callback every time the value stored under dataKey
// changes, re-registering itself after each event so the watch stays
// active.
func (dss *DataSandboxServiceStruct) Watch(dataKey string, callback func(string)) {
	watch, err := dss.DataSandboxContext.Watch(dataKey)
	if err != nil {
		// The original logged unconditionally (even on success) and
		// then dereferenced a nil watch on failure; bail out instead.
		lccu_log.Error("An error occurred while observing the system settings:", err)
		return
	}
	go func() {
		if val, err := watch.Next(); err == nil {
			callback(val.String(""))
		} else {
			lccu_log.Error("An error occurred while observing the change of system settings:", err)
		}
		// Re-arm the watch for the next change.
		dss.Watch(dataKey, callback)
	}()
}
|
package main
import (
"errors"
"fmt"
"os"
"os/signal"
"strings"
"github.com/bwmarrin/discordgo"
)
var (
	/* Bot owner */
	owner string
	/* The list of the sounds found in the sound path */
	soundList = make([]string, 0)
	/* The list of connected servers, keyed by guild ID */
	serverList = make(map[string]*Server, 0)
	/* A chan used to shut the bot down cleanly via "!musicbot shutdown" */
	shutdownChan = make(chan bool)
)
/* populateSoundCollection lists all ".dca" files in soundPath and
replaces soundList with their names (without the ".dca" suffix). A
temporary list is built first so concurrent readers always see a fully
populated sound list. */
func populateSoundCollection() {
	var tempSoundList []string

	// Open the sound path. The original ignored this error and never
	// closed the directory handle.
	folder, err := os.Open(soundPath)
	if err != nil {
		fmt.Println("Error opening directory :", err)
		return
	}
	defer folder.Close()

	// Read the name of all files in the directory.
	files, err := folder.Readdirnames(0)
	if err != nil {
		fmt.Println("Error Reading directory :", err)
		return
	}

	// Only .dca files are accepted by the bot; strip the suffix so
	// users don't need to type it.
	for _, file := range files {
		if strings.HasSuffix(file, ".dca") {
			tempSoundList = append(tempSoundList, strings.TrimSuffix(file, ".dca"))
		}
	}

	// Publish the fully populated list.
	soundList = tempSoundList
}
/* Main function of the program */
func main() {
	// The bot token is taken from the TOKEN environment variable.
	token := os.Getenv("TOKEN")
	if token == "" {
		fmt.Println("No token provided")
		return
	}

	// Create a new Discord session using the provided token.
	dg, err := discordgo.New(token)
	if err != nil {
		fmt.Println("error creating Discord session,", err)
		return
	}

	// Record the bot owner's ID so owner-only commands can be gated.
	app, err := dg.Application("@me")
	if err != nil {
		fmt.Println(err)
		return
	}
	owner = app.Owner.ID

	// Fill the sound list
	populateSoundCollection()

	// Register messageCreate as a callback for the messageCreate events.
	dg.AddHandler(messageCreate)
	dg.AddHandler(guildCreate)

	// Prevent forced stop: catch SIGINT so we can disconnect cleanly.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)

	// Open the websocket and begin listening.
	err = dg.Open()
	if err != nil {
		fmt.Println("error opening connection,", err)
		return
	}
	fmt.Println("Bot is now running. Press CTRL-C to exit.")

	// Block until CTRL-C is pressed or the shutdown command fires.
	select {
	case <-c:
	case <-shutdownChan:
		break
	}

	// Disconnect every connected server before closing the session.
	for _, server := range serverList {
		server.Disconnect()
	}
	dg.Logout()
	dg.Close()
	fmt.Println("Goodbye ^^")
	return
}
// guildCreate tracks a newly available guild by creating a Server
// entry for it.
func guildCreate(s *discordgo.Session, g *discordgo.GuildCreate) {
	serverList[g.ID] = NewServer(g.ID)
}
// guildDelete drops the Server entry for a guild that went away.
// NOTE(review): this handler is never registered via dg.AddHandler in
// main — confirm whether that is intentional.
func guildDelete(s *discordgo.Session, g *discordgo.GuildDelete) {
	delete(serverList, g.ID)
}
// messageCreate is called (due to AddHandler in main) every time a new
// message is created on any channel the authenticated bot has access
// to. Messages starting with the command prefix are dispatched to
// command(); a bare prefix gets the help text instead.
func messageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {
	// Ignore anything that is not addressed to the bot.
	if !strings.HasPrefix(m.Content, prefix) {
		return
	}
	message := strings.Split(m.Content, " ")

	// Resolve the channel to find which guild the message came from.
	c, err := s.State.Channel(m.ChannelID)
	if err != nil {
		// Could not find channel.
		return
	}
	server, exist := serverList[c.GuildID]
	if !exist {
		// Could not find guild.
		return
	}

	// Verify there is a command after the prefix.
	if len(message) > 1 {
		msg := command(message, server, m.Author.ID, s)
		if msg != "" {
			_, _ = s.ChannelMessageSend(m.ChannelID, msg)
		}
	} else {
		_, _ = s.ChannelMessageSend(m.ChannelID, help)
	}
}
// getAuthorChannel returns the ID of the voice channel the given user
// is currently connected to in the given guild.
func getAuthorChannel(s *discordgo.Session, authorID, guildID string) (string, error) {
	guild, err := s.State.Guild(guildID)
	if err != nil {
		return "", errors.New("Error getting guild information")
	}
	for _, state := range guild.VoiceStates {
		if state.UserID != authorID {
			continue
		}
		return state.ChannelID, nil
	}
	return "", errors.New("User not found")
}
|
package omokServer
/*
import (
"go.uber.org/zap"
"main/protocol"
. "gohipernetFake"
)
func (room *baseRoom) _packetProcess_Chat(gameUser *roomUser, packet protocol.Packet) int16 {
_sessionIndex := packet.UserSessionIndex
sessionUniqueId := packet.UserSessionUniqueId
var chatPacket protocol.RoomChatReqPacket
if chatPacket.Decoding(packet.Data) == false {
_sendRoomChatResult(_sessionIndex, sessionUniqueId, protocol.ERROR_CODE_PACKET_DECODING_FAIL)
return protocol.ERROR_CODE_PACKET_DECODING_FAIL
}
// 채팅 최대길이 제한
msgLen := len(chatPacket.Msgs)
if msgLen < 1 || msgLen > protocol.MAX_CHAT_MESSAGE_BYTE_LENGTH {
_sendRoomChatResult(_sessionIndex, sessionUniqueId, protocol.ERROR_CODE_ROOM_CHAT_CHAT_MSG_LEN)
return protocol.ERROR_CODE_ROOM_CHAT_CHAT_MSG_LEN
}
var chatNotifyResponse protocol.RoomChatNtfPacket
chatNotifyResponse.RoomUserUniqueId = gameUser.RoomUniqueId
chatNotifyResponse.MsgLen = int16(msgLen)
chatNotifyResponse.Msg = chatPacket.Msgs
notifySendBuf, packetSize := chatNotifyResponse.EncodingPacket()
room.broadcastPacket(packetSize, notifySendBuf, 0)
_sendRoomChatResult(_sessionIndex, sessionUniqueId, protocol.ERROR_CODE_NONE)
NTELIB_LOG_DEBUG("ParkChannel Chat Notify Function", zap.String("Sender", string(gameUser._id[:])),
zap.String("Message", string(chatPacket.Msgs)))
return protocol.ERROR_CODE_NONE
}
func _sendRoomChatResult(_sessionIndex int32, sessionUniqueId uint64, result int16) {
response := protocol.RoomChatResPacket{ result }
sendPacket, _ := response.EncodingPacket()
NetLibIPostSendToClient(_sessionIndex, sessionUniqueId, sendPacket)
}
*/
|
package config
import (
"errors"
"reflect"
"strings"
"testing"
)
// TestHandleReturnValue covers handleReturnValue across return-value
// arities: a single value, nil values, (value, error) pairs, an actual
// error, and the shapes that must panic (non-error second value,
// empty, three values).
func TestHandleReturnValue(t *testing.T) {
	// one value
	v, err := handleReturnValue([]reflect.Value{reflect.ValueOf(1)})
	if v.(int) != 1 {
		t.Fatal("expected value")
	}
	if err != nil {
		t.Fatal(err)
	}
	// Nil value
	v, err = handleReturnValue([]reflect.Value{reflect.ValueOf(nil)})
	if v != nil {
		t.Fatal("expected no value")
	}
	if err == nil {
		t.Fatal("expected an error")
	}
	// Nil value, nil err
	v, err = handleReturnValue([]reflect.Value{reflect.ValueOf(nil), reflect.ValueOf(nil)})
	if v != nil {
		t.Fatal("expected no value")
	}
	if err == nil {
		t.Fatal("expected an error")
	}
	// two values
	v, err = handleReturnValue([]reflect.Value{reflect.ValueOf(1), reflect.ValueOf(nil)})
	if v, ok := v.(int); !ok || v != 1 {
		t.Fatalf("expected value of 1, got %v", v)
	}
	if err != nil {
		t.Fatal("expected no error")
	}
	// an error
	myError := errors.New("my error")
	_, err = handleReturnValue([]reflect.Value{reflect.ValueOf(1), reflect.ValueOf(myError)})
	if err != myError {
		t.Fatal(err)
	}
	// Invalid shapes must panic; recover inside the closure so the
	// trailing t.Fatal is only reached when no panic happened.
	for _, vals := range [][]reflect.Value{
		{reflect.ValueOf(1), reflect.ValueOf("not an error")},
		{},
		{reflect.ValueOf(1), reflect.ValueOf(myError), reflect.ValueOf(myError)},
	} {
		func() {
			defer func() { recover() }()
			handleReturnValue(vals)
			t.Fatal("expected a panic")
		}()
	}
}
// foo is a tiny self-referential interface used as the target type in
// the constructor tests below.
type foo interface {
	foo() foo
}

// fooType is the reflect.Type of the foo interface.
var fooType = reflect.TypeOf((*foo)(nil)).Elem()
// TestCheckReturnType verifies that checkReturnType rejects function
// signatures that cannot produce a foo (optionally with an error) and
// accepts the two valid shapes. Fixes the "falure" typo in the
// failure message.
func TestCheckReturnType(t *testing.T) {
	for i, fn := range []interface{}{
		func() { panic("") },
		func() error { panic("") },
		func() (error, error) { panic("") },
		func() (foo, error, error) { panic("") },
		func() (foo, foo) { panic("") },
	} {
		if checkReturnType(reflect.TypeOf(fn), fooType) == nil {
			t.Errorf("expected failure for case %d (type %T)", i, fn)
		}
	}
	for i, fn := range []interface{}{
		func() foo { panic("") },
		func() (foo, error) { panic("") },
	} {
		if err := checkReturnType(reflect.TypeOf(fn), fooType); err != nil {
			t.Errorf("expected success for case %d (type %T), got: %s", i, fn, err)
		}
	}
}
// constructFoo is a constructor that returns a nil foo, used to check
// that callConstructor rejects nil results.
func constructFoo() foo {
	return nil
}

// fooImpl is a trivial concrete implementation of foo.
type fooImpl struct{}

func (f *fooImpl) foo() foo { return nil }
// TestCallConstructor verifies callConstructor's handling of nil
// results (failure, with the constructor's name in the error), valid
// concrete and interface results, and (value, error) constructors.
func TestCallConstructor(t *testing.T) {
	// A constructor returning nil must fail, naming the constructor.
	_, err := callConstructor(reflect.ValueOf(constructFoo), nil)
	if err == nil {
		t.Fatal("expected constructor to fail")
	}
	if !strings.Contains(err.Error(), "constructFoo") {
		t.Errorf("expected error to contain the constructor name: %s", err)
	}
	// Valid constructor returning the interface type.
	v, err := callConstructor(reflect.ValueOf(func() foo { return &fooImpl{} }), nil)
	if err != nil {
		t.Fatal(err)
	}
	if _, ok := v.(*fooImpl); !ok {
		t.Fatal("expected a fooImpl")
	}
	// Valid constructor returning the concrete type.
	v, err = callConstructor(reflect.ValueOf(func() *fooImpl { return new(fooImpl) }), nil)
	if err != nil {
		t.Fatal(err)
	}
	if _, ok := v.(*fooImpl); !ok {
		t.Fatal("expected a fooImpl")
	}
	// (nil, nil) from a (value, error) constructor must fail.
	_, err = callConstructor(reflect.ValueOf(func() (*fooImpl, error) { return nil, nil }), nil)
	if err == nil {
		t.Fatal("expected error")
	}
	// (value, nil) from a (value, error) constructor must succeed.
	v, err = callConstructor(reflect.ValueOf(func() (*fooImpl, error) { return new(fooImpl), nil }), nil)
	if err != nil {
		t.Fatal(err)
	}
	if _, ok := v.(*fooImpl); !ok {
		t.Fatal("expected a fooImpl")
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.