text stringlengths 11 4.05M |
|---|
/*
Description
A Bank plans to install a machine for cash withdrawal. The machine is able to deliver appropriate @ bills for a requested cash amount. The machine uses exactly N distinct bill denominations, say Dk, k=1,N, and for each denomination Dk the machine has a supply of nk bills. For example,
N=3, n1=10, D1=100, n2=4, D2=50, n3=5, D3=10
means the machine has a supply of 10 bills of @100 each, 4 bills of @50 each, and 5 bills of @10 each.
Call cash the requested amount of cash the machine should deliver and write a program that computes the maximum amount of cash less than or equal to cash that can be effectively delivered according to the available bill supply of the machine.
Notes:
@ is the symbol of the currency delivered by the machine. For instance, @ may stand for dollar, euro, pound etc.
Input
The program input is from standard input. Each data set in the input stands for a particular transaction and has the format:
cash N n1 D1 n2 D2 ... nN DN
where 0 <= cash <= 100000 is the amount of cash requested, 0 <=N <= 10 is the number of bill denominations and 0 <= nk <= 1000 is the number of available bills for the Dk denomination, 1 <= Dk <= 1000, k=1,N. White spaces can occur freely between the numbers in the input. The input data are correct.
Output
For each set of data the program prints the result to the standard output on a separate line as shown in the examples below.
Sample Input
735 3 4 125 6 5 3 350
633 4 500 30 6 100 1 5 0 1
735 0
0 3 10 100 10 50 10 10
Sample Output
735
630
0
0
Hint
The first data set designates a transaction where the amount of cash requested is @735. The machine contains 3 bill denominations: 4 bills of @125, 6 bills of @5, and 3 bills of @350. The machine can deliver the exact amount of requested cash.
In the second case the bill supply of the machine does not fit the exact amount of cash requested. The maximum cash that can be delivered is @630. Notice that there can be several possibilities to combine the bills in the machine for matching the delivered cash.
In the third case the machine is empty and no cash is delivered. In the fourth case the amount of cash requested is @0 and, therefore, the machine delivers no cash.
Source
Southeastern Europe 2002
*/
package main
// main exercises knapsack against the four sample transactions from the
// problem statement (Southeastern Europe 2002, cash machine).
func main() {
	cases := []struct {
		cash  int
		bills [][2]int
		want  int
	}{
		{735, [][2]int{{4, 125}, {6, 5}, {3, 350}}, 735},
		{633, [][2]int{{500, 30}, {6, 100}, {1, 5}, {0, 1}}, 630},
		{735, [][2]int{}, 0},
		{0, [][2]int{{10, 100}, {10, 50}, {10, 10}}, 0},
	}
	for _, tc := range cases {
		assert(knapsack(tc.cash, tc.bills) == tc.want)
	}
}
// assert panics when the given condition does not hold.
func assert(ok bool) {
	if ok {
		return
	}
	panic("assertion failed")
}
// knapsack computes the maximum amount not exceeding c that can be paid out
// with the bill supply a, where each a[i] is {count, denomination}. It is a
// bounded-knapsack DP: large counts are decomposed into power-of-two groups,
// each of which is then treated as a single 0/1 item.
func knapsack(c int, a [][2]int) int {
	const limit = 10
	best := make([]int, c+len(a)+1)
	for _, bill := range a {
		count, denom := bill[0], bill[1]
		// Binary-split large supplies: groups of 1, 2, 4, ... bills,
		// repeating on the remainder until at most `limit` bills are left.
		for count > limit {
			for group := 1; group <= count; count, group = count-group, group*2 {
				w := denom * group
				for k := c; k >= w; k-- {
					best[k] = max(best[k], best[k-w]+w)
				}
			}
		}
		// Remaining bills one at a time: classic 0/1 passes.
		for used := 0; used < count; used++ {
			for k := c; k >= denom; k-- {
				best[k] = max(best[k], best[k-denom]+denom)
			}
		}
	}
	return best[c]
}
|
package v2
import "switch-onchain/internal/service"
// APIV2 is the v2 (upgraded) API layer; it delegates business logic to the
// wrapped service instance.
type APIV2 struct {
	SVC *service.Service
}
|
// action.go
// author:昌维 [github.com/cw1997]
// date:2017-05-09 09:00:57
package web
import (
"log"
"strconv"
"cache"
"config"
"db"
"url"
"util"
)
// storeUrl persists a new short-url mapping for longUrl (recording the
// requester's ip) and returns the generated short URL, or "" on failure.
func storeUrl(longUrl string, ip string) string {
	var shortUrl string
	// Retry when the randomly generated short code collides.
	// NOTE(review): config key "rand.retires" looks like a typo of
	// "rand.retries" — confirm against the config file before renaming.
	retries, err := strconv.Atoi(config.Get("rand.retires"))
	if err != nil {
		log.Println(err)
		return ""
	}
	// For performance, a small error in the stored insertion timestamp is
	// acceptable (computed once, outside the retry loop).
	// datetime := util.GetTimestamp()
	datetime := util.GetDatetime()
	for i := 0; i < retries; i++ {
		shortUrl = url.GeneralShortgUrl(longUrl)
		// The old pre-check was wrong: under "Repeatable read" isolation the
		// lookup could observe phantoms, so rely on the insert itself instead.
		// if l, err := getLongUrl(shortUrl); l == "" && err != nil {
		// break
		// }
		if db.SetShortUrlByLongUrl(longUrl, shortUrl, datetime, ip) {
			cache.SetShortUrlCache(shortUrl, longUrl)
			return shortUrl
		}
	}
	return ""
}
func getLongUrl(shortUrl string) (string, error) {
if v, err := cache.Get(config.Get("cache.prefix") + shortUrl); err == nil {
return v, nil
} else {
// log.Println(err)
}
if v, err := url.GetLongUrl(shortUrl); err == nil {
_, errCache := cache.SetShortUrlCache(shortUrl, v)
return v, errCache
} else {
return "", err
}
}
|
package shardmaster
import "../raft"
import "../labrpc"
import "sync"
import "../labgob"
import "log"
//import "fmt"
// Debug controls log verbosity: > 0 enables DPrintf, -1 enables TPrintf.
const Debug = 0

// DPrintf logs through the standard logger when Debug > 0; otherwise it is
// a no-op returning (0, nil).
func DPrintf(format string, a ...interface{}) (n int, err error) {
	if Debug > 0 {
		log.Printf(format, a...)
	}
	return
}

// TPrintf logs trace-level output only when Debug == -1.
func TPrintf(format string, a ...interface{}) (n int, err error) {
	if Debug == -1 {
		log.Printf(format, a...)
	}
	return
}
// ShardMaster is the replicated shard-configuration service. All mutations
// flow through the embedded Raft instance; RPC handlers park on
// requestHandlers until their log entry is applied.
type ShardMaster struct {
	mu      sync.Mutex
	me      int
	rf      *raft.Raft
	applyCh chan raft.ApplyMsg

	// Your data here.
	duplicate       map[int64]map[string]int64 // [clientId]([key]requestId): last applied request per client+method
	requestHandlers map[int]chan raft.ApplyMsg // log index -> channel that wakes the waiting RPC handler
	configs         []Config                   // indexed by config num
	commitIndex     int                        // index of the newest config known applied
}
// Lock acquires the shardmaster's global mutex.
func (sm *ShardMaster) Lock() {
	sm.mu.Lock()
}

// Unlock releases the shardmaster's global mutex.
func (sm *ShardMaster) Unlock() {
	sm.mu.Unlock()
}
// registerIndexHandler creates and registers a buffered (capacity-1) channel
// on which the apply loop will deliver the message committed at log `index`.
func (sm *ShardMaster) registerIndexHandler(index int) chan raft.ApplyMsg {
	sm.Lock()
	defer sm.Unlock()
	awaitChan := make(chan raft.ApplyMsg, 1)
	sm.requestHandlers[index] = awaitChan
	return awaitChan
}
// GetDuplicate returns the last request id applied for (clientId, method)
// and whether one was recorded.
func (sm *ShardMaster) GetDuplicate(clientId int64, method string) (int64, bool) {
	sm.Lock()
	defer sm.Unlock()
	requests, found := sm.duplicate[clientId]
	if !found {
		return 0, false
	}
	requestId, ok := requests[method]
	return requestId, ok
}
// SetDuplicateNolock records the newest requestId seen for (clientId, method).
// Caller must hold sm.mu.
func (sm *ShardMaster) SetDuplicateNolock(clientId int64, method string, requestId int64) {
	clientRequests, ok := sm.duplicate[clientId]
	if !ok {
		clientRequests = make(map[string]int64)
		sm.duplicate[clientId] = clientRequests
	}
	clientRequests[method] = requestId
}
// Op is the command replicated through Raft. For Join/Leave/Move, Config
// carries the leader's entire pre-computed config list; ClientId/RequestId
// deduplicate client retries.
type Op struct {
	// Your data here.
	Method    string
	Config    []Config
	RequestId int64
	ClientId  int64
}
// Clone copies config a into b (b.Groups must already be non-nil). Shard
// assignments and Num are copied element-wise; group server lists are shared,
// assuming the membership of a group never changes.
func Clone(a, b *Config) {
	b.Num = a.Num
	for i := range a.Shards {
		b.Shards[i] = a.Shards[i]
	}
	for gid, servers := range a.Groups {
		b.Groups[gid] = servers
	}
}
// AppendConfigAfterJoinNolock builds the successor configuration after new
// groups join: it clones the latest config, merges args.Servers into Groups,
// rebalances shards, appends the result, and returns the whole config list.
// Caller must hold sm.mu.
func (sm *ShardMaster) AppendConfigAfterJoinNolock(args *JoinArgs) []Config {
	newConfig := Config{}
	newConfig.Groups = map[int][]string{}
	lastConfig := sm.configs[len(sm.configs)-1]
	Clone(&lastConfig, &newConfig)
	newConfig.Num = len(sm.configs)
	for gid, names := range args.Servers {
		newConfig.Groups[gid] = names
	}
	DPrintf("NewConfigAfterJoin, lastConfig=%+v, newConfig=%+v, args=%+v",
		lastConfig, newConfig, args)
	sm.RebalanceNolock(&newConfig)
	sm.configs = append(sm.configs, newConfig)
	return sm.configs
}
// AppendConfigAfterLeaveNolock builds the successor configuration after the
// groups in args.GIDs leave: clone latest config, drop those groups,
// rebalance, append, and return the whole config list.
// Caller must hold sm.mu.
func (sm *ShardMaster) AppendConfigAfterLeaveNolock(args *LeaveArgs) []Config {
	newConfig := Config{}
	newConfig.Groups = map[int][]string{}
	lastConfig := sm.configs[len(sm.configs)-1]
	Clone(&lastConfig, &newConfig)
	newConfig.Num = len(sm.configs)
	for _, gid := range args.GIDs {
		delete(newConfig.Groups, gid)
	}
	DPrintf("NewConfigAfterLeave, lastConfig=%+v, newConfig=%+v, args=%+v",
		lastConfig, newConfig, args)
	sm.RebalanceNolock(&newConfig)
	sm.configs = append(sm.configs, newConfig)
	return sm.configs
}
// AppendConfigAfterMoveNolock builds the successor configuration with
// args.Shard pinned to group args.GID. No rebalance is performed, so the
// explicit assignment survives until the next Join/Leave.
// Caller must hold sm.mu.
func (sm *ShardMaster) AppendConfigAfterMoveNolock(args *MoveArgs) []Config {
	newConfig := Config{}
	newConfig.Groups = map[int][]string{}
	lastConfig := sm.configs[len(sm.configs)-1]
	Clone(&lastConfig, &newConfig)
	newConfig.Num = len(sm.configs)
	newConfig.Shards[args.Shard] = args.GID
	DPrintf("NewConfigAfterMove, lastConfig=%+v, newConfig=%+v, args=%+v",
		lastConfig, newConfig, args)
	sm.configs = append(sm.configs, newConfig)
	return sm.configs
}
// RebalanceNolock redistributes the NShards shards across config.Groups:
// the first NShards - (NShards % numOfGroup) shards are dealt round-robin,
// then the leftover shards are assigned from a collected group list. No-op
// when there are no groups. Caller must hold sm.mu.
// NOTE(review): ranging over config.Groups iterates in random order, so two
// servers computing this independently could disagree; it appears safe only
// because the leader computes the config once and replicates it inside the
// Op — confirm.
func (sm *ShardMaster) RebalanceNolock(config *Config) {
	// balance shards to latest groups
	numOfGroup := len(config.Groups)
	if numOfGroup > 0 {
		//numOfNodesPerGroup := NShards / numOfGroup
		//log.Println("num of shards per group is", numOfNodesPerGroup)
		leftOver := NShards % numOfGroup
		// NShards-leftOver is a multiple of numOfGroup, so i lands exactly
		// on the boundary when the outer loop exits.
		for i := 0; i < NShards-leftOver; {
			for gid := range config.Groups {
				//log.Println("shard is", i, "group id is", gid)
				config.Shards[i] = gid
				i++
			}
		}
		groupList := make([]int, 0)
		for gid := range config.Groups {
			groupList = append(groupList, gid)
		}
		// add left over shards
		for j := NShards - leftOver; j < NShards && len(groupList) > 0; j++ {
			nextGroup := (j % numOfGroup)
			config.Shards[j] = groupList[nextGroup]
		}
		DPrintf("RebalanceNolock result %+v\n", config.Shards)
	}
}
// Join adds the replica groups in args.Servers. Duplicate requests (same
// ClientId/RequestId already applied) short-circuit with OK. The new config
// is computed locally, then replicated through Raft; non-leaders reply
// WrongLeader so the client retries elsewhere.
func (sm *ShardMaster) Join(args *JoinArgs, reply *JoinReply) {
	// Your code here.
	_, isLeader := sm.rf.GetState()
	if !isLeader {
		reply.WrongLeader = true
		return
	}
	methodName := "Join"
	dup, ok := sm.GetDuplicate(args.ClientId, methodName)
	if ok && (dup == args.RequestId) {
		// Already applied: make the retry idempotent.
		reply.Err = OK
		return
	}
	sm.Lock()
	// NOTE(review): the new config is appended to sm.configs *before* the op
	// commits; if Start fails below the speculative entry remains — confirm
	// this is reconciled by OnApplyEntry/commitIndex handling.
	newConfig := sm.AppendConfigAfterJoinNolock(args)
	ops := Op{
		Method:    methodName,
		Config:    newConfig,
		RequestId: args.RequestId,
		ClientId:  args.ClientId,
	}
	sm.Unlock()
	index, term, isLeader := sm.rf.Start(ops)
	if !isLeader {
		reply.WrongLeader = true
		return
	}
	// Block until the entry at `index` is applied or leadership is lost.
	success := sm.await(index, term, ops)
	if !success {
		reply.WrongLeader = true
		return
	} else {
		reply.Err = OK
		DPrintf("Join Success: args:%v\n", args)
		return
	}
}
// Leave removes the replica groups listed in args.GIDs. Duplicate requests
// short-circuit with OK; the new config is computed locally and replicated
// through Raft; non-leaders reply WrongLeader.
func (sm *ShardMaster) Leave(args *LeaveArgs, reply *LeaveReply) {
	// Your code here.
	_, isLeader := sm.rf.GetState()
	if !isLeader {
		reply.WrongLeader = true
		return
	}
	methodName := "Leave"
	dup, ok := sm.GetDuplicate(args.ClientId, methodName)
	if ok && (dup == args.RequestId) {
		// Already applied: make the retry idempotent.
		reply.Err = OK
		return
	}
	sm.Lock()
	newConfig := sm.AppendConfigAfterLeaveNolock(args)
	ops := Op{
		Method:    methodName,
		Config:    newConfig,
		RequestId: args.RequestId,
		ClientId:  args.ClientId,
	}
	sm.Unlock()
	index, term, isLeader := sm.rf.Start(ops)
	if !isLeader {
		reply.WrongLeader = true
		return
	}
	// Block until the entry at `index` is applied or leadership is lost.
	success := sm.await(index, term, ops)
	if !success {
		reply.WrongLeader = true
		return
	} else {
		reply.Err = OK
		DPrintf("Leave Success: args:%v\n", args)
		return
	}
}
// Move pins shard args.Shard to group args.GID. Duplicate requests
// short-circuit with OK; the new config is computed locally and replicated
// through Raft; non-leaders reply WrongLeader.
func (sm *ShardMaster) Move(args *MoveArgs, reply *MoveReply) {
	// Your code here.
	_, isLeader := sm.rf.GetState()
	if !isLeader {
		reply.WrongLeader = true
		return
	}
	methodName := "Move"
	dup, ok := sm.GetDuplicate(args.ClientId, methodName)
	if ok && (dup == args.RequestId) {
		// Already applied: make the retry idempotent.
		reply.Err = OK
		return
	}
	sm.Lock()
	newConfig := sm.AppendConfigAfterMoveNolock(args)
	ops := Op{
		Method:    methodName,
		Config:    newConfig,
		RequestId: args.RequestId,
		ClientId:  args.ClientId,
	}
	sm.Unlock()
	index, term, isLeader := sm.rf.Start(ops)
	if !isLeader {
		reply.WrongLeader = true
		return
	}
	// Block until the entry at `index` is applied or leadership is lost.
	success := sm.await(index, term, ops)
	if !success {
		reply.WrongLeader = true
		return
	} else {
		reply.Err = OK
		DPrintf("Move Success: args:%v\n", args)
		return
	}
}
// getConfigNolock returns a copy of the config at `index`; a negative or
// out-of-range index yields the latest committed config. Caller holds sm.mu.
func (sm *ShardMaster) getConfigNolock(index int) Config {
	var config Config
	config.Groups = map[int][]string{}
	src := index
	if src < 0 || src > sm.commitIndex {
		src = sm.commitIndex
	}
	Clone(&sm.configs[src], &config)
	return config
}
// Query returns the configuration numbered args.Num (negative/out-of-range
// means the latest committed one). A no-op entry is pushed through Raft so
// the answer is only returned once this server confirms leadership.
// NOTE(review): the config is snapshotted *before* the no-op commits —
// confirm this ordering still gives the intended consistency.
func (sm *ShardMaster) Query(args *QueryArgs, reply *QueryReply) {
	// Your code here.
	DPrintf("Query request: args:%v\n", args)
	defer DPrintf("Query response: reply:%v\n", reply)
	_, isLeader := sm.rf.GetState()
	if !isLeader {
		reply.WrongLeader = true
		return
	}
	methodName := "Query"
	sm.Lock()
	theCareConfig := sm.getConfigNolock(args.Num)
	ops := Op{
		Method:    methodName,
		Config:    nil,
		RequestId: args.RequestId,
		ClientId:  args.ClientId,
	}
	sm.Unlock()
	index, term, isLeader := sm.rf.Start(ops)
	if !isLeader {
		reply.WrongLeader = true
		return
	}
	success := sm.await(index, term, ops)
	if !success {
		reply.WrongLeader = true
		return
	} else {
		reply.Config = theCareConfig
		reply.Err = OK
		return
	}
}
//
// the tester calls Kill() when a ShardMaster instance won't
// be needed again. you are not required to do anything
// in Kill(), but it might be convenient to (for example)
// turn off debug output from this instance.
//
// Kill shuts down the underlying Raft instance; called by the tester when
// this ShardMaster instance is no longer needed.
func (sm *ShardMaster) Kill() {
	sm.rf.Kill()
	// Your code here, if desired.
}
// Raft exposes the underlying Raft instance; needed by the shardkv tester.
func (sm *ShardMaster) Raft() *raft.Raft {
	return sm.rf
}
// await blocks until the Raft entry at `index` is applied and reports
// whether it committed in the same `term` (i.e. our proposal survived).
// It returns false when this node is demoted to follower while waiting.
func (sm *ShardMaster) await(index int, term int, op Op) (success bool) {
	notify := sm.registerIndexHandler(index)
	for {
		message := <-notify
		if sm.RaftBecomeFollower(&message) {
			return false
		}
		if message.CommandValid && message.CommandIndex == index {
			return message.CommandTerm == term
		}
		// Otherwise keep waiting for the entry we registered for.
	}
}
// Max returns the larger of two ints.
func Max(x, y int) int {
	if x < y {
		return y
	}
	return x
}
// OnApplyEntry applies a committed Op to local state. Join/Leave/Move adopt
// the op's pre-computed config list when it is longer than ours (the
// follower case) and record the request for duplicate suppression; Query
// mutates nothing. Finally, any RPC handler waiting on this log index is
// woken with the message.
func (sm *ShardMaster) OnApplyEntry(m *raft.ApplyMsg) {
	ops := m.Command.(Op)
	dup, ok := sm.GetDuplicate(ops.ClientId, ops.Method)
	sm.Lock()
	defer sm.Unlock()
	if !ok || (dup != ops.RequestId) {
		switch ops.Method {
		case "Leave":
			fallthrough
		case "Join":
			fallthrough
		case "Move":
			if len(ops.Config) > len(sm.configs) {
				// follower: adopt the leader's longer config history
				sm.configs = ops.Config
			}
			sm.commitIndex = Max(sm.commitIndex, len(ops.Config)-1)
			sm.SetDuplicateNolock(ops.ClientId, ops.Method, ops.RequestId)
		case "Query":
			// nothing
		}
	}
	ch, ok := sm.requestHandlers[m.CommandIndex]
	if ok {
		delete(sm.requestHandlers, m.CommandIndex)
		ch <- *m
	}
}
// RaftBecomeFollower reports whether m is a role-change notification saying
// the underlying Raft instance stepped down to follower.
func (sm *ShardMaster) RaftBecomeFollower(m *raft.ApplyMsg) bool {
	if m.CommandValid {
		return false
	}
	return m.Type == raft.MsgTypeRole && m.Role == raft.RoleFollower
}
// OnRoleNotify handles a role-change message: when this node falls back to
// follower, every pending RPC handler is woken with the message so it can
// reply WrongLeader instead of blocking forever.
func (sm *ShardMaster) OnRoleNotify(m *raft.ApplyMsg) {
	sm.Lock()
	defer sm.Unlock()
	if sm.RaftBecomeFollower(m) {
		for index, ch := range sm.requestHandlers {
			delete(sm.requestHandlers, index)
			ch <- *m
		}
	}
}
// receivingApplyMsg is the apply loop: it consumes Raft's apply channel
// until a kill message arrives. Committed entries go to OnApplyEntry;
// role-change notifications go to OnRoleNotify.
func (sm *ShardMaster) receivingApplyMsg() {
	for {
		select {
		case m := <-sm.applyCh:
			if m.CommandValid {
				DPrintf("receivingApplyMsg receive entry message. %+v.", m)
				sm.OnApplyEntry(&m)
				// NOTE(review): sm.configs is read here without holding
				// sm.mu — confirm this logging race is acceptable.
				DPrintf("new configs after apply. %+v.", sm.configs)
			} else if (m.Type == raft.MsgTypeKill) {
				DPrintf("receivingApplyMsg receive kill message. %+v.", m)
				return
			} else if (m.Type == raft.MsgTypeRole) {
				//DPrintf("receivingApplyMsg receive role message. %+v.", m)
				sm.OnRoleNotify(&m)
			}
		}
	}
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Paxos to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
// StartServer creates a ShardMaster replica backed by a Raft instance over
// the given peers; me is this server's index in servers[]. The initial
// configuration (Num 0) has no groups. The apply loop is started before
// returning.
func StartServer(servers []*labrpc.ClientEnd, me int, persister *raft.Persister) *ShardMaster {
	sm := new(ShardMaster)
	sm.me = me
	sm.configs = make([]Config, 1)
	sm.configs[0].Groups = map[int][]string{}
	labgob.Register(Op{})
	sm.applyCh = make(chan raft.ApplyMsg)
	sm.rf = raft.Make(servers, me, persister, sm.applyCh)
	// Your code here.
	sm.duplicate = make(map[int64]map[string]int64)
	sm.requestHandlers = make(map[int]chan raft.ApplyMsg)
	sm.commitIndex = 0
	go sm.receivingApplyMsg()
	return sm
}
|
package main
import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"os"

	"github.com/gorilla/handlers"
	"github.com/gorilla/mux"
)
// main starts a small proxy exposing Reddit's r/place board bitmap on :4040
// with request logging on stdout.
func main() {
	r := mux.NewRouter()
	r.HandleFunc("/api/place/board-bitmap", bitmapHandler)
	// ListenAndServe always returns a non-nil error on exit; surface it
	// instead of silently dropping it.
	log.Fatal(http.ListenAndServe(":4040", handlers.LoggingHandler(os.Stdout, r)))
}
// bitmapHandler fetches Reddit's r/place board bitmap and relays it to the
// caller, adding a permissive CORS header so browser clients on other
// origins can read it. Upstream failures panic (matching the original
// behavior; the logging middleware recovers per request).
func bitmapHandler(w http.ResponseWriter, req *http.Request) {
	client := &http.Client{}
	w.Header().Add("Access-Control-Allow-Origin", "*")
	request, err := http.NewRequest("GET",
		"https://www.reddit.com/api/place/board-bitmap", nil)
	if err != nil {
		panic(err)
	}
	resp, err := client.Do(request)
	if err != nil {
		panic(err)
	}
	// Fix: the body was never closed, leaking the upstream connection.
	defer resp.Body.Close()
	bodyBytes, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// Debug output of byte 4 split into its two 4-bit halves (presumably two
	// packed palette indices — confirm against the board format). Guarded so
	// a short/error payload cannot panic the handler.
	if len(bodyBytes) > 4 {
		fmt.Println(bodyBytes[4] >> 4)
		fmt.Println(bodyBytes[4] % 16)
	}
	w.Write(bodyBytes)
}
|
package pydict
import (
"fmt"
"io/ioutil"
"testing"
)
var _ = fmt.Println
// mustReadFile returns the contents of the file at p as a string, panicking
// on any read error (intended for loading test fixtures only).
func mustReadFile(p string) string {
	data, err := ioutil.ReadFile(p)
	if err != nil {
		panic(err)
	}
	return string(data)
}
// TestLex runs the lexer over the test1.input fixture and checks that the
// token values come out in the expected order, terminated by EOF.
func TestLex(t *testing.T) {
	expect := []string{
		"{", "foo", ":", "bar", ",",
		"[", "a", ",", "b", ",", "c", "]", ",", "}",
	}
	l := lex(mustReadFile("test1.input"))
	for _, v := range expect {
		item := l.nextItem()
		if item.iType == itemError {
			t.Error("error occurred")
		}
		if v != item.value {
			t.Errorf("got %s but want %s", item.value, v)
		}
	}
	// The stream must be fully consumed.
	if l.nextItem().iType != itemEOF {
		t.Error("expect EOF")
	}
}
|
// Copyright 2021 Kuei-chun Chen. All rights reserved.
package keyhole
import (
"github.com/simagix/keyhole/mdb"
"go.mongodb.org/mongo-driver/mongo"
)
// MonitorWiredTigerCache starts WiredTiger cache monitoring for the given
// MongoDB server version against the connected client.
func MonitorWiredTigerCache(version string, client *mongo.Client) {
	wtc := mdb.NewWiredTigerCache(version)
	wtc.Start(client)
}
|
package dht
// Router is a Kademlia-style routing table: peers live in buckets indexed
// by the bit length of the XOR distance from the local ID, each bucket
// holding at most k peers.
type Router struct {
	local   ID                // this node's own ID
	k       int               // per-bucket capacity
	buckets [BucketSize]Peers // distance buckets; nearer IDs land in higher indices
}
// NewRouter builds an empty routing table for the given local node ID with
// bucket capacity k. The error result is always nil (kept for API stability).
func NewRouter(local ID, k int) (*Router, error) {
	router := &Router{local: local, k: k}
	for i := range router.buckets {
		router.buckets[i] = Peers{}
	}
	return router, nil
}
// bucketIndex maps an ID to its distance bucket: the last bucket for the
// local ID itself, otherwise len(buckets) minus the bit length of the XOR
// distance (closer IDs yield higher indices).
func (r *Router) bucketIndex(id ID) int {
	if id == r.local {
		return len(r.buckets) - 1
	}
	return len(r.buckets) - r.local.Xor(id).BitLen()
}
// update inserts peer into its distance bucket. The local node itself is
// never stored, and a bucket already holding k peers rejects the newcomer
// (no eviction policy).
func (r *Router) update(peer *Peer) {
	if peer.ID == r.local {
		return
	}
	bucket := r.buckets[r.bucketIndex(peer.ID)]
	if len(bucket) == r.k {
		return
	}
	bucket[peer.ID] = peer
}
// remove deletes the peer with the given ID from its bucket, if present.
func (r *Router) remove(id ID) {
	delete(r.buckets[r.bucketIndex(id)], id)
}
// near returns up to k peers closest to target. Candidates are gathered from
// the target's own bucket upward, then from lower buckets, until at least k
// are collected; the combined set is sorted by distance to target and
// truncated to k. Collection may briefly exceed k — the final Sort+truncate
// restores the invariant.
func (r *Router) near(target ID, k int) []*Peer {
	result := make([]*Peer, 0, k)
	bi := r.bucketIndex(target)
	for i := bi; i < len(r.buckets); i++ {
		if len(result) >= k {
			break
		}
		bucket := r.buckets[i]
		result = append(result, bucket.List()...)
	}
	for i := bi - 1; i >= 0; i-- {
		if len(result) >= k {
			break
		}
		bucket := r.buckets[i]
		result = append(result, bucket.List()...)
	}
	result = NewPeers(result).Sort(target)
	if len(result) > k {
		result = result[:k]
	}
	return result
}
// length returns the total number of peers across all buckets.
func (r *Router) length() int {
	total := 0
	for i := range r.buckets {
		total += len(r.buckets[i])
	}
	return total
}
// each invokes fn on every known peer, stopping early when fn returns false.
// The return value reports whether the iteration ran to completion.
func (r *Router) each(fn func(peer *Peer) bool) bool {
	for _, bucket := range r.buckets {
		for _, peer := range bucket {
			if !fn(peer) {
				return false
			}
		}
	}
	return true
}
// list flattens every bucket into a single slice of peers (empty, never nil).
func (r *Router) list() []*Peer {
	peers := make([]*Peer, 0)
	for _, bucket := range r.buckets {
		peers = append(peers, bucket.List()...)
	}
	return peers
}
|
package main
import (
"github.com/davecgh/go-spew/spew"
)
// 108. 将有序数组转换为二叉搜索树
// 将一个按照升序排列的有序数组,转换为一棵高度平衡二叉搜索树。
// 本题中,一个高度平衡二叉树是指一个二叉树每个节点 的左右两个子树的高度差的绝对值不超过 1。
// 链接:https://leetcode-cn.com/problems/convert-sorted-array-to-binary-search-tree
// main demonstrates the iterative builder on the LeetCode sample input.
func main() {
	spew.Dump(sortedArrayToBST2([]int{-10, -3, 0, 5, 9}))
}
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int
	Left  *TreeNode
	Right *TreeNode
}

// Approach 1: recursion.
// sortedArrayToBST builds a height-balanced BST from an ascending slice by
// rooting each subtree at the middle element of its subarray.
// Time O(n); space O(log n) for the recursion stack.
func sortedArrayToBST(nums []int) *TreeNode {
	if len(nums) == 0 {
		return nil
	}
	m := len(nums) / 2
	return &TreeNode{
		Val:   nums[m],
		Left:  sortedArrayToBST(nums[:m]),
		Right: sortedArrayToBST(nums[m+1:]),
	}
}
// Approach 2: explicit stack + loop (see sortedArrayToBST2).
type MyNode struct {
	parent      *TreeNode
	left, right int // start/end indices of the subarray this frame covers
	isLeft      bool // whether the built subtree attaches as parent's left child
}

// newMyNode builds a stack frame describing the subtree to construct from
// nums[left..right] and where to attach it.
func newMyNode(parent *TreeNode, left, right int, isLeft bool) *MyNode {
	return &MyNode{parent, left, right, isLeft}
}
// sortedArrayToBST2 is the iterative variant of sortedArrayToBST: a stack of
// MyNode frames records which subarray builds which child, and the real tree
// hangs off a sentinel root's Left pointer.
func sortedArrayToBST2(nums []int) *TreeNode {
	n := len(nums)
	if n == 0 {
		return nil
	} else if n == 1 {
		return &TreeNode{Val: nums[0]}
	}
	mid := n >> 1
	// Sentinel node; the finished tree is root.Left.
	root := &TreeNode{}
	stack := []*MyNode{
		newMyNode(root, 0, n-1, true),
	}
	for len(stack) > 0 {
		last := stack[len(stack)-1]
		stack = stack[:len(stack)-1]
		// Empty ranges (left > right) are simply discarded.
		if last.left <= last.right {
			mid = last.left + ((last.right - last.left) >> 1)
			tree := &TreeNode{Val: nums[mid]}
			if last.isLeft {
				last.parent.Left = tree
			} else {
				last.parent.Right = tree
			}
			// Schedule both halves; order doesn't matter for the result.
			stack = append(stack, newMyNode(tree, last.left, mid-1, true))
			stack = append(stack, newMyNode(tree, mid+1, last.right, false))
		}
	}
	return root.Left
}
|
package agenda
import (
errors "convention/agendaerror"
"entity"
"model"
"time"
log "util/logger"
)
// Convenience aliases re-exporting the entity types used throughout agenda.
type Username = entity.Username
type Auth = entity.Auth
type UserInfo = entity.UserInfo
type UserInfoPublic = entity.UserInfoPublic
type User = entity.User
type MeetingInfo = entity.MeetingInfo
type Meeting = entity.Meeting
type MeetingTitle = entity.MeetingTitle
// MakeUserInfo assembles a UserInfo value from the given account fields.
func MakeUserInfo(username Username, password Auth, email, phone string) UserInfo {
	var info UserInfo
	info.Name = username
	info.Auth = password
	info.Mail = email
	info.Phone = phone
	return info
}
// MakeMeetingInfo assembles a MeetingInfo: the sponsor is resolved to its
// registered user reference and the participator list is converted into the
// internal collection type.
func MakeMeetingInfo(title MeetingTitle, sponsor Username, participators []Username, startTime, endTime time.Time) MeetingInfo {
	var info MeetingInfo
	info.Title = title
	info.Sponsor = sponsor.RefInAllUsers()
	info.Participators.InitFrom(participators)
	info.StartTime = startTime
	info.EndTime = endTime
	return info
}
// LoadAll restores the persisted model data and the login status.
func LoadAll() {
	model.Load()
	LoadLoginStatus()
}
// SaveAll persists the model data (logging any failure) and the login status.
func SaveAll() {
	if err := model.Save(); err != nil {
		log.Error(err)
	}
	SaveLoginStatus()
}
// NOTE: Now, assume the operations' actor are always the `Current User`
// RegisterUser validates the username and adds a new user built from uInfo
// to the registry, returning the registry's error (e.g. duplicate name).
func RegisterUser(uInfo UserInfo) error {
	if !uInfo.Name.Valid() {
		return errors.ErrInvalidUsername
	}
	newUser := entity.NewUser(uInfo)
	return entity.GetAllUsersRegistered().Add(newUser)
}
// LogIn authenticates `name` with `auth` and records it as the current
// logged-in user. Fails when the user is unknown, when someone is already
// logged in, or when the credential does not verify.
func LogIn(name Username, auth Auth) error {
	u := name.RefInAllUsers()
	if u == nil {
		return errors.ErrNilUser
	}
	// NOTE(review): this logs before authentication has actually succeeded.
	log.Printf("User %v logs in.\n", name)
	if LoginedUser() != nil {
		return errors.ErrLoginedUserAuthority
	}
	if verified := u.Auth.Verify(auth); !verified {
		return errors.ErrFailedAuth
	}
	loginedUser = name
	return nil
}
// LogOut logs out the user's own (current working) account. Only the
// currently logged-in user may log itself out. The global login marker is
// cleared even when the user-level LogOut reports an error.
// TODO:
func LogOut(name Username) error {
	u := name.RefInAllUsers()
	// check if under login status, TODO: check the login status
	if logined := LoginedUser(); logined == nil {
		return errors.ErrUserNotLogined
	} else if logined != u {
		return errors.ErrUserAuthority
	}
	err := u.LogOut()
	if err != nil {
		log.Errorf("Failed to log out, error: %q.\n", err.Error())
	}
	loginedUser = ""
	return err
}
// QueryAccountAll queries all accounts on behalf of the logged-in user.
// NOTE: FIXME: whatever, temporarily ignore the problem that the actor of query is Nil
// Hence, now if so, agenda would crash for `Nil.Name`
func QueryAccountAll() []UserInfoPublic {
	ret := LoginedUser().QueryAccountAll()
	return ret
}
// CancelAccount cancels (deletes) LoginedUser's account: meetings the user
// sponsors are dissolved, meetings it participates in exclude it, then the
// account is removed, logged out, and cancelled. Individual failures are
// logged and do not stop the remaining cleanup.
func CancelAccount() error {
	u := LoginedUser()
	if u == nil {
		return errors.ErrUserNotLogined
	}
	if err := entity.GetAllMeetings().ForEach(func(m *Meeting) error {
		if m.SponsoredBy(u.Name) {
			return m.Dissolve()
		}
		if m.ContainsParticipator(u.Name) {
			return m.Exclude(u)
		}
		return nil
	}); err != nil {
		log.Error(err)
	}
	if err := entity.GetAllUsersRegistered().Remove(u); err != nil {
		log.Error(err)
	}
	if err := u.LogOut(); err != nil {
		log.Error(err)
	}
	err := u.CancelAccount()
	return err
}
// SponsorMeeting creates a meeting from mInfo on behalf of the logged-in
// user. It validates the title (non-empty, not already taken), that the
// sponsor is the logged-in user, that all participators are registered and
// free during the interval, and that the interval itself is positive.
func SponsorMeeting(mInfo MeetingInfo) (*Meeting, error) {
	u := LoginedUser()
	if u == nil {
		return nil, errors.ErrUserNotLogined
	}
	info := mInfo
	if !info.Title.Valid() {
		return nil, errors.ErrInvalidMeetingTitle
	}
	// NOTE: dev-assert
	if info.Sponsor == nil {
		return nil, errors.ErrNilSponsor
	} else if info.Sponsor.Name != LoginedUser().Name {
		log.Fatalf("User %v is creating a meeting with Sponsor %v\n", LoginedUser().Name, info.Sponsor.Name)
	}
	// NOTE: repeat in MeetingList.Add ... DEL ?
	if info.Title.RefInAllMeetings() != nil {
		return nil, errors.ErrExistedMeetingTitle
	}
	// if !LoginedUser().Registered() { return nil, errors.ErrUserNotRegistered }
	// NOTE: the closure parameter `u` shadows the logged-in user above.
	if err := info.Participators.ForEach(func(u *User) error {
		if !u.Registered() {
			return errors.ErrUserNotRegistered
		}
		return nil
	}); err != nil {
		log.Error(err)
		return nil, err
	}
	if !info.EndTime.After(info.StartTime) {
		return nil, errors.ErrInvalidTimeInterval
	}
	if err := info.Participators.ForEach(func(u *User) error {
		if !u.FreeWhen(info.StartTime, info.EndTime) {
			return errors.ErrConflictedTimeInterval
		}
		return nil
	}); err != nil {
		log.Error(err)
		return nil, err
	}
	m, err := LoginedUser().SponsorMeeting(info)
	if err != nil {
		log.Errorf("Failed to sponsor meeting, error: %q.\n", err.Error())
	}
	return m, err
}
// AddParticipatorToMeeting adds user `name` to the meeting `title`. Only the
// meeting's sponsor may do this; the user must exist, not already be a
// participator, and be free during the meeting's interval.
func AddParticipatorToMeeting(title MeetingTitle, name Username) error {
	u := LoginedUser()
	// check if under login status, TODO: check the login status
	if u == nil {
		return errors.ErrUserNotLogined
	}
	meeting, user := title.RefInAllMeetings(), name.RefInAllUsers()
	if meeting == nil {
		return errors.ErrNilMeeting
	}
	if user == nil {
		return errors.ErrNilUser
	}
	if !meeting.SponsoredBy(u.Name) {
		return errors.ErrSponsorAuthority
	}
	if meeting.ContainsParticipator(name) {
		return errors.ErrExistedUser
	}
	if !user.FreeWhen(meeting.StartTime, meeting.EndTime) {
		return errors.ErrConflictedTimeInterval
	}
	err := u.AddParticipatorToMeeting(meeting, user)
	if err != nil {
		log.Errorf("Failed to add participator into Meeting, error: %q.\n", err.Error())
	}
	return err
}
// RemoveParticipatorFromMeeting removes user `name` from the meeting
// `title`. Only the meeting's sponsor may do this; the meeting and user must
// exist and the user must currently be a participator.
func RemoveParticipatorFromMeeting(title MeetingTitle, name Username) error {
	u := LoginedUser()
	// check if under login status, TODO: check the login status
	if u == nil {
		return errors.ErrUserNotLogined
	}
	meeting, user := title.RefInAllMeetings(), name.RefInAllUsers()
	if meeting == nil {
		return errors.ErrMeetingNotFound
	}
	if user == nil {
		return errors.ErrUserNotRegistered
	}
	if !meeting.SponsoredBy(u.Name) {
		return errors.ErrSponsorAuthority
	}
	if !meeting.ContainsParticipator(name) {
		return errors.ErrUserNotFound
	}
	err := u.RemoveParticipatorFromMeeting(meeting, user)
	if err != nil {
		log.Errorf("Failed to remove participator from Meeting, error: %q.\n", err.Error())
	}
	return err
}
// QueryMeetingByInterval lists the logged-in user's meetings overlapping
// [start, end]. The `name` parameter is currently unused.
// NOTE: FIXME: whatever, temporarily ignore the problem that the actor of query is Nil
// Hence, now if so, agenda would crash for `Nil.Name`
func QueryMeetingByInterval(start, end time.Time, name Username) entity.MeetingInfoListPrintable {
	ret := LoginedUser().QueryMeetingByInterval(start, end)
	return ret
}
// CancelMeeting cancels (deletes) the meeting `title`, which must be
// sponsored by the logged-in user itself.
func CancelMeeting(title MeetingTitle) error {
	sponsor := LoginedUser()
	// check if under login status, TODO: check the login status
	if sponsor == nil {
		return errors.ErrUserNotLogined
	}
	m := title.RefInAllMeetings()
	if m == nil {
		return errors.ErrMeetingNotFound
	}
	if !m.SponsoredBy(sponsor.Name) {
		return errors.ErrSponsorAuthority
	}
	if err := sponsor.CancelMeeting(m); err != nil {
		log.Errorf("Failed to cancel Meeting, error: %q.\n", err.Error())
		return err
	}
	return nil
}
// QuitMeeting lets the logged-in user quit the given meeting. The sponsor
// cannot quit its own meeting (refused for now), and the user must actually
// be a participator.
func QuitMeeting(title MeetingTitle) error {
	u := LoginedUser()
	// check if under login status, TODO: check the login status
	if u == nil {
		return errors.ErrUserNotLogined
	}
	meeting := title.RefInAllMeetings()
	if meeting == nil {
		return errors.ErrMeetingNotFound
	}
	// CHECK: what to do in case User is exactly the sponsor ?
	// for now, refuse that
	if meeting.SponsoredBy(u.Name) {
		return errors.ErrSponsorResponsibility
	}
	if !meeting.ContainsParticipator(u.Name) {
		return errors.ErrUserNotFound
	}
	err := u.QuitMeeting(meeting)
	if err != nil {
		log.Errorf("Failed to quit Meeting, error: %q.\n", err.Error())
	}
	return err
}
// ClearAllMeeting cancels every meeting sponsored by the logged-in user by
// delegating each one to CancelMeeting; the first failure aborts and is
// returned after being logged.
func ClearAllMeeting() error {
	u := LoginedUser()
	// check if under login status, TODO: check the login status
	if u == nil {
		return errors.ErrUserNotLogined
	}
	if err := entity.GetAllMeetings().ForEach(func(m *Meeting) error {
		if m.SponsoredBy(u.Name) {
			return CancelMeeting(m.Title)
		}
		return nil
	}); err != nil {
		log.Errorf("Failed to clear all Meetings, error: %q.\n", err.Error())
		return err
	}
	return nil
}
|
package query
import (
"github.com/juju/errgo"
// "github.com/mezis/klask/index"
"strings"
)
// A generic query, which can combine $and, $or, field filters, and a $by
// clause. They will be run in an unspecified order, except the optional $by clause
// which is run last.
// Represented by a JSON object.
type query_generic_t struct {
	// queries []Query
	queries *query_and_t   // all field/$and/$or filters folded into a single $and
	order   *query_order_t // optional $by ordering with limit/offset; runs last
}
// parse populates the generic query from a decoded JSON object. It accepts
// "$or"/"$and" subqueries, a "$by" ordering clause with optional "$limit" /
// "$offset", and treats any other non-"$" key as a field filter. All filters
// are combined under a single $and; the $by clause is stored to run last.
func (self *query_generic_t) parse(parsed interface{}) error {
	var (
		err     error                  = nil
		order   *query_order_t         = nil
		limit   uint                   = 0
		offset  uint                   = 0
		queries []Query                = make([]Query, 0)
		node    map[string]interface{} = nil
	)
	switch n := parsed.(type) {
	case map[string]interface{}:
		node = n
	default:
		return errgo.Newf("unexpected node of type %T (%v)", node, node)
	}
	for key, subnode := range node {
		switch {
		case key == "$or":
			q := new(query_or_t)
			queries = append(queries, q)
			err = q.parse(subnode)
		case key == "$and":
			q := new(query_and_t)
			queries = append(queries, q)
			err = q.parse(subnode)
		case key == "$by":
			// BUG FIX: this was `order := new(query_order_t)`, which shadowed
			// the outer `order` — the clause was parsed into a throwaway
			// variable, self.order was never set, and "$limit"/"$offset"
			// alongside a valid "$by" was wrongly rejected below.
			order = new(query_order_t)
			err = order.parse(subnode)
		case key == "$limit":
			limit, err = self.parseInt(subnode)
		case key == "$offset":
			offset, err = self.parseInt(subnode)
		case strings.HasPrefix(key, "$"):
			err = errgo.Newf("unknown subquery type '%s'", key)
		default:
			q := new(query_field_t)
			queries = append(queries, q)
			err = q.parse(key, subnode)
		}
		if err != nil {
			return errgo.Mask(err)
		}
	}
	// create the and query
	if len(queries) > 0 {
		q := new(query_and_t)
		q.queries = queries
		self.queries = q
	}
	// the order query, if any, should be last; limit/offset require it
	if order == nil && (limit != 0 || offset != 0) {
		return errgo.New("cannot have $limit or $offset without $by")
	}
	if order != nil {
		order.limit = limit
		order.offset = offset
		self.order = order
	}
	return nil
}
// parseInt converts a decoded-JSON number (float64) into a non-negative
// uint, rejecting negative values and any other dynamic type.
func (self *query_generic_t) parseInt(val interface{}) (uint, error) {
	v, ok := val.(float64)
	if !ok {
		return 0, errgo.Newf("expected positive int, got %T '%v'", val, val)
	}
	if v < 0 {
		return 0, errgo.Newf("unexpected negative value '%v'", v)
	}
	return uint(v), nil
}
// Run executes the query against `records`: the combined filters first (if
// any), then the optional ordering clause, returning the resulting key.
func (self *query_generic_t) Run(records string, ctx Context) (string, error) {
	filtered := records
	if self.queries != nil {
		var err error
		filtered, err = self.queries.Run(records, ctx)
		if err != nil {
			return "", errgo.Mask(err)
		}
	}
	if self.order == nil {
		return filtered, nil
	}
	sorted, err := self.order.Run(filtered, ctx)
	if err != nil {
		return "", errgo.Mask(err)
	}
	return sorted, nil
}
|
package opentrace
import (
"net/http"
"net/http/httptest"
"testing"
"errors"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/mocktracer"
)
// TestTransport_RoundTripper covers the happy path: a request carrying a
// span context goes through the tracing transport, the caller's headers
// survive, and exactly one span finishes with the expected operation name.
func TestTransport_RoundTripper(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	tracer := mocktracer.New()
	span := tracer.StartSpan("root")
	client := &http.Client{
		Transport: NewTransport().
			WithTracer(tracer).
			RoundTripper(http.DefaultTransport),
	}
	request, _ := http.NewRequest(http.MethodGet, server.URL, nil)
	request.Header.Set("Authorization", "Bearer")
	request = request.WithContext(
		opentracing.ContextWithSpan(request.Context(), span),
	)
	if _, err := client.Do(request); err != nil {
		t.Error(err)
	}
	span.Finish()
	// Headers have not been changed
	// NOTE(review): with `&&` this only fails when extra headers exist AND
	// Authorization was altered — confirm `||` wasn't intended.
	if len(request.Header) > 1 && request.Header.Get("Authorization") != "Bearer" {
		t.Errorf("request headers has been changed")
	}
	if count := len(tracer.FinishedSpans()); count != 1 {
		t.Errorf("number of finished spans '%d', expected 1", count)
	}
	finishedSpan := tracer.FinishedSpans()[0]
	if finishedSpan.OperationName != "root" {
		t.Errorf("operation name not equal: expected '%s', actual '%s'",
			"root", finishedSpan.OperationName)
	}
	if finishedSpan.FinishTime.IsZero() {
		t.Error("Finish time is zero")
	}
}
// TestTransport_RoundTripper_WithGlobalTracer verifies the transport falls
// back to the global tracer when none is set explicitly: one span finishes.
func TestTransport_RoundTripper_WithGlobalTracer(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	tracer := mocktracer.New()
	opentracing.SetGlobalTracer(tracer)
	span := tracer.StartSpan("root")
	client := &http.Client{
		Transport: NewTransport().RoundTripper(http.DefaultTransport),
	}
	request, _ := http.NewRequest(http.MethodGet, server.URL, nil)
	request = request.WithContext(
		opentracing.ContextWithSpan(request.Context(), span),
	)
	if _, err := client.Do(request); err != nil {
		t.Error(err)
	}
	span.Finish()
	if count := len(tracer.FinishedSpans()); count != 1 {
		t.Errorf("number of finished spans '%d', expected 1", count)
	}
}
// TestTransport_RoundTripper_SpannerReturnsNil verifies a request WITHOUT a
// span in its context still round-trips without error.
func TestTransport_RoundTripper_SpannerReturnsNil(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	tracer := mocktracer.New()
	client := &http.Client{
		Transport: NewTransport().
			WithTracer(tracer).
			RoundTripper(http.DefaultTransport),
	}
	request, _ := http.NewRequest(http.MethodGet, server.URL, nil)
	request.Header.Set("Authorization", "Bearer")
	if _, err := client.Do(request); err != nil {
		t.Error(err)
	}
}
// TestTransport_RoundTripper_InjectorError_InterruptOnError verifies that
// when WithInterruptOnError(true) is set, a failing injector aborts the
// request: client.Do must return a non-nil error.
func TestTransport_RoundTripper_InjectorError_InterruptOnError(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	}))
	tracer := mocktracer.New()
	span := tracer.StartSpan("root")
	client := &http.Client{
		Transport: NewTransport().
			WithTracer(tracer).
			WithInterruptOnError(true).
			// Injector that always fails, to trigger the interrupt path.
			WithInjector(InjectorFn(func(tracer opentracing.Tracer, ctx opentracing.SpanContext, r **http.Request) error {
				return errors.New("internal error")
			})).
			RoundTripper(http.DefaultTransport),
	}
	request, _ := http.NewRequest(http.MethodGet, server.URL, nil)
	request = request.WithContext(
		opentracing.ContextWithSpan(request.Context(), span),
	)
	if _, err := client.Do(request); err == nil {
		t.Error("error can not be nil")
	}
}
// TestTransport_WithSpanner checks that WithSpanner replaces the transport's
// spanner rather than leaving the previous one in place.
func TestTransport_WithSpanner(t *testing.T) {
	transport := NewTransport()
	spanner := transport.spanner
	spanner2 := transport.WithSpanner(new(CreatorSpanner)).spanner
	if spanner == spanner2 {
		t.Error("Span hasn't changed")
	}
}
|
package commands
import (
"flag"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"syscall"
)
// UNDAEMONIZE is the ready-to-use undaemonize command, configured with the
// standard Linux locations for working files, logs and configuration.
var UNDAEMONIZE = Undaemonize{
	workdir: "/var/uhppoted",
	logdir:  "/var/log/uhppoted",
	config:  "/etc/uhppoted/uhppoted.conf",
}
// Undaemonize implements the command that deregisters the service from
// systemd and removes its logrotate configuration and runtime files.
type Undaemonize struct {
	workdir string // directory holding runtime files (e.g. the PID file)
	logdir  string // directory holding log files
	config  string // path to the service configuration file
}
// Name returns the command identifier used on the command line.
func (cmd *Undaemonize) Name() string {
	const commandName = "undaemonize"
	return commandName
}
// FlagSet returns the flag set for the undaemonize command (no flags are
// defined; parsing errors terminate the process via flag.ExitOnError).
func (cmd *Undaemonize) FlagSet() *flag.FlagSet {
	flagset := flag.NewFlagSet("undaemonize", flag.ExitOnError)
	return flagset
}
// Description returns the one-line summary shown in command help.
func (cmd *Undaemonize) Description() string {
	return fmt.Sprintf("Deregisters %s as a service/daemon", SERVICE)
}
// Usage returns the argument synopsis; undaemonize takes no arguments.
func (cmd *Undaemonize) Usage() string {
	return ""
}
// Help prints detailed usage information for the undaemonize command to
// stdout, followed by the common option help.
func (cmd *Undaemonize) Help() {
	fmt.Println()
	fmt.Printf(" Usage: %s undaemonize\n", SERVICE)
	fmt.Println()
	fmt.Printf(" Deregisters %s from launchd as a service/daemon", SERVICE)
	fmt.Println()
	helpOptions(cmd.FlagSet())
}
// Execute deregisters the service: it stops and removes the systemd unit,
// removes the logrotate configuration, then cleans up the PID file and (when
// empty) the working directories. Configuration, working and log files are
// deliberately left on disk for the operator to delete.
func (cmd *Undaemonize) Execute(args ...interface{}) error {
	fmt.Println(" ... undaemonizing")
	if err := cmd.systemd(); err != nil {
		return err
	}
	if err := cmd.logrotate(); err != nil {
		return err
	}
	if err := cmd.clean(); err != nil {
		return err
	}
	fmt.Printf(" ... %s unregistered as a systemd service\n", SERVICE)
	fmt.Printf(`
NOTE: Configuration files in %s,
working files in %s,
and log files in %s
were not removed and should be deleted manually
`, filepath.Dir(cmd.config), cmd.workdir, cmd.logdir)
	fmt.Println()
	return nil
}
// systemd stops the running service via systemctl and removes its unit file.
// If the unit file does not exist there is nothing to do and no error is
// returned.
func (cmd *Undaemonize) systemd() error {
	path := filepath.Join("/etc/systemd/system", fmt.Sprintf("%s.service", SERVICE))
	_, err := os.Stat(path)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	if os.IsNotExist(err) {
		fmt.Printf(" ... nothing to do for 'systemd' (%s does not exist)\n", path)
		return nil
	}
	fmt.Printf(" ... stopping %s service\n", SERVICE)
	command := exec.Command("systemctl", "stop", SERVICE)
	out, err := command.CombinedOutput()
	// Echo systemctl's combined output (if any) for the operator.
	if strings.TrimSpace(string(out)) != "" {
		fmt.Printf(" > %s\n", out)
	}
	if err != nil {
		return fmt.Errorf("failed to stop '%s' (%v)", SERVICE, err)
	}
	fmt.Printf(" ... removing '%s'\n", path)
	err = os.Remove(path)
	if err != nil {
		return err
	}
	return nil
}
// logrotate removes the logrotate configuration installed for SERVICE.
// A missing file is not an error: this matches systemd() and clean(), which
// also tolerate already-removed artifacts so that undaemonize can be re-run.
func (cmd *Undaemonize) logrotate() error {
	path := filepath.Join("/etc/logrotate.d", SERVICE)
	fmt.Printf(" ... removing '%s'\n", path)
	// BUG FIX: previously any os.Remove error (including "file does not
	// exist") aborted the whole undaemonize sequence.
	if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}
// clean removes the PID file and then attempts to remove the log and working
// directories. Missing files/directories are ignored; a directory that is
// not empty (ENOTEMPTY) is left in place with a warning rather than failing,
// since Execute has already told the operator to delete leftovers manually.
func (cmd *Undaemonize) clean() error {
	files := []string{
		filepath.Join(cmd.workdir, fmt.Sprintf("%s.pid", SERVICE)),
	}
	directories := []string{
		cmd.logdir,
		cmd.workdir,
	}
	for _, f := range files {
		fmt.Printf(" ... removing '%s'\n", f)
		if err := os.Remove(f); err != nil && !os.IsNotExist(err) {
			return err
		}
	}
	for _, dir := range directories {
		fmt.Printf(" ... removing '%s'\n", dir)
		if err := os.Remove(dir); err != nil && !os.IsNotExist(err) {
			// Unwrap to the underlying errno so ENOTEMPTY can be treated as a
			// warning; any other error is fatal.
			patherr, ok := err.(*os.PathError)
			if !ok {
				return err
			}
			syserr, ok := patherr.Err.(syscall.Errno)
			if !ok {
				return err
			}
			if syserr != syscall.ENOTEMPTY {
				return err
			}
			fmt.Printf(" ... WARNING: could not remove directory '%s' (%v)\n", dir, syserr)
		}
	}
	return nil
}
|
package add
import "testing"
// TestAdd exercises Add with a small table of input pairs and their expected
// sums.
func TestAdd(t *testing.T) {
	cases := []struct {
		I      int
		J      int
		Expect int
	}{
		// tc1
		{
			I: 2, J: 3,
			Expect: 5,
		},
		// tc2
		{
			I: 3, J: 4,
			Expect: 7,
		},
	}
	for _, tc := range cases {
		actual := Add(tc.I, tc.J)
		// BUG FIX: removed a stray unconditional t.Errorf debug statement
		// ("expect: hoge, actual: foo") that made the test fail on every run.
		if actual != tc.Expect {
			t.Errorf("expect: %d, actual: %d", tc.Expect, actual)
		}
	}
}
|
package main
import (
	"bufio"
	"fmt"
	"net"
	"os"
	"path/filepath"
	"runtime"
	"time"

	"github.com/puslip41/GoStudy/third"
)
// Go reference-time layouts: MINUTE_FORMAT names the per-minute log files,
// SECOND_FORMAT timestamps individual records.
const MINUTE_FORMAT = "200601021504"
const SECOND_FORMAT = "20060102150405"

// UDP_READ_BUFFER_SIZE is the kernel receive buffer requested for the
// listening socket (10 MiB); WRITE_BUFFER_SIZE sizes the bufio writer.
const UDP_READ_BUFFER_SIZE = 1024*1024*10
const WRITE_BUFFER_SIZE = 1024
// main receives syslog messages over UDP and appends them to log files that
// rotate once per minute under savePath. A background goroutine prints the
// number of messages received each second.
func main() {
	port, savePath := getSyslogReceiverArgs()
	listener, err := openUdpListener(port)
	if err != nil {
		PrintError(err, "cannot open udp port")
	} else {
		defer listener.Close()
		count := 0
		beforeTime := time.Now().Format(MINUTE_FORMAT)
		buffer := make([]byte, 1024)
		var logWriter *third.LogWriter
		openLogFile := func(fileTime string) { // file close & open
			if logWriter != nil {
				logWriter.Close()
			}
			logWriter, err = openNewLogWriter(savePath, fileTime)
			if err != nil {
				PrintError(err, "cannot open log file")
			}
		}
		openLogFile(beforeTime)
		go func() { // print receive + write log count per second
			lastCheckSecond := time.Now().Format(SECOND_FORMAT)
			for {
				currentCheckSecond := time.Now()
				if lastCheckSecond != currentCheckSecond.Format(SECOND_FORMAT) {
					lastCheckSecond = currentCheckSecond.Format(SECOND_FORMAT)
					fmt.Printf("%s receive count : %d%s", currentCheckSecond.Format("2006/01/02 15:04:05"), count, getNewLineSymbol())
					count = 0
				} else {
					// BUG FIX: time.Sleep(1) slept one *nanosecond*, which
					// busy-spun a CPU core; sleep a sensible interval instead.
					time.Sleep(10 * time.Millisecond)
				}
			}
		}()
		for {
			length, saddr, err := listener.ReadFromUDP(buffer)
			PrintError(err, "cannot receive message")
			currentTime := time.Now()
			if beforeTime != currentTime.Format(MINUTE_FORMAT) { // change minute log file
				beforeTime = currentTime.Format(MINUTE_FORMAT)
				openLogFile(beforeTime)
			}
			// NOTE(review): count is incremented here and read/reset in the
			// reporting goroutine without synchronization — a data race;
			// consider sync/atomic if accurate counts matter.
			logWriter.WriteFormat(`%s|%s|%s%s`, currentTime.Format(SECOND_FORMAT), saddr.IP.String(), buffer[:length], getNewLineSymbol())
			count++
		}
	}
	fmt.Println(savePath)
}
// PrintError reports a non-nil error to stdout together with a context
// message; a nil error is silently ignored.
func PrintError(e error, message string) {
	if e == nil {
		return
	}
	fmt.Printf("ERROR: %s%s%s", message, getNewLineSymbol(), e.Error())
}
func getNewLineSymbol() string {
if runtime.GOOS == "windows" {
return "\r\n"
} else {
return "\n"
}
}
// openNewLogWriter opens (creating if necessary, appending otherwise)
// <saveDirectory>/<fileName>.log and wraps it in a buffered third.LogWriter.
func openNewLogWriter(saveDirectory, fileName string) (*third.LogWriter, error) {
	// BUG FIX: the path was built with a hard-coded backslash ("%s\\%s.log"),
	// which is Windows-only; filepath.Join is portable.
	newLogFileName := filepath.Join(saveDirectory, fileName+".log")
	// BUG FIX: O_APPEND|O_CREATE alone implies O_RDONLY access mode, so every
	// subsequent write would fail; the file must be opened O_WRONLY.
	file, err := os.OpenFile(newLogFileName, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0660)
	if err != nil {
		return nil, err
	}
	writer := bufio.NewWriterSize(file, WRITE_BUFFER_SIZE)
	return &third.LogWriter{File: file, Writer: writer}, nil
}
func getSyslogReceiverArgs() (port string, savePath string) {
if len(os.Args) > 1 {
port = os.Args[1]
} else {
port = "514"
}
if len(os.Args) > 2 {
savePath = os.Args[2]
} else {
savePath, _ = os.Getwd()
}
return
}
// openUdpListener binds a UDP socket on the given port (all interfaces) and
// requests an enlarged kernel read buffer. Failure to resize the buffer is
// reported but deliberately not fatal — the listener is still usable.
func openUdpListener(port string) (*net.UDPConn, error) {
	addr, err := net.ResolveUDPAddr("udp", ":"+port)
	if err != nil {
		return nil, err
	}
	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		return nil, err
	}
	err = conn.SetReadBuffer(UDP_READ_BUFFER_SIZE)
	if err != nil {
		PrintError(err, "cannot setup udp read buffer size")
	}
	return conn, nil
}
package zmq4_test
import (
zmq "github.com/pebbe/zmq4"
"fmt"
"time"
)
// rep_socket_monitor connects a PAIR socket to the monitor endpoint at addr
// and prints each received socket event, returning once EVENT_CLOSED is seen
// or a receive error occurs.
func rep_socket_monitor(addr string) {
	s, err := zmq.NewSocket(zmq.PAIR)
	if checkErr(err) {
		return
	}
	defer s.Close()
	err = s.Connect(addr)
	if checkErr(err) {
		return
	}
	for {
		a, b, _, err := s.RecvEvent(0)
		if checkErr(err) {
			break
		}
		fmt.Println(a, b)
		if a == zmq.EVENT_CLOSED {
			break
		}
	}
}
// Example_socket_event demonstrates socket event monitoring: it registers a
// monitor on a REP socket, binds and closes the socket, and expects the
// monitor goroutine to observe the LISTENING and CLOSED events.
func Example_socket_event() {
	// REP socket
	rep, err := zmq.NewSocket(zmq.REP)
	if checkErr(err) {
		return
	}
	// REP socket monitor, all events
	err = rep.Monitor("inproc://monitor.rep", zmq.EVENT_ALL)
	if checkErr(err) {
		rep.Close()
		return
	}
	go rep_socket_monitor("inproc://monitor.rep")
	time.Sleep(time.Second)
	// Generate an event
	// BUG FIX: the Bind error was discarded, so the checkErr below was
	// re-testing the stale error from Monitor; capture it.
	err = rep.Bind("tcp://*:9689")
	if checkErr(err) {
		rep.Close()
		return
	}
	rep.Close()
	// Allow some time for event detection
	time.Sleep(time.Second)
	fmt.Println("Done")
	// Output:
	// EVENT_LISTENING tcp://0.0.0.0:9689
	// EVENT_CLOSED tcp://0.0.0.0:9689
	// Done
}
|
package lsof
import (
"encoding/hex"
"fmt"
)
// getTCPConnections is an unimplemented placeholder; presumably it will
// enumerate TCP connections for the lsof package — TODO: implement.
func getTCPConnections() {
}
// hexIPToDecimal converts an 8-hex-digit IPv4 address stored least-
// significant-byte first (the layout used by /proc/net/tcp — TODO confirm
// for all callers) into dotted-decimal form. Returns "" when the input is
// not exactly four bytes of valid hex.
func hexIPToDecimal(ipHex string) string {
	a, err := hex.DecodeString(ipHex)
	if err != nil || len(a) != 4 {
		// BUG FIX: the decode error used to be ignored and short or invalid
		// input panicked on the a[3] index below.
		return ""
	}
	// Bytes are stored in reverse order, so emit them back-to-front.
	s := fmt.Sprintf("%v.%v.%v.%v", a[3], a[2], a[1], a[0])
	return s
}
// hexPortToDecimal converts a 4-hex-digit big-endian port number to its
// decimal string form. Returns "" when the input is not exactly two bytes of
// valid hex.
func hexPortToDecimal(portHex string) string {
	a, err := hex.DecodeString(portHex)
	if err != nil || len(a) != 2 {
		// BUG FIX: the decode error used to be ignored and short or invalid
		// input panicked on the index accesses below.
		return ""
	}
	r := int(a[0])*256 + int(a[1]) // big-endian 16-bit value
	return fmt.Sprintf("%d", r)
}
|
package slovnik
import "strings"
// Language identifies the alphabet detected in an input string.
type Language int

const (
	// Ru represents Russian language
	Ru Language = iota
	// Cz represents Czech language
	Cz
)

// Russian alphabet
const rusSymbols = "абвгдеёжзийклмнопрстуфхцчшщьыъэюя"

// DetectLanguage reports the language of the input string: Ru as soon as any
// rune (case-insensitively) belongs to the Russian alphabet, Cz otherwise.
func DetectLanguage(input string) Language {
	for _, r := range strings.ToLower(input) {
		if strings.ContainsRune(rusSymbols, r) {
			return Ru
		}
	}
	return Cz
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package casetest
import (
"testing"
"github.com/pingcap/tidb/testkit"
"github.com/pingcap/tidb/testkit/testdata"
)
// Input is the list of SQL statements loaded from the test-case data file.
type Input []string
// TestRemoveRedundantPredicates runs the predicate-simplification test cases
// against table t, comparing each statement's 'explain format = brief' plan
// with the recorded expectation (re-recorded under testdata.OnRecord).
func TestRemoveRedundantPredicates(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int, f int)")
	var input Input
	var output []struct {
		SQL  string
		Plan []string
	}
	suiteData := GetPredicateSimplificationTestData()
	suiteData.LoadTestCases(t, &input, &output)
	for i, sql := range input {
		plan := tk.MustQuery("explain format = 'brief' " + sql)
		// In record mode, capture the current plan as the new expectation.
		testdata.OnRecord(func() {
			output[i].SQL = sql
			output[i].Plan = testdata.ConvertRowsToStrings(plan.Rows())
		})
		plan.Check(testkit.Rows(output[i].Plan...))
	}
}
// TestInListAndNotEqualSimplification checks plan simplification of IN-list
// plus not-equal predicates on both integer (t) and char (ts) columns,
// comparing explain output against the recorded expectations.
func TestInListAndNotEqualSimplification(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := testkit.NewTestKit(t, store)
	tk.MustExec("use test")
	tk.MustExec("drop table if exists t")
	tk.MustExec("create table t(a int, f int)")
	tk.MustExec("drop table if exists ts")
	tk.MustExec("create table ts(a char(10), f char(10))")
	var input Input
	var output []struct {
		SQL  string
		Plan []string
	}
	suiteData := GetPredicateSimplificationTestData()
	suiteData.LoadTestCases(t, &input, &output)
	for i, sql := range input {
		plan := tk.MustQuery("explain format = 'brief' " + sql)
		// In record mode, capture the current plan as the new expectation.
		testdata.OnRecord(func() {
			output[i].SQL = sql
			output[i].Plan = testdata.ConvertRowsToStrings(plan.Rows())
		})
		plan.Check(testkit.Rows(output[i].Plan...))
	}
}
|
package response
import (
"time"
"github.com/agusbasari29/Skilltest-RSP-Akselerasi-2-Backend-Agus-Basari/entity"
"gorm.io/gorm"
)
// ResponseUserData pairs arbitrary user data with its credential payload in
// a single JSON response envelope.
type ResponseUserData struct {
	User       interface{} `json:"user_data"`
	Credential interface{} `json:"credential"`
}
// ResponseUser is the public JSON representation of a user record.
type ResponseUser struct {
	ID             uint            `json:"id"`
	Username       string          `json:"username"`
	Fullname       string          `json:"fullname"`
	Email          string          `json:"email"`
	Role           entity.UserRole `json:"role"`
	CreatedAt      time.Time       `json:"created_at"`
	UpdatedAt      time.Time       `json:"updated_at"`
	DeletedAt      gorm.DeletedAt  `json:"deleted_at"`
	UserCredential interface{}     `json:"user_credential"`
}
// ResponseUserFormatter maps an entity.Users record onto its public
// ResponseUser representation (UserCredential is left at its zero value).
func ResponseUserFormatter(user entity.Users) ResponseUser {
	return ResponseUser{
		ID:        user.ID,
		Username:  user.Username,
		Fullname:  user.Fullname,
		Email:     user.Email,
		Role:      user.Role,
		CreatedAt: user.CreatedAt,
		UpdatedAt: user.UpdatedAt,
		DeletedAt: user.DeletedAt,
	}
}
// ResponseUserDataFormatter bundles a user payload and its credential into a
// ResponseUserData envelope.
func ResponseUserDataFormatter(user interface{}, credential interface{}) ResponseUserData {
	return ResponseUserData{
		User:       user,
		Credential: credential,
	}
}
|
package main
// import (
// "io/fs"
// "log"
// "os"
// )
// main is currently a no-op: the original file-stat demo has been commented
// out wholesale and is kept below for reference.
func main() {
	// if len(os.Args) <= 1 || os.Args[1] == "" {
	// log.Fatalln("no given file")
	// }
	// var (
	// a fs.FileInfo
	// err error
	// )
	// if a, err = os.Stat(os.Args[1]); err != nil {
	// log.Fatalln(err)
	// }
	// log.Println(a.Name())
}
|
package mongodb
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"sync"
"time"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
// DB wraps a mongo client together with the database handle it operates on.
type DB struct {
	client *mongo.Client
	db     *mongo.Database
}
// instance/once implement the process-wide singleton returned by GetDB.
var instance *DB
var once sync.Once

// defautlURL (sic) is the default MongoDB connection string.
const defautlURL string = "mongodb://localhost:27017"
// GetDB returns the process-wide database handle, initialising it (and
// connecting to the default URL) exactly once on first use.
func GetDB() *DB {
	once.Do(func() {
		instance = initDB(defautlURL)
	})
	return instance
}
// Connect dials the server with a 2-second timeout and then pings it to
// verify the connection. NOTE(review): initDB already connects and pings, so
// this appears redundant when the handle comes from GetDB — confirm callers.
func (d *DB) Connect() error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	err := d.client.Connect(ctx)
	if err != nil {
		return err
	}
	err = d.client.Ping(context.TODO(), nil)
	return err
}
// initDB creates a client for url, connects and pings it (2-second timeout
// each), and selects the "test" database. It panics on any failure, so it is
// suitable only for process start-up (it is invoked via GetDB's sync.Once).
func initDB(url string) *DB {
	const defaultDatabase = "test"
	db := new(DB)
	var err error
	db.client, err = mongo.NewClient(options.Client().ApplyURI(url))
	if err != nil {
		panic(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	err = db.client.Connect(ctx)
	if err != nil {
		panic(err)
	}
	err = db.client.Ping(ctx, nil)
	if err != nil {
		panic(err)
	}
	db.db = db.client.Database(defaultDatabase)
	fmt.Printf("Database %s connected... \n", url)
	return db
}
// SaveTagPosition inserts data into the "tagPosition" collection with a
// 2-second timeout. Errors are logged and otherwise discarded (best-effort).
func (d *DB) SaveTagPosition(data bson.M) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	collection := d.db.Collection("tagPosition")
	_, err := collection.InsertOne(ctx, data)
	//fmt.Println("Insert id:", res.InsertedID)
	if err != nil {
		log.Println(err)
	}
}
// Insert inserts data into collection c with a 2-second timeout, logging and
// returning any driver error.
func (d *DB) Insert(c string, data bson.M) error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	collection := d.db.Collection(c)
	res, err := collection.InsertOne(ctx, data)
	// BUG FIX: res.InsertedID was read before the error check; on failure
	// InsertOne returns a nil result, so this panicked instead of reporting
	// the error.
	if err != nil {
		log.Println(err)
		return err
	}
	fmt.Println("Insert id:", res.InsertedID)
	return nil
}
// SaveTagInfo inserts data into the "tagInformation" collection with a
// 2-second timeout. Errors are logged and otherwise discarded (best-effort).
func (d *DB) SaveTagInfo(data bson.M) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	collection := d.db.Collection("tagInformation")
	_, err := collection.InsertOne(ctx, data)
	//fmt.Println("Insert id:", res.InsertedID)
	if err != nil {
		log.Println(err)
	}
}
// read runs collection.Find(query) on c with a 2-second timeout and decodes
// every result into a bson.M slice. A failed find or an empty result set is
// converted into a descriptive error.
func (d *DB) read(c string, query bson.M) ([]bson.M, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	collection := d.db.Collection(c)
	cur, err := collection.Find(ctx, query)
	var data []bson.M
	if err != nil {
		log.Println(err)
		msg := fmt.Sprintf("collection: %s is not exist!", c)
		return nil, errors.New(msg)
	}
	defer cur.Close(ctx)
	// Decode each document in the cursor into the result slice.
	for cur.Next(ctx) {
		var item bson.M
		err := cur.Decode(&item)
		if err != nil {
			return nil, err
		}
		data = append(data, item)
	}
	// No matches are reported as an error rather than an empty slice.
	if len(data) == 0 {
		msg := fmt.Sprintf("Query: %s not found result.", c)
		return nil, errors.New(msg)
	}
	return data, nil
}
// Read - db.c.find(query) -> return data "ARRAY" of json format , error
// It delegates to read and marshals the resulting documents to a JSON array.
func (d *DB) Read(c string, query bson.M) ([]byte, error) {
	fmt.Println("Read query", query)
	data, err := d.read(c, query)
	if err != nil {
		fmt.Println("Read err", err)
		return nil, err
	}
	jsonStr, err := json.Marshal(data)
	if err != nil {
		fmt.Println("Read json err", err)
		return nil, err
	}
	return jsonStr, err
}
|
/*
The nth term of the sequence of triangle numbers is given by, tn = ½n(n+1); so the first ten triangle numbers are:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
By converting each letter in a word to a number corresponding to its alphabetical position and adding these values we form a word value.
For example, the word value for SKY is 19 + 11 + 25 = 55 = t10. If the word value is a triangle number then we shall call the word a triangle word.
Using words.txt (right click and 'Save Link/Target As...'), a 16K text file containing nearly two-thousand common English words, how many are triangle words?
*/
package main
import (
"encoding/csv"
"fmt"
"os"
)
// main solves Project Euler problem 42: it reads the comma-separated word
// list and counts the words whose letter-value sum is a triangle number.
// NOTE(review): the input path is hard-coded to a developer machine.
func main() {
	file, err := os.Open("/Users/ben/ProjectEuler/42/words.txt")
	if err != nil {
		panic(err)
	}
	reader := csv.NewReader(file)
	words, _ := reader.ReadAll()
	count := 0
	triangleNumbers := getTriangleNumbers(300) // Ten Z's would make a word value of 260
	for _, word := range words[0] {
		wordValue := 0
		for _, letter := range word {
			// 'A' is 65, so subtracting 64 maps A..Z onto 1..26.
			wordValue += int(letter) - 64
		}
		if _, ok := triangleNumbers[wordValue]; ok {
			count++
			fmt.Printf("Found triangle word %s\n", word)
		}
	}
	fmt.Println("Number of triangle words", count)
}
// getTriangleNumbers returns the set of triangle numbers t(n) = n(n+1)/2 for
// every n in [1, last).
func getTriangleNumbers(last int) map[int]bool {
	numbers := make(map[int]bool, last)
	triangle := 0
	for n := 1; n < last; n++ {
		triangle += n // running sum 1+2+...+n equals n(n+1)/2
		numbers[triangle] = true
	}
	return numbers
}
|
/*
A version control system(VCS) is a repository of files, often the files for the source code of computer programs, with monitored access.
Every change made to the source is tracked, along with who made the change, why they made it, and references to problems fixed, or enhancements introduced, by the change.
Version control systems are essential for any form of distributed, collaborative development.
Whether it is the history of a wiki page or large software development project, the ability to track each change as it was made,
and to reverse changes when necessary can make all the difference between a well managed and controlled process and an uncontrolled ‘first come, first served’ system.
It can also serve as a mechanism for due diligence for software projects.
In this problem we'll consider a simplified model of a development project. Let's suppose, that there are N source files in the project. All the source files are distinct and numbered from 1 to N.
A VCS, that is used for maintaining the project, contains two sequences of source files. The first sequence contains the source files, that are ignored by the VCS.
If a source file is not in the first sequence, then it's considered to be unignored. The second sequence contains the source files, that are tracked by the VCS.
If a source file is not in the second sequence, then it's considered to be untracked. A source file can either be or not be in any of these two sequences.
Your task is to calculate two values: the number of source files of the project, that are both tracked and ignored, and the number of source files of the project, that are both untracked and unignored.
Input
The first line of the input contains an integer T denoting the number of test cases. The description of T test cases follows.
The first line of the test case description contains three integers N, M and K denoting the number of source files in the project, the number of ignored source files and the number of tracked source files.
The second line contains M distinct integers denoting the sequence A of ignored source files. The sequence is strictly increasing.
The third line contains K distinct integers denoting the sequence B of tracked source files. The sequence is strictly increasing.
Output
For each test case, output a single line containing two integers: the number of the source files, that are both tracked and ignored, and the number of the source files, that are both untracked and unignored.
Constraints
1 ≤ T ≤ 100
1 ≤ M, K ≤ N ≤ 100
1 ≤ A1 < A2 < ... < AM ≤ N
1 ≤ B1 < B2 < ... < BK ≤ N
*/
package main
// main runs the embedded test cases for stats: each call supplies n, the
// ignored list, the tracked list, and the expected (tracked-and-ignored,
// untracked-and-unignored) counts.
func main() {
	test(7, []int{1, 4, 6, 7}, []int{1, 2, 3, 4, 6, 7}, 4, 1)
	test(4, []int{1, 4}, []int{3, 4}, 1, 1)
}
// test asserts that stats(n, a, b) returns the expected counts rt and ru.
func test(n int, a, b []int, rt, ru int) {
	t, u := stats(n, a, b)
	assert(t == rt)
	assert(u == ru)
}
// assert panics when x is false; used as a minimal test harness.
func assert(x bool) {
	if !x {
		panic("assertion failed")
	}
}
// stats computes, for files numbered 1..n with ignored list a and tracked
// list b: t, the number of files both tracked and ignored, and u, the number
// of files both untracked and unignored.
func stats(n int, a, b []int) (t, u int) {
	ignored := make(map[int]bool, len(a))
	for _, file := range a {
		ignored[file] = true
	}
	tracked := make(map[int]bool, len(b))
	for _, file := range b {
		tracked[file] = true
		if ignored[file] {
			t++
		}
	}
	for file := 1; file <= n; file++ {
		if !ignored[file] && !tracked[file] {
			u++
		}
	}
	return
}
|
package services
import (
DB "LivingPointAPI/database/database"
)
// mapConvert widens a map[string]string into a map[string]interface{} with
// the same keys and values.
func mapConvert(m map[string]string) map[string]interface{} {
	converted := make(map[string]interface{}, len(m))
	for key, value := range m {
		converted[key] = value
	}
	return converted
}
// mapToMap flattens the keyed map form into a slice of *DB.Map entries
// (order is unspecified, following map iteration order).
func mapToMap(m map[int64]map[string]string) []*DB.Map {
	// BUG FIX: the slice was allocated with length len(m) and then indexed by
	// the map key (out[k]), which panics with index-out-of-range unless the
	// keys happen to be exactly 0..len(m)-1; append instead.
	out := make([]*DB.Map, 0, len(m))
	for k, v := range m {
		out = append(out, &DB.Map{
			Key: k,
			Map: v,
		})
	}
	return out
}
|
package znet
import (
"bytes"
"encoding/binary"
"errors"
"zinx/src/zinx/utils"
"zinx/src/zinx/ziface"
)
// DataPack implements message packing and unpacking (framing) for the
// TCP byte stream.
type DataPack struct {
}
// NewDataPack creates a DataPack instance.
func NewDataPack() *DataPack {
	return &DataPack{}
}
// GetHandLen returns the fixed message head length in bytes.
// NOTE(review): the name looks like a typo for GetHeadLen, but renaming
// would break existing callers/interfaces.
func (pD *DataPack)GetHandLen() uint32{
	// DataLen uint32 (4 bytes) + ID uint32 (4 bytes)
	return 8
}
// Pack serialises msg into the frame layout dataLen|msgID|data, all fields
// little-endian.
func (pD *DataPack)Pack(msg ziface.IMessage )([]byte,error){
	// buffer that accumulates the framed bytes
	aDataBuff := bytes.NewBuffer([]byte{})
	// write the data length
	if err:=binary.Write(aDataBuff,binary.LittleEndian,msg.GetMsgLen());err != nil {
		return nil, err
	}
	// write the message ID
	if err:=binary.Write(aDataBuff,binary.LittleEndian,msg.GetMsgID());err != nil {
		return nil, err
	}
	// write the payload
	if err:=binary.Write(aDataBuff,binary.LittleEndian,msg.GetMsgData());err != nil {
		return nil, err
	}
	return aDataBuff.Bytes(),nil
}
// Unpack decodes only the head (dataLen and msgID) from binaryData; the
// caller is expected to perform a second read of dataLen bytes for the
// payload. Frames larger than the configured maximum are rejected.
func (pD *DataPack)Unpack(binaryData []byte)( ziface.IMessage,error){
	// reader over the raw input bytes
	dataBuff :=bytes.NewReader(binaryData)
	// decode just the head fields into a fresh message
	msg := &Message{}
	// read dataLen
	if err:=binary.Read(dataBuff,binary.LittleEndian,&msg.IDataLen); err != nil {
		return nil, err
	}
	// read msgID
	if err:=binary.Read(dataBuff,binary.LittleEndian,&msg.IID); err != nil {
		return nil, err
	}
	// reject frames that exceed the configured maximum package size
	if utils.GlobalObject.IMaxPackageSize >0 &&
		msg.IDataLen > utils.GlobalObject.IMaxPackageSize {
		return nil, errors.New("too large msg data recv")
	}
	return msg,nil
}
|
package aggregation
import (
"fmt"
"github.com/emicklei/go-restful"
// . "grm-searcher/dbcentral/pg"
. "titan-statistics/types"
// "grm-service/dbcentral/pg"
// "grm-service/log"
"grm-service/util"
)
var (
	// volUnit maps single-letter volume suffixes to their display units.
	volUnit = map[string]string{"K": "KB", "M": "MB", "G": "GB", "T": "TB"}
)
// GetAggr handles GET requests for the aggregation configuration of the data
// type named in the "type" path parameter, writing it as the response entity.
func (svc *AggrSvc) GetAggr(req *restful.Request, res *restful.Response) {
	typename := req.PathParameter("type")
	if len(typename) == 0 {
		util.ResWriteError(res, fmt.Errorf(util.TR("Invalid data type: %s", typename)))
		return
	}
	info, err := svc.DynamicDB.GetTypeAggr(typename)
	if err != nil {
		util.ResWriteError(res, err)
		return
	}
	util.ResWriteHeaderEntity(res, info)
}
// SetAggr handles requests that store the aggregation configuration for the
// data type named in the "type" path parameter. The request body must decode
// to a TypeAggr with a non-empty Aggr list.
// NOTE(review): the success path writes no response entity — confirm whether
// an empty 200 is intended.
func (svc *AggrSvc) SetAggr(req *restful.Request, res *restful.Response) {
	typename := req.PathParameter("type")
	if len(typename) == 0 {
		util.ResWriteError(res, fmt.Errorf(util.TR("Invalid data type: %s", typename)))
		return
	}
	var data TypeAggr
	err := req.ReadEntity(&data)
	if err != nil {
		util.ResWriteError(res, err)
		return
	}
	if len(data.Aggr) == 0 {
		util.ResWriteError(res, fmt.Errorf(util.TR("Invalid input")))
		return
	}
	// Bind the configuration to the type from the URL, not the body.
	data.DataType = typename
	err = svc.DynamicDB.SetTypeAggr(data)
	if err != nil {
		util.ResWriteError(res, err)
		return
	}
}
// StatByAggr validates the "type" and "field" path parameters; the actual
// statistics query is not implemented yet.
func (svc *AggrSvc) StatByAggr(req *restful.Request, res *restful.Response) {
	typename := req.PathParameter("type")
	if len(typename) == 0 {
		util.ResWriteError(res, fmt.Errorf(util.TR("Invalid data type: %s", typename)))
		return
	}
	field := req.PathParameter("field")
	if len(field) == 0 {
		util.ResWriteError(res, fmt.Errorf(util.TR("Invalid field: %s", field)))
		return
	}
	// TODO: run a distinct query over the database records by type and field.
}
|
package services
import (
"github.com/wbreza/go-store/api/models"
)
// cache is the in-memory product store keyed by product ID.
// NOTE(review): package-level map with no locking — not safe under
// concurrent requests; confirm single-threaded use or guard with a mutex.
var cache = make(map[int]*models.Product)
// ProductManager provides CRUD access to product entities backed by the
// package-level in-memory cache.
type ProductManager struct {
}
// NewProductManager creates a new instance of a product manager.
func NewProductManager() *ProductManager {
	return &ProductManager{}
}
// GetList returns every product currently in the data store. Order is
// unspecified (map iteration order); the result is never nil.
func (productManager *ProductManager) GetList() ([]*models.Product, error) {
	products := make([]*models.Product, 0, len(cache))
	for _, product := range cache {
		products = append(products, product)
	}
	return products, nil
}
// Get returns a product that matches by id; the result is nil (with a nil
// error) when the id is unknown.
func (productManager *ProductManager) Get(id int) (*models.Product, error) {
	return cache[id], nil
}
// Save inserts or updates a product in the data store, assigning the next
// free ID when the product has none (ID == 0).
func (productManager *ProductManager) Save(product *models.Product) (*models.Product, error) {
	if product.ID == 0 {
		product.ID = GetNewID()
	}
	cache[product.ID] = product
	return product, nil
}
// Delete removes an entity by id from the data store, reporting whether a
// product was actually present to remove.
func (productManager *ProductManager) Delete(id int) (bool, error) {
	if cache[id] == nil {
		return false, nil
	}
	delete(cache, id)
	return true, nil
}
// GetNewID returns the next free id: one greater than the largest key
// currently in the cache (1 for an empty cache).
func GetNewID() int {
	highest := 0
	for id := range cache {
		if id > highest {
			highest = id
		}
	}
	return highest + 1
}
|
package configuration_test
import (
"bytes"
"crypto/ecdsa"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"math"
"net/mail"
"net/url"
"reflect"
"regexp"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/authelia/authelia/v4/internal/configuration"
"github.com/authelia/authelia/v4/internal/configuration/schema"
)
// TestStringToMailAddressHookFunc verifies the decode hook converts strings
// to mail.Address values: a bare address, a named ("James <...>") address,
// the empty string (zero value), and an error for input missing '@'.
func TestStringToMailAddressHookFunc(t *testing.T) {
	testCases := []struct {
		desc   string
		have   any
		want   any
		err    string
		decode bool
	}{
		{
			desc:   "ShouldDecodeMailAddress",
			have:   "james@example.com",
			want:   mail.Address{Name: "", Address: "james@example.com"},
			decode: true,
		},
		{
			desc:   "ShouldDecodeMailAddressWithName",
			have:   "James <james@example.com>",
			want:   mail.Address{Name: "James", Address: "james@example.com"},
			decode: true,
		},
		{
			desc:   "ShouldDecodeMailAddressWithEmptyString",
			have:   "",
			want:   mail.Address{},
			decode: true,
		},
		{
			desc:   "ShouldNotDecodeInvalidMailAddress",
			have:   "fred",
			want:   mail.Address{},
			err:    "could not decode 'fred' to a mail.Address (RFC5322): mail: missing '@' or angle-addr",
			decode: true,
		},
	}
	hook := configuration.StringToMailAddressHookFunc()
	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			result, err := hook(reflect.TypeOf(tc.have), reflect.TypeOf(tc.want), tc.have)
			switch {
			case !tc.decode:
				// Types the hook doesn't handle must pass through untouched.
				assert.NoError(t, err)
				assert.Equal(t, tc.have, result)
			case tc.err == "":
				assert.NoError(t, err)
				require.Equal(t, tc.want, result)
			default:
				assert.EqualError(t, err, tc.err)
				assert.Nil(t, result)
			}
		})
	}
}
// TestStringToMailAddressHookFuncPointer mirrors the mail.Address hook tests
// for *mail.Address targets: the empty string decodes to a nil pointer, and
// non-pointer/unrelated target types are passed through undecoded.
func TestStringToMailAddressHookFuncPointer(t *testing.T) {
	testCases := []struct {
		desc   string
		have   any
		want   any
		err    string
		decode bool
	}{
		{
			desc:   "ShouldDecodeMailAddress",
			have:   "james@example.com",
			want:   &mail.Address{Name: "", Address: "james@example.com"},
			decode: true,
		},
		{
			desc:   "ShouldDecodeMailAddressWithName",
			have:   "James <james@example.com>",
			want:   &mail.Address{Name: "James", Address: "james@example.com"},
			decode: true,
		},
		{
			desc:   "ShouldDecodeMailAddressWithEmptyString",
			have:   "",
			want:   (*mail.Address)(nil),
			decode: true,
		},
		{
			desc:   "ShouldNotDecodeInvalidMailAddress",
			have:   "fred",
			want:   &mail.Address{},
			err:    "could not decode 'fred' to a *mail.Address (RFC5322): mail: missing '@' or angle-addr",
			decode: true,
		},
		{
			desc:   "ShouldNotDecodeToInt",
			have:   "fred",
			want:   testInt32Ptr(4),
			decode: false,
		},
	}
	hook := configuration.StringToMailAddressHookFunc()
	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			result, err := hook(reflect.TypeOf(tc.have), reflect.TypeOf(tc.want), tc.have)
			switch {
			case !tc.decode:
				// Types the hook doesn't handle must pass through untouched.
				assert.NoError(t, err)
				assert.Equal(t, tc.have, result)
			case tc.err == "":
				assert.NoError(t, err)
				require.Equal(t, tc.want, result)
			default:
				assert.EqualError(t, err, tc.err)
				assert.Nil(t, result)
			}
		})
	}
}
// TestStringToURLHookFunc verifies the decode hook converts strings to
// url.URL values (including credentials in the authority), leaves non-string
// sources and non-URL targets untouched, and reports parse failures.
func TestStringToURLHookFunc(t *testing.T) {
	testCases := []struct {
		desc   string
		have   any
		want   any
		err    string
		decode bool
	}{
		{
			desc:   "ShouldDecodeURL",
			have:   "https://www.example.com:9090/abc?test=true",
			want:   url.URL{Scheme: "https", Host: "www.example.com:9090", Path: "/abc", RawQuery: "test=true"},
			decode: true,
		},
		{
			desc:   "ShouldDecodeURLEmptyString",
			have:   "",
			want:   url.URL{},
			decode: true,
		},
		{
			desc:   "ShouldNotDecodeToString",
			have:   "abc",
			want:   "",
			decode: false,
		},
		{
			desc:   "ShouldDecodeURLWithUserAndPassword",
			have:   "https://john:abc123@www.example.com:9090/abc?test=true",
			want:   url.URL{Scheme: "https", Host: "www.example.com:9090", Path: "/abc", RawQuery: "test=true", User: url.UserPassword("john", "abc123")},
			decode: true,
		},
		{
			desc:   "ShouldNotDecodeInt",
			have:   5,
			want:   url.URL{},
			decode: false,
		},
		{
			desc:   "ShouldNotDecodeBool",
			have:   true,
			want:   url.URL{},
			decode: false,
		},
		{
			desc:   "ShouldNotDecodeBadURL",
			have:   "*(!&@#(!*^$%",
			want:   url.URL{},
			err:    "could not decode '*(!&@#(!*^$%' to a url.URL: parse \"*(!&@#(!*^$%\": invalid URL escape \"%\"",
			decode: true,
		},
	}
	hook := configuration.StringToURLHookFunc()
	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			result, err := hook(reflect.TypeOf(tc.have), reflect.TypeOf(tc.want), tc.have)
			switch {
			case !tc.decode:
				// Types the hook doesn't handle must pass through untouched.
				assert.NoError(t, err)
				assert.Equal(t, tc.have, result)
			case tc.err == "":
				assert.NoError(t, err)
				require.Equal(t, tc.want, result)
			default:
				assert.EqualError(t, err, tc.err)
				assert.Nil(t, result)
			}
		})
	}
}
// TestStringToURLHookFuncPointer mirrors the url.URL hook tests for *url.URL
// targets: the empty string decodes to a nil pointer, and non-string sources
// or unrelated target types are passed through undecoded.
func TestStringToURLHookFuncPointer(t *testing.T) {
	testCases := []struct {
		desc   string
		have   any
		want   any
		err    string
		decode bool
	}{
		{
			desc:   "ShouldDecodeURL",
			have:   "https://www.example.com:9090/abc?test=true",
			want:   &url.URL{Scheme: "https", Host: "www.example.com:9090", Path: "/abc", RawQuery: "test=true"},
			decode: true,
		},
		{
			desc:   "ShouldDecodeURLEmptyString",
			have:   "",
			want:   (*url.URL)(nil),
			decode: true,
		},
		{
			desc:   "ShouldDecodeURLWithUserAndPassword",
			have:   "https://john:abc123@www.example.com:9090/abc?test=true",
			want:   &url.URL{Scheme: "https", Host: "www.example.com:9090", Path: "/abc", RawQuery: "test=true", User: url.UserPassword("john", "abc123")},
			decode: true,
		},
		{
			desc:   "ShouldNotDecodeInt",
			have:   5,
			want:   &url.URL{},
			decode: false,
		},
		{
			desc:   "ShouldNotDecodeBool",
			have:   true,
			want:   &url.URL{},
			decode: false,
		},
		{
			desc:   "ShouldNotDecodeBadURL",
			have:   "*(!&@#(!*^$%",
			want:   &url.URL{},
			err:    "could not decode '*(!&@#(!*^$%' to a *url.URL: parse \"*(!&@#(!*^$%\": invalid URL escape \"%\"",
			decode: true,
		},
		{
			desc:   "ShouldNotDecodeToInt",
			have:   "fred",
			want:   testInt32Ptr(4),
			decode: false,
		},
	}
	hook := configuration.StringToURLHookFunc()
	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			result, err := hook(reflect.TypeOf(tc.have), reflect.TypeOf(tc.want), tc.have)
			switch {
			case !tc.decode:
				// Types the hook doesn't handle must pass through untouched.
				assert.NoError(t, err)
				assert.Equal(t, tc.have, result)
			case tc.err == "":
				assert.NoError(t, err)
				require.Equal(t, tc.want, result)
			default:
				assert.EqualError(t, err, tc.err)
				assert.Nil(t, result)
			}
		})
	}
}
// TestToTimeDurationHookFunc exercises configuration.ToTimeDurationHookFunc
// decoding strings (with extended units d/w/M/y) and integer/float types to a
// time.Duration value.
//
// Fix: the second test case previously named "ShouldDecodeFloat64ToSeconds"
// duplicated the name of an earlier case, producing ambiguous subtest names
// (Go disambiguates them with a #01 suffix); it is renamed to describe the
// actual scenario.
func TestToTimeDurationHookFunc(t *testing.T) {
	testCases := []struct {
		desc   string
		have   any
		want   any
		err    string
		decode bool // false means the hook should pass the input through untouched
	}{
		{
			desc:   "ShouldDecodeFourtyFiveSeconds",
			have:   "45s",
			want:   time.Second * 45,
			decode: true,
		},
		{
			desc:   "ShouldDecodeOneMinute",
			have:   "1m",
			want:   time.Minute,
			decode: true,
		},
		{
			desc:   "ShouldDecodeTwoHours",
			have:   "2h",
			want:   time.Hour * 2,
			decode: true,
		},
		{
			desc:   "ShouldDecodeThreeDays",
			have:   "3d",
			want:   time.Hour * 24 * 3,
			decode: true,
		},
		{
			desc:   "ShouldDecodeFourWeeks",
			have:   "4w",
			want:   time.Hour * 24 * 7 * 4,
			decode: true,
		},
		{
			desc:   "ShouldDecodeFiveMonths",
			have:   "5M",
			want:   time.Hour * 24 * 30 * 5,
			decode: true,
		},
		{
			desc:   "ShouldDecodeSixYears",
			have:   "6y",
			want:   time.Hour * 24 * 365 * 6,
			decode: true,
		},
		{
			desc:   "ShouldNotDecodeInvalidString",
			have:   "abc",
			want:   time.Duration(0),
			err:    "could not decode 'abc' to a time.Duration: could not parse 'abc' as a duration",
			decode: true,
		},
		{
			desc:   "ShouldDecodeIntToSeconds",
			have:   60,
			want:   time.Second * 60,
			decode: true,
		},
		{
			desc:   "ShouldDecodeInt8ToSeconds",
			have:   int8(90),
			want:   time.Second * 90,
			decode: true,
		},
		{
			desc:   "ShouldDecodeInt16ToSeconds",
			have:   int16(90),
			want:   time.Second * 90,
			decode: true,
		},
		{
			desc:   "ShouldDecodeInt32ToSeconds",
			have:   int32(90),
			want:   time.Second * 90,
			decode: true,
		},
		{
			desc:   "ShouldDecodeFloat64ToSeconds",
			have:   float64(90),
			want:   time.Second * 90,
			decode: true,
		},
		{
			// Out-of-range floats must saturate at the maximum duration.
			desc:   "ShouldDecodeFloat64MaxToMaxInt64Duration",
			have:   math.MaxFloat64,
			want:   time.Duration(math.MaxInt64),
			decode: true,
		},
		{
			desc:   "ShouldDecodeInt64ToSeconds",
			have:   int64(120),
			want:   time.Second * 120,
			decode: true,
		},
		{
			desc:   "ShouldDecodeTimeDuration",
			have:   time.Second * 30,
			want:   time.Second * 30,
			decode: true,
		},
		{
			desc:   "ShouldNotDecodeToString",
			have:   int64(30),
			want:   "",
			decode: false,
		},
		{
			desc:   "ShouldDecodeFromIntZero",
			have:   0,
			want:   time.Duration(0),
			decode: true,
		},
		{
			desc:   "ShouldSkipParsingBoolean",
			have:   true,
			want:   time.Duration(0),
			decode: false,
		},
		{
			desc: "ShouldNotDecodeFromBool",
			have: true,
			want: true,
		},
	}

	hook := configuration.ToTimeDurationHookFunc()

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			result, err := hook(reflect.TypeOf(tc.have), reflect.TypeOf(tc.want), tc.have)

			switch {
			case !tc.decode:
				// Values the hook does not handle must pass through unchanged.
				assert.NoError(t, err)
				assert.Equal(t, tc.have, result)
			case tc.err == "":
				assert.NoError(t, err)
				require.Equal(t, tc.want, result)
			default:
				assert.EqualError(t, err, tc.err)
				assert.Nil(t, result)
			}
		})
	}
}
// TestToTimeDurationHookFuncPointer exercises configuration.ToTimeDurationHookFunc
// when the destination type is a *time.Duration pointer rather than a value.
func TestToTimeDurationHookFuncPointer(t *testing.T) {
	testCases := []struct {
		desc   string
		have   any
		want   any
		err    string
		decode bool // false means the hook should pass the input through untouched
	}{
		{
			desc:   "ShouldDecodeFourtyFiveSeconds",
			have:   "45s",
			want:   testTimeDurationPtr(time.Second * 45),
			decode: true,
		},
		{
			desc:   "ShouldDecodeOneMinute",
			have:   "1m",
			want:   testTimeDurationPtr(time.Minute),
			decode: true,
		},
		{
			desc:   "ShouldDecodeTwoHours",
			have:   "2h",
			want:   testTimeDurationPtr(time.Hour * 2),
			decode: true,
		},
		{
			desc:   "ShouldDecodeThreeDays",
			have:   "3d",
			want:   testTimeDurationPtr(time.Hour * 24 * 3),
			decode: true,
		},
		{
			desc:   "ShouldDecodeFourWeeks",
			have:   "4w",
			want:   testTimeDurationPtr(time.Hour * 24 * 7 * 4),
			decode: true,
		},
		{
			desc:   "ShouldDecodeFiveMonths",
			have:   "5M",
			want:   testTimeDurationPtr(time.Hour * 24 * 30 * 5),
			decode: true,
		},
		{
			desc:   "ShouldDecodeSixYears",
			have:   "6y",
			want:   testTimeDurationPtr(time.Hour * 24 * 365 * 6),
			decode: true,
		},
		{
			desc:   "ShouldNotDecodeInvalidString",
			have:   "abc",
			want:   testTimeDurationPtr(time.Duration(0)),
			err:    "could not decode 'abc' to a *time.Duration: could not parse 'abc' as a duration",
			decode: true,
		},
		{
			desc:   "ShouldDecodeIntToSeconds",
			have:   60,
			want:   testTimeDurationPtr(time.Second * 60),
			decode: true,
		},
		{
			desc:   "ShouldDecodeInt32ToSeconds",
			have:   int32(90),
			want:   testTimeDurationPtr(time.Second * 90),
			decode: true,
		},
		{
			desc:   "ShouldDecodeInt64ToSeconds",
			have:   int64(120),
			want:   testTimeDurationPtr(time.Second * 120),
			decode: true,
		},
		{
			desc:   "ShouldDecodeTimeDuration",
			have:   time.Second * 30,
			want:   testTimeDurationPtr(time.Second * 30),
			decode: true,
		},
		{
			desc:   "ShouldNotDecodeToString",
			have:   int64(30),
			want:   &testString,
			decode: false,
		},
		{
			desc:   "ShouldDecodeFromIntZero",
			have:   0,
			want:   testTimeDurationPtr(time.Duration(0)),
			decode: true,
		},
		{
			desc:   "ShouldNotDecodeFromBool",
			have:   true,
			want:   &testTrue,
			decode: false,
		},
	}

	hook := configuration.ToTimeDurationHookFunc()

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			result, err := hook(reflect.TypeOf(tc.have), reflect.TypeOf(tc.want), tc.have)

			switch {
			case !tc.decode:
				// Values the hook does not handle must pass through unchanged.
				assert.NoError(t, err)
				assert.Equal(t, tc.have, result)
			case tc.err == "":
				assert.NoError(t, err)
				require.Equal(t, tc.want, result)
			default:
				assert.EqualError(t, err, tc.err)
				assert.Nil(t, result)
			}
		})
	}
}
// TestStringToRegexpFunc exercises configuration.StringToRegexpHookFunc when
// the destination type is a regexp.Regexp value; successful decodes are also
// checked for the exact set of named capture groups (wantGrps).
func TestStringToRegexpFunc(t *testing.T) {
	testCases := []struct {
		desc     string
		have     any
		want     any
		err      string
		decode   bool // false means the hook should pass the input through untouched
		wantGrps []string
	}{
		{
			desc:   "ShouldNotDecodeRegexpWithOpenParenthesis",
			have:   "hello(test one two",
			want:   regexp.Regexp{},
			err:    "could not decode 'hello(test one two' to a regexp.Regexp: error parsing regexp: missing closing ): `hello(test one two`",
			decode: true,
		},
		{
			desc:   "ShouldDecodeValidRegex",
			have:   "^(api|admin)$",
			want:   *regexp.MustCompile(`^(api|admin)$`),
			decode: true,
		},
		{
			desc:     "ShouldDecodeValidRegexWithGroupNames",
			have:     "^(?P<area>api|admin)(one|two)$",
			want:     *regexp.MustCompile(`^(?P<area>api|admin)(one|two)$`),
			decode:   true,
			wantGrps: []string{"area"},
		},
		{
			desc:   "ShouldNotDecodeFromInt32",
			have:   int32(20),
			want:   regexp.Regexp{},
			decode: false,
		},
		{
			desc:   "ShouldNotDecodeFromBool",
			have:   false,
			want:   regexp.Regexp{},
			decode: false,
		},
		{
			desc:   "ShouldNotDecodeToBool",
			have:   "^(?P<area>api|admin)(one|two)$",
			want:   testTrue,
			decode: false,
		},
		{
			desc:   "ShouldNotDecodeToInt32",
			have:   "^(?P<area>api|admin)(one|two)$",
			want:   testInt32Ptr(0),
			decode: false,
		},
		{
			desc:   "ShouldNotDecodeToMailAddress",
			have:   "^(?P<area>api|admin)(one|two)$",
			want:   mail.Address{},
			decode: false,
		},
		{
			desc:   "ShouldErrOnDecodeEmptyString",
			have:   "",
			want:   regexp.Regexp{},
			err:    "could not decode an empty value to a regexp.Regexp: must have a non-empty value",
			decode: true,
		},
	}

	hook := configuration.StringToRegexpHookFunc()

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			result, err := hook(reflect.TypeOf(tc.have), reflect.TypeOf(tc.want), tc.have)

			switch {
			case !tc.decode:
				// Values the hook does not handle must pass through unchanged.
				assert.NoError(t, err)
				assert.Equal(t, tc.have, result)
			case tc.err == "":
				assert.NoError(t, err)
				require.Equal(t, tc.want, result)

				// Collect the non-empty named capture groups of the compiled pattern.
				pattern := result.(regexp.Regexp)

				var names []string

				for _, name := range pattern.SubexpNames() {
					if name != "" {
						names = append(names, name)
					}
				}

				if len(tc.wantGrps) != 0 {
					// The group names must match tc.wantGrps exactly (both directions).
					t.Run("MustHaveAllExpectedSubexpGroupNames", func(t *testing.T) {
						for _, name := range tc.wantGrps {
							assert.Contains(t, names, name)
						}
					})
					t.Run("MustNotHaveUnexpectedSubexpGroupNames", func(t *testing.T) {
						for _, name := range names {
							assert.Contains(t, tc.wantGrps, name)
						}
					})
				} else {
					t.Run("MustHaveNoSubexpGroupNames", func(t *testing.T) {
						assert.Len(t, names, 0)
					})
				}
			default:
				assert.EqualError(t, err, tc.err)
				assert.Nil(t, result)
			}
		})
	}
}
func TestStringToRegexpFuncPointers(t *testing.T) {
testCases := []struct {
desc string
have any
want any
err string
decode bool
wantGrps []string
}{
{
desc: "ShouldNotDecodeRegexpWithOpenParenthesis",
have: "hello(test one two",
want: ®exp.Regexp{},
err: "could not decode 'hello(test one two' to a *regexp.Regexp: error parsing regexp: missing closing ): `hello(test one two`",
decode: true,
},
{
desc: "ShouldDecodeValidRegex",
have: "^(api|admin)$",
want: regexp.MustCompile(`^(api|admin)$`),
decode: true,
},
{
desc: "ShouldDecodeValidRegexWithGroupNames",
have: "^(?P<area>api|admin)(one|two)$",
want: regexp.MustCompile(`^(?P<area>api|admin)(one|two)$`),
decode: true,
wantGrps: []string{"area"},
},
{
desc: "ShouldNotDecodeFromInt32",
have: int32(20),
want: ®exp.Regexp{},
decode: false,
},
{
desc: "ShouldNotDecodeFromBool",
have: false,
want: ®exp.Regexp{},
decode: false,
},
{
desc: "ShouldNotDecodeToBool",
have: "^(?P<area>api|admin)(one|two)$",
want: &testTrue,
decode: false,
},
{
desc: "ShouldNotDecodeToInt32",
have: "^(?P<area>api|admin)(one|two)$",
want: &testZero,
decode: false,
},
{
desc: "ShouldNotDecodeToMailAddress",
have: "^(?P<area>api|admin)(one|two)$",
want: &mail.Address{},
decode: false,
},
{
desc: "ShouldDecodeEmptyStringToNil",
have: "",
want: (*regexp.Regexp)(nil),
decode: true,
},
}
hook := configuration.StringToRegexpHookFunc()
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
result, err := hook(reflect.TypeOf(tc.have), reflect.TypeOf(tc.want), tc.have)
switch {
case !tc.decode:
assert.NoError(t, err)
assert.Equal(t, tc.have, result)
case tc.err == "":
assert.NoError(t, err)
require.Equal(t, tc.want, result)
pattern := result.(*regexp.Regexp)
if tc.want == (*regexp.Regexp)(nil) {
assert.Nil(t, pattern)
} else {
var names []string
for _, name := range pattern.SubexpNames() {
if name != "" {
names = append(names, name)
}
}
if len(tc.wantGrps) != 0 {
t.Run("MustHaveAllExpectedSubexpGroupNames", func(t *testing.T) {
for _, name := range tc.wantGrps {
assert.Contains(t, names, name)
}
})
t.Run("MustNotHaveUnexpectedSubexpGroupNames", func(t *testing.T) {
for _, name := range names {
assert.Contains(t, tc.wantGrps, name)
}
})
} else {
t.Run("MustHaveNoSubexpGroupNames", func(t *testing.T) {
assert.Len(t, names, 0)
})
}
}
default:
assert.EqualError(t, err, tc.err)
assert.Nil(t, result)
}
})
}
}
// TestStringToAddressHookFunc exercises configuration.StringToAddressHookFunc
// decoding strings to schema.Address and its scheme-specific wrappers
// (AddressTCP/AddressUDP/AddressLDAP/AddressSMTP), both value and pointer forms.
//
// Fix: the "ShouldNotDecodeToIntPtr" case was present twice with identical
// contents; the byte-for-byte duplicate is removed.
func TestStringToAddressHookFunc(t *testing.T) {
	testCases := []struct {
		name     string
		have     any
		expected any
		err      string
		decode   bool // false means the hook should pass the input through untouched
	}{
		{
			name:     "ShouldDecodeNonPtr",
			have:     "tcp://0.0.0.0:2020",
			expected: MustParseAddress("tcp://0.0.0.0:2020"),
			decode:   true,
		},
		{
			name:     "ShouldDecodePtr",
			have:     "tcp://0.0.0.0:2020",
			expected: MustParseAddressPtr("tcp://0.0.0.0:2020"),
			decode:   true,
		},
		{
			name:     "ShouldNotDecodeIntegerToCorrectType",
			have:     1,
			expected: schema.Address{},
			decode:   false,
		},
		{
			name:     "ShouldNotDecodeIntegerToCorrectTypePtr",
			have:     1,
			expected: &schema.Address{},
			decode:   false,
		},
		{
			name:     "ShouldNotDecodeIntegerPtrToCorrectType",
			have:     testInt32Ptr(1),
			expected: schema.Address{},
			decode:   false,
		},
		{
			name:     "ShouldNotDecodeIntegerPtrToCorrectTypePtr",
			have:     testInt32Ptr(1),
			expected: &schema.Address{},
			decode:   false,
		},
		{
			name:     "ShouldNotDecodeToString",
			have:     "tcp://0.0.0.0:2020",
			expected: "",
			decode:   false,
		},
		{
			name:     "ShouldNotDecodeToIntPtr",
			have:     "tcp://0.0.0.0:2020",
			expected: testInt32Ptr(1),
			decode:   false,
		},
		{
			name:     "ShouldFailDecode",
			have:     "tcp://&!@^#*&!@#&*@!:2020",
			expected: schema.Address{},
			err:      "could not decode 'tcp://&!@^#*&!@#&*@!:2020' to a schema.Address: could not parse string 'tcp://&!@^#*&!@#&*@!:2020' as address: expected format is [<scheme>://]<hostname>[:<port>]: parse \"tcp://&!@^\": invalid character \"^\" in host name",
			decode:   false,
		},
		{
			name:     "ShouldDecodeTCP",
			have:     "tcp://127.0.0.1",
			expected: schema.AddressTCP{Address: MustParseAddress("tcp://127.0.0.1")},
			err:      "",
			decode:   true,
		},
		{
			name:     "ShouldDecodeTCPPtr",
			have:     "tcp://127.0.0.1",
			expected: &schema.AddressTCP{Address: MustParseAddress("tcp://127.0.0.1")},
			err:      "",
			decode:   true,
		},
		{
			name:     "ShouldDecodeUDP",
			have:     "udp://127.0.0.1",
			expected: schema.AddressUDP{Address: MustParseAddress("udp://127.0.0.1")},
			err:      "",
			decode:   true,
		},
		{
			name:     "ShouldDecodeUDPPtr",
			have:     "udp://127.0.0.1",
			expected: &schema.AddressUDP{Address: MustParseAddress("udp://127.0.0.1")},
			err:      "",
			decode:   true,
		},
		{
			name:     "ShouldDecodeLDAP",
			have:     "ldap://127.0.0.1",
			expected: schema.AddressLDAP{Address: MustParseAddress("ldap://127.0.0.1")},
			err:      "",
			decode:   true,
		},
		{
			name:     "ShouldDecodeLDAPPtr",
			have:     "ldap://127.0.0.1",
			expected: &schema.AddressLDAP{Address: MustParseAddress("ldap://127.0.0.1")},
			err:      "",
			decode:   true,
		},
		{
			name:     "ShouldDecodeSMTP",
			have:     "smtp://127.0.0.1",
			expected: schema.AddressSMTP{Address: MustParseAddress("smtp://127.0.0.1")},
			err:      "",
			decode:   true,
		},
		{
			name:     "ShouldDecodeSMTPPtr",
			have:     "smtp://127.0.0.1",
			expected: &schema.AddressSMTP{Address: MustParseAddress("smtp://127.0.0.1")},
			err:      "",
			decode:   true,
		},
		{
			name:     "ShouldFailDecodeTCP",
			have:     "@@@@@@@",
			expected: schema.AddressTCP{Address: MustParseAddress("tcp://127.0.0.1")},
			err:      "could not decode '@@@@@@@' to a schema.AddressTCP: error validating the address: the url 'tcp://%40%40%40%40%40%40@' appears to have user info but this is not valid for addresses",
			decode:   false,
		},
		{
			name:     "ShouldFailDecodeUDP",
			have:     "@@@@@@@",
			expected: schema.AddressUDP{Address: MustParseAddress("udp://127.0.0.1")},
			err:      "could not decode '@@@@@@@' to a schema.AddressUDP: error validating the address: the url 'udp://%40%40%40%40%40%40@' appears to have user info but this is not valid for addresses",
			decode:   false,
		},
		{
			name:     "ShouldFailDecodeLDAP",
			have:     "@@@@@@@",
			expected: schema.AddressLDAP{Address: MustParseAddress("ldap://127.0.0.1")},
			err:      "could not decode '@@@@@@@' to a schema.AddressLDAP: error validating the address: the url 'ldaps://%40%40%40%40%40%40@' appears to have user info but this is not valid for addresses",
			decode:   false,
		},
		{
			name:     "ShouldFailDecodeSMTP",
			have:     "@@@@@@@",
			expected: schema.AddressSMTP{Address: MustParseAddress("smtp://127.0.0.1")},
			err:      "could not decode '@@@@@@@' to a schema.AddressSMTP: error validating the address: the url 'smtp://%40%40%40%40%40%40@' appears to have user info but this is not valid for addresses",
			decode:   false,
		},
	}

	hook := configuration.StringToAddressHookFunc()

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			actual, err := hook(reflect.TypeOf(tc.have), reflect.TypeOf(tc.expected), tc.have)
			if tc.err != "" {
				assert.EqualError(t, err, tc.err)

				if !tc.decode {
					assert.Nil(t, actual)
				}
			} else {
				assert.NoError(t, err)

				if tc.decode {
					assert.Equal(t, tc.expected, actual)
				} else {
					// Values the hook does not handle must pass through unchanged.
					assert.Equal(t, tc.have, actual)
				}
			}
		})
	}
}
// TestStringToPrivateKeyHookFunc exercises configuration.StringToPrivateKeyHookFunc
// decoding PEM encoded strings to *rsa.PrivateKey / *ecdsa.PrivateKey, including
// type mismatches (key material of the wrong algorithm or a certificate).
func TestStringToPrivateKeyHookFunc(t *testing.T) {
	// Typed nil pointers used as destination type markers in the table below.
	var (
		nilRSA   *rsa.PrivateKey
		nilECDSA *ecdsa.PrivateKey
		nilCert  *x509.Certificate
	)

	testCases := []struct {
		desc   string
		have   any
		want   any
		err    string
		decode bool // false means the hook should pass the input through untouched
	}{
		{
			desc:   "ShouldDecodeRSAPrivateKey",
			have:   x509PrivateKeyRSA1,
			want:   MustParseRSAPrivateKey(x509PrivateKeyRSA1),
			decode: true,
		},
		{
			desc:   "ShouldDecodeECDSAPrivateKey",
			have:   x509PrivateKeyEC1,
			want:   MustParseECDSAPrivateKey(x509PrivateKeyEC1),
			decode: true,
		},
		{
			desc:   "ShouldNotDecodeToECDSAPrivateKey",
			have:   x509PrivateKeyRSA1,
			want:   &ecdsa.PrivateKey{},
			decode: true,
			err:    "could not decode to a *ecdsa.PrivateKey: the data is for a *rsa.PrivateKey not a *ecdsa.PrivateKey",
		},
		{
			desc:   "ShouldNotDecodeEmptyRSAKey",
			have:   "",
			want:   nilRSA,
			decode: true,
		},
		{
			desc:   "ShouldNotDecodeEmptyECDSAKey",
			have:   "",
			want:   nilECDSA,
			decode: true,
		},
		{
			desc:   "ShouldNotDecodeECDSAKeyToRSAKey",
			have:   x509PrivateKeyEC1,
			want:   nilRSA,
			decode: true,
			err:    "could not decode to a *rsa.PrivateKey: the data is for a *ecdsa.PrivateKey not a *rsa.PrivateKey",
		},
		{
			desc:   "ShouldNotDecodeRSAKeyToECDSAKey",
			have:   x509PrivateKeyRSA1,
			want:   nilECDSA,
			decode: true,
			err:    "could not decode to a *ecdsa.PrivateKey: the data is for a *rsa.PrivateKey not a *ecdsa.PrivateKey",
		},
		{
			desc:   "ShouldNotDecodeBadRSAPrivateKey",
			have:   x509PrivateKeyRSA2,
			want:   nilRSA,
			decode: true,
			err:    "could not decode to a *rsa.PrivateKey: failed to parse PEM block containing the key",
		},
		{
			desc:   "ShouldNotDecodeBadECDSAPrivateKey",
			have:   x509PrivateKeyEC2,
			want:   nilECDSA,
			decode: true,
			err:    "could not decode to a *ecdsa.PrivateKey: failed to parse PEM block containing the key",
		},
		{
			desc:   "ShouldNotDecodeCertificateToRSAPrivateKey",
			have:   x509CertificateRSA1,
			want:   nilRSA,
			decode: true,
			err:    "could not decode to a *rsa.PrivateKey: the data is for a *x509.Certificate not a *rsa.PrivateKey",
		},
		{
			desc:   "ShouldNotDecodeCertificateToECDSAPrivateKey",
			have:   x509CertificateRSA1,
			want:   nilECDSA,
			decode: true,
			err:    "could not decode to a *ecdsa.PrivateKey: the data is for a *x509.Certificate not a *ecdsa.PrivateKey",
		},
		{
			desc:   "ShouldNotDecodeRSAKeyToCertificate",
			have:   x509PrivateKeyRSA1,
			want:   nilCert,
			decode: false,
		},
		{
			desc:   "ShouldNotDecodeECDSAKeyToCertificate",
			have:   x509PrivateKeyEC1,
			want:   nilCert,
			decode: false,
		},
	}

	hook := configuration.StringToPrivateKeyHookFunc()

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			result, err := hook(reflect.TypeOf(tc.have), reflect.TypeOf(tc.want), tc.have)

			switch {
			case !tc.decode:
				// Values the hook does not handle must pass through unchanged.
				assert.NoError(t, err)
				assert.Equal(t, tc.have, result)
			case tc.err == "":
				assert.NoError(t, err)
				require.Equal(t, tc.want, result)
			default:
				assert.EqualError(t, err, tc.err)
				assert.Nil(t, result)
			}
		})
	}
}
// TestStringToX509CertificateHookFunc exercises configuration.StringToX509CertificateHookFunc
// decoding PEM encoded certificate strings to *x509.Certificate.
func TestStringToX509CertificateHookFunc(t *testing.T) {
	// Typed nil pointer used as the destination type marker in the table below.
	var nilkey *x509.Certificate

	testCases := []struct {
		desc   string
		have   any
		want   any
		err    string
		decode bool // false means the hook should pass the input through untouched
	}{
		{
			desc:   "ShouldDecodeRSACertificate",
			have:   x509CertificateRSA1,
			want:   MustParseX509Certificate(x509CertificateRSA1),
			decode: true,
		},
		{
			desc:   "ShouldDecodeECDSACertificate",
			have:   x509CACertificateECDSA,
			want:   MustParseX509Certificate(x509CACertificateECDSA),
			decode: true,
		},
		{
			desc:   "ShouldDecodeRSACACertificate",
			have:   x509CACertificateRSA,
			want:   MustParseX509Certificate(x509CACertificateRSA),
			decode: true,
		},
		{
			desc:   "ShouldDecodeECDSACACertificate",
			have:   x509CACertificateECDSA,
			want:   MustParseX509Certificate(x509CACertificateECDSA),
			decode: true,
		},
		{
			desc:   "ShouldDecodeEmptyCertificateToNil",
			have:   "",
			want:   nilkey,
			decode: true,
		},
		{
			desc:   "ShouldNotDecodeECDSAKeyToCertificate",
			have:   x509PrivateKeyEC1,
			want:   nilkey,
			decode: true,
			err:    "could not decode to a *x509.Certificate: the data is for a *ecdsa.PrivateKey not a *x509.Certificate",
		},
		{
			desc:   "ShouldNotDecodeBadRSAPrivateKeyToCertificate",
			have:   x509PrivateKeyRSA2,
			want:   nilkey,
			decode: true,
			err:    "could not decode to a *x509.Certificate: failed to parse PEM block containing the key",
		},
	}

	hook := configuration.StringToX509CertificateHookFunc()

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			result, err := hook(reflect.TypeOf(tc.have), reflect.TypeOf(tc.want), tc.have)

			switch {
			case !tc.decode:
				// Values the hook does not handle must pass through unchanged.
				assert.NoError(t, err)
				assert.Equal(t, tc.have, result)
			case tc.err == "":
				assert.NoError(t, err)
				require.Equal(t, tc.want, result)
			default:
				assert.EqualError(t, err, tc.err)
				assert.Nil(t, result)
			}
		})
	}
}
// TestStringToPasswordDigestHookFunc exercises configuration.StringToPasswordDigestHookFunc
// decoding encoded password digest strings to schema.PasswordDigest (value and pointer).
// Fields (positional): name, have, expected, err, decode.
func TestStringToPasswordDigestHookFunc(t *testing.T) {
	// Typed nil pointer used as the destination type marker in the table below.
	var nilvalue *schema.PasswordDigest

	testCases := []struct {
		name     string
		have     any
		expected any
		err      string
		decode   bool // false means the hook should pass the input through untouched
	}{
		{
			"ShouldParse",
			"$plaintext$example",
			MustParsePasswordDigest("$plaintext$example"),
			"",
			true,
		},
		{
			"ShouldParsePtr",
			"$plaintext$example",
			MustParsePasswordDigestPtr("$plaintext$example"),
			"",
			true,
		},
		{
			"ShouldNotParseUnknown",
			"$abc$example",
			schema.PasswordDigest{},
			"could not decode '$abc$example' to a schema.PasswordDigest: provided encoded hash has an invalid identifier: the identifier 'abc' is unknown to the decoder",
			false,
		},
		{
			"ShouldNotParseWrongType",
			"$abc$example",
			schema.TLSVersion{},
			"",
			false,
		},
		{
			"ShouldNotParseWrongTypePtr",
			"$abc$example",
			&schema.TLSVersion{},
			"",
			false,
		},
		{
			"ShouldNotParseEmptyString",
			"",
			schema.PasswordDigest{},
			"could not decode an empty value to a schema.PasswordDigest: must have a non-empty value",
			false,
		},
		{
			// An empty string decodes to a nil pointer when the target is a pointer.
			"ShouldParseEmptyStringPtr",
			"",
			nilvalue,
			"",
			true,
		},
	}

	hook := configuration.StringToPasswordDigestHookFunc()

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			actual, err := hook(reflect.TypeOf(tc.have), reflect.TypeOf(tc.expected), tc.have)
			if tc.err != "" {
				assert.EqualError(t, err, tc.err)

				if !tc.decode {
					assert.Nil(t, actual)
				}
			} else {
				assert.NoError(t, err)

				if tc.decode {
					assert.Equal(t, tc.expected, actual)
				} else {
					// Values the hook does not handle must pass through unchanged.
					assert.Equal(t, tc.have, actual)
				}
			}
		})
	}
}
// TestStringToTLSVersionHookFunc exercises configuration.StringToTLSVersionHookFunc
// decoding version strings like "TLS1.3" to schema.TLSVersion (value and pointer).
// Fields (positional): name, have, expected, err, decode.
func TestStringToTLSVersionHookFunc(t *testing.T) {
	testCases := []struct {
		name     string
		have     any
		expected any
		err      string
		decode   bool // false means the hook should pass the input through untouched
	}{
		{
			"ShouldParseTLS1.3",
			"TLS1.3",
			schema.TLSVersion{Value: tls.VersionTLS13},
			"",
			true,
		},
		{
			"ShouldParseTLS1.3PTR",
			"TLS1.3",
			&schema.TLSVersion{Value: tls.VersionTLS13},
			"",
			true,
		},
		{
			"ShouldParseTLS1.2",
			"TLS1.2",
			schema.TLSVersion{Value: tls.VersionTLS12},
			"",
			true,
		},
		{
			"ShouldNotParseInt",
			1,
			&schema.TLSVersion{},
			"",
			false,
		},
		{
			"ShouldNotParseNonVersion",
			"1",
			&schema.TLSVersion{},
			"could not decode '1' to a *schema.TLSVersion: supplied tls version isn't supported",
			false,
		},
	}

	hook := configuration.StringToTLSVersionHookFunc()

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			actual, err := hook(reflect.TypeOf(tc.have), reflect.TypeOf(tc.expected), tc.have)
			if tc.err != "" {
				assert.EqualError(t, err, tc.err)

				if !tc.decode {
					assert.Nil(t, actual)
				}
			} else {
				assert.NoError(t, err)

				if tc.decode {
					assert.Equal(t, tc.expected, actual)
				} else {
					// Values the hook does not handle must pass through unchanged.
					assert.Equal(t, tc.have, actual)
				}
			}
		})
	}
}
// TestStringToX509CertificateChainHookFunc exercises
// configuration.StringToX509CertificateChainHookFunc decoding one or more PEM
// encoded certificates to schema.X509CertificateChain (value and pointer).
// `err` is the expected decode error; `verr` is the expected error from
// validating the decoded chain afterwards.
func TestStringToX509CertificateChainHookFunc(t *testing.T) {
	// Typed nil pointer used as the destination type marker in the table below.
	var nilkey *schema.X509CertificateChain

	testCases := []struct {
		desc      string
		have      any
		expected  any
		err, verr string
		decode    bool // false means the hook should pass the input through untouched
	}{
		{
			desc:     "ShouldDecodeRSACertificate",
			have:     x509CertificateRSA1,
			expected: MustParseX509CertificateChain(x509CertificateRSA1),
			decode:   true,
		},
		{
			desc:     "ShouldDecodeRSACertificateNoPtr",
			have:     x509CertificateRSA1,
			expected: *MustParseX509CertificateChain(x509CertificateRSA1),
			decode:   true,
		},
		{
			desc:     "ShouldDecodeRSACertificateChain",
			have:     BuildChain(x509CertificateRSA1, x509CACertificateRSA),
			expected: MustParseX509CertificateChain(x509CertificateRSA1, x509CACertificateRSA),
			decode:   true,
		},
		{
			desc:     "ShouldDecodeRSACertificateChainNoPtr",
			have:     BuildChain(x509CertificateRSA1, x509CACertificateRSA),
			expected: *MustParseX509CertificateChain(x509CertificateRSA1, x509CACertificateRSA),
			decode:   true,
		},
		{
			// Decode succeeds, but the chain fails validation: the leaf is not
			// signed by the (mismatched algorithm) CA that follows it.
			desc:     "ShouldNotDecodeBadRSACertificateChain",
			have:     BuildChain(x509CertificateRSA1, x509CACertificateECDSA),
			expected: MustParseX509CertificateChain(x509CertificateRSA1, x509CACertificateECDSA),
			verr:     "certificate #1 in chain is not signed properly by certificate #2 in chain: x509: signature algorithm specifies an RSA public key, but have public key of type *ecdsa.PublicKey",
			decode:   true,
		},
		{
			desc:     "ShouldDecodeECDSACertificate",
			have:     x509CACertificateECDSA,
			expected: MustParseX509CertificateChain(x509CACertificateECDSA),
			decode:   true,
		},
		{
			desc:     "ShouldDecodeRSACACertificate",
			have:     x509CACertificateRSA,
			expected: MustParseX509CertificateChain(x509CACertificateRSA),
			decode:   true,
		},
		{
			desc:     "ShouldDecodeECDSACACertificate",
			have:     x509CACertificateECDSA,
			expected: MustParseX509CertificateChain(x509CACertificateECDSA),
			decode:   true,
		},
		{
			desc:     "ShouldDecodeEmptyCertificateToNil",
			have:     "",
			expected: nilkey,
			decode:   true,
		},
		{
			desc:     "ShouldDecodeEmptyCertificateToEmptyStruct",
			have:     "",
			expected: schema.X509CertificateChain{},
			decode:   true,
		},
		{
			desc:     "ShouldNotDecodeECDSAKeyToCertificate",
			have:     x509PrivateKeyEC1,
			expected: nilkey,
			decode:   true,
			err:      "could not decode to a *schema.X509CertificateChain: the PEM data chain contains a EC PRIVATE KEY but only certificates are expected",
		},
		{
			desc:     "ShouldNotDecodeBadRSAPrivateKeyToCertificate",
			have:     x509PrivateKeyRSA2,
			expected: nilkey,
			decode:   true,
			err:      "could not decode to a *schema.X509CertificateChain: invalid PEM block",
		},
	}

	hook := configuration.StringToX509CertificateChainHookFunc()

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			actual, err := hook(reflect.TypeOf(tc.have), reflect.TypeOf(tc.expected), tc.have)

			switch {
			case !tc.decode:
				// Values the hook does not handle must pass through unchanged.
				assert.NoError(t, err)
				assert.Equal(t, tc.have, actual)
			case tc.err == "":
				assert.NoError(t, err)
				require.Equal(t, tc.expected, actual)

				// A nil pointer result cannot be validated.
				if tc.expected == nilkey {
					break
				}

				// Validate the decoded chain regardless of pointer/value form.
				switch chain := actual.(type) {
				case *schema.X509CertificateChain:
					require.NotNil(t, chain)

					if tc.verr == "" {
						assert.NoError(t, chain.Validate())
					} else {
						assert.EqualError(t, chain.Validate(), tc.verr)
					}
				case schema.X509CertificateChain:
					require.NotNil(t, chain)

					if tc.verr == "" {
						assert.NoError(t, chain.Validate())
					} else {
						assert.EqualError(t, chain.Validate(), tc.verr)
					}
				}
			default:
				assert.EqualError(t, err, tc.err)
				assert.Nil(t, actual)
			}
		})
	}
}
// PEM encoded test fixtures used by the decode-hook tests above.
// The *2 variants deliberately contain invalid base64 bodies ("bad key") to
// exercise the failure paths; do not edit the data.
var (
	// Valid PKCS#1 RSA private key.
	x509PrivateKeyRSA1 = `
-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA6z1LOg1ZCqb0lytXWZ+MRBpMHEXOoTOLYgfZXt1IYyE3Z758
cyalk0NYQhY5cZDsXPYWPvAHiPMUxutWkoxFwby56S+AbIMa3/Is+ILrHRJs8Exn
ZkpyrYFxPX12app2kErdmAkHSx0Z5/kuXiz96PHs8S8/ZbyZolLHzdfLtSzjvRm5
Zue5iFzsf19NJz5CIBfv8g5lRwtE8wNJoRSpn1xq7fqfuA0weDNFPzjlNWRLy6aa
rK7qJexRkmkCs4sLgyl+9NODYJpvmN8E1yhyC27E0joI6rBFVW7Ihv+cSPCdDzGp
EWe81x3AeqAa3mjVqkiq4u4Z2i8JDgBaPboqJwIDAQABAoIBAAFdLZ58jVOefDSU
L8F5R1rtvBs93GDa56f926jNJ6pLewLC+/2+757W+SAI+PRLntM7Kg3bXm/Q2QH+
Q1Y+MflZmspbWCdI61L5GIGoYKyeers59i+FpvySj5GHtLQRiTZ0+Kv1AXHSDWBm
9XneUOqU3IbZe0ifu1RRno72/VtjkGXbW8Mkkw+ohyGbIeTx/0/JQ6sSNZTT3Vk7
8i4IXptq3HSF0/vqZuah8rShoeNq72pD1YLM9YPdL5by1QkDLnqATDiCpLBTCaNV
I8sqYEun+HYbQzBj8ZACG2JVZpEEidONWQHw5BPWO95DSZYrVnEkuCqeH+u5vYt7
CHuJ3AECgYEA+W3v5z+j91w1VPHS0VB3SCDMouycAMIUnJPAbt+0LPP0scUFsBGE
hPAKddC54pmMZRQ2KIwBKiyWfCrJ8Xz8Yogn7fJgmwTHidJBr2WQpIEkNGlK3Dzi
jXL2sh0yC7sHvn0DqiQ79l/e7yRbSnv2wrTJEczOOH2haD7/tBRyCYECgYEA8W+q
E9YyGvEltnPFaOxofNZ8LHVcZSsQI5b6fc0iE7fjxFqeXPXEwGSOTwqQLQRiHn9b
CfPmIG4Vhyq0otVmlPvUnfBZ2OK+tl5X2/mQFO3ROMdvpi0KYa994uqfJdSTaqLn
jjoKFB906UFHnDQDLZUNiV1WwnkTglgLc+xrd6cCgYEAqqthyv6NyBTM3Tm2gcio
Ra9Dtntl51LlXZnvwy3IkDXBCd6BHM9vuLKyxZiziGx+Vy90O1xI872cnot8sINQ
Am+dur/tAEVN72zxyv0Y8qb2yfH96iKy9gxi5s75TnOEQgAygLnYWaWR2lorKRUX
bHTdXBOiS58S0UzCFEslGIECgYBqkO4SKWYeTDhoKvuEj2yjRYyzlu28XeCWxOo1
otiauX0YSyNBRt2cSgYiTzhKFng0m+QUJYp63/wymB/5C5Zmxi0XtWIDADpLhqLj
HmmBQ2Mo26alQ5YkffBju0mZyhVzaQop1eZi8WuKFV1FThPlB7hc3E0SM5zv2Grd
tQnOWwKBgQC40yZY0PcjuILhy+sIc0Wvh7LUA7taSdTye149kRvbvsCDN7Jh75lM
USjhLXY0Nld2zBm9r8wMb81mXH29uvD+tDqqsICvyuKlA/tyzXR+QTr7dCVKVwu0
1YjCJ36UpTsLre2f8nOSLtNmRfDPtbOE2mkOoO9dD9UU0XZwnvn9xw==
-----END RSA PRIVATE KEY-----`

	// RSA private key PEM with an unparseable body (failure-path fixture).
	x509PrivateKeyRSA2 = `
-----BEGIN RSA PRIVATE KEY-----
bad key
-----END RSA PRIVATE KEY-----`

	// Valid SEC 1 ECDSA private key.
	x509PrivateKeyEC1 = `
-----BEGIN EC PRIVATE KEY-----
MHcCAQEEIMn970LSn8aKVhBM4vyUmpZyEdCT4riN+Lp4QU04zUhYoAoGCCqGSM49
AwEHoUQDQgAEMD69n22nd78GmaRDzy/s7muqhbc/OEnFS2mNtiRAA5FaX+kbkCB5
8pu/k2jkaSVNZtBYKPVAibHkhvakjVb66A==
-----END EC PRIVATE KEY-----`

	// EC private key PEM with an unparseable body (failure-path fixture).
	x509PrivateKeyEC2 = `
-----BEGIN EC PRIVATE KEY-----
bad key
-----END EC PRIVATE KEY-----`

	// Leaf certificate with an RSA public key.
	x509CertificateRSA1 = `
-----BEGIN CERTIFICATE-----
MIIC5TCCAc2gAwIBAgIQfBUmKLmEvMqS6S9auKCY2DANBgkqhkiG9w0BAQsFADAT
MREwDwYDVQQKEwhBdXRoZWxpYTAeFw0yMjA5MDgxMDA5MThaFw0yMzA5MDgxMDA5
MThaMBMxETAPBgNVBAoTCEF1dGhlbGlhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
MIIBCgKCAQEApqno1cOpDcgKmOJqeDQGIGH5/ZnqcJ4xud6eOUfbDqel3b0RkAQX
mFYWEDO/PDOAOjYk/xSwZGo3jDofOHGhrKstQqLdweHGfme5NXYHJda7nGv/OY5q
zUuEG4xBVgUsvbshWZ18H+bIQpwiP6tDAabxc0B7J15F1pArK8QN4pDTfsqZDwMi
Qyo638XfUbDzEVZRbdDKxHz5g0w2vFdXon8uOxRRb0+zlHF9nM4PiESNgiUIYeua
8Q5yP10SY2k9zlQ/OFJ4XhQmioCJvNjJE/TSc5/ECub2n7hTZhN5TGKagukZ5NAy
KgbvNYW+CN+H4pFJt/9WptiDfBqhlUvjnwIDAQABozUwMzAOBgNVHQ8BAf8EBAMC
BaAwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADANBgkqhkiG9w0B
AQsFAAOCAQEAH9veGzfWqXxsa5s2KHV2Jzed9V8KSs1Qy9QKRez1i2OMvMPh2DRM
RLzAAp/XigjxLQF/LFXuoFW0Qg8BRb44iRgZrCiqVOUnd3xTrS/CcFExnpQI4F12
/U70o97rkTonCOHmUUW6vQfWSXR/GU3/faRLJjiqcpWLZhTQNrnsip1ym3B2NMdk
gMKkT8Acx1DX48MvTE4+DyqCS8TlJbacBJ2RFFELKu3jYnVNyrb0ywLxoCtWqBBE
veVj+VMn9hNY1u5uydLsUDOlT5QyQcEuUzjjdhsJKEgDE5daNtB2OJJnd9IOMzUA
hasPZETCCKabTpWiEPw1Cn/ZRqya0SZqFg==
-----END CERTIFICATE-----
`

	// CA certificate with an RSA public key.
	x509CACertificateRSA = `
-----BEGIN CERTIFICATE-----
MIIDBDCCAeygAwIBAgIRAJfz0dHS9UkDngE55lUPdu4wDQYJKoZIhvcNAQELBQAw
EzERMA8GA1UEChMIQXV0aGVsaWEwHhcNMjIwOTA4MDk1OTI1WhcNMjMwOTA4MDk1
OTI1WjATMREwDwYDVQQKEwhBdXRoZWxpYTCCASIwDQYJKoZIhvcNAQEBBQADggEP
ADCCAQoCggEBALfivbwq9r5X8N+NSbNHVuKbCb9f9vD5Xw2pOjSVvVjFkWQ1YKJu
JGx9yskhHBZTBt76cInipA+0PqCBrBrjij1lh2StvzRVuQwgFG6H01LxBPi0JyYv
Is94F6PHr6fSBgFWB5GNQ797KQIOdIr057uEFbp0eBMxxqiQ9gdyD0HPretrx1Uy
kHuF6jck958combn9luHW0i53mt8706j7UAhxFqu9YUeklTM1VqUiRm5+nJKIdNA
LiDMGVAuoxjhF6aIgY0yh5mL5mKtYYzhtA8WryrMzBgFRUGzHCSI1TNisA8wSf2T
Z2JhbFHrFPR5fiSqAEHok3UXu++wsfl/lisCAwEAAaNTMFEwDgYDVR0PAQH/BAQD
AgKkMA8GA1UdJQQIMAYGBFUdJQAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
OSXG42bCPNuWeP0ahScUMVjxe/wwDQYJKoZIhvcNAQELBQADggEBAFRnubHiYy1H
PGODKA++UY9eAmmaCJxzuWpY8FY9fBz8/VBzcp8vaURPmmQ/34QcqfaSHDM2jIaL
dQ2o9Ae5NjbRzLB6a5DcVO50oHG4BHP1ix4Bt3POr8J80KgA9pOIyAQqbAlFBSzQ
l9yrzVULyf+qpUmByRf5qy2kQJOBfMJbn5j+BprWKwbcI8OAZWWSLItTXqJDrFTk
OMZK4wZ6KiZM07KWMlwW/CE0QRzDk5MXfbwRt4D8pyx6rGKqI7QRusjm5osIpHZV
26FdBdBvEhq4i8UsmDsQqH3iMY1AKmojZToZb5rStOZWHO/BZZ7nT2bscNjwm0E8
6E2l6czk8ss=
-----END CERTIFICATE-----`

	// CA certificate with an ECDSA public key.
	x509CACertificateECDSA = `
-----BEGIN CERTIFICATE-----
MIIBdzCCAR2gAwIBAgIQUzb62irYb/7B2H0c1AbriDAKBggqhkjOPQQDAjATMREw
DwYDVQQKEwhBdXRoZWxpYTAeFw0yMjA5MDgxMDEzNDZaFw0yMzA5MDgxMDEzNDZa
MBMxETAPBgNVBAoTCEF1dGhlbGlhMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE
b/EiIBpmifCI34JdI7luetygue2rTtoNH0QXhtrjMuZNugT29LUz+DobZQxvGsOY
4TXzAQXq4gnTb7enNWFgsaNTMFEwDgYDVR0PAQH/BAQDAgKkMA8GA1UdJQQIMAYG
BFUdJQAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUxlDPBKHKawuvhtQTN874
TeCEKjkwCgYIKoZIzj0EAwIDSAAwRQIgAQeV01FZ/VkSERwaRKTeXAXxmKyc/05O
uDv6M2spMi0CIQC8uOSMcv11vp1ylsGg38N6XYA+GQa1BHRd79+91hC+7w==
-----END CERTIFICATE-----`
)
// MustParseRSAPrivateKey decodes a PEM encoded PKCS#1 RSA private key and
// panics on any failure. Intended for test fixtures only.
func MustParseRSAPrivateKey(data string) *rsa.PrivateKey {
	block, _ := pem.Decode([]byte(data))

	// len of a nil slice is 0, so a single length check covers the former
	// redundant `block.Bytes == nil` comparison as well.
	if block == nil || len(block.Bytes) == 0 {
		panic("not pem encoded")
	}

	if block.Type != "RSA PRIVATE KEY" {
		panic("not rsa private key")
	}

	key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
	if err != nil {
		panic(err)
	}

	return key
}
// MustParseECDSAPrivateKey decodes a PEM encoded SEC 1 ECDSA private key and
// panics on any failure. Intended for test fixtures only.
func MustParseECDSAPrivateKey(data string) *ecdsa.PrivateKey {
	block, _ := pem.Decode([]byte(data))

	// len of a nil slice is 0, so a single length check covers the former
	// redundant `block.Bytes == nil` comparison as well.
	if block == nil || len(block.Bytes) == 0 {
		panic("not pem encoded")
	}

	if block.Type != "EC PRIVATE KEY" {
		panic("not ecdsa private key")
	}

	key, err := x509.ParseECPrivateKey(block.Bytes)
	if err != nil {
		panic(err)
	}

	return key
}
// MustParseX509Certificate parses a single PEM encoded X.509 certificate,
// panicking when the input is not valid PEM or the certificate cannot be
// parsed. Intended for test fixtures only.
func MustParseX509Certificate(data string) *x509.Certificate {
	decoded, _ := pem.Decode([]byte(data))
	if decoded == nil || len(decoded.Bytes) == 0 {
		panic("not a PEM")
	}

	certificate, parseErr := x509.ParseCertificate(decoded.Bytes)
	if parseErr != nil {
		panic(parseErr)
	}

	return certificate
}
// BuildChain concatenates the given PEM documents into one string, inserting a
// single newline between consecutive documents.
func BuildChain(pems ...string) string {
	var out bytes.Buffer

	for idx, doc := range pems {
		if idx > 0 {
			out.WriteString("\n")
		}

		out.WriteString(doc)
	}

	return out.String()
}
// MustParseX509CertificateChain joins the given PEM documents with BuildChain
// and parses the result as a certificate chain, panicking on failure.
// Intended for test fixtures only.
func MustParseX509CertificateChain(datas ...string) *schema.X509CertificateChain {
	combined := BuildChain(datas...)

	parsed, parseErr := schema.NewX509CertificateChain(combined)
	if parseErr != nil {
		panic(parseErr)
	}

	return parsed
}
// testInt32Ptr returns a pointer to a copy of the given int32, for table
// driven tests that need pointer typed expectations.
func testInt32Ptr(i int32) *int32 {
	v := i

	return &v
}
func testTimeDurationPtr(t time.Duration) *time.Duration {
return &t
}
// Addressable primitives for tests that need pointer typed expectations
// (e.g. &testTrue, &testZero, &testString in the tables above).
var (
	testTrue   = true
	testZero   int32
	testString = ""
)
// MustParseAddress parses the input as a schema.Address value, panicking when
// parsing fails. Intended for test fixtures only.
//
// Fix: the intermediate local copy before returning was redundant; the
// dereference is returned directly.
func MustParseAddress(input string) schema.Address {
	address, err := schema.NewAddress(input)
	if err != nil {
		panic(err)
	}

	return *address
}
// MustParseAddressPtr parses the input as a *schema.Address, panicking when
// parsing fails. Intended for test fixtures only.
func MustParseAddressPtr(input string) *schema.Address {
	addr, parseErr := schema.NewAddress(input)
	if parseErr != nil {
		panic(parseErr)
	}

	return addr
}
// MustParsePasswordDigest decodes an encoded password digest into a value,
// panicking when decoding fails. Intended for test fixtures only.
func MustParsePasswordDigest(input string) schema.PasswordDigest {
	decoded, decodeErr := schema.DecodePasswordDigest(input)
	if decodeErr != nil {
		panic(decodeErr)
	}

	return *decoded
}
// MustParsePasswordDigestPtr decodes an encoded password digest into a
// pointer, panicking when decoding fails. Intended for test fixtures only.
func MustParsePasswordDigestPtr(input string) *schema.PasswordDigest {
	decoded, decodeErr := schema.DecodePasswordDigest(input)
	if decodeErr != nil {
		panic(decodeErr)
	}

	return decoded
}
|
package BLC
import (
"bytes"
"crypto/sha256"
"fmt"
"math/big"
)
// ProofOfWork pairs a block with the mining difficulty target used to
// validate it.
type ProofOfWork struct {
	Block  *Block   // the block being mined / validated
	target *big.Int // difficulty bound: a hash is valid when, as a big integer, it is below this value
}
// prepareData serializes the block headers together with the supplied nonce
// into one byte slice; this is the exact payload hashed during mining.
func (pow *ProofOfWork) prepareData(nonce int64) []byte {
	parts := [][]byte{
		pow.Block.PrevBlockHash,
		pow.Block.HashTransactions(),
		IntToHex(pow.Block.TimeStamp),
		IntToHex(int64(targetBit)),
		IntToHex(nonce),
		IntToHex(int64(pow.Block.Height)),
	}

	return bytes.Join(parts, []byte{})
}
// Mining difficulty: a valid 256-bit hash must have at least targetBit
// leading zero bits, e.g. 0000 0000 0000 0000 1001 0001 .... for 16 zeros.
const targetBit = 16
// IsValid reports whether the block's stored hash satisfies the proof of work
// difficulty, i.e. the hash interpreted as a big integer is strictly below
// the target.
//
// Fix: replaces the redundant `if cond { return true }; return false` pattern
// with a direct boolean return.
func (pow *ProofOfWork) IsValid() bool {
	var hashInt big.Int
	hashInt.SetBytes(pow.Block.Hash)

	// Cmp returns 1 when target > hash, meaning the hash has at least
	// targetBit leading zero bits.
	return pow.target.Cmp(&hashInt) == 1
}
// Run performs the mining loop:
//  1. serialise the block's fields together with a candidate nonce,
//  2. hash the result with SHA-256,
//  3. stop as soon as the hash is below the difficulty target.
// It returns the winning hash and the nonce that produced it.
// NOTE(review): the fmt.Printf on every iteration is a progress display
// ("\r" rewrites the same terminal line) and dominates the loop's cost.
func (proofOfWork *ProofOfWork) Run() ([]byte, int64) {
var nonce int64
var hashInt big.Int // big-integer view of the most recent hash
var hash [32]byte
for {
dataBytes := proofOfWork.prepareData(nonce)
// Hash the serialised header + nonce.
hash = sha256.Sum256(dataBytes)
fmt.Printf("\r%x", hash)
// Interpret the hash as a big integer for comparison with the target.
hashInt.SetBytes(hash[:])
// target > hash means the required number of leading zero bits is met.
if proofOfWork.target.Cmp(&hashInt) == 1 {
break
}
nonce++
}
return hash[:], nonce
}
// NewProofOfWork builds a proof-of-work instance for block. The target is
// 1 << (256 - targetBit), so a valid hash must have at least targetBit
// leading zero bits.
func NewProofOfWork(block *Block) *ProofOfWork {
	target := new(big.Int).Lsh(big.NewInt(1), 256-targetBit)
	return &ProofOfWork{Block: block, target: target}
}
|
package workers
import (
"time"
"github.com/spf13/viper"
"go.uber.org/zap"
"github.com/pushaas/push-agent/push-agent/services"
)
type (
// StatsWorker periodically refreshes this agent's stats in the backing store.
StatsWorker interface {
DispatchWorker()
}
statsWorker struct {
enabled bool // workers.stats.enabled config flag
expiration time.Duration // TTL passed to the stats service on each update
interval time.Duration // period between updates
logger *zap.Logger
agentName string
quitChan chan struct{} // signals the worker loop to stop
statsService services.StatsService
workersEnabled bool // global workers.enabled config flag
}
)
// performAction fires one stats refresh. Both updates run as fire-and-forget
// goroutines; errors are presumably handled inside the stats service.
func (w *statsWorker) performAction() {
go w.statsService.UpdateGlobalStats(w.agentName, w.expiration)
go w.statsService.UpdateChannelsStats(w.agentName, w.expiration)
}
// thanks https://stackoverflow.com/a/16466581/1717979
// startWorker runs one immediate refresh and then refreshes every interval
// until a value arrives on quitChan.
// NOTE(review): writing w.quitChan = nil here while stopWorker reads it from
// another goroutine is a data race — confirm call sites serialize start/stop.
func (w *statsWorker) startWorker() {
w.performAction() // run once right away
ticker := time.NewTicker(w.interval)
for {
select {
case <- ticker.C:
w.performAction()
case <- w.quitChan:
// Mark as stopped so stopWorker becomes a no-op afterwards.
w.quitChan = nil
ticker.Stop()
w.logger.Info("stopping stats worker")
return
}
}
}
// stopWorker signals the worker loop to exit, if it appears to be running.
// NOTE(review): the nil check races with startWorker clearing quitChan, and
// the send blocks forever if the loop is not running — verify usage.
func (w *statsWorker) stopWorker() {
if w.quitChan != nil {
w.quitChan <- struct{}{}
}
}
// DispatchWorker starts the background stats loop, but only when both the
// global workers switch and the stats-specific switch are enabled.
func (w *statsWorker) DispatchWorker() {
if w.workersEnabled && w.enabled {
go w.startWorker()
}
}
// NewStatsWorker builds a StatsWorker from configuration.
//
// Config keys read: workers.enabled, workers.stats.enabled,
// workers.stats.expiration, workers.stats.interval.
func NewStatsWorker(config *viper.Viper, logger *zap.Logger, agentName string, statsService services.StatsService) StatsWorker {
	enabled := config.GetBool("workers.stats.enabled")
	expiration := config.GetDuration("workers.stats.expiration")
	interval := config.GetDuration("workers.stats.interval")
	workersEnabled := config.GetBool("workers.enabled")
	return &statsWorker{
		enabled:      enabled,
		expiration:   expiration,
		interval:     interval,
		logger:       logger.Named("statsWorker"),
		agentName:    agentName,
		// BUG FIX: quitChan was never initialized, so it stayed nil —
		// the quit select case could never fire and stopWorker's send
		// would block forever. Create the channel up front.
		quitChan:     make(chan struct{}),
		statsService: statsService,
		workersEnabled: workersEnabled,
	}
}
|
package rc4
import (
"crypto/cipher"
"crypto/rc4"
"io"
)
const defaultBufferSize = 1024 * 1024
// RC4Stream applies the RC4 keystream derived from key to all data read from
// src and writes the transformed bytes to dst. Because RC4 is a symmetric
// stream cipher, the same call both encrypts and decrypts.
func RC4Stream(src io.Reader, dst io.Writer, key []byte) error {
	c, err := rc4.NewCipher(key)
	if err != nil {
		return err
	}
	reader := cipher.StreamReader{S: c, R: src}
	buf := make([]byte, defaultBufferSize)
	_, err = io.CopyBuffer(dst, reader, buf)
	return err
}
// RC4 encrypts (or, symmetrically, decrypts) plaintext with the RC4 stream
// cipher keyed by key, returning a newly allocated buffer of the same length.
// Note: the local is named c rather than cipher to avoid shadowing the
// crypto/cipher package imported by this file.
func RC4(plaintext, key []byte) ([]byte, error) {
	c, err := rc4.NewCipher(key)
	if err != nil {
		return nil, err
	}
	out := make([]byte, len(plaintext))
	c.XORKeyStream(out, plaintext)
	return out, nil
}
|
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
package rolling
import (
"compress/gzip"
"io"
)
// Compression abstracts the algorithm used to compress rotated log files.
type Compression interface {
// Compress reads all of in and writes the compressed form to out.
Compress(in io.Reader, out io.Writer) error
// Extension returns the file extension (without dot) for compressed output.
Extension() string
}
// CompressGZip implements Compression using gzip.
type CompressGZip struct {
	// Level is the gzip compression level, passed to gzip.NewWriterLevel.
	Level int
}

// Extension returns the conventional extension for gzip-compressed files.
func (c *CompressGZip) Extension() string {
	return "gz"
}
// Compress gzip-compresses everything read from in and writes it to out at
// the configured level. The writer is always closed; a Close error is
// reported only when no earlier error occurred.
func (c *CompressGZip) Compress(in io.Reader, out io.Writer) (err error) {
	gz, errOpen := gzip.NewWriterLevel(out, c.Level)
	if errOpen != nil {
		return errOpen
	}
	defer func() {
		if cerr := gz.Close(); err == nil {
			err = cerr
		}
	}()
	if _, err = io.Copy(gz, in); err != nil {
		return err
	}
	err = gz.Flush()
	return err
}
|
/*****************************************************************
* Copyright©,2020-2022, email: 279197148@qq.com
* Version: 1.0.0
* @Author: yangtxiang
* @Date: 2020-08-04 16:55
* Description:
*****************************************************************/
package netstream
import (
"github.com/go-xe2/x/core/logger"
"github.com/go-xe2/x/os/xlog"
"sync/atomic"
"time"
)
// heartbeatProcessLoop starts the background heartbeat goroutine.
// It sends a heartbeat whenever more than `speed` has elapsed since the last
// one, and disconnects after more than maxLoss consecutive missed heartbeats.
// A zero speed or zero maxLoss disables heartbeat checking entirely.
func (p *TStreamClient) heartbeatProcessLoop() {
	speed := p.options.GetHeartbeatSpeed()
	maxLoss := p.options.GetAllowMaxLoss()
	xlog.Debug("准备发送心跳,speed:", speed, ", maxLoss:", maxLoss)
	if speed == 0 || maxLoss == 0 {
		// Heartbeat rate not configured; checking disabled.
		return
	}
	// BUG FIX: the original did a load-then-store (racy) and, worse, ran
	// `defer StoreInt32(..., 0)` in THIS function — the flag was cleared as
	// soon as the goroutine was spawned, so the "already running" guard never
	// worked. Use an atomic CAS and clear the flag when the loop itself exits.
	if !atomic.CompareAndSwapInt32(&p.heartbeatIsRun, 0, 1) {
		return
	}
	go func() {
		defer atomic.StoreInt32(&p.heartbeatIsRun, 0)
		for {
			if p.conn.isStop() {
				// Service closed; stop heartbeat checking.
				xlog.Debug("已关闭服务,退出心跳检测.")
				return
			}
			conn := p.conn
			xlog.Debug("丢失", conn.HeartbeatLossCount(), "次心跳")
			if conn.HeartbeatLossCount() > maxLoss {
				// Too many heartbeats lost: drop the connection. When the close
				// is not user-initiated, the onDisconnect handler triggers
				// RetryConnect, so it is not called here.
				conn.innerClose()
				//p.RetryConnect()
				p.Log(logger.LEVEL_INFO, "长时间未收到数据,已断开连接")
				return
			}
			t := conn.Heartbeat()
			diff := time.Since(t)
			if diff > speed {
				conn.UpdateHeartbeat(true)
				if e := conn.SendHeartbeat(); e != nil {
					p.Log(logger.LEVEL_WARN, "发送心跳包出错:", e)
				}
			}
			time.Sleep(speed)
		}
	}()
}
// RetryConnect repeatedly attempts to reopen the connection, sleeping
// trySpeed between attempts. A maxTryCount of 0 means retry forever.
// Returns true on success; returns false (and closes the client) once
// maxTryCount consecutive attempts have failed, or immediately when
// trySpeed is not configured.
func (p *TStreamClient) RetryConnect() bool {
	maxTryCount := p.options.GetTryConnectCount()
	trySpeed := p.options.GetTryConnectSpeed()
	if trySpeed <= 0 {
		return false
	}
	for {
		n := p.tryCount + 1
		if maxTryCount > 0 {
			p.Log(logger.LEVEL_INFO, "尝试第", n, "次连接, maxCount:", maxTryCount)
		} else {
			p.Log(logger.LEVEL_INFO, "尝试第", n, "次连接")
		}
		if e := p.conn.ReOpen(); e != nil {
			p.tryCount++
			p.Log(logger.LEVEL_DEBU, "第", n, "次连接失败:", e)
			// maxTryCount == 0 means keep retrying forever.
			if maxTryCount > 0 && p.tryCount >= maxTryCount {
				// BUG FIX: this message previously reported
				// p.options.GetAllowMaxLoss() (the heartbeat-loss limit);
				// the retry limit maxTryCount is the value that applies here.
				p.Log(logger.LEVEL_ERRO, "尝试连接", maxTryCount, "次失败,请检查服务端是否正常运行")
				// Reconnection failed permanently: shut the client down.
				close(p.closed)
				return false
			}
		} else {
			p.tryCount = 0
			return true
		}
		time.Sleep(trySpeed)
	}
}
|
package cmds
import (
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"github.com/ayufan/docker-composer/compose"
)
// composeAppCommands maps each docker-compose sub-command that is forwarded
// verbatim to its one-line help text; init() below registers one CLI command
// per entry.
var composeAppCommands map[string]string = map[string]string{
"build": "Build or rebuild services",
"config": "Validate and view the compose file",
"create": "Create services",
"down": "Stop and remove containers, networks, images, and volumes",
"events": "Receive real time events from containers",
"exec": "Execute a command in a running container",
"kill": "Kill containers",
"logs": "View output from containers",
"pause": "Pause services",
"port": "Print the public port for a port binding",
"ps": "List containers",
"pull": "Pulls service images",
"restart": "Restart services",
"rm": "Remove stopped containers",
"run": "Run a one-off command",
"scale": "Set number of containers for a service",
"start": "Start services",
"stop": "Stop services",
"unpause": "Unpause services",
"up": "Create and start containers",
}
// runComposeCommand forwards the invoked CLI command to docker-compose for an
// existing application. The first positional argument names the app; any
// remaining arguments are passed through unchanged. Failures terminate the
// process via logrus.Fatalln.
func runComposeCommand(c *cli.Context) error {
	if c.NArg() < 1 {
		logrus.Fatalln("Missing application name")
	}
	args := c.Args()
	app, err := compose.ExistingApplication(args[0])
	if err != nil {
		logrus.Fatalln("App:", err)
	}
	if err = app.Compose(c.Command.Name, args[1:]...); err != nil {
		logrus.Fatalln("Compose:", err)
	}
	return nil
}
// init registers one pass-through CLI command per docker-compose sub-command.
// SkipFlagParsing is set so flags after the app name reach compose untouched.
// Note: map iteration order is random, so registration order varies per run;
// presumably registerCommand sorts or is order-insensitive.
func init() {
for commandName, commandHelp := range composeAppCommands {
command := cli.Command{
Name: commandName,
Action: runComposeCommand,
SkipFlagParsing: true,
Usage: commandHelp,
ArgsUsage: "APP",
Category: "compose",
}
registerCommand(command)
}
}
|
package general_api
import (
"log"
"time"
"net/url"
"strings"
"github.com/tmaiaroto/aegis/lambda"
)
func logger(inner lambda.RouteHandler, name string) lambda.RouteHandler {
return lambda.RouteHandler(func(ctx *lambda.Context, evt *lambda.Event, res *lambda.ProxyResponse, params url.Values) {
start := time.Now()
inner(ctx, evt, res, params)
index := strings.IndexRune(evt.Path, '/')
requestURI := evt.Path[index:]
log.Printf(
"%s\t%s\t%s\t%s",
evt.HTTPMethod,
requestURI,
name,
time.Since(start),
)
})
} |
package main
import "fmt"
// main demonstrates slice len/cap semantics over a shared backing array.
func main() {
// x is a fixed-size [5]int array.
x := [...]int{1, 2, 3, 4, 5}
// y views x[0:2]: len 2, cap 5 (capacity runs to the end of x).
y := x[0:2]
// z views x[1:4]: len 3, cap 4 (starts at index 1, so 4 slots remain).
z := x[1:4]
// Prints: 2 5 3 4
fmt.Println(len(y), cap(y), len(z), cap(z))
}
|
package seev
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document03800103 is the XML document wrapper for the ISO 20022
// seev.038.001.03 CorporateActionNarrative message.
type Document03800103 struct {
XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:seev.038.001.03 Document"`
Message *CorporateActionNarrativeV03 `xml:"CorpActnNrrtv"`
}
// AddMessage allocates the wrapped message, stores it on the document and
// returns it for further population.
func (d *Document03800103) AddMessage() *CorporateActionNarrativeV03 {
d.Message = new(CorporateActionNarrativeV03)
return d.Message
}
// Scope
// The CorporateActionNarrative message is sent between an account servicer and an account owner or its designated agent to cater for tax reclaims, restrictions, documentation requirements. This message is bi-directional.
// Usage
// The message may also be used to:
// - re-send a message previously sent (the sub-function of the message is Duplicate),
// - provide a third party with a copy of a message for information (the sub-function of the message is Copy),
// - re-send to a third party a copy of a message for information (the sub-function of the message is Copy Duplicate),
// using the relevant elements in the business application header (BAH).
// ISO 15022 - 20022 COEXISTENCE
// This ISO 20022 message is reversed engineered from ISO 15022. Both standards will coexist for a certain number of years. Until this coexistence period ends, the usage of certain data types is restricted to ensure interoperability between ISO 15022 and 20022 users. Compliance to these rules is mandatory in a coexistence environment. The coexistence restrictions are described in a Textual Rule linked to the Message Items they concern. These coexistence textual rules are clearly identified as follows: “CoexistenceXxxxRule”.
// CorporateActionNarrativeV03 is the payload of the seev.038.001.03 message;
// see the Scope/Usage comment above for business context.
type CorporateActionNarrativeV03 struct {
// General information about the safekeeping account and the account owner.
AccountDetails *iso20022.AccountIdentification14Choice `xml:"AcctDtls,omitempty"`
// Provides information about the securitised right for entitlement.
UnderlyingSecurity *iso20022.SecurityIdentification14 `xml:"UndrlygScty,omitempty"`
// General information about the corporate action event. Mandatory element.
CorporateActionGeneralInformation *iso20022.CorporateActionGeneralInformation40 `xml:"CorpActnGnlInf"`
// Provides additional information. Mandatory element.
AdditionalInformation *iso20022.UpdatedAdditionalInformation2 `xml:"AddtlInf"`
// Additional information that can not be captured in the structured fields and/or any other specific block.
SupplementaryData []*iso20022.SupplementaryData1 `xml:"SplmtryData,omitempty"`
}
// AddAccountDetails allocates and attaches the account details element,
// returning it for population.
func (c *CorporateActionNarrativeV03) AddAccountDetails() *iso20022.AccountIdentification14Choice {
	details := new(iso20022.AccountIdentification14Choice)
	c.AccountDetails = details
	return details
}

// AddUnderlyingSecurity allocates and attaches the underlying security
// element, returning it for population.
func (c *CorporateActionNarrativeV03) AddUnderlyingSecurity() *iso20022.SecurityIdentification14 {
	security := new(iso20022.SecurityIdentification14)
	c.UnderlyingSecurity = security
	return security
}

// AddCorporateActionGeneralInformation allocates and attaches the general
// information element, returning it for population.
func (c *CorporateActionNarrativeV03) AddCorporateActionGeneralInformation() *iso20022.CorporateActionGeneralInformation40 {
	info := new(iso20022.CorporateActionGeneralInformation40)
	c.CorporateActionGeneralInformation = info
	return info
}

// AddAdditionalInformation allocates and attaches the additional information
// element, returning it for population.
func (c *CorporateActionNarrativeV03) AddAdditionalInformation() *iso20022.UpdatedAdditionalInformation2 {
	info := new(iso20022.UpdatedAdditionalInformation2)
	c.AdditionalInformation = info
	return info
}

// AddSupplementaryData appends a new supplementary data entry and returns it
// for population.
func (c *CorporateActionNarrativeV03) AddSupplementaryData() *iso20022.SupplementaryData1 {
	entry := new(iso20022.SupplementaryData1)
	c.SupplementaryData = append(c.SupplementaryData, entry)
	return entry
}
|
package main
import "strings"
// separatorReplacer maps common separator characters to underscores. It is
// built once at package level instead of on every call, and the duplicate
// ","-pair present in the original list has been dropped (strings.NewReplacer
// applies the first matching pair, so the duplicate was dead weight).
var separatorReplacer = strings.NewReplacer(
	" ", "_",
	",", "_",
	"\t", "_",
	"/", "_",
	"\\", "_",
	".", "_",
	"-", "_",
	":", "_",
	"=", "_",
)

// replaceWithUnderscores returns text with every space, comma, tab, slash,
// backslash, dot, dash, colon and equals sign replaced by an underscore.
func replaceWithUnderscores(text string) string {
	return separatorReplacer.Replace(text)
}
|
package route
import (
"net/http"
"path/filepath"
"strings"
)
// filesSystem wraps an http.FileSystem so that directories without an
// index.html are not listable (see Open below).
type filesSystem struct {
fs http.FileSystem
}
// Open opens path from the wrapped filesystem. A directory is only served
// when it contains an index.html, which prevents directory listings.
func (fs filesSystem) Open(path string) (http.File, error) {
	f, err := fs.fs.Open(path)
	if err != nil {
		return nil, err
	}
	s, err := f.Stat()
	if err != nil {
		// BUG FIX: the original returned here without closing f, leaking
		// the handle. The Close error is deliberately secondary to err.
		f.Close()
		return nil, err
	}
	if s.IsDir() {
		// Probe for index.html; refuse to serve the bare directory otherwise.
		index := filepath.Join(path, "index.html")
		idx, err := fs.fs.Open(index)
		if err != nil {
			closeErr := f.Close()
			if closeErr != nil {
				return nil, closeErr
			}
			return nil, err
		}
		// BUG FIX: the probe handle was only needed for the existence check;
		// the original never closed it, leaking one handle per directory hit.
		idx.Close()
	}
	return f, nil
}
// FilesServer gives you a (sub)router that serves only files
// from a directory (it hides your directory structure) - accessing
// a (sub)directory will lead to index.html or StatusNotFound
//
// A full path to served dir is required.
//
// If mounted as a subrouter under certain path XY,
// specify stripPrefix equal to the mounting path XY.
//
// Here is an example usage:
// router := route.New()
// ...
// workDir, _ := os.Getwd()
// dir := filepath.Join(workDir, "relative_path_to_dir")
// router.Mount("/static", route.FilesServer("/static", dir))
func FilesServer(stripPrefix string, dir string) Router {
// URL parameters would interfere with file path resolution.
if strings.ContainsAny(stripPrefix, "{}*") {
panic("FileServer does not permit any URL parameters.")
}
router := New()
// filesSystem blocks directory listings (see Open above).
fileServer := http.FileServer(filesSystem{http.Dir(dir)})
router.Get("/*", func(w http.ResponseWriter, r *http.Request) {
fs := http.StripPrefix(stripPrefix, fileServer)
fs.ServeHTTP(w, r)
})
return router
}
|
package config
import (
"crud-product/constant"
"crud-product/model"
"encoding/json"
"io/ioutil"
)
func GetConfig() (*model.Config, error) {
cfg := &model.Config{}
jsonFile, err := ioutil.ReadFile(constant.ConfigProjectFilepath)
if err != nil {
return nil, err
}
err = json.Unmarshal(jsonFile, &cfg)
return cfg, nil
} |
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-10-14 09:00
# @File : lt_81_Search_in_Rotated_Sorted_Array_II.go
# @Description :
# @Attention :
*/
package array
/*
判断值是否存在
有序数组在某个下标进行了反转,然后判断是否存在
有序中查找,肯定是二分最快
*/
// search reports whether target occurs in nums, where nums is a sorted array
// that may have been rotated at an unknown pivot and may contain duplicates
// (LeetCode 81).
//
// Strategy: scan left-to-right through the first (still sorted) segment,
// testing each element. The first position where nums[i] < nums[i-1] is the
// rotation pivot; the suffix nums[i:] is itself sorted, so finish with a
// binary search there. Duplicates make O(n) unavoidable in the worst case.
func search(nums []int, target int) bool {
	if len(nums) == 0 {
		return false
	}
	// binarySearch reports whether want occurs in the sorted slice s.
	// BUG FIX: the original closure declared a parameter `targe` and then
	// ignored it, closing over the outer `target` instead — it only worked
	// by accident. The parameters are now used properly.
	binarySearch := func(s []int, want int) bool {
		lo, hi := 0, len(s)-1
		for lo <= hi {
			mid := lo + (hi-lo)>>1
			switch {
			case s[mid] == want:
				return true
			case s[mid] > want:
				hi = mid - 1
			default:
				lo = mid + 1
			}
		}
		return false
	}
	for i := 0; i < len(nums); i++ {
		if nums[i] == target {
			return true
		}
		// A descent marks the rotation pivot; the remaining tail is sorted.
		if i > 0 && nums[i] < nums[i-1] {
			return binarySearch(nums[i:], target)
		}
	}
	return false
}
|
package model
import (
"math/rand"
)
// The four suits, ordered Spade, Club, Diamond, Heart (iota 0..3).
const (
SuitSpade Suit = iota
SuitClub
SuitDiamond
SuitHeart
)
const (
// JokerNumber is the sentinel number used for joker cards.
JokerNumber Number = 14
// InvalidCardNumber marks a card that does not exist (see InvalidCard).
InvalidCardNumber Number = -1
)
var (
// Suits lists every suit, e.g. for deck construction.
Suits = []Suit{SuitSpade, SuitClub, SuitDiamond, SuitHeart}
// Numbers lists every card number, including the joker.
Numbers = []Number{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, JokerNumber}
// InvalidCard is returned where no real card is available (see Cards.Top).
InvalidCard = Card{Number: InvalidCardNumber}
)
// Suit identifies one of the four card suits.
type Suit int
// Number is a card's face value (1-13, or JokerNumber).
type Number int
// Card is a single playing card.
type Card struct {
Number Number
Suit Suit
}
// IsJoker reports whether the card is a joker.
func (c *Card) IsJoker() bool {
return c.Number == JokerNumber
}
// IsInvalid reports whether the card is the invalid sentinel.
func (c *Card) IsInvalid() bool {
return c.Number == InvalidCardNumber
}
// SameSuit reports whether both cards share a suit.
func (c *Card) SameSuit(o Card) bool {
return c.Suit == o.Suit
}
// SameNumber reports whether both cards share a face value.
func (c *Card) SameNumber(o Card) bool {
return c.Number == o.Number
}
// Cards is an ordered pile of cards; the last element is the top of the pile.
type Cards []Card

// Shuffle randomises the pile order in place using r.
func (cs Cards) Shuffle(r *rand.Rand) {
	r.Shuffle(len(cs), func(a, b int) {
		cs[a], cs[b] = cs[b], cs[a]
	})
}

// Top returns the top card without removing it. The second result is false
// (and the card is InvalidCard) when the pile is empty.
func (cs Cards) Top() (Card, bool) {
	n := len(cs)
	if n == 0 {
		return InvalidCard, false
	}
	return cs[n-1], true
}

// Contain reports whether target occurs anywhere in the pile.
func (cs *Cards) Contain(target Card) bool {
	for i := range *cs {
		if (*cs)[i] == target {
			return true
		}
	}
	return false
}

// Empty reports whether the pile has no cards.
func (cs Cards) Empty() bool {
	return len(cs) == 0
}

// Add pushes target onto the top of the pile.
func (cs *Cards) Add(target Card) {
	*cs = append(*cs, target)
}

// Remove deletes the first occurrence of target, preserving the order of the
// remaining cards, and reports whether a card was removed.
func (cs *Cards) Remove(target Card) bool {
	for i := range *cs {
		if (*cs)[i] == target {
			*cs = append((*cs)[:i], (*cs)[i+1:]...)
			return true
		}
	}
	return false
}

// RemoveTop discards the top card; it is a no-op on an empty pile.
func (cs *Cards) RemoveTop() {
	if n := len(*cs); n > 0 {
		*cs = (*cs)[:n-1]
	}
}

// Clear empties the pile.
func (cs *Cards) Clear() {
	*cs = Cards{}
}
|
package main
import (
"strings"
)
// Define the top level swagger defintion structs here.
// These definitions are good enough for parsing goa generated swaggers but definately
// don't reflect the complete swagger spec as of yet.
// Doc holds the swagger data structure
// Doc holds the swagger data structure
type Doc struct {
SwaggerVersion string `json:"swagger"`
Info Info `json:"info"`
Host string `json:"host"`
BasePath string `json:"basePath"`
// Paths maps URL path -> HTTP verb -> endpoint.
Paths map[string]EndpointMap `json:"paths"`
// Definitions maps a definition name (the tail of "#/definitions/<name>")
// to its schema; see Doc.Ref.
Definitions map[string]*Definition `json:"definitions"`
SecurityDefinitions map[string]*SecurityDefinition `json:"securityDefinitions"`
}
// Info holds additional info from the swagger document
type Info struct {
Title string `json:"title"`
Description string `json:"description"`
Version string `json:"version"`
}
// EndpointMap is a map of http verbs ("get") -> Endpoint
type EndpointMap map[string]*Endpoint
// Endpoint defines an API endpoint
type Endpoint struct {
Tags []string `json:"tags"`
Summary string `json:"summary"`
Description string `json:"description"`
// OperationID is conventionally "service#method" for goa-generated docs;
// see Service() and Method().
OperationID string `json:"operationId"`
Parameters []*Parameter `json:"parameters"`
Responses map[int]*Response `json:"responses"`
Schemes []string `json:"schemes"`
Security SecurityRefs `json:"security"`
}
// Definition defines a result type
type Definition struct {
Title string `json:"title"`
Type string `json:"type"`
Description string `json:"description"`
Example interface{} `json:"example"`
Properties map[string]*Property `json:"properties"`
Required []string `json:"required"`
Items Ref `json:"items"`
}
// Property is an attribute of a result type
type Property struct {
Fault bool `json:"fault"`
Items Ref `json:"items"`
Description string `json:"description"`
Example interface{} `json:"example"`
Type string `json:"type"`
Enum []interface{} `json:"enum"`
Schema Ref `json:"schema"`
Pattern string `json:"pattern"`
Format string `json:"format"`
Default interface{} `json:"default"`
Minimum interface{} `json:"minimum"`
Maximum interface{} `json:"maximum"`
// Ref, when non-empty, points at another definition ("#/definitions/...").
Ref string `json:"$ref"`
}
// Parameter is an attribute for a request type
type Parameter struct {
Description string `json:"description"`
Name string `json:"name"`
In string `json:"in"`
Required bool `json:"required"`
Type string `json:"type"`
Enum []interface{} `json:"enum"`
Schema Ref `json:"schema"`
Pattern string `json:"pattern"`
Format string `json:"format"`
}
// Ref references a definition; it is the raw JSON object of a schema
// reference ("$ref", "type", "required", ...).
type Ref map[string]interface{}
// Response defines attributes about an HTTP response
type Response struct {
Description string `json:"description"`
Schema Ref `json:"schema"`
Headers map[string]Parameter `json:"headers"`
}
// SecurityDefinition defines the authentication scheme
type SecurityDefinition struct {
	Type        string `json:"type"`
	Description string `json:"description"`
	// BUG FIX: the tag was `json:"string"`, an obvious typo that prevented
	// the scheme's "name" field from ever being populated.
	Name string `json:"name"`
	In   string `json:"in"`
}
// SecurityRefs a series of pointers to security definitions
type SecurityRefs []SecurityRef
// SecurityRef is a single pointer to a security definition; keys name the
// security scheme as declared in SecurityDefinitions.
type SecurityRef map[string]interface{}
// Ref gets the Definition for a Schema reference, if it exists. It returns
// nil when r carries no "$ref" entry, when the entry is not a string, or
// when the referenced definition is unknown.
func (d *Doc) Ref(r Ref) *Definition {
	refIF, ok := r["$ref"]
	if !ok {
		return nil
	}
	// BUG FIX: guard the assertion — the original `refIF.(string)` panicked
	// on a malformed document where $ref is not a string.
	refStr, ok := refIF.(string)
	if !ok {
		return nil
	}
	return d.Definitions[strings.TrimPrefix(refStr, "#/definitions/")]
}
// Type gets the type of a Schema reference: "object" for $ref entries, the
// declared "type" field otherwise, or "" when neither is present.
func (r Ref) Type() string {
	if _, isRef := r["$ref"]; isRef {
		return "object"
	}
	typIF, ok := r["type"]
	if !ok {
		return ""
	}
	return typIF.(string)
}
// Required returns the "required" field names of a Schema reference, or an
// empty slice when absent.
//
// BUG FIX: after json.Unmarshal into a map[string]interface{}, JSON arrays
// arrive as []interface{}, so the original unchecked `.([]string)` assertion
// panicked on real documents. Both shapes are handled now.
func (r Ref) Required() []string {
	refIF, ok := r["required"]
	if !ok {
		return []string{}
	}
	switch v := refIF.(type) {
	case []string:
		return v
	case []interface{}:
		out := make([]string, 0, len(v))
		for _, e := range v {
			if s, ok := e.(string); ok {
				out = append(out, s)
			}
		}
		return out
	}
	return []string{}
}
// ID returns the bare definition name of the reference (the "$ref" value with
// its "#/definitions/" prefix removed), or "" when there is no $ref entry.
func (r Ref) ID() string {
	refIF, ok := r["$ref"]
	if !ok {
		return ""
	}
	return strings.TrimPrefix(refIF.(string), "#/definitions/")
}
// Service returns the goa.v2 service name: the first tag when tags are
// present, otherwise the part of the operation ID before '#', or "" when
// neither source is available.
func (ep *Endpoint) Service() string {
	if len(ep.Tags) > 0 {
		return ep.Tags[0]
	}
	if len(ep.OperationID) == 0 {
		return ""
	}
	return strings.Split(ep.OperationID, "#")[0]
}

// Method returns the goa.v2 method name: the segment of the operation ID
// immediately after the first '#', or "" when the ID has no '#'.
func (ep *Endpoint) Method() string {
	if !strings.Contains(ep.OperationID, "#") {
		return ""
	}
	return strings.Split(ep.OperationID, "#")[1]
}
// IsRef is whether this property is a reference to another object.
func (p *Property) IsRef() bool {
	return p.Ref != ""
}

// GetRef wraps the property's $ref string as a Ref map so it can be resolved
// with Doc.Ref.
func (p *Property) GetRef() Ref {
	return Ref{"$ref": p.Ref}
}
|
package main
import (
"fmt"
"os"
"time"
"github.com/iovisor/gobpf/elf"
)
// main loads the eBPF object file ./program.o, attaches the
// raw_syscalls/sys_enter tracepoint, keeps it active for ten seconds,
// then cleans up. Errors are reported on stderr and exit with status 1.
func main() {
module := elf.NewModule("./program.o")
err := module.Load(nil)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to load program from elf: %v\n", err)
os.Exit(1)
}
// Ensure the module is detached/closed even on normal return.
defer func() {
if err := module.Close(); err != nil {
fmt.Fprintf(os.Stderr, "Failed to close module: %v\n", err)
}
}()
err = module.EnableTracepoint("tracepoint/raw_syscalls/sys_enter")
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to enable tracepoint: %v\n", err)
os.Exit(1)
}
// Keep the tracepoint attached for a fixed observation window.
time.Sleep(10 * time.Second)
}
|
package extractors
// VersionExtractor reports an application's name and version.
// NOTE(review): Go convention would drop the Get prefix (Version/AppName),
// but renaming would break existing implementers.
type VersionExtractor interface {
GetVersion() string
GetAppName() string
}
|
package _744_Find_Smallest_Letter_Greater_Than_Target
import (
"fmt"
"testing"
)
// TestNextGreatestLetter checks the smallest letter strictly greater than
// target, wrapping to the first letter when target is past the last one.
// BUG FIX: the original only printed the results and asserted nothing, so it
// could never fail; real assertions are added (the prints are kept as
// diagnostics for manual runs).
func TestNextGreatestLetter(t *testing.T) {
	letters := []byte{'c', 'f', 'j'}
	cases := []struct {
		target byte
		want   byte
	}{
		{'z', 'c'}, // past every letter: wraps around to the first
		{'c', 'f'}, // result must be strictly greater than target
	}
	for _, tc := range cases {
		got := nextGreatestLetter(letters, tc.target)
		fmt.Println(string(got))
		if got != tc.want {
			t.Errorf("nextGreatestLetter(letters, %q) = %q, want %q", tc.target, got, tc.want)
		}
	}
}
|
package _14_Longest_Common_Prefix
import (
"testing"
)
// TestLongestCommonPrefix checks the shared prefix of a simple string pair.
func TestLongestCommonPrefix(t *testing.T) {
	got := longestCommonPrefix([]string{"a", "ab"})
	if got != "a" {
		t.Error("not a with test1.")
	}
}
|
package mars
// runMOV executes a Core War MOV: copy from the A-operand cell to the
// B-operand cell, then advance the current thread.
func runMOV(core *Core, process *process, addrA, addrB int, modifier instructionModifier) {
switch modifier {
case modifierAB:
// .AB: A-field of source -> B-field of destination.
core.cells[addrB].bField = core.cells[addrA].aField
case modifierB:
// .B: B-field of source -> B-field of destination.
core.cells[addrB].bField = core.cells[addrA].bField
case modifierI:
// .I: copy the entire instruction cell.
core.cells[addrB] = core.cells[addrA]
default:
panic("Unsupported mode for instruction")
}
process.moveNext(core)
}
// runOP executes an arithmetic instruction (ADD/SUB) whose operation is
// supplied as op(core, destField, srcField); the result is stored back into
// the destination field(s), then the current thread advances.
func runOP(core *Core, process *process, addrA, addrB int, modifier instructionModifier, op func(*Core, int, int) int) {
switch modifier {
case modifierAB:
// .AB: dest.B = op(dest.B, src.A)
core.cells[addrB].bField = op(core, core.cells[addrB].bField, core.cells[addrA].aField)
case modifierB:
// .B: dest.B = op(dest.B, src.B)
core.cells[addrB].bField = op(core, core.cells[addrB].bField, core.cells[addrA].bField)
case modifierF:
// .F: apply to both fields pairwise (A with A, B with B).
core.cells[addrB].aField = op(core, core.cells[addrB].aField, core.cells[addrA].aField)
core.cells[addrB].bField = op(core, core.cells[addrB].bField, core.cells[addrA].bField)
default:
panic("Unsupported mode for instruction")
}
process.moveNext(core)
}
// runJMP executes JMP: the current thread's program counter becomes the
// A-operand address unconditionally.
func runJMP(core *Core, process *process, addrA, addrB int, modifier instructionModifier) {
process.threads[process.nextThread] = addrA
process.moveNextThread()
}
// runSPL executes SPL: the current thread continues at the following cell
// while a new thread is spawned at the A-operand address.
func runSPL(core *Core, process *process, address, addrA, addrB int, modifier instructionModifier) {
process.threads[process.nextThread] = (address + 1) % core.size
process.threads = append(process.threads, addrA)
process.moveNextThread()
}
// runJMZ executes JMZ: jump to the A-operand address when the B-operand
// cell's B-field is zero, otherwise fall through to the next cell.
func runJMZ(core *Core, process *process, addrA, addrB int, modifier instructionModifier) {
if core.cells[addrB].bField == 0 {
process.threads[process.nextThread] = addrA
process.moveNextThread()
} else {
process.moveNext(core)
}
}
// runDJN executes DJN: decrement the B-operand cell's B-field (clamped to
// core range), then jump to the A-operand address when the result is nonzero.
func runDJN(core *Core, process *process, addrA, addrB int, modifier instructionModifier) {
value := core.clampValue(core.cells[addrB].bField - 1)
core.cells[addrB].bField = value
if value != 0 {
process.threads[process.nextThread] = addrA
process.moveNextThread()
} else {
process.moveNext(core)
}
}
// runSEQ executes SEQ/CMP: compare the operands per the modifier and, when
// equal, skip the next instruction (PC = address + 2); otherwise continue.
func runSEQ(core *Core, process *process, address, addrA, addrB int, modifier instructionModifier) {
var equal bool
switch modifier {
case modifierAB:
equal = core.cells[addrB].bField == core.cells[addrA].aField
case modifierB:
equal = core.cells[addrB].bField == core.cells[addrA].bField
case modifierI:
// .I: whole-cell equality.
equal = core.cells[addrB] == core.cells[addrA]
default:
panic("Unsupported mode for instruction")
}
if equal {
// Skip one instruction, wrapping around the core.
process.threads[process.nextThread] = (address + 2) % core.size
process.moveNextThread()
} else {
process.moveNext(core)
}
}
// moveNext advances the current thread's program counter by one cell
// (wrapping around the core) and hands execution to the next thread.
func (process *process) moveNext(core *Core) {
process.threads[process.nextThread] = (process.threads[process.nextThread] + 1) % core.size
process.nextThread = (process.nextThread + 1) % len(process.threads)
}
// moveNextThread hands execution to the next thread without touching the
// current thread's program counter (used after jumps, which set it directly).
func (process *process) moveNextThread() {
process.nextThread = (process.nextThread + 1) % len(process.threads)
}
// step executes one instruction for the process's current thread: it decodes
// the cell at the thread's program counter, resolves both operand addresses,
// and dispatches on the opcode. Any unrecognized opcode (e.g. DAT) kills the
// current thread.
func (process *process) step(core *Core) {
address := process.threads[process.nextThread]
instruction := core.cells[address]
// Resolve operand addressing modes relative to the instruction's address.
addrA := core.address(address, instruction.aAddr, instruction.aField)
addrB := core.address(address, instruction.bAddr, instruction.bField)
switch instruction.opcode {
case insnMOV:
runMOV(core, process, addrA, addrB, instruction.modifier)
case insnADD:
runOP(core, process, addrA, addrB, instruction.modifier, func(core *Core, a, b int) int { return core.clampValue(a + b) })
case insnSUB:
runOP(core, process, addrA, addrB, instruction.modifier, func(core *Core, a, b int) int { return core.clampValue(a - b) })
case insnJMP:
runJMP(core, process, addrA, addrB, instruction.modifier)
case insnJMZ:
runJMZ(core, process, addrA, addrB, instruction.modifier)
case insnDJN:
runDJN(core, process, addrA, addrB, instruction.modifier)
case insnCMP:
runSEQ(core, process, address, addrA, addrB, instruction.modifier)
case insnSPL:
runSPL(core, process, address, addrA, addrB, instruction.modifier)
default:
// Executing anything else (classically DAT) terminates the thread.
process.removeThread(process.nextThread)
}
}
// removeThread deletes the thread at the given index, shifting later threads
// down, and re-aims nextThread so scheduling continues with the thread that
// followed the removed one.
// NOTE(review): when threadCount drops to 0 nextThread is left untouched;
// presumably the caller treats a process with no threads as dead — verify.
func (process *process) removeThread(thread int) {
threadCount := len(process.threads) - 1
copy(process.threads[thread:], process.threads[thread+1:])
process.threads = process.threads[0:threadCount]
if process.nextThread >= thread && threadCount > 0 {
// Step back one slot (mod the new count) to compensate for the shift.
process.nextThread = (process.nextThread + threadCount - 1) % threadCount
}
}
|
package handler
import (
"bytes"
"github.com/tealeg/xlsx"
"io/ioutil"
"net/http"
"net/url"
"time"
"tpay_backend/export"
"tpay_backend/merchantapi/internal/common"
_func "tpay_backend/merchantapi/internal/handler/func"
"tpay_backend/merchantapi/internal/logic/export"
"tpay_backend/merchantapi/internal/svc"
"tpay_backend/merchantapi/internal/types"
"github.com/tal-tech/go-zero/rest/httpx"
)
// PayOrderExportHandler returns an HTTP handler that exports the logged-in
// merchant's pay orders (filtered by the request's create-time range) as an
// xlsx attachment.
func PayOrderExportHandler(ctx *svc.ServiceContext) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var req types.PayOrderExportRequest
if err := httpx.Parse(r, &req); err != nil {
httpx.Error(w, common.NewCodeErrorWithMsg(common.VerifyParamFailed, err.Error()))
return
}
// An export must be bounded by a create-time window.
if req.StartCreateTime == 0 && req.EndCreateTime == 0 {
httpx.Error(w, common.NewCodeError(common.CheckExportTime))
return
}
userId, errs := _func.GetLoginedUserIdRequestHeader(r)
if errs != nil {
httpx.Error(w, errs)
return
}
l := logic.NewPayOrderExportLogic(r.Context(), ctx)
resp, err := l.PayOrderExport(userId, req)
if err != nil {
httpx.Error(w, err)
} else {
// Nothing to export.
if len(resp.List) <= 0 || resp.Total == 0 {
httpx.Error(w, common.NewCodeError(common.NotData))
return
}
// Map the logic-layer rows into the export row type.
var orderList []export.PayOrder
for _, v := range resp.List {
orderList = append(orderList, export.PayOrder{
Id: v.Id,
MerchantName: v.MerchantName,
OrderNo: v.OrderNo,
MerchantOrderNo: v.MerchantOrderNo,
ReqAmount: v.ReqAmount,
PaymentAmount: v.PaymentAmount,
Rate: v.Rate,
SingleFee: v.SingleFee,
Fee: v.Fee,
IncreaseAmount: v.IncreaseAmount,
ChannelName: v.ChannelName,
OrderStatus: v.OrderStatus,
CreateTime: v.CreateTime,
UpdateTime: v.UpdateTime,
})
}
file := xlsx.NewFile()
file, err = export.CreatePayOrderFile(file, &export.CreatePayOrderFileRequest{
Sheet: "Sheet1",
Title: "代收订单导出表",
Timezone: ctx.Config.Timezone,
IsDivideHundred: resp.IsDivideHundred,
Total: &export.PayOrderTotal{
Total: resp.Total,
TotalReqAmount: resp.TotalReqAmount,
TotalPayAmount: resp.TotalPayAmount,
TotalFee: resp.TotalFee,
TotalIncreaseAmount: resp.TotalIncreaseAmount,
},
Content: orderList,
})
if err != nil {
l.Errorf("创建文件失败, err=%v", err)
httpx.Error(w, common.NewCodeError(common.ExportFail))
return
}
var buffer bytes.Buffer
if err := file.Write(&buffer); err != nil {
l.Errorf("文件转换失败, err=%v", err)
httpx.Error(w, common.NewCodeError(common.ExportFail))
return
}
// NOTE(review): this reader/ReadAll round-trip is redundant — buffer.Bytes()
// already holds the data — and `r` shadows the *http.Request above; consider
// `fSrc := buffer.Bytes()` in a follow-up.
r := bytes.NewReader(buffer.Bytes())
fSrc, err := ioutil.ReadAll(r)
if err != nil {
l.Errorf("文件读取失败, err=%v", err)
httpx.Error(w, common.NewCodeError(common.ExportFail))
return
}
fileName := "代收订单导出表-" + time.Now().Format("2006/01/02") + ".xlsx"
// Escape the filename to avoid mojibake for non-ASCII names.
fileName = url.QueryEscape(fileName)
//w.Header().Add("Content-Type", "application/octet-stream")
//w.Header().Add("Content-Type", "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet")
w.Header().Add("Content-Disposition", "attachment; filename=\""+fileName+"\"")
if _, err := w.Write(fSrc); err != nil {
l.Errorf("文件写入response失败, err=%v", err)
httpx.Error(w, common.NewCodeError(common.ExportFail))
return
}
// NOTE(review): httpx.Ok after the body has already been written is likely
// a no-op (headers are sent on first Write) — confirm intent.
httpx.Ok(w)
}
}
}
|
package frontend
import (
"encoding/json"
"fmt"
"io"
"net/http"
"github.com/getaceres/payment-demo/persistence"
)
// ReadBody JSON-decodes the stream into result, wrapping any decode failure
// in a uniform "Invalid payload" error.
func ReadBody(reader io.Reader, result interface{}) error {
	if err := json.NewDecoder(reader).Decode(result); err != nil {
		return fmt.Errorf("Invalid payload: %s", err.Error())
	}
	return nil
}
// Respond writes payload with the given status code and content type.
// The Write error is intentionally ignored (nothing useful can be done
// once headers are sent).
func Respond(w http.ResponseWriter, code int, payload []byte, contentType string) {
	w.Header().Set("Content-Type", contentType)
	w.WriteHeader(code)
	w.Write(payload)
}

// RespondWithText writes a plain-text body with the given status code.
func RespondWithText(w http.ResponseWriter, code int, text string) {
	Respond(w, code, []byte(text), "application/text")
}

// RespondWithJSON marshals payload and writes it as a JSON body with the
// given status code; a marshal failure is returned without writing anything.
func RespondWithJSON(w http.ResponseWriter, code int, payload interface{}) error {
	body, err := json.Marshal(payload)
	if err != nil {
		return err
	}
	Respond(w, code, body, "application/json")
	return nil
}

// RespondWithError writes err's message as a plain-text body.
func RespondWithError(w http.ResponseWriter, code int, err error) {
	RespondWithText(w, code, err.Error())
}
// GetPersistenceErrorCode maps persistence-layer error types to HTTP status
// codes; unrecognized errors default to 500 Internal Server Error.
func GetPersistenceErrorCode(err error) int {
	switch err.(type) {
	case persistence.NotFoundError:
		return http.StatusNotFound
	case persistence.AlreadyExistsError:
		return http.StatusConflict
	default:
		return http.StatusInternalServerError
	}
}
|
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"time"
"gopkg.in/yaml.v2"
"github.com/eiannone/keyboard"
"tezos-contests.izibi.com/backend/signing"
"tezos-contests.izibi.com/tc-node/api"
"tezos-contests.izibi.com/tc-node/block_store"
"tezos-contests.izibi.com/tc-node/client"
)
// Config is the node configuration loaded from config.yaml. Empty URL/path
// fields are filled with defaults derived from BaseUrl in Configure().
type Config struct {
BaseUrl string `yaml:"base_url"`
ApiBaseUrl string `yaml:"api_base"`
StoreBaseUrl string `yaml:"store_base"`
StoreCacheDir string `yaml:"store_dir"`
ApiKey string `yaml:"api_key"`
Task string `yaml:"task"`
KeypairFilename string `yaml:"signing"`
WatchGameUrl string `yaml:"watch_game_url"`
NewGameParams map[string]interface{} `yaml:"new_game_params"`
Bots []client.BotConfig `yaml:"bots"`
// Runtime state (not read from YAML):
LastRoundCommandsSent uint64
Latency time.Duration
TimeDelta time.Duration
}
// Package-level singletons wired together in main().
var config Config // loaded from config.yaml by Configure()
var cl client.Client // game client
var remote *api.Server // backend API connection
var store *block_store.Store // local block cache backed by the remote store
var notifier = &Notifier{} // console progress/status output
// main wires up the tc-node client: load config, load (or create) the team
// keypair, connect to the backend, then load/create/join a game depending on
// the sub-command ("", "new", "join") and enter the interactive loop.
func main() {
var err error
flag.Parse()
cmd := flag.Args()
/* Load the configuration file. */
notifier.Partial("Loading config.yaml")
err = Configure()
if err != nil { panic(err) }
/* Load the team's key pair */
notifier.Partial("Loading the team's keypair")
var teamKeyPair *signing.KeyPair
teamKeyPair, err = loadKeyPair(config.KeypairFilename)
if err != nil {
// No usable keypair on disk: generate one and tell the user to register it.
teamKeyPair, err = generateKeypair()
if err != nil {
DangerFmt.Printf("failed to generate keypair: %v\n", err)
os.Exit(0)
}
notifier.Final("")
fmt.Print("A new keypair has been saved in ")
SuccessFmt.Println(config.KeypairFilename)
fmt.Printf("Your team's public key: \n ")
ImportantFmt.Printf("%s\n\n", teamKeyPair.Public)
fmt.Printf("\nProvide this key in the team tab of the web interface, and\n")
fmt.Printf("share your team.json file with your teammates\n")
/* Exit because the new key must be associated with a team in the
contest's web interface before we can proceed (will not be able
to connect the event stream until the team key is recognized). */
os.Exit(0)
}
notifier.Final(fmt.Sprintf("Team key: %s", teamKeyPair.Public))
/* Connect to the API, set up the store, and initialize the game client. */
remote = api.New(config.ApiBaseUrl, config.ApiKey, teamKeyPair)
store = block_store.New(config.StoreBaseUrl, config.StoreCacheDir)
cl = client.New(notifier, config.Task, remote, store, teamKeyPair, config.Bots)
/* Check the local time. */
notifier.Partial("Checking the local time")
err = checkTime()
if err != nil {
notifier.Error(err)
os.Exit(0)
}
notifier.Partial("Connecting to the event stream")
var ech <-chan interface{}
ech, err = cl.Connect()
if err != nil {
notifier.Error(err)
DangerFmt.Printf("\nFailed to connect to the event stream.\n\n")
fmt.Printf("Did you link your public key (above) to your team?\n");
os.Exit(0)
}
if len(cmd) == 0 {
/* "tc-node" reloads the current game. */
err = cl.LoadGame()
if err != nil {
notifier.Error(err)
fmt.Printf("Use the new or join commands to recover.\n");
os.Exit(0)
}
notifier.Final("Game loaded")
} else {
switch cmd[0] {
case "new":
/* "tc-node new" creates a new game */
err = cl.NewGame(config.NewGameParams)
if err != nil {
notifier.Error(err)
os.Exit(0)
}
notifier.Final("Game created")
break
case "join":
/* "tc-node join GAME_KEY" joins the specified game */
if len(cmd) < 2 {
DangerFmt.Print("\nUsage: join GAME_KEY\n")
os.Exit(0)
}
err = cl.JoinGame(cmd[1])
if err != nil {
notifier.Error(err)
os.Exit(0)
}
notifier.Final("Game joined")
default:
DangerFmt.Print("\nwat.\n")
os.Exit(0)
}
}
// Only enter the interactive loop once a game is actually loaded.
if cl.Game() != nil {
fmt.Printf("Game key: ")
GameKeyFmt.Println(cl.Game().Key)
InteractiveLoop(ech)
}
os.Exit(0)
}
// Configure reads config.yaml from the working directory into the
// package-level config, then fills in defaults for every endpoint,
// path and filename the file leaves empty. The cache directory is
// normalized to an absolute path.
func Configure() error {
	raw, err := ioutil.ReadFile("config.yaml")
	if err != nil {
		return err
	}
	if err = yaml.Unmarshal(raw, &config); err != nil {
		return err
	}
	// Endpoints left unset are derived from the base URL.
	if config.ApiBaseUrl == "" {
		config.ApiBaseUrl = config.BaseUrl + "/backend"
	}
	if config.StoreBaseUrl == "" {
		config.StoreBaseUrl = config.BaseUrl + "/backend/Blocks"
	}
	if config.WatchGameUrl == "" {
		config.WatchGameUrl = config.BaseUrl + "/games"
	}
	// Local defaults.
	if config.StoreCacheDir == "" {
		config.StoreCacheDir = "store"
	}
	if config.KeypairFilename == "" {
		config.KeypairFilename = "team.json"
	}
	config.StoreCacheDir, err = filepath.Abs(config.StoreCacheDir)
	return err
}
// InteractiveLoop is the main event loop: it multiplexes server events,
// an optional auto-play ticker, and keyboard input, until the user
// quits (Esc / Ctrl-C) or the keyboard channel is exhausted.
func InteractiveLoop(ech <-chan interface{}) {
	kch := keyboardChannel()
	wch, ich := cl.Worker()
	defer keyboard.Close()
	// ticker/tch implement the optional timed end-of-round. Both stay
	// nil — so the tch case never fires — until toggled with 'a'.
	var ticker *time.Ticker
	var tch <-chan time.Time
	wch <- client.AlwaysSendCommands()
	for {
		select {
		/* TODO: We get a block event when a new block has been downloaded;
		   we should test whether the block is current, and keep waiting if not.
		   We should also bail out on an end-of-game events.
		*/
		case ev := <-ech:
			switch e := ev.(type) {
			case client.NewBlockEvent:
				wch <- client.SyncThenSendCommands()
			case error:
				notifier.Error(e)
			default:
				fmt.Printf("event %v\n", ev)
			}
			// fmt.Printf("TODO: client worker <- send commands (if needed)\n")
		case <-tch:
			fmt.Printf("--- automatic end of round ---\n")
			ich <- client.EndOfRound()
		case kp := <-kch:
			switch kp.key {
			case 0:
				// key == 0 means a plain character keypress; dispatch on it.
				switch kp.ch {
				case 0:
					// Zero value: keyboard channel closed — leave the loop.
					return
				case 'p', 'P':
					ich <- client.Ping()
				case 's':
					ich <- client.Sync()
				case 'S':
					ich <- client.SyncThenSendCommands()
				case 'a':
					// toggle automatic timed end-of-round
					if ticker == nil {
						ticker = time.NewTicker(15 * time.Second)
						tch = ticker.C
						fmt.Printf("Automatic play mode enabled\n")
						fmt.Printf("Rounds will automatically end every 15 seconds\n")
					} else {
						ticker.Stop()
						ticker = nil
						tch = nil
						fmt.Printf("Automatic play mode disabled\n")
					}
				default:
					// fmt.Printf("ch '%c'\n", kp.ch)
				}
			case keyboard.KeyEsc, keyboard.KeyCtrlC:
				return
			case keyboard.KeySpace:
				ich <- client.EndOfRound()
			case keyboard.KeyEnter:
				fmt.Println("Enter")
				ich <- client.AlwaysSendCommands()
			default:
				fmt.Printf("key %v\n", kp.key)
			}
		}
	}
}
// loadKeyPair opens filename and decodes the team's signing key pair
// from it. The file is closed before returning.
func loadKeyPair(filename string) (*signing.KeyPair, error) {
	// The redundant `var err error` was removed; gofmt spacing applied.
	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return signing.ReadKeyPair(f)
}
// generateKeypair creates a fresh signing key pair and writes it to
// config.KeypairFilename. O_EXCL guarantees an existing key file is
// never overwritten.
func generateKeypair() (*signing.KeyPair, error) {
	kp, err := signing.NewKeyPair()
	if err != nil {
		return nil, err
	}
	f, err := os.OpenFile(config.KeypairFilename, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0644)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	if err = kp.Write(f); err != nil {
		return nil, err
	}
	return kp, nil
}
// checkTime asks the server for clock statistics and records the
// measured clock delta and request latency in the global config.
func checkTime() error {
	stats, err := cl.GetTimeStats()
	if err != nil {
		return err
	}
	// TODO: post results to an API for statistics?
	config.TimeDelta = stats.Delta
	config.Latency = stats.Latency
	return nil
}
/*
var feedback = func (player *BotConfig, source string, err error) {
if err == nil {
fmt.Printf("Local player %d is ready\n", player.Number)
} else {
fmt.Printf("Error for local player %d\n", player.Number)
fmt.Printf("Player %d error\n", player.Number)
switch source {
case "run":
fmt.Printf("Error running command \"%s\"\n", player.CommandLine)
fmt.Println(err.Error())
case "send":
fmt.Printf("Input rejected by server\n")
fmt.Println(err.Error())
}
if cl.remote.LastError != "" {
fmt.Println(cl.remote.LastError)
}
if cl.remote.LastDetails != "" {
fmt.Println(cl.remote.LastDetails)
}
}
}
*/
|
package config
import (
"encoding/json"
"fmt"
"io/ioutil"
)
// Config is the top-level application configuration, decoded from a
// JSON file by NewConfig.
type Config struct {
	// Web holds the HTTP server settings.
	Web web `json:"web"`
	// Database holds the database connection settings.
	Database database `json:"database"`
}
// web holds the HTTP listener address parts.
type web struct {
	Host string `json:"host"`
	Port int    `json:"port"`
}

// database holds the database host and its credential.
type database struct {
	Host     string `json:"host"`
	Password string `json:"password"`
}
// NewConfig reads the JSON configuration file at filename and decodes
// it into a Config. It returns an error (wrapped with context) when
// the file cannot be read or does not contain valid JSON.
func NewConfig(filename string) (*Config, error) {
	b, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, fmt.Errorf("reading config %q: %w", filename, err)
	}
	c := new(Config)
	// Decode directly into c (the previous &c was a needless **Config).
	// The raw file and decoded struct are intentionally no longer
	// printed: the config contains database credentials.
	if err := json.Unmarshal(b, c); err != nil {
		return nil, fmt.Errorf("parsing config %q: %w", filename, err)
	}
	return c, nil
}
|
package main
import (
"fmt"
"math/rand"
"sort"
)
// Hero is a demo record with a name and an age.
type Hero struct {
	Name string
	Age  int
}

// HeroSlice implements sort.Interface, ordering heroes by ascending age.
type HeroSlice []Hero

// Len reports the number of heroes in the slice.
func (hs HeroSlice) Len() int { return len(hs) }

// Less is the sorting criterion: the hero at i sorts before the hero
// at j when it is younger.
func (hs HeroSlice) Less(i, j int) bool { return hs[i].Age < hs[j].Age }

// Swap exchanges the heroes at positions i and j.
func (hs HeroSlice) Swap(i, j int) { hs[i], hs[j] = hs[j], hs[i] }
// main demonstrates sorting: first a plain int slice via sort.Ints,
// then a struct slice via sort.Sort using the sort.Interface
// implementation on HeroSlice.
func main() {
	// Slice of ints: the standard library sorts it in place.
	nums := []int{0, -1, 10, 7, 90}
	sort.Ints(nums)
	fmt.Println(nums)

	// Slice of structs with random names/ages.
	var heroes HeroSlice
	for i := 0; i < 10; i++ {
		heroes = append(heroes, Hero{
			Name: fmt.Sprintf("英雄~%d", rand.Intn(100)),
			Age:  rand.Intn(100),
		})
	}
	for _, h := range heroes {
		fmt.Println(h)
	}
	fmt.Println("排序后--------------------")
	sort.Sort(heroes)
	for _, h := range heroes {
		fmt.Println(h)
	}
}
|
package main
import "fmt"
// Package-level booleans; they stay at the zero value false until
// assigned. (gofmt: stray space before the comma removed.)
var java, python bool
// main shows the different ways of declaring variables in Go and that
// uninitialized variables start at their zero value.
func main() {
	var temp int // declared, never assigned: prints 0
	var (
		flagA = true // var with initializer, type inferred
		flagB = true
	)
	num, word := 10, "string" // multiple short declarations
	fmt.Println(temp, java, python, flagA, flagB)
	fmt.Println(num, word)
}
|
package action
import (
"fmt"
"html/template"
"regexp"
"strings"
"github.com/GoAdminGroup/go-admin/context"
"github.com/GoAdminGroup/go-admin/modules/config"
"github.com/GoAdminGroup/go-admin/modules/constant"
"github.com/GoAdminGroup/go-admin/modules/language"
"github.com/GoAdminGroup/go-admin/modules/utils"
template2 "github.com/GoAdminGroup/go-admin/template"
"github.com/GoAdminGroup/go-admin/template/icon"
"github.com/GoAdminGroup/go-admin/template/types"
)
// PopUpAction renders a trigger element that opens a modal dialog and,
// on the configured event, posts an ajax request whose response fills
// the modal body.
type PopUpAction struct {
	BaseAction
	Url         string            // endpoint the trigger posts to
	Method      string            // HTTP method of the ajax call
	Id          string            // DOM id of the modal element
	Title       string            // modal title
	Draggable   bool              // render the modal draggable
	Width       string            // modal width (CSS value)
	Height      string            // modal height (CSS value)
	HasIframe   bool              // body is an iframe (see PopUpWithIframe)
	HideFooter  bool              // suppress the modal footer
	BtnTitle    template.HTML     // footer/button content
	ParameterJS template.JS       // extra JS that can extend the ajax payload
	Data        AjaxData          // base ajax payload
	Handlers    []context.Handler // server-side handlers behind Url
	Event       Event             // DOM event that triggers the ajax call
}
// PopUp builds a click-triggered modal action that posts to the route
// registered under the given id and shows the handler's response in
// the modal body. It panics when id is empty.
func PopUp(id, title string, handler types.Handler) *PopUpAction {
	if id == "" {
		panic("wrong popup action parameter, empty id")
	}
	act := &PopUpAction{
		Url:      URL(id),
		Title:    title,
		Method:   "post",
		BtnTitle: "",
		Data:     NewAjaxData(),
		Id:       "info-popup-model-" + utils.Uuid(10),
		Handlers: context.Handlers{handler.Wrap()},
		Event:    EventClick,
	}
	return act
}
// SetData merges the given key/value pairs into the ajax payload sent
// when the popup is triggered; returns pop for chaining.
func (pop *PopUpAction) SetData(data map[string]interface{}) *PopUpAction {
	pop.Data = pop.Data.Add(data)
	return pop
}
// SetDraggable makes the modal draggable; returns pop for chaining.
func (pop *PopUpAction) SetDraggable() *PopUpAction {
	pop.Draggable = true
	return pop
}
// SetWidth sets the modal width (CSS value); returns pop for chaining.
func (pop *PopUpAction) SetWidth(width string) *PopUpAction {
	pop.Width = width
	return pop
}
// SetHeight sets the modal height (CSS value); returns pop for chaining.
func (pop *PopUpAction) SetHeight(height string) *PopUpAction {
	pop.Height = height
	return pop
}
// SetParameterJS appends JS that runs before the ajax call and may add
// fields to the payload; returns pop for chaining.
func (pop *PopUpAction) SetParameterJS(parameterJS template.JS) *PopUpAction {
	pop.ParameterJS += parameterJS
	return pop
}
// SetUrl overrides the endpoint the trigger posts to; returns pop for
// chaining.
func (pop *PopUpAction) SetUrl(url string) *PopUpAction {
	pop.Url = url
	return pop
}
// IframeData configures the embedded iframe of PopUpWithIframe.
type IframeData struct {
	Width  string // iframe width; defaults to "100%"
	Height string // iframe height; defaults to "100%"
	Src    string // iframe source URL
	// AddParameterFn, when non-nil, returns extra query-string text
	// appended to Src for each request.
	AddParameterFn func(ctx *context.Context) string
}
// PopUpWithIframe builds a click-triggered draggable modal whose body
// is an iframe pointing at data.Src (with the go-admin iframe marker
// parameters appended). width/height size the modal; data.Width/Height
// size the iframe and default to 100%. Panics when id is empty.
func PopUpWithIframe(id, title string, data IframeData, width, height string) *PopUpAction {
	if id == "" {
		panic("wrong popup action parameter, empty id")
	}
	if data.Width == "" {
		data.Width = "100%"
	}
	if data.Height == "" {
		data.Height = "100%"
	}
	// Ensure the marker parameters below can simply be appended.
	if strings.Contains(data.Src, "?") {
		data.Src = data.Src + "&"
	} else {
		data.Src = data.Src + "?"
	}
	modalID := "info-popup-model-" + utils.Uuid(10)
	var handler types.Handler = func(ctx *context.Context) (success bool, msg string, res interface{}) {
		param := ""
		if data.AddParameterFn != nil {
			param = data.AddParameterFn(ctx)
		}
		// Bug fix: the element was previously terminated with "<iframe>"
		// (opening a second tag) instead of the closing "</iframe>".
		return true, "ok", fmt.Sprintf(`<iframe style="width:%s;height:%s;"
	scrolling="auto"
	allowtransparency="true"
	frameborder="0"
	src="%s__goadmin_iframe=true&__go_admin_no_animation_=true&__goadmin_iframe_id=%s`+param+`"></iframe>`,
			data.Width, data.Height, data.Src, modalID)
	}
	return &PopUpAction{
		Url:        URL(id),
		Title:      title,
		Method:     "post",
		BtnTitle:   "",
		Height:     height,
		HasIframe:  true,
		HideFooter: isFormURL(data.Src),
		Width:      width,
		Draggable:  true,
		Data:       NewAjaxData(),
		Id:         modalID,
		Handlers:   context.Handlers{handler.Wrap()},
		Event:      EventClick,
	}
}
// PopUpData carries the route id, title and geometry used by the form
// popup constructors below.
type PopUpData struct {
	Id     string
	Title  string
	Width  string
	Height string
}
// GetForm customizes a fresh form panel for PopUpWithForm.
type GetForm func(panel *types.FormPanel) *types.FormPanel

// GetCtxForm is GetForm with access to the request context.
type GetCtxForm func(ctx *context.Context, panel *types.FormPanel) *types.FormPanel

// operationHandlerSetter registers form callback routes. It is injected
// via InitOperationHandlerSetter — presumably to break a dependency
// cycle; confirm with the caller.
var operationHandlerSetter context.NodeProcessor
// InitOperationHandlerSetter injects the processor used to register
// form callback handlers. Call it once during setup, before any form
// popup handler runs.
func InitOperationHandlerSetter(p context.NodeProcessor) {
	operationHandlerSetter = p
}
// PopUpWithForm builds a popup whose body is a form panel produced by
// fn and submitted to url. The handler renders the form (with Save and
// Reset buttons) on demand; data supplies the route id, title and
// modal size. Panics when data.Id is empty.
func PopUpWithForm(data PopUpData, fn GetForm, url string) *PopUpAction {
	if data.Id == "" {
		panic("wrong popup action parameter, empty id")
	}
	modalID := "info-popup-model-" + utils.Uuid(10)
	var handler types.Handler = func(ctx *context.Context) (success bool, msg string, res interface{}) {
		// Footer: a submit button (right) and a reset button (left)
		// inside an 8-wide column.
		col1 := template2.Default().Col().GetContent()
		btn1 := template2.Default().Button().SetType("submit").
			SetContent(language.GetFromHtml("Save")).
			SetThemePrimary().
			SetOrientationRight().
			SetLoadingText(icon.Icon("fa-spinner fa-spin", 2) + language.GetFromHtml("Save")).
			GetContent()
		btn2 := template2.Default().Button().SetType("reset").
			SetContent(language.GetFromHtml("Reset")).
			SetThemeWarning().
			SetOrientationLeft().
			GetContent()
		col2 := template2.Default().Col().SetSize(types.SizeMD(8)).
			SetContent(btn1 + btn2).GetContent()
		// Let the caller shape the panel, then register its callbacks.
		panel := fn(types.NewFormPanel())
		operationHandlerSetter(panel.Callbacks...)
		fields, tabFields, tabHeaders := panel.GetNewFormFields()
		return true, "ok", template2.Default().Box().
			SetHeader("").
			SetBody(template2.Default().Form().
				SetContent(fields).
				SetTabHeaders(tabHeaders).
				SetTabContents(tabFields).
				SetAjax(panel.AjaxSuccessJS, panel.AjaxErrorJS).
				SetPrefix(config.PrefixFixSlash()).
				SetUrl(url).
				SetOperationFooter(col1 + col2).GetContent()).
			SetStyle(template.HTMLAttr(`overflow-x: hidden;overflow-y: hidden;`)).
			GetContent()
	}
	return &PopUpAction{
		Url:        URL(data.Id),
		Title:      data.Title,
		Method:     "post",
		BtnTitle:   "",
		HideFooter: true,
		Height:     data.Height,
		Width:      data.Width,
		Draggable:  true,
		Data:       NewAjaxData(),
		Id:         modalID,
		Handlers:   context.Handlers{handler.Wrap()},
		Event:      EventClick,
	}
}
// PopUpWithCtxForm is PopUpWithForm for form builders that need the
// request context: fn receives ctx in addition to the fresh panel.
// Panics when data.Id is empty.
// NOTE(review): unlike PopUpWithForm this variant does not apply the
// overflow-hidden SetStyle to the box — confirm that is intentional.
func PopUpWithCtxForm(data PopUpData, fn GetCtxForm, url string) *PopUpAction {
	if data.Id == "" {
		panic("wrong popup action parameter, empty id")
	}
	modalID := "info-popup-model-" + utils.Uuid(10)
	var handler types.Handler = func(ctx *context.Context) (success bool, msg string, res interface{}) {
		// Footer: submit (right) and reset (left) buttons in an 8-wide column.
		col1 := template2.Default().Col().GetContent()
		btn1 := template2.Default().Button().SetType("submit").
			SetContent(language.GetFromHtml("Save")).
			SetThemePrimary().
			SetOrientationRight().
			SetLoadingText(icon.Icon("fa-spinner fa-spin", 2) + language.GetFromHtml("Save")).
			GetContent()
		btn2 := template2.Default().Button().SetType("reset").
			SetContent(language.GetFromHtml("Reset")).
			SetThemeWarning().
			SetOrientationLeft().
			GetContent()
		col2 := template2.Default().Col().SetSize(types.SizeMD(8)).
			SetContent(btn1 + btn2).GetContent()
		// The caller shapes the panel with the request context available.
		panel := fn(ctx, types.NewFormPanel())
		operationHandlerSetter(panel.Callbacks...)
		fields, tabFields, tabHeaders := panel.GetNewFormFields()
		return true, "ok", template2.Default().Box().
			SetHeader("").
			SetBody(template2.Default().Form().
				SetContent(fields).
				SetTabHeaders(tabHeaders).
				SetTabContents(tabFields).
				SetAjax(panel.AjaxSuccessJS, panel.AjaxErrorJS).
				SetPrefix(config.PrefixFixSlash()).
				SetUrl(url).
				SetOperationFooter(col1 + col2).GetContent()).
			GetContent()
	}
	return &PopUpAction{
		Url:        URL(data.Id),
		Title:      data.Title,
		Method:     "post",
		BtnTitle:   "",
		HideFooter: true,
		Height:     data.Height,
		Width:      data.Width,
		Draggable:  true,
		Data:       NewAjaxData(),
		Id:         modalID,
		Handlers:   context.Handlers{handler.Wrap()},
		Event:      EventClick,
	}
}
// SetBtnTitle sets the footer/button content of the popup; returns pop
// for chaining.
func (pop *PopUpAction) SetBtnTitle(title template.HTML) *PopUpAction {
	pop.BtnTitle = title
	return pop
}
// SetEvent changes the DOM event that triggers the popup's ajax call;
// returns pop for chaining.
func (pop *PopUpAction) SetEvent(event Event) *PopUpAction {
	pop.Event = event
	return pop
}
// SetMethod changes the HTTP method of the popup's ajax call; returns
// pop for chaining.
func (pop *PopUpAction) SetMethod(method string) *PopUpAction {
	pop.Method = method
	return pop
}
// GetCallbacks exposes the popup's handlers as a routable node at its
// URL; ContextNodeNeedAuth marks the route as requiring authentication.
func (pop *PopUpAction) GetCallbacks() context.Node {
	return context.Node{
		Path:     pop.Url,
		Method:   pop.Method,
		Handlers: pop.Handlers,
		Value:    map[string]interface{}{constant.ContextNodeNeedAuth: 1},
	}
}
// Js emits the client-side wiring: on the configured event the trigger
// element gathers its ajax payload (the base Data, any additions made
// by ParameterJS, the element's data-id attribute, and this popup's
// id) and posts it to Url. A code==0 response is injected into the
// modal body; any other response or transport error raises a
// sweetalert dialog, and on transport error the modal is hidden after
// 500 ms.
func (pop *PopUpAction) Js() template.JS {
	return template.JS(`$('`+pop.BtnId+`').on('`+string(pop.Event)+`', function (event) {
	let data = `+pop.Data.JSON()+`;
	`) + pop.ParameterJS + template.JS(`
	let id = $(this).attr("data-id");
	if (id && id !== "") {
		data["id"] = id;
	}
	data['popup_id'] = "`+pop.Id+`"
	$.ajax({
		method: '`+pop.Method+`',
		url: "`+pop.Url+`",
		data: data,
		success: function (data) {
			if (typeof (data) === "string") {
				data = JSON.parse(data);
			}
			if (data.code === 0) {
				$('#`+pop.Id+` .modal-body').html(data.data);
			} else {
				swal(data.msg, '', 'error');
			}
		},
		error: function (data) {
			if (data.responseText !== "") {
				swal(data.responseJSON.msg, '', 'error');
			} else {
				swal('error', '', 'error');
			}
			setTimeout(function() {
				$('#`+pop.Id+`').hide();
				$('.modal-backdrop.fade.in').hide();
			}, 500)
		},
	});
});`)
}
// BtnAttribute returns the HTML attributes for the trigger element:
// the bootstrap modal toggle targeting this popup's id plus a data-id
// template slot. The stray space previously embedded in the
// data-target selector ("#id ") was removed.
func (pop *PopUpAction) BtnAttribute() template.HTML {
	return template.HTML(`data-toggle="modal" data-target="#` + pop.Id + `" data-id="{{.Id}}" style="cursor: pointer;"`)
}
// FooterContent renders the popup's modal markup (empty body; the body
// is filled by the ajax response from Js).
func (pop *PopUpAction) FooterContent() template.HTML {
	up := template2.Default().Popup().SetID(pop.Id).
		SetTitle(template.HTML(pop.Title)).
		SetFooter(pop.BtnTitle).
		SetWidth(pop.Width).
		SetHeight(pop.Height).
		SetBody(template.HTML(``))
	// Bug fix: HideFooter was previously honored only for draggable
	// popups; hide the footer whenever it is requested.
	if pop.HideFooter {
		up = up.SetHideFooter()
	}
	if pop.Draggable {
		return up.SetDraggable().GetContent()
	}
	return up.GetContent()
}
// formURLReg matches info-section form URLs of the shape
// ".../info/<table>/new" or ".../info/<table>/edit...".
// Compiled once at init (MustCompile) instead of on every call, which
// also removes the previously ignored Compile error.
var formURLReg = regexp.MustCompile("(.*)info/(.*)/(new|edit)(.*?)")

// isFormURL reports whether s points at a new/edit form page.
func isFormURL(s string) bool {
	return formURLReg.MatchString(s)
}
|
// Accessing fields of a struct
// To access individual fields of a struct you have to use dot (.) operator.
// Golang program to show how to
// access the fields of struct
package main
import "fmt"
// Car is a demo struct whose fields are accessed with the dot operator
// in main.
type Car struct {
	Name, Model, Color string
	WeightInKg         float64
}
// main builds a Car, reads individual fields with the dot operator,
// reassigns one field, and prints the resulting value.
func main() {
	car := Car{
		Name:       "Ferrari",
		Model:      "GTC4",
		Color:      "Red",
		WeightInKg: 1920,
	}
	// Read fields via the dot operator.
	fmt.Println("Car Name: ", car.Name)
	fmt.Println("Car Color: ", car.Color)
	// Fields are assignable through the same syntax.
	car.Color = "Black"
	fmt.Println("Car: ", car)
}
|
package config
import (
"fmt"
"log"
"os"
)
const (
	// mongoDBURIStr is the MongoDB connection string template, filled
	// with username, password and host by InitializeAppConfig.
	mongoDBURIStr = "mongodb://%s:%s@%s/?authSource=admin&readPreference=primary&ssl=false"
)

// Exported configuration values, populated by InitializeAppConfig.
var (
	//MongoDBURI ...
	MongoDBURI string
	//RedisURI ...
	RedisURI string
	//KafkaHost ...
	KafkaHost string
	//EmpAPILogger ...
	EmpAPILogger *log.Logger
)
// InitializeAppConfig wires up the package-level logger and derives the
// MongoDB, Redis and Kafka endpoints from environment variables. Call
// it once during startup before reading any of the exported values.
func InitializeAppConfig() {
	EmpAPILogger = log.New(os.Stdout, "employee-api : ", log.LstdFlags)

	// MongoDB: credentials and host come from the environment; only the
	// host is logged, keeping the password out of the logs.
	server := os.Getenv("MONGODB_SERVER")
	user, pass := os.Getenv("MONGODB_ADMINUSERNAME"), os.Getenv("MONGODB_ADMINPASSWORD")
	MongoDBURI = fmt.Sprintf(mongoDBURIStr, user, pass, server)
	EmpAPILogger.Printf("mongodb server URI is : %s", server)

	// Redis and Kafka endpoints.
	RedisURI = fmt.Sprintf("%s:%s", os.Getenv("REDIS_SERVER"), os.Getenv("REDIS_PORT"))
	KafkaHost = os.Getenv("KAFKA_SERVER")
	EmpAPILogger.Printf("redis, kafka : %s %s", RedisURI, KafkaHost)
}
|
// Copyright (c) 2019 Leonardo Faoro. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package security
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestNewToken checks that NewToken rejects a too-short length, honors
// the requested length, and applies the expected prefixes in normal
// and test mode.
func TestNewToken(t *testing.T) {
	// A length below the minimum must panic.
	assert.Panics(t, func() {
		NewToken(4, false)
	})

	tlen := 32
	tok := NewToken(tlen, false)
	// testify takes the expected value before the actual one; the
	// original call had them reversed. The stray trailing nil msg args
	// on Contains were dropped as well.
	assert.Equal(t, tlen, len(tok), "token length")
	assert.Contains(t, tok, "tok_")
	t.Log("token:", tok)

	ttok := NewToken(tlen, true)
	assert.Contains(t, ttok, "tok_test_")
}
|
package mocks
import (
"strings"
"github.com/stretchr/testify/mock"
)
// ExecCmd is a testify mock of an exec-style command; Args records the
// argument list the command was (nominally) built with.
type ExecCmd struct {
	mock.Mock
	Args []string
}
// Run replays the mocked Run expectation and returns its configured
// error.
func (cmd *ExecCmd) Run() error {
	args := cmd.Called()
	return args.Error(0)
}
// CombinedOutput replays the mocked expectation, returning the
// configured output bytes and error.
func (cmd *ExecCmd) CombinedOutput() ([]byte, error) {
	args := cmd.Called()
	return args.Get(0).([]byte), args.Error(1)
}
// AttachStdIO only records the call; the mock wires no real standard
// streams.
func (cmd *ExecCmd) AttachStdIO() {
	cmd.Called()
}
// ArgsString joins the recorded arguments into a single
// space-separated command line.
func (cmd *ExecCmd) ArgsString() string {
	return strings.Join(cmd.Args, " ")
}
|
package main
import (
"flag"
"fmt"
"time"
"github.com/PuerkitoBio/goquery"
"github.com/gotokatsuya/incidents/util/slack"
)
var (
	incidentID           int                 // BigQuery incident number to poll (-i flag)
	slackURL             string              // Slack webhook URL (-u flag); empty disables posting
	cacheIncidentInfoMap map[string]struct{} // set of messages already reported
)
// init registers the command-line flags and prepares the seen-message
// cache. Note: flag.Parse must still be called for the flags to take
// effect.
func init() {
	flag.IntVar(&incidentID, "i", 18022, "incident")
	flag.StringVar(&slackURL, "u", "", "slack url")
	cacheIncidentInfoMap = make(map[string]struct{})
}
// postFetchedIncidentInfo scrapes the incident table at url and posts
// each previously unseen cell's text to Slack (when a webhook URL is
// configured). Seen messages are remembered in cacheIncidentInfoMap so
// repeated polls do not re-post them.
func postFetchedIncidentInfo(url string) {
	doc, err := goquery.NewDocument(url)
	if err != nil {
		// The error was previously discarded, leaving doc nil and
		// panicking inside Find on any network failure. Skip this poll
		// instead and try again next round.
		fmt.Println("fetch error:", err)
		return
	}
	doc.Find("table > tbody > tr > td").Each(func(_ int, s *goquery.Selection) {
		msg := s.Text()
		if _, ok := cacheIncidentInfoMap[msg]; ok {
			return
		}
		fmt.Println(msg)
		if len(slackURL) != 0 {
			slack.Post(slack.Request{
				Text:      msg,
				Username:  "bigquery incidents",
				IconEmoji: ":see_no_evil:",
			}, slackURL)
		}
		cacheIncidentInfoMap[msg] = struct{}{}
	})
}
// main polls the Google Cloud status page for the configured BigQuery
// incident once a minute, forever.
func main() {
	// Bug fix: flag.Parse was never called, so the -i and -u flags
	// registered in init always kept their default values.
	flag.Parse()
	url := fmt.Sprintf("https://status.cloud.google.com/incident/bigquery/%d", incidentID)
	for {
		postFetchedIncidentInfo(url)
		time.Sleep(1 * time.Minute)
	}
}
|
package main
import (
"context"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os"
"strconv"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
)
const (
	// authServerURL is the OAuth2 authorization server under test.
	authServerURL = "http://localhost:9096"
)

var (
	// config describes this client's registration with the auth server.
	config = oauth2.Config{
		ClientID:     "222222",
		ClientSecret: "22222222",
		Scopes:       []string{"all"},
		RedirectURL:  "http://localhost:9094/oauth2",
		Endpoint: oauth2.Endpoint{
			AuthURL:  authServerURL + "/oauth/authorize",
			TokenURL: authServerURL + "/oauth/token",
		},
	}
	globalToken *oauth2.Token // Non-concurrent security
)

// DY mod START
// To measure execution time
// var mStartTime Time
// var mElapsedTime Time
// DY mod END
// Shared timing scratch state used by the handlers in main.
var mStartTime = time.Now()
var mElapsedTime = time.Since(mStartTime)

// NOTE(review): main shadows both counters below with local ints, so
// these package-level variables are effectively unused — confirm.
var numTokenCreation int64 = 0
var numResourceAccess int64 = 0
// main runs an OAuth2 benchmark client on :9094 against the auth
// server at :9096. os.Args[1] and os.Args[2] set how many token
// creations and resource accesses to drive; the handlers chain
// requests to themselves (via http.Get at the end of each round) until
// the counters reach zero, logging the elapsed time of each round.
func main() {
	// DY mod START
	if len(os.Args) < 3 {
		log.Printf("Argument Error\n")
		return
	}
	// NOTE(review): these := declarations create local ints that shadow
	// the package-level int64 counters; the closures below capture the
	// locals, so the globals are never updated. Atoi errors are also
	// silently ignored (non-numeric args become 0). Confirm both are
	// intended.
	numTokenCreation, _ := strconv.Atoi(os.Args[1])
	numResourceAccess, _ := strconv.Atoi(os.Args[2])
	log.Printf("numTokenCreation : %d \n", numTokenCreation)
	log.Printf("numResourceAccess :%d \n", numResourceAccess)
	// DY mod END
	// "/" starts one authorization-code round (with PKCE S256).
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// DY mod START
		if numTokenCreation == 0 {
			return
		}
		mStartTime = time.Now()
		// DY mod END
		u := config.AuthCodeURL("xyz",
			oauth2.SetAuthURLParam("code_challenge", genCodeChallengeS256("s256example")),
			oauth2.SetAuthURLParam("code_challenge_method", "S256"))
		http.Redirect(w, r, u, http.StatusFound)
	})
	// "/oauth2" is the redirect target: validate state, exchange the
	// code for a token, log the timing, and kick off the next round.
	http.HandleFunc("/oauth2", func(w http.ResponseWriter, r *http.Request) {
		r.ParseForm()
		state := r.Form.Get("state")
		if state != "xyz" {
			http.Error(w, "State invalid", http.StatusBadRequest)
			return
		}
		code := r.Form.Get("code")
		if code == "" {
			http.Error(w, "Code not found", http.StatusBadRequest)
			return
		}
		// DY mod START
		mElapsedTime = time.Since(mStartTime)
		// log.Printf("Ctime : %d: %s", numTokenCreation, mElapsedTime)
		mStartTime = time.Now()
		// DY mod END
		token, err := config.Exchange(context.Background(), code, oauth2.SetAuthURLParam("code_verifier", "s256example"))
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		globalToken = token
		// DY mod START
		mElapsedTime = time.Since(mStartTime)
		log.Printf("Ttime : %d: %s", numTokenCreation, mElapsedTime)
		numTokenCreation -= 1
		// DY mod END
		e := json.NewEncoder(w)
		e.SetIndent("", " ")
		e.Encode(token)
		// DY mod START
		// Chain into the next token-creation round.
		time.Sleep(time.Millisecond)
		http.Get("http://localhost:9094/")
		// DY mod END
	})
	// "/refresh" forces expiry and refreshes the stored token.
	http.HandleFunc("/refresh", func(w http.ResponseWriter, r *http.Request) {
		if globalToken == nil {
			http.Redirect(w, r, "/", http.StatusFound)
			return
		}
		globalToken.Expiry = time.Now()
		token, err := config.TokenSource(context.Background(), globalToken).Token()
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		globalToken = token
		e := json.NewEncoder(w)
		e.SetIndent("", " ")
		e.Encode(token)
	})
	// "/try" accesses the protected resource with the stored token,
	// timing each access and chaining the next one.
	http.HandleFunc("/try", func(w http.ResponseWriter, r *http.Request) {
		// DY mod START
		if numResourceAccess == 0 {
			return
		}
		mStartTime = time.Now()
		// DY mod END
		if globalToken == nil {
			http.Redirect(w, r, "/", http.StatusFound)
			return
		}
		resp, err := http.Get(fmt.Sprintf("%s/test?access_token=%s", authServerURL, globalToken.AccessToken))
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		defer resp.Body.Close()
		io.Copy(w, resp.Body)
		// DY mod START
		mElapsedTime = time.Since(mStartTime)
		log.Printf("Rtime : %d: %s", numResourceAccess, mElapsedTime)
		// log.Printf("Rtime : %s", mElapsedTime)
		numResourceAccess -= 1
		time.Sleep(time.Millisecond)
		http.Get("http://localhost:9094/try")
		// DY mod END
	})
	// "/pwd" demonstrates the resource-owner-password grant.
	http.HandleFunc("/pwd", func(w http.ResponseWriter, r *http.Request) {
		token, err := config.PasswordCredentialsToken(context.Background(), "test", "test")
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		globalToken = token
		e := json.NewEncoder(w)
		e.SetIndent("", " ")
		e.Encode(token)
	})
	// "/client" demonstrates the client-credentials grant.
	http.HandleFunc("/client", func(w http.ResponseWriter, r *http.Request) {
		cfg := clientcredentials.Config{
			ClientID:     config.ClientID,
			ClientSecret: config.ClientSecret,
			TokenURL:     config.Endpoint.TokenURL,
		}
		token, err := cfg.Token(context.Background())
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		e := json.NewEncoder(w)
		e.SetIndent("", " ")
		e.Encode(token)
	})
	log.Println("Client is running at 9094 port.Please open http://localhost:9094")
	log.Fatal(http.ListenAndServe(":9094", nil))
}
// genCodeChallengeS256 derives the PKCE S256 code challenge for the
// given verifier: the URL-safe base64 encoding of its SHA-256 digest.
func genCodeChallengeS256(s string) string {
	sum := sha256.Sum256([]byte(s))
	return base64.URLEncoding.EncodeToString(sum[:])
}
|
package main
import "fmt"
import "sort"
// main solves a small voting puzzle for a hard-coded ballot: votes
// holds each candidate's tally and k extra votes may be handed out.
// It prints how many candidates could still reach (or beat) the top
// tally.
// NOTE(review): the commented-out copies and re-sorts suggest an
// unfinished refactor; verify the counting logic against the original
// problem statement before relying on it.
func main() {
	votes := []int{3, 1, 1, 3, 1}
	k := 2
	//votes := make([]int, len(votes))
	//var winners []int
	count := 0
	sort.Ints(votes)
	if k == 0 {
		// sort.Sort(votes)
		// With no spare votes, a tie at the top means nobody wins outright.
		if votes[len(votes)-1] == votes[len(votes)-2] {
			fmt.Println("0")
			return
		}
	}
	// copy(votes, votes)
	for i := 0; i < len(votes); i++ {
		//temp := 0
		// sort.Ints(votes)
		// Candidates who cannot reach the leader even with all k votes
		// are skipped; those who can exceed it are counted outright.
		if votes[i]+k < votes[len(votes)-1] {
			continue
		} else if votes[i]+k > votes[len(votes)-1] {
			count++
			continue
		}
		// Exact tie with the leader: only the current leader counts, and
		// only when it strictly beats the runner-up.
		if len(votes) > 2 && i == len(votes)-1 {
			if votes[len(votes)-1] > votes[len(votes)-2] {
				count++
			}
		}
	}
	fmt.Println(count)
	// fmt.Println(winners)
}
|
package hzutils
import "html"
// HTMLPre escapes shtml so it renders as literal text and wraps the
// result in <html> tags.
// NOTE(review): despite the "Pre" in the name, the wrapper tag is
// <html>, not <pre> — confirm the intended tag with callers.
func HTMLPre(shtml string) string {
	escaped := html.EscapeString(shtml)
	return `<html>` + escaped + `</html>`
}
|
// This package contains tests related to dnf-json and rpmmd package.

// +build integration

package main
import (
"fmt"
"io/ioutil"
"os"
"path"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/osbuild/osbuild-composer/internal/blueprint"
"github.com/osbuild/osbuild-composer/internal/distro"
"github.com/osbuild/osbuild-composer/internal/distro/fedora33"
"github.com/osbuild/osbuild-composer/internal/rpmmd"
"github.com/osbuild/osbuild-composer/internal/test"
)
// TestFetchChecksum verifies that fetching metadata for a freshly
// created temporary repository yields a non-empty checksum.
func TestFetchChecksum(t *testing.T) {
	dir, err := test.SetUpTemporaryRepository()
	// Fail fast BEFORE registering cleanup: previously the teardown
	// defer was installed first and assert (not require) let the test
	// continue with a broken dir when setup failed.
	require.Nilf(t, err, "Failed to set up temporary repository: %v", err)
	defer func(dir string) {
		err := test.TearDownTemporaryRepository(dir)
		assert.Nil(t, err, "Failed to clean up temporary repository.")
	}(dir)
	repoCfg := rpmmd.RepoConfig{
		Name:      "repo",
		BaseURL:   fmt.Sprintf("file://%s", dir),
		IgnoreSSL: true,
	}
	// use a fullpath to dnf-json, this allows this test to have an arbitrary
	// working directory
	rpmMetadata := rpmmd.NewRPMMD(path.Join(dir, "rpmmd"), "/usr/libexec/osbuild-composer/dnf-json")
	_, c, err := rpmMetadata.FetchMetadata([]rpmmd.RepoConfig{repoCfg}, "platform:f31", "x86_64")
	assert.Nilf(t, err, "Failed to fetch checksum: %v", err)
	assert.NotEqual(t, "", c["repo"], "The checksum is empty")
}
// This test loads all the repositories available in /repositories directory
// and tries to run depsolve for each architecture. With N architectures available
// this should run cross-arch dependency solving N-1 times.
func TestCrossArchDepsolve(t *testing.T) {
	// Load repositories from the definition we provide in the RPM package
	repoDir := "/usr/share/osbuild-composer"
	// NOTE: we can add RHEL, but don't make it hard requirement because it will fail outside of VPN
	for _, distroStruct := range []distro.Distro{fedora33.New()} {
		t.Run(distroStruct.Name(), func(t *testing.T) {
			// Run tests in parallel to speed up run times.
			t.Parallel()
			// Set up temporary directory for rpm/dnf cache
			dir, err := ioutil.TempDir("/tmp", "rpmmd-test-")
			require.Nilf(t, err, "Failed to create tmp dir for depsolve test: %v", err)
			defer os.RemoveAll(dir)
			// use a fullpath to dnf-json, this allows this test to have an arbitrary
			// working directory
			rpm := rpmmd.NewRPMMD(dir, "/usr/libexec/osbuild-composer/dnf-json")
			repos, err := rpmmd.LoadRepositories([]string{repoDir}, distroStruct.Name())
			require.NoErrorf(t, err, "Failed to LoadRepositories %v", distroStruct.Name())
			// Depsolve both the build and payload package sets for every
			// image type of every architecture this distro supports.
			for _, archStr := range distroStruct.ListArches() {
				t.Run(archStr, func(t *testing.T) {
					arch, err := distroStruct.GetArch(archStr)
					require.NoError(t, err)
					for _, imgTypeStr := range arch.ListImageTypes() {
						t.Run(imgTypeStr, func(t *testing.T) {
							imgType, err := arch.GetImageType(imgTypeStr)
							require.NoError(t, err)
							packages := imgType.PackageSets(blueprint.Blueprint{})
							_, _, err = rpm.Depsolve(packages["build-packages"], repos[archStr], distroStruct.ModulePlatformID(), archStr)
							assert.NoError(t, err)
							_, _, err = rpm.Depsolve(packages["packages"], repos[archStr], distroStruct.ModulePlatformID(), archStr)
							assert.NoError(t, err)
						})
					}
				})
			}
		})
	}
}
|
package logger
import (
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"gopkg.in/natefinch/lumberjack.v2"
"os"
)
// LogConfig controls logger construction in NewRotateLogger.
type LogConfig struct {
	Develop     bool   `json:"develop"`     // also mirror output to stdout, with colored levels
	Level       string `json:"level"`       // minimum level name understood by zapcore.Level.Set
	Structured  bool   `json:"structured"`  // JSON encoder instead of console encoder
	Path        string `json:"path"`        // main log file
	ErrorPath   string `json:"errorPath"`   // error-and-above log file
	MaxFileSize int    `json:"maxFileSize"` // megabytes
	MaxBackups  int    `json:"maxBackups"`  // rotated files to keep
}
// NewRotateLogger builds a zap logger with size-rotated files (via
// lumberjack): records at or above the configured level go to Path,
// errors additionally to ErrorPath, and in develop mode everything is
// also mirrored to stdout with colored level names.
func NewRotateLogger(logConfig LogConfig) *zap.Logger {
	encoderCfg := zap.NewProductionEncoderConfig()
	encoderCfg.EncodeTime = zapcore.ISO8601TimeEncoder
	if logConfig.Develop {
		encoderCfg.EncodeLevel = zapcore.CapitalColorLevelEncoder
	}
	var encoder zapcore.Encoder
	if logConfig.Structured {
		encoder = zapcore.NewJSONEncoder(encoderCfg)
	} else {
		encoder = zapcore.NewConsoleEncoder(encoderCfg)
	}
	// Rotating sinks for regular and error-only output.
	hook := lumberjack.Logger{
		Filename:   logConfig.Path,
		MaxSize:    logConfig.MaxFileSize,
		MaxBackups: logConfig.MaxBackups,
	}
	errorHook := lumberjack.Logger{
		Filename:   logConfig.ErrorPath,
		MaxSize:    logConfig.MaxFileSize,
		MaxBackups: logConfig.MaxBackups,
	}
	fileWriter := zapcore.AddSync(&hook)
	errorFileWriter := zapcore.AddSync(&errorHook)
	consoleWriter := zapcore.Lock(os.Stdout)
	// An unparsable level string no longer goes unnoticed: fall back to
	// Info explicitly instead of silently relying on the zero value.
	zapLevel := new(zapcore.Level)
	if err := zapLevel.Set(logConfig.Level); err != nil {
		*zapLevel = zapcore.InfoLevel
	}
	levelEnable := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool {
		return lvl >= *zapLevel
	})
	errLevelEnable := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool {
		return lvl >= zapcore.ErrorLevel
	})
	var core zapcore.Core
	if logConfig.Develop {
		core = zapcore.NewTee(
			zapcore.NewCore(encoder, consoleWriter, levelEnable),
			zapcore.NewCore(encoder, fileWriter, levelEnable),
			zapcore.NewCore(encoder, errorFileWriter, errLevelEnable),
		)
	} else {
		core = zapcore.NewTee(
			zapcore.NewCore(encoder, fileWriter, levelEnable),
			zapcore.NewCore(encoder, errorFileWriter, errLevelEnable),
		)
	}
	return zap.New(core, zap.AddCaller(), zap.AddCallerSkip(1), zap.AddStacktrace(zapcore.DPanicLevel))
}
|
package entity
// Vehicle is implemented by anything that can report its gearbox
// ("cambio") description.
type Vehicle interface {
	Cambio() string
}
|
package updatetodb
import (
"errorhandlers"
"errors"
"storage"
)
// UpdateUserNickname sets the nickname of the user with the given id.
// The underlying error is logged and a generic error is returned when
// preparing or executing the statement fails. (The previous doc
// comment referred to a different function, StoreUser.)
func UpdateUserNickname(id int, newNickname string) error {
	var db = storage.GetDb()
	stmt, err := db.Prepare("UPDATE users " +
		"SET nickname = ? " +
		"WHERE id = ?;")
	if err != nil {
		errorhandlers.LogError(err, "Error updating user nickname")
		return errors.New("Prepare statement update user nickname failed")
	}
	// Bug fix: the prepared statement was never closed, leaking a
	// database resource on every call.
	defer stmt.Close()
	_, err = stmt.Exec(
		newNickname,
		id)
	if err != nil {
		errorhandlers.LogError(err, "Error updating user nickname")
		return errors.New("Exec statement update user nickname failed")
	}
	return nil
}
|
// A part of go-tour
package main
// +build ignore
import (
	"fmt"
	"sync"
)
type Fetcher interface {
// Fetch returns the body of URL and a slice of URLs fond on that page.
Fetch(url string) (body string, urls []string, err error)
}
// crawlMu serializes access to the shared visited-set passed through
// CrawlImpl: the map is read and written by many goroutines at once.
var crawlMu sync.Mutex

// CrawlImpl uses fetcher to recursively crawl pages starting with url,
// to a maximum of depth. m records URLs already scheduled, and a 0 is
// sent on finish once this subtree is fully crawled.
func CrawlImpl(
	url string, depth int, fetcher Fetcher, m map[string]bool,
	finish chan int) {
	if depth <= 0 {
		finish <- 0
		return
	}
	body, urls, err := fetcher.Fetch(url)
	if err != nil {
		fmt.Println(err)
		finish <- 0
		return
	}
	fmt.Printf("found: %s %q\n", url, body)
	channels := []chan int{}
	for _, u := range urls {
		// Bug fix: check-and-mark must happen under the lock — the map
		// is shared with the goroutines spawned below, and unsynchronized
		// concurrent map access is a data race (and can panic).
		crawlMu.Lock()
		visited := m[u]
		if !visited {
			m[u] = true
		}
		crawlMu.Unlock()
		if !visited {
			ch := make(chan int)
			go CrawlImpl(u, depth-1, fetcher, m, ch)
			channels = append(channels, ch)
		}
	}
	for _, ch := range channels {
		<-ch
	}
	finish <- 0
}
// Crawl starts a recursive crawl of url up to the given depth and
// blocks until every spawned fetch has completed.
func Crawl(url string, depth int, fetcher Fetcher) {
	visited := make(map[string]bool)
	done := make(chan int)
	go CrawlImpl(url, depth, fetcher, visited, done)
	<-done
}
// main crawls the canned golang.org page graph to depth 4.
func main() {
	Crawl("http://golang.org/", 4, fetcher)
}
// fakeFetcher is Fetcher that returns canned results.
type fakeFetcher map[string]*fakeResult

// fakeResult is one canned page: its body text and outgoing links.
type fakeResult struct {
	body string
	urls []string
}
// Fetch returns the canned body and link list for url, or an error for
// any URL not present in the map.
func (f fakeFetcher) Fetch(url string) (string, []string, error) {
	res, ok := f[url]
	if !ok {
		return "", nil, fmt.Errorf("not found: %s", url)
	}
	return res.body, res.urls, nil
}
// fetcher is the canned page graph, rooted at golang.org/, that main
// crawls.
var fetcher = fakeFetcher{
	"http://golang.org/": &fakeResult{
		"The Go programming Language",
		[]string{
			"http://golang.org/pkg/",
			"http://golang.org/cmd/",
		},
	},
	"http://golang.org/pkg/": &fakeResult{
		"Packages",
		[]string{
			"http://golang.org/",
			"http://golang.org/cmd/",
			"http://golang.org/pkg/fmt/",
			"http://golang.org/pkg/os/",
		},
	},
	"http://golang.org/pkg/fmt/": &fakeResult{
		"Package fmt",
		[]string{
			"http://golang.org/",
			"http://golang.org/pkg/",
		},
	},
	"http://golang.org/pkg/os/": &fakeResult{
		"Package os",
		[]string{
			"http://golang.org/",
			"http://golang.org/pkg/",
		},
	},
}
|
package datamodel
import (
"github.com/GoAdminGroup/go-admin/context"
"github.com/GoAdminGroup/go-admin/modules/db"
"github.com/GoAdminGroup/go-admin/plugins/admin/modules/table"
"github.com/GoAdminGroup/go-admin/template/types/form"
)
// GetAuthorsTable return the model of table author.
// It wires both the list view (info) and the create/edit form for the
// "authors" table on the default connection.
func GetAuthorsTable(ctx *context.Context) (authorsTable table.Table) {
	authorsTable = table.NewDefaultTable(table.DefaultConfig())
	// connect your custom connection
	// authorsTable = table.NewDefaultTable(table.DefaultConfigWithDriverAndConnection("mysql", "admin"))
	info := authorsTable.GetInfo()
	info.AddField("ID", "id", db.Int).FieldSortable()
	info.AddField("First Name", "first_name", db.Varchar)
	info.AddField("Last Name", "last_name", db.Varchar)
	info.AddField("Email", "email", db.Varchar)
	info.AddField("Birthdate", "birthdate", db.Date)
	info.AddField("Added", "added", db.Timestamp)
	info.SetTable("authors").SetTitle("Authors").SetDescription("Authors")
	formList := authorsTable.GetForm()
	// The id is displayed but immutable on update, and absent on create.
	formList.AddField("ID", "id", db.Int, form.Default).FieldDisplayButCanNotEditWhenUpdate().FieldDisableWhenCreate()
	formList.AddField("First Name", "first_name", db.Varchar, form.Text)
	formList.AddField("Last Name", "last_name", db.Varchar, form.Text)
	formList.AddField("Email", "email", db.Varchar, form.Text)
	formList.AddField("Birthdate", "birthdate", db.Date, form.Text)
	formList.AddField("Added", "added", db.Timestamp, form.Text)
	formList.SetTable("authors").SetTitle("Authors").SetDescription("Authors")
	return
}
|
package levenshtein
import (
"testing"
)
// Benchmarks for the plain recursive implementation (LevRec) at three
// input lengths.

func BenchmarkRecursiveLen5(b *testing.B) {
	x, y := "about", "above"
	for n := 0; n < b.N; n++ {
		LevRec(x, y)
	}
}

func BenchmarkRecursiveLen10(b *testing.B) {
	x, y := "abbanition", "abaptiston"
	for n := 0; n < b.N; n++ {
		LevRec(x, y)
	}
}

func BenchmarkRecursiveLen15(b *testing.B) {
	x, y := "characteristics", "recommendations"
	for n := 0; n < b.N; n++ {
		LevRec(x, y)
	}
}
// Benchmarks for the recursive matrix implementation (LevMtrRec) at
// increasing input lengths.

func BenchmarkRecursiveMatrixLen10(b *testing.B) {
	x, y := "abbanition", "abaptiston"
	for n := 0; n < b.N; n++ {
		LevMtrRec(x, y)
	}
}

func BenchmarkRecursiveMatrixLen20(b *testing.B) {
	x, y := "abdominohysterectomy", "acetylcholinesterase"
	for n := 0; n < b.N; n++ {
		LevMtrRec(x, y)
	}
}

func BenchmarkRecursiveMatrixLen30(b *testing.B) {
	x, y := "chlorobenzylidenemalononitrile", "abdominalexternalobliquemuscle"
	for n := 0; n < b.N; n++ {
		LevMtrRec(x, y)
	}
}

func BenchmarkRecursiveMatrixLen50(b *testing.B) {
	x := "chlorobenzylidenemalononitrileabdominohysterectomy"
	y := "abdominalexternalobliquemuscleacetylcholinesterase"
	for n := 0; n < b.N; n++ {
		LevMtrRec(x, y)
	}
}

func BenchmarkRecursiveMatrixLen100(b *testing.B) {
	const half1 = "chlorobenzylidenemalononitrileabdominohysterectomy"
	const half2 = "abdominalexternalobliquemuscleacetylcholinesterase"
	x, y := half1+half1, half2+half2
	for n := 0; n < b.N; n++ {
		LevMtrRec(x, y)
	}
}

func BenchmarkRecursiveMatrixLen200(b *testing.B) {
	const q1 = "chlorobenzylidenemalononitrileabdominohysterectomy"
	const q2 = "abdominalexternalobliquemuscleacetylcholinesterase"
	x := q1 + q1 + q1 + q1
	y := q2 + q2 + q2 + q2
	for n := 0; n < b.N; n++ {
		LevMtrRec(x, y)
	}
}
// Benchmarks for the iterative matrix implementation (LevMtr) at
// increasing input lengths.

func BenchmarkIterativeMatrixLen10(b *testing.B) {
	x, y := "abbanition", "abaptiston"
	for n := 0; n < b.N; n++ {
		LevMtr(x, y)
	}
}

func BenchmarkIterativeMatrixLen20(b *testing.B) {
	x, y := "abdominohysterectomy", "acetylcholinesterase"
	for n := 0; n < b.N; n++ {
		LevMtr(x, y)
	}
}

func BenchmarkIterativeMatrixLen30(b *testing.B) {
	x, y := "chlorobenzylidenemalononitrile", "abdominalexternalobliquemuscle"
	for n := 0; n < b.N; n++ {
		LevMtr(x, y)
	}
}

func BenchmarkIterativeMatrixLen50(b *testing.B) {
	x := "chlorobenzylidenemalononitrileabdominohysterectomy"
	y := "abdominalexternalobliquemuscleacetylcholinesterase"
	for n := 0; n < b.N; n++ {
		LevMtr(x, y)
	}
}

func BenchmarkIterativeMatrixLen100(b *testing.B) {
	const half1 = "chlorobenzylidenemalononitrileabdominohysterectomy"
	const half2 = "abdominalexternalobliquemuscleacetylcholinesterase"
	x, y := half1+half1, half2+half2
	for n := 0; n < b.N; n++ {
		LevMtr(x, y)
	}
}

func BenchmarkIterativeMatrixLen200(b *testing.B) {
	const q1 = "chlorobenzylidenemalononitrileabdominohysterectomy"
	const q2 = "abdominalexternalobliquemuscleacetylcholinesterase"
	x := q1 + q1 + q1 + q1
	y := q2 + q2 + q2 + q2
	for n := 0; n < b.N; n++ {
		LevMtr(x, y)
	}
}
// Benchmarks for the Damerau-Levenshtein implementation (DamLevMtr) at
// increasing input lengths.

func BenchmarkDamerauLevenshteinLen10(b *testing.B) {
	x, y := "abbanition", "abaptiston"
	for n := 0; n < b.N; n++ {
		DamLevMtr(x, y)
	}
}

func BenchmarkDamerauLevenshteinLen20(b *testing.B) {
	x, y := "abdominohysterectomy", "acetylcholinesterase"
	for n := 0; n < b.N; n++ {
		DamLevMtr(x, y)
	}
}

func BenchmarkDamerauLevenshteinLen30(b *testing.B) {
	x, y := "chlorobenzylidenemalononitrile", "abdominalexternalobliquemuscle"
	for n := 0; n < b.N; n++ {
		DamLevMtr(x, y)
	}
}

func BenchmarkDamerauLevenshteinLen50(b *testing.B) {
	x := "chlorobenzylidenemalononitrileabdominohysterectomy"
	y := "abdominalexternalobliquemuscleacetylcholinesterase"
	for n := 0; n < b.N; n++ {
		DamLevMtr(x, y)
	}
}

func BenchmarkDamerauLevenshteinLen100(b *testing.B) {
	const half1 = "chlorobenzylidenemalononitrileabdominohysterectomy"
	const half2 = "abdominalexternalobliquemuscleacetylcholinesterase"
	x, y := half1+half1, half2+half2
	for n := 0; n < b.N; n++ {
		DamLevMtr(x, y)
	}
}

func BenchmarkDamerauLevenshteinLen200(b *testing.B) {
	const q1 = "chlorobenzylidenemalononitrileabdominohysterectomy"
	const q2 = "abdominalexternalobliquemuscleacetylcholinesterase"
	x := q1 + q1 + q1 + q1
	y := q2 + q2 + q2 + q2
	for n := 0; n < b.N; n++ {
		DamLevMtr(x, y)
	}
}
|
package build
import (
"context"
"github.com/werf/logboek"
"github.com/werf/logboek/pkg/style"
"github.com/werf/logboek/pkg/types"
)
// ExportPhase is the conveyor phase that exports an image's final stage
// under additional tags after the image's stages are built.
type ExportPhase struct {
	BasePhase
	ExportPhaseOptions
}

// ExportPhaseOptions configures ExportPhase.
type ExportPhaseOptions struct {
	// ExportTagFuncList maps an image name to each tag the final stage
	// should be exported under; empty list disables exporting.
	ExportTagFuncList []func(string) string
}
// NewExportPhase builds an ExportPhase bound to the given conveyor.
func NewExportPhase(c *Conveyor, opts ExportPhaseOptions) *ExportPhase {
	phase := &ExportPhase{
		BasePhase:          BasePhase{c},
		ExportPhaseOptions: opts,
	}
	return phase
}
// Name identifies this phase in conveyor logs.
func (phase *ExportPhase) Name() string {
	return "export"
}
// AfterImageStages runs once an image's stages are built. Artifacts are
// never exported; final images are handed to exportLastStageImage.
func (phase *ExportPhase) AfterImageStages(ctx context.Context, img *Image) error {
	if img.isArtifact {
		return nil
	}
	return phase.exportLastStageImage(ctx, img)
}
// exportLastStageImage exports the image's last non-empty stage under
// every tag produced by ExportTagFuncList. It is a no-op when no tag
// funcs are configured; the first export error aborts the remaining tags.
func (phase *ExportPhase) exportLastStageImage(ctx context.Context, img *Image) error {
	if len(phase.ExportTagFuncList) == 0 {
		return nil
	}
	return logboek.Context(ctx).Default().LogProcess("Exporting image...").
		Options(func(options types.LogProcessOptionsInterface) {
			options.Style(style.Highlight())
		}).
		DoError(func() error {
			for _, tagFunc := range phase.ExportTagFuncList {
				// Compute the destination tag from the image name.
				tag := tagFunc(img.GetName())
				if err := logboek.Context(ctx).Default().LogProcess("tag %s", tag).
					DoError(func() error {
						// Export the stage that backs the final image.
						stageDesc := img.GetLastNonEmptyStage().GetImage().GetStageDescription()
						if err := phase.Conveyor.StorageManager.GetStagesStorage().ExportStage(ctx, stageDesc, tag); err != nil {
							return err
						}
						return nil
					}); err != nil {
					return err
				}
			}
			return nil
		})
}
// Clone returns a shallow copy of the phase.
func (phase *ExportPhase) Clone() Phase {
	copied := *phase
	return &copied
}
|
package log
import (
"log"
"os"
"runtime"
)
// now we need log promptly
//var logger *zap.SugaredLogger

// logger is the process-wide destination for all helpers below; it writes
// to stdout with the standard library's short-file prefix.
var logger *log.Logger

func init() {
	logger = log.New(os.Stdout, "", log.Lshortfile)
}
// Info logs its arguments like log.Println.
func Info(a ...interface{}) {
	logger.Println(a...)
}
// Infof logs a formatted message like log.Printf.
func Infof(format string, a ...interface{}) {
	logger.Printf(format, a...)
}
// Error logs its arguments, preceded by a line reporting the caller's
// file and line number.
func Error(a ...interface{}) {
	if _, file, line, ok := runtime.Caller(1); ok {
		logger.Printf("file:%s,line:%d", file, line)
	}
	logger.Println(a...)
}
// Errorf logs a formatted message, preceded by a line reporting the
// caller's file and line number.
func Errorf(format string, a ...interface{}) {
	if _, file, line, ok := runtime.Caller(1); ok {
		logger.Printf("file:%s,line:%d", file, line)
	}
	logger.Printf(format, a...)
}
// Panic logs its arguments with the caller's file and line, then panics.
func Panic(a ...interface{}) {
	if _, file, line, ok := runtime.Caller(1); ok {
		logger.Printf("file:%s,line:%d", file, line)
	}
	logger.Println(a...)
	panic("")
}
// Panicf logs a formatted message with the caller's file and line, then panics.
func Panicf(format string, a ...interface{}) {
	if _, file, line, ok := runtime.Caller(1); ok {
		logger.Printf("file:%s,line:%d", file, line)
	}
	logger.Printf(format, a...)
	panic("")
}
|
package main
import (
"fmt"
"os"
"github.com/lubovskiy/app"
)
// main constructs the application (discarding it) and prints the GOPATH
// the process runs under.
func main() {
	application := app.New()
	_ = application
	fmt.Println(os.Getenv("GOPATH"))
}
|
package pool
import (
"testing"
)
// TestPool_AddingOnBeforeRunningServer verifies that tasks added to a Pool
// *before* Server() starts are all executed exactly once, whether they are
// added from one goroutine or from many concurrently.
func TestPool_AddingOnBeforeRunningServer(t *testing.T) {
	t.Run("A simple information", func(t *testing.T) {
		// The task reports its own ID back on ch; it must match the ID
		// returned by add.
		ch := make(chan struct{ ID string })
		p := New(1)
		xid := add(t, p, func(taskID string) error {
			ch <- struct{ ID string }{ID: taskID}
			return nil
		})
		go p.Server()
		wf := <-ch
		if wf.ID != xid {
			t.Errorf("unexpected ID on wf, got: %s", wf.ID)
		}
	})
	t.Run("multiple adds on same rotine before server", func(t *testing.T) {
		chQuit := make(chan struct{})
		p := New(100)
		// checkList maps each added task ID to "already seen" so duplicate
		// executions can be detected.
		checkList := make(map[string]bool)
		nIter := 1000
		for i := 0; i < nIter; i++ {
			xid := add(t, p, func(taskID string) error {
				return nil
			})
			checkList[xid] = false
		}
		ch := p.GetInfoChannel()
		go func() {
			count := 0
			for w := range ch {
				count++
				used, ok := checkList[w.ID]
				checkList[w.ID] = true
				if !ok {
					t.Errorf("received unexpected id: %s", w.ID)
				}
				if used {
					t.Errorf("received already used id: %s, it looks like the task run twice", w.ID)
				}
				if count >= nIter {
					break
				}
			}
			chQuit <- struct{}{}
		}()
		go p.Server()
		<-chQuit
	})
	t.Run("multiple adds with multiples rotine before server", func(t *testing.T) {
		chQuit := make(chan struct{})
		chMap := make(chan string)
		chNext := make(chan struct{})
		p := New(10)
		checkList := make(map[string]bool)
		nRoutines := 10
		nPerRoutines := 100
		nMaxIter := nRoutines * nPerRoutines
		// Single collector goroutine owns checkList while IDs stream in,
		// avoiding concurrent map writes; it signals chNext once all adds
		// have been recorded.
		go func() {
			count := 0
			for xid := range chMap {
				count++
				checkList[xid] = false
				if count >= nMaxIter {
					chNext <- struct{}{}
				}
			}
		}()
		// nRoutines producers each add nPerRoutines tasks concurrently.
		for i := 0; i < nRoutines; i++ {
			go func() {
				for j := 0; j < nPerRoutines; j++ {
					xid := add(t, p, func(taskID string) error {
						return nil
					})
					chMap <- xid
				}
			}()
		}
		<-chNext
		ch := p.GetInfoChannel()
		go func() {
			count := 0
			for w := range ch {
				count++
				used, ok := checkList[w.ID]
				checkList[w.ID] = true
				if !ok {
					t.Errorf("received unexpected id: %s", w.ID)
				}
				if used {
					t.Errorf("received already used id: %s, it looks like the task run twice", w.ID)
				}
				if count >= nMaxIter {
					break
				}
			}
			chQuit <- struct{}{}
		}()
		go p.Server()
		<-chQuit
	})
}
|
package main
import (
"github.com/spf13/pflag"
"net"
"log"
"os"
"io"
)
// your own dns server

// Command-line flags: the local UDP address this proxy listens on and the
// upstream DNS server every query is forwarded to.
var (
	local  = pflag.String("local", ":53", "please input dns server listen addr")
	remote = pflag.String("remote", "", "please remote dns server addr")
)
// init parses the command-line flags and refuses to start without an
// upstream DNS server address.
func init() {
	pflag.Parse()
	if *remote == "" {
		// Fix: the old message read "is not empty", stating the opposite of
		// what this check enforces.
		log.Println("remote dns server addr must not be empty!")
		os.Exit(100)
	}
}
// main resolves the listen address, binds the local UDP socket and hands
// it to handle for proxying.
func main() {
	laddr, err := net.ResolveUDPAddr("udp", *local)
	if err != nil {
		log.Printf("reslove udp addr error: %s ", err.Error())
		os.Exit(100)
	}
	udpConn, err := net.ListenUDP("udp", laddr)
	if err != nil {
		log.Printf("udp listen error: %s ", err.Error())
		os.Exit(100)
	}
	handle(udpConn)
}
// handle proxies DNS traffic: datagrams arriving on conn are copied to
// the upstream server and responses are copied back.
//
// NOTE(review): a single upstream association is shared by all local
// senders — confirm this single-client behavior is intended.
func handle(conn *net.UDPConn) {
	log.Println("udp server start ")
	// remote dns server
	remoteConn, err := net.Dial("udp", *remote)
	if err != nil {
		log.Printf("remote dns server conn error %s", err.Error())
		os.Exit(100)
	}
	// Fix: the upstream socket was never closed.
	defer remoteConn.Close()
	// io bind: upstream->local runs in the background, local->upstream
	// blocks here for the life of the process.
	go func() {
		io.Copy(conn, remoteConn)
	}()
	io.Copy(remoteConn, conn)
}
|
package apichannels
// ChannelError types of errors that can be thrown
type ChannelError string

// Error returns the error's message, satisfying the error interface.
func (che ChannelError) Error() string {
	return string(che)
}

// ChannelNotFound error for when the channel is not found
const ChannelNotFound = ChannelError("Channel not found")

// MessageNotFound error for when the message is not found.
// Fix: this previously duplicated ChannelNotFound's text ("Channel not
// found"), making the two errors indistinguishable to callers.
const MessageNotFound = ChannelError("Message not found")

// AlreadyExists error for when trying to create a channel but it already exists
const AlreadyExists = ChannelError("Channel already exists")
|
// ˅
package main
// ˄
// INumber is implemented by anything that can generate a number.
// NOTE(review): the ˅/˄ comment markers look like code-generator anchors —
// keep them in place.
type INumber interface {
	Generate()
	// ˅

	// ˄
}
// ˅
// ˄
|
package database
import (
"database/sql"
"fmt"
"github.com/go-sql-driver/mysql"
)
// DB is the package-wide database handle populated by Connect.
var DB *sql.DB

// Connect opens the MySQL connection pool and stores it in DB, panicking
// when the DSN is unusable.
//
// SECURITY(review): credentials are hard-coded here; move them to
// configuration/environment before shipping.
// NOTE(review): sql.Open does not dial the server; consider DB.Ping() to
// verify connectivity.
func Connect() {
	cfg := mysql.Config{
		User:   "debian-sys-maint",
		Passwd: "YZkKRHnDn0I8XsvK",
		Net:    "tcp",
		DBName: "test",
	}
	db, err := sql.Open("mysql", cfg.FormatDSN())
	// Fix: check the error before publishing the handle and announcing
	// success; the old order printed "Database connected" even on failure.
	if err != nil {
		panic(err.Error())
	}
	DB = db
	fmt.Println("Database connected")
}
|
package main
import (
	"fmt"
	"html/template"
	"io/ioutil"
	"log"
	"mime"
	"net/http"
	"path/filepath"

	"adutils"
)
const (
	// STATIC_DIR holds files served verbatim; VIEW_DIR holds HTML templates.
	STATIC_DIR = "../static/"
	VIEW_DIR   = "../view/"
)

// home is the template context for the index page.
type home struct {
	Title string
}
// ext2Mime maps a file extension (including the leading dot) to the MIME
// type this server uses for it; unknown extensions map to "".
//
// NOTE(review): "text/js" is not a registered MIME type (text/javascript
// is), but rootHandler compares against this exact string, so it is kept.
func ext2Mime(ext string) string {
	switch ext {
	case ".css":
		return "text/css"
	case ".js":
		return "text/js"
	case ".html":
		return "text/html"
	default:
		return ""
	}
	// Fix: removed the unreachable `return "*/*"` that followed the switch.
}
// rootHandler serves the index page at "/" and static files at any other
// path, with a one-day public cache header on static content.
func rootHandler(w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	log.Println("Info:request file ", r.URL.Path)
	//visitor main page
	if r.URL.Path == "/" {
		title := home{Title: "advise show system "}
		t, err := template.ParseFiles(VIEW_DIR + "index.html")
		// Fix: the parse error was discarded, so a missing/broken template
		// caused a nil-pointer panic on Execute.
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		t.Execute(w, title)
		return
	}
	requestfile := STATIC_DIR + r.URL.Path
	ret, _ := adutils.Exists(requestfile)
	if ret {
		content, err := ioutil.ReadFile(requestfile)
		if err == nil {
			// Fix: ext2Mime expects an extension like ".js" but was given
			// the whole path, so the Content-Type header could never be set.
			mType := ext2Mime(filepath.Ext(requestfile))
			if mType == "text/js" {
				w.Header().Set("Content-Type", mType)
			}
			w.Header().Set("Cache-Control", "public, max-age=86400")
			w.Write(content)
			return
		}
	}
	http.NotFound(w, r)
	log.Println("Error: file not find. path=", r.URL.Path)
	return
}
// showHandler renders the show-control page on GET, populated from the
// parsed server configuration. Non-GET requests get an empty 200 response.
func showHandler(w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	if r.Method == "GET" {
		t, err := template.ParseFiles(VIEW_DIR + "showCtrl.html")
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		srv, err := adutils.ServerParse()
		if err != nil {
			fmt.Println("Parse server file error")
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// Scale the delay for the template — presumably seconds to
		// milliseconds; TODO confirm Delaytime's unit in adutils.
		srv.DspCtrl.Delaytime *= 1000
		fmt.Println(srv)
		t.Execute(w, srv)
		fmt.Println("End showHandler")
		return
	}
}
// fileHandler serves the static directory under the /file URL prefix.
func fileHandler(w http.ResponseWriter, r *http.Request) {
	fs := http.FileServer(http.Dir(STATIC_DIR))
	http.StripPrefix("/file", fs).ServeHTTP(w, r)
}
// displayHandler renders the advert selected by the "index" form
// parameter, redirecting instead when the advert is an external link.
func displayHandler(w http.ResponseWriter, r *http.Request) {
	r.ParseForm()
	index := r.Form["index"]
	// Fix: a request without an "index" parameter used to panic on index[0].
	if len(index) == 0 {
		http.Error(w, "missing index parameter", http.StatusBadRequest)
		return
	}
	filename, linktype, err := adutils.GetAdvFilename(index[0])
	if err != nil {
		// Fall back to the default advert page on lookup failure.
		fmt.Println("displayHandler getAdvFilename failed")
		filename = "0.html"
	}
	if linktype == 1 {
		// NOTE(review): 300 is "Multiple Choices"; a redirect normally uses
		// http.StatusFound (302) — confirm the intended status code.
		http.Redirect(w, r, filename, 300)
		return
	}
	t, err := template.ParseFiles(filename)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	t.Execute(w, nil)
	fmt.Println("End displayHandler")
	return
}
// main registers the HTTP handlers, reads the listen port from the parsed
// server configuration (server.xml) and starts the HTTP server.
func main() {
	http.HandleFunc("/", rootHandler)
	http.HandleFunc("/show", showHandler)
	http.HandleFunc("/display", displayHandler)
	http.HandleFunc("/file", fileHandler)
	srv, err := adutils.ServerParse()
	if err != nil {
		log.Fatal("server.xml error", err.Error())
		return
	}
	port := ":" + srv.Monitor.Port
	err = http.ListenAndServe(port, nil)
	if err != nil {
		log.Fatal("God Like listen wrong: ", err.Error())
	}
}
|
/*
A distributed block-chain transactional key-value service
Assignment 7 of UBC CS 416 2016 W2
http://www.cs.ubc.ca/~bestchai/teaching/cs416_2016w2/assign7/index.html
Created by Harlan Sim and Sean Blair, April 2017
This package represents the kvnode component of the system.
The kvnode process command line usage must be:
go run kvnode.go [ghash] [num-zeroes] [nodesFile] [nodeID] [listen-node-in IP:port] [listen-client-in IP:port]
example: go run kvnode.go 5473be60b466a24872fd7a007c41d1455e9044cca57d433eb51271b61bc16987 2 nodeList.txt 1 localhost:2223 localhost:2222
[ghash] : SHA 256 hash in hexadecimal of the genesis block for this instantiation of the system.
[num-zeroes] : required number of leading zeroes in the proof-of-work algorithm, greater or equal to 1.
[nodesFile] : a file containing one line per node in the key-value service. Each line must be terminated by '\n'
and indicates the IP:port that should be used to by this node to connect to the other nodes in the service.
[nodeID] : an integer between 1 and number of lines in nodesFile. The IP:port on line i of nodesFile is the external
IP:port corresponding to the listen-node-in IP:port, which will be used by other nodes to connect to this node.
[listen-node-in IP:port] : the IP:port that this node should listen on to receive connections from other nodes.
[listen-client-in IP:port] : the IP:port that this node should listen on to receive connections from clients.
*/
package main
import (
"crypto/sha256"
"fmt"
"io/ioutil"
"log"
"math"
"net"
"net/rpc"
"os"
"strconv"
"strings"
"sync"
"time"
)
// Package-level state shared by the RPC handlers and the block-generation
// goroutines. Access is guarded by mutex (not consistently — several
// boolean flags below are read/written without it; see the handlers).
var (
	genesisHash        string
	leafBlocks         map[string]Block
	numLeadingZeroes   int
	nodeIpAndStatuses  []NodeIpPortStatus
	myNodeID           int
	listenKVNodeIpPort string
	listenClientIpPort string
	// Transaction ID's are incremented by 1
	nextTransactionID int
	// All transactions the system has seen
	transactions map[int]Transaction
	// Represents the values corresponding to the in-order execution of all the
	// transactions along the block-chain. Only holds values of commited transactions.
	keyValueStore map[Key]Value
	// Maps BlockHash to Block
	blockChain map[string]Block
	// true when not generating Commit Blocks
	isGenerateNoOps bool
	// true when not receiving a Block from other kvnode
	isGenerateCommits bool
	// true when busy working on NoOp Block
	isWorkingOnNoOp bool
	// true when busy working on commit Block
	isWorkingOnCommit bool
	mutex             *sync.Mutex
	abortedMessage    string = "This transaction is aborted!!"
)

// Represents a key in the system.
type Key string

// Represent a value in the system.
type Value string

// A block in the blockChain
type Block struct {
	// hash of HashBlock field
	Hash           string
	ChildrenHashes []string
	Depth          int
	PutSet         map[Key]Value
	HashBlock      HashBlock
}

// The part of a Block that gets hashed (Read only
// except for the Nonce and the ParentHash when computing the hash)
type HashBlock struct {
	ParentHash string
	TxID       int
	NodeID     int
	Nonce      uint32
}

// Transaction tracks one client transaction from creation through
// abort/commit, including every block hash generated for it.
type Transaction struct {
	ID          int
	PutSet      map[Key]Value
	KeySet      map[Key]bool
	IsAborted   bool
	IsCommitted bool
	CommitID    int
	CommitHash  string
	AllHashes   []string
}

// NodeIpPortStatus records a peer node's address and liveness.
type NodeIpPortStatus struct {
	IpPort  string
	IsAlive bool
}

// For registering RPC's
type KVNode int
type KVServer int

// KVNode Request and Response structs
type AddBlockRequest struct {
	Block Block
}

// KVClient Request and Response structs
type NewTransactionResp struct {
	TxID int
	// The keyValueStore state on call to NewTX
	KeyValueStore map[Key]Value
}

// CommitRequest carries everything needed to validate and commit a transaction.
type CommitRequest struct {
	Transaction Transaction
	// The original values in keyValueStore of keys that the transaction touched
	RequiredKeyValues map[Key]Value
	ValidateNum       int
}

// CommitResponse reports whether the commit succeeded and its CommitID.
type CommitResponse struct {
	Success  bool
	CommitID int
	Err      string
}

// GetChildrenRequest names the block whose children are wanted
// ("" means the genesis block).
type GetChildrenRequest struct {
	ParentHash string
}

// GetChildrenResponse lists the child hashes of the requested block.
type GetChildrenResponse struct {
	Children []string
}
// main parses the command line, initializes the chain with the genesis
// block, starts both RPC listeners, then spends the rest of its life
// generating NoOp blocks.
func main() {
	err := ParseArguments()
	checkError("Error in main(), ParseArguments():\n", err, true)
	fmt.Println("KVNode's command line arguments are:\ngenesisHash:", genesisHash,
		"numLeadingZeroes:", numLeadingZeroes, "nodeIpAndStatuses:", nodeIpAndStatuses, "myNodeID:", myNodeID,
		"listenKVNodeIpPort:", listenKVNodeIpPort, "listenClientIpPort:", listenClientIpPort)
	nextTransactionID = 10
	transactions = make(map[int]Transaction)
	keyValueStore = make(map[Key]Value)
	blockChain = make(map[string]Block)
	leafBlocks = make(map[string]Block)
	// Add genesis block to blockChain map
	genesisBlock := Block{Hash: genesisHash, Depth: 0}
	blockChain[genesisHash] = genesisBlock
	// Add genesis block to leafBlocks map
	leafBlocks[genesisHash] = genesisBlock
	isGenerateNoOps = true
	isWorkingOnNoOp = false
	isGenerateCommits = true
	isWorkingOnCommit = false
	mutex = &sync.Mutex{}
	go listenNodeRPCs()
	go listenClientRPCs()
	// Give the other nodes a moment to come up before mining starts.
	time.Sleep(4 * time.Second)
	generateNoOpBlocks()
}
// Generates NoOp Blocks and adds to blockChain when not generating a Commit Block
// Runs forever; backs off for a second whenever commit work is in flight.
func generateNoOpBlocks() {
	for {
		if isGenerateNoOps && !isWorkingOnCommit {
			isWorkingOnNoOp = true
			generateNoOpBlock()
			isWorkingOnNoOp = false
			time.Sleep(time.Millisecond * 100)
		} else {
			time.Sleep(time.Second)
		}
	}
}

// While isGenerateNoOps, works on adding NoOps to the blockChain
// Returns either when isGenerateNoOps = false or successfully generates 1 NoOp
func generateNoOpBlock() {
	fmt.Println("Generating a NoOp Block...")
	fmt.Println("Block chain size:", len(blockChain), "number transactions:", len(transactions))
	// TODO this printstate() actually seemed to help performance... Maybe could use a tiny sleep here?
	printState()
	if len(leafBlocks) > 1 {
		fmt.Println("We have a fork!!!!!!!!!!!!!!")
	}
	// TxID 0 marks a NoOp block (real transactions start at ID 10).
	noOpBlock := Block{HashBlock: HashBlock{TxID: 0, NodeID: myNodeID, Nonce: 0}}
	noOpBlock = setCorrectParentHashAndDepth(noOpBlock)
	for isGenerateNoOps {
		success, _ := generateBlock(&noOpBlock)
		if success {
			return
		}
	}
	// received a call to commit or AddBlock which set isGenerateNoOps = false
	return
}
// setCorrectParentHashAndDepth picks a parent for the given block — a
// Commit leaf when one exists, otherwise any leaf — and fills in the
// block's ParentHash and Depth accordingly.
func setCorrectParentHashAndDepth(block Block) Block {
	commitBlocks := getCommitLeafBlocks()
	var parentBlock Block
	// only one block (no fork), or all noOp blocks
	if len(leafBlocks) == 1 || len(commitBlocks) == 0 {
		for leafHash := range leafBlocks {
			// this randomly picks a block because the order returned from range on maps is undefined
			mutex.Lock()
			parentBlock = leafBlocks[leafHash]
			mutex.Unlock()
			break
		}
	} else {
		// need to choose a Commit Block
		// if only 1, choose it, or if block is a NoOp choose random
		// if block is Commit Block, there shouldn't be conflicting transactions commited
		// bacause Commit checks that before this call, therefore random parent should be ok.
		for leafHash := range commitBlocks {
			parentBlock = commitBlocks[leafHash]
			break
		}
	}
	block.HashBlock.ParentHash = parentBlock.Hash
	block.Depth = parentBlock.Depth + 1
	return block
}
// getCommitLeafBlocks returns the leaf blocks that are Commit blocks
// (TxID != 0, i.e. not NoOp blocks).
func getCommitLeafBlocks() map[string]Block {
	commitBlocks := make(map[string]Block)
	// Fix: `leafBlocksCopy := leafBlocks` only aliased the map, so the
	// iteration below ran on the shared map *after* Unlock — a data race
	// with concurrent writers. Take a real copy while holding the lock.
	mutex.Lock()
	leafBlocksCopy := make(map[string]Block, len(leafBlocks))
	for hash, blk := range leafBlocks {
		leafBlocksCopy[hash] = blk
	}
	mutex.Unlock()
	for leafBlockHash, leafBlock := range leafBlocksCopy {
		if leafBlock.HashBlock.TxID != 0 {
			commitBlocks[leafBlockHash] = leafBlock
		}
	}
	return commitBlocks
}
// Hashes the given Block's HashBlock once, if has sufficient leading zeroes, adds it
// to blockChain, returns true and the hash. Otherwise, increments the Nonce and returns false, ""
// Callers loop over this so the proof-of-work can be interrupted between attempts.
func generateBlock(block *Block) (bool, string) {
	b := *block
	// Hash the fmt-rendered HashBlock; both mining and AddBlock verification
	// use this same rendering, so they agree on the digest.
	data := []byte(fmt.Sprintf("%v", b.HashBlock))
	sum := sha256.Sum256(data)
	hash := sum[:] // Converts from [32]byte to []byte
	// TODO: make sure to turn in with call to isLeadingNumZeroCharacters,
	// not with call to isLeadingNumZeroes (which is used for finer control of block generation)
	if isLeadingNumZeroes(hash) {
		// if isLeadingNumZeroCharacters(hash) {
		hashString := string(hash)
		b.Hash = hashString
		addToBlockChain(b)
		broadcastBlock(b)
		fmt.Println("Done generating new block")
		return true, hashString
	} else {
		b.HashBlock.Nonce = b.HashBlock.Nonce + 1
		*block = b
		return false, ""
	}
}
// For visualizing the current state of a kvnode's keyValueStore and transactions maps
// (debug aid only; each map read is individually locked, so the printed
// snapshot is not atomic).
func printState() {
	fmt.Println("\nKVNODE STATE:")
	fmt.Println("-keyValueStore:")
	for k := range keyValueStore {
		mutex.Lock()
		val := keyValueStore[k]
		mutex.Unlock()
		fmt.Println("    Key:", k, "Value:", val)
	}
	fmt.Println("-transactions:")
	for txId := range transactions {
		mutex.Lock()
		tx := transactions[txId]
		mutex.Unlock()
		fmt.Println("  --Transaction ID:", tx.ID, "IsAborted:", tx.IsAborted, "IsCommitted:", tx.IsCommitted, "CommitId:", tx.CommitID)
		fmt.Printf("    Hash:%x\n", tx.CommitHash)
		fmt.Printf("    AllHashes:%x\n", tx.AllHashes)
		fmt.Println("    PutSet:")
		for k := range tx.PutSet {
			fmt.Println("      Key:", k, "Value:", tx.PutSet[k])
		}
	}
	fmt.Println("-blockChain:")
	printBlockChain()
	fmt.Println("blockChain size:", len(blockChain))
	fmt.Println("Total number of transactions is:", len(transactions), "\n")
	fmt.Println("Nodes List and Status:", nodeIpAndStatuses)
}
// printBlockChain dumps the whole chain to the console, starting from the
// genesis block and recursing through its children.
func printBlockChain() {
	mutex.Lock()
	genesis := blockChain[genesisHash]
	mutex.Unlock()
	fmt.Printf("GenesisBlockHash: %x\n", genesis.Hash)
	fmt.Printf("GenesisBlockChildren: %x\n\n", genesis.ChildrenHashes)
	for _, child := range genesis.ChildrenHashes {
		printBlock(child)
	}
}
// printBlock dumps one block to the console, indented by its depth, then
// recurses into its children.
func printBlock(blockHash string) {
	mutex.Lock()
	blk := blockChain[blockHash]
	mutex.Unlock()
	indent := strings.Repeat(" ", blk.Depth)
	fmt.Printf("%sBlockTransactionID: %v\n", indent, blk.HashBlock.TxID)
	fmt.Printf("%sBlock.Hash :%x\n", indent, blk.Hash)
	fmt.Printf("%sBlock.Depth :%v\n", indent, blk.Depth)
	fmt.Printf("%sBlock.ChildrenHashes :%x\n", indent, blk.ChildrenHashes)
	fmt.Printf("%sBlock.PutSet :%v\n", indent, blk.PutSet)
	hashBlock := blk.HashBlock
	fmt.Printf("%sBlock.HashBlock.ParentHash :%x\n", indent, hashBlock.ParentHash)
	fmt.Printf("%sBlock.HashBlock.NodeID :%v\n\n", indent, hashBlock.NodeID)
	for _, childHash := range blk.ChildrenHashes {
		printBlock(childHash)
	}
}
// GetChildren returns the children hashes of the block stored under
// req.ParentHash in the blockChain; an empty hash means the genesis block.
func (p *KVServer) GetChildren(req GetChildrenRequest, resp *GetChildrenResponse) error {
	key := req.ParentHash
	if key == "" {
		key = genesisHash
	}
	mutex.Lock()
	parent := blockChain[key]
	mutex.Unlock()
	resp.Children = parent.ChildrenHashes
	return nil
}
// NewTransaction hands the client a unique transaction ID and a snapshot
// handle of the current keyValueStore.
//
// Fix: net/rpc serves requests concurrently, so the unguarded
// read-then-increment of nextTransactionID could hand two clients the
// same ID; the ID allocation now happens under the mutex.
// NOTE(review): kvStore is still an alias of the live map, as before —
// the "snapshot" can change under the caller; confirm whether a deep copy
// is intended.
func (p *KVServer) NewTransaction(req bool, resp *NewTransactionResp) error {
	mutex.Lock()
	txID := nextTransactionID
	nextTransactionID = nextTransactionID + 10
	kvStore := keyValueStore
	mutex.Unlock()
	*resp = NewTransactionResp{txID, kvStore}
	return nil
}
// If the given transaction is aborted returns false, otherwise commits the transaction,
// and returns its CommitID value,
func (p *KVServer) Commit(req CommitRequest, resp *CommitResponse) error {
	fmt.Println("Received a call to Commit(", req, ")")
	tx := req.Transaction
	mutex.Lock()
	transactions[tx.ID] = tx
	mutex.Unlock()
	// Pause NoOp mining and wait out any in-flight NoOp so the commit
	// block does not race it for the chain tip.
	isGenerateNoOps = false
	for isWorkingOnNoOp {
		// This stopped it from hanging... !
		time.Sleep(time.Millisecond)
	}
	if !isCommitPossible(req.RequiredKeyValues) {
		// A conflicting value was committed since the transaction began: abort.
		mutex.Lock()
		t := transactions[tx.ID]
		t.IsAborted = true
		transactions[tx.ID] = t
		mutex.Unlock()
		*resp = CommitResponse{false, 0, abortedMessage}
		isGenerateNoOps = true
	} else {
		blockHash := generateCommitBlock(tx.ID, req.RequiredKeyValues)
		if blockHash == "" {
			// a conflicting transaction just commited
			mutex.Lock()
			t := transactions[tx.ID]
			t.IsAborted = true
			transactions[tx.ID] = t
			mutex.Unlock()
			*resp = CommitResponse{false, 0, abortedMessage + "Another node committed a conflicting transaction!!"}
			isGenerateNoOps = true
		} else {
			isGenerateNoOps = true
			// Block until the commit block has ValidateNum descendants.
			validateCommit(req)
			mutex.Lock()
			commitId := transactions[tx.ID].CommitID
			mutex.Unlock()
			*resp = CommitResponse{true, commitId, ""}
		}
	}
	printState()
	return nil
}
// isCommitPossible returns true if keyValueStore still holds the same
// values for the keys of requiredKeyValues that it held when the
// transaction started (a missing key counts as the empty value "").
func isCommitPossible(requiredKeyValues map[Key]Value) bool {
	for k, required := range requiredKeyValues {
		mutex.Lock()
		val, ok := keyValueStore[k]
		mutex.Unlock()
		if ok && val != required {
			return false
		}
		// Fix: the old branch tested `!ok && val != ""`, but on a map miss
		// val is always the zero value "", so it could never fire — a key
		// that disappeared (required non-empty, now absent) went undetected.
		// Compare the *required* value instead.
		if !ok && required != "" {
			return false
		}
	}
	return true
}
// Waits until the Block with given blockHash has the correct number of descendant Blocks
// check all blocks for validate commit number of descendants. If find one, sets
// the correct values in the transactions[req.TxID] (CommitHash, CommitID)
// Polls once per second; never returns until some block validates.
func validateCommit(req CommitRequest) {
	fmt.Println("In validateCommit()")
	for {
		// always refresh the hashes list in case other block for same tx has been added
		mutex.Lock()
		tx := transactions[req.Transaction.ID]
		mutex.Unlock()
		hashes := tx.AllHashes
		for _, hash := range hashes {
			mutex.Lock()
			block := blockChain[hash]
			mutex.Unlock()
			fmt.Println("Trying to validate a block with ID", block.HashBlock.TxID, "and children:", len(block.ChildrenHashes))
			if isBlockValidated(block, req.ValidateNum) {
				// set the correct commit values for returning to client
				tx.CommitHash = hash
				tx.CommitID = block.Depth
				mutex.Lock()
				transactions[req.Transaction.ID] = tx
				mutex.Unlock()
				return
			}
		}
		time.Sleep(time.Second)
		fmt.Println("block not yet validated...")
	}
}
// isBlockValidated recursively walks the chain below block and reports
// whether some branch reaches at least validateNum descendants deep.
func isBlockValidated(block Block, validateNum int) bool {
	if validateNum == 0 {
		return true
	}
	for _, childHash := range block.ChildrenHashes {
		mutex.Lock()
		child := blockChain[childHash]
		mutex.Unlock()
		fmt.Println("The child block has depth:", child.Depth)
		if isBlockValidated(child, validateNum-1) {
			return true
		}
	}
	return false
}
// Adds a Commit Block with transaction txid to the blockChain,
// or allows AddBlock to add it, returns its hash
// Returns "" when a conflicting transaction commits first (caller aborts).
func generateCommitBlock(txid int, requiredKeyValues map[Key]Value) string {
	fmt.Println("Generating a Commit Block...")
	mutex.Lock()
	putSet := transactions[txid].PutSet
	mutex.Unlock()
	block := Block{PutSet: putSet, HashBlock: HashBlock{TxID: txid, NodeID: myNodeID, Nonce: 0}}
	for {
		if isGenerateCommits {
			isWorkingOnCommit = true
			// this commit block was just added by AddBlock()
			isInChain, hash := isBlockInChain(txid)
			if isInChain {
				isWorkingOnCommit = false
				return hash
			} else if !isCommitPossible(requiredKeyValues) {
				isWorkingOnCommit = false
				return ""
			} else {
				// Re-pick the parent each round: the chain tip may have
				// moved while we were mining.
				block = setCorrectParentHashAndDepth(block)
				for isGenerateCommits {
					success, blockHash := generateBlock(&block)
					isWorkingOnCommit = false
					if success {
						return blockHash
					}
				}
			}
		}
		// isGenerateCommits was set to false by AddBlock()
		time.Sleep(time.Millisecond)
	}
}
// isBlockInChain reports whether transaction txid has been committed,
// returning its commit block hash when it has ("" otherwise).
func isBlockInChain(txid int) (bool, string) {
	mutex.Lock()
	tx := transactions[txid]
	mutex.Unlock()
	if !tx.IsCommitted {
		return false, ""
	}
	return true, tx.CommitHash
}
// isLeadingNumZeroCharacters reports whether hash starts with
// numLeadingZeroes literal '0' characters (0x30). This is the
// implementation mandated by the assignment specification.
func isLeadingNumZeroCharacters(hash []byte) bool {
	for i := 0; i < numLeadingZeroes; i++ {
		if hash[i] != '0' {
			return false
		}
	}
	return true
}
// Returns true if given hash has the minimum number of leading zeroes.
// This is incorrect given the assignment specs, but is useful for debugging
// as it provides more control over different amounts of proof-of-work required.
// TODO: make sure this is not used in the final code!!
func isLeadingNumZeroes(hash []byte) bool {
	if numLeadingZeroes == 0 {
		return true
	} else {
		i := 0
		numZeroes := numLeadingZeroes
		// Consume whole zero bytes first (8 bits at a time); the final
		// partial byte is checked arithmetically below.
		for {
			// numZeroes <= 8, byte at hash[i] will determine validity
			if numZeroes-8 <= 0 {
				break
			} else {
				// numZeroes is greater than 8, byte at hash[i] must be zero
				if hash[i] != 0 {
					return false
				} else {
					i++
					numZeroes -= 8
				}
			}
		}
		// returns true if byte at hash[i] has the the minimum number of leading zeroes
		// if numZeroes is 8: hash[i] < 2^(8-8) == hash[1] < 1 == hash[i] must be (0000 0000)b.
		// if numZeroes is 1: hash[i] < 2^(8-1) == hash[1] < (1000 0000)b == hash[i] <= (0111 1111)b
		return float64(hash[i]) < math.Pow(2, float64(8-numZeroes))
	}
}
// broadcastBlock sends the given Block to every other live node via the
// KVNode.AddBlock RPC. Any node that cannot be dialed, called, or cleanly
// disconnected is marked dead so it is skipped on future broadcasts.
//
// FIXES: the original leaked the rpc client when client.Call failed (it
// continued to the next node without closing the connection), and the
// Close error message referred to commit() instead of broadcastBlock().
func broadcastBlock(block Block) {
	fmt.Println("In broadcastBlock()")
	req := AddBlockRequest{block}
	for i, node := range nodeIpAndStatuses {
		id := i + 1
		// Skip ourselves and nodes already known to be down.
		if (id == myNodeID) || !node.IsAlive {
			continue
		}
		fmt.Println(id, node.IpPort)
		var resp bool
		client, err := rpc.Dial("tcp", node.IpPort)
		checkError("Error in broadcastBlock(), rpc.Dial()", err, false)
		if err != nil {
			nodeIpAndStatuses[i].IsAlive = false
			continue
		}
		err = client.Call("KVNode.AddBlock", req, &resp)
		checkError("Error in broadcastBlock(), client.Call()", err, false)
		if err != nil {
			// Close the connection before giving up on this node,
			// otherwise the client leaks.
			client.Close()
			nodeIpAndStatuses[i].IsAlive = false
			continue
		}
		if !resp {
			fmt.Println(id, node.IpPort, "did not accept the HashBlock!!!!!!")
		}
		err = client.Close()
		checkError("Error in broadcastBlock(), client.Close():", err, false)
		if err != nil {
			nodeIpAndStatuses[i].IsAlive = false
			continue
		}
	}
}
// AddBlock is the RPC endpoint through which peer nodes announce a newly
// mined Block. It recomputes the hash of the received HashBlock and checks
// the proof-of-work; if valid, it pauses local block generation, records
// the transaction (for commit blocks) and appends the block to the local
// chain in a background goroutine. resp is set to true iff the block
// passed verification.
func (p *KVNode) AddBlock(req AddBlockRequest, resp *bool) error {
	fmt.Println("Recieved a call to AddBlock with tid:", req.Block.HashBlock.TxID, "and PutSet:", req.Block.PutSet)
	b := req.Block
	hb := b.HashBlock
	// Re-hash the HashBlock's printed form to verify the sender really
	// did the proof-of-work (same hashing scheme as generateBlock).
	data := []byte(fmt.Sprintf("%v", hb))
	sum := sha256.Sum256(data)
	hash := sum[:] // Converts from [32]byte to []byte
	// TODO: make sure to turn in with call to isLeadingNumZeroCharacters,
	// not with call to isLeadingNumZeroes (which is used for finer control of block generation)
	// *resp = isLeadingNumZeroCharacters(hash)
	*resp = isLeadingNumZeroes(hash)
	if *resp == true {
		fmt.Println("Received HashBlock: VERIFIED")
		// Heavy work happens in a goroutine to allow return to caller.
		go func(block Block, txid int) {
			// stop generating noOps when we have a new Block in the block chain...
			isGenerateNoOps = false
			// stop generating Commits when we have a new Block in the chain
			isGenerateCommits = false
			// Busy-wait until in-flight generation loops observe the flags
			// above and stand down before we touch the chain.
			for isWorkingOnNoOp {
				// This stopped it from hanging... !!!
				time.Sleep(time.Millisecond)
			}
			for isWorkingOnCommit {
				// This stopped it from hanging... !!!
				time.Sleep(time.Millisecond)
			}
			// txid > 0 marks a commit block: make sure we have a local
			// Transaction record before adding the block to the chain,
			// since addToBlockChain reads transactions[txid].
			if txid > 0 {
				mutex.Lock()
				tx, ok := transactions[txid]
				mutex.Unlock()
				if !ok {
					tx = Transaction{ID: txid, PutSet: block.PutSet}
					mutex.Lock()
					transactions[txid] = tx
					mutex.Unlock()
				}
			}
			addToBlockChain(block)
			fmt.Println("Added block:")
			printBlock(block.Hash)
			// NOTE(review): generation stays paused for 11 seconds —
			// presumably to let the network settle; confirm this constant
			// is intentional before final submission.
			time.Sleep(time.Second * 11)
			isGenerateCommits = true
			isGenerateNoOps = true
		}(b, hb.TxID)
	} else {
		fmt.Println("Received HashBlock: FAILED VERIFICATION")
	}
	return nil
}
// addToBlockChain records block in the local chain and updates all derived
// state: the parent's child list, the leaf set, and — for commit blocks —
// the key-value store and the owning transaction's commit bookkeeping.
// Called by both the commit path and AddBlock.
func addToBlockChain(block Block) {
	mutex.Lock()
	blockChain[block.Hash] = block
	mutex.Unlock()
	setParentsNewChild(block)
	updateLeafBlocks(block)
	txid := block.HashBlock.TxID
	// txid <= 0 means a no-op block: nothing further to record.
	if txid <= 0 {
		return
	}
	// A Commit transaction: apply its PutSet to the store.
	mutex.Lock()
	tx := transactions[txid]
	mutex.Unlock()
	for k, v := range tx.PutSet {
		mutex.Lock()
		keyValueStore[k] = v
		mutex.Unlock()
	}
	// Mark the transaction committed and remember every hash it appeared in.
	tx.IsCommitted = true
	tx.CommitHash = block.Hash
	tx.AllHashes = append(tx.AllHashes, block.Hash)
	mutex.Lock()
	transactions[txid] = tx
	mutex.Unlock()
}
// updateLeafBlocks adds block to the leaf set and removes any leaves that
// are now strictly shallower than block.
//
// BUG FIX: the original ranged over the leafBlocks map while holding the
// mutex only for the individual reads and deletes, so a concurrent writer
// could mutate the map mid-iteration — a data race. The whole
// read-modify-write now runs inside one critical section; deleting map
// entries during range is well-defined in Go.
func updateLeafBlocks(block Block) {
	mutex.Lock()
	defer mutex.Unlock()
	leafBlocks[block.Hash] = block
	for hash, leaf := range leafBlocks {
		// Remove blocks with lesser depth (block itself is never removed
		// since its depth is not less than its own).
		if leaf.Depth < block.Depth {
			delete(leafBlocks, hash)
		}
	}
}
// setParentsNewChild appends block.Hash to its parent's ChildrenHashes.
//
// BUG FIX: the original left the `if !ok` branch empty, so when the parent
// was missing from blockChain it continued with a zero-value Block and
// stored a bogus entry under the empty hash "". We now return early; an
// unknown parent simply has nothing to link.
func setParentsNewChild(block Block) {
	mutex.Lock()
	parentBlock, ok := blockChain[block.HashBlock.ParentHash]
	mutex.Unlock()
	if !ok {
		// Parent not in our chain (e.g. genesis or not yet received).
		return
	}
	parentBlock.ChildrenHashes = append(parentBlock.ChildrenHashes, block.Hash)
	mutex.Lock()
	blockChain[parentBlock.Hash] = parentBlock
	mutex.Unlock()
}
// listenNodeRPCs registers the KVNode RPC service and accepts peer-node
// connections forever, serving each one on its own goroutine.
func listenNodeRPCs() {
	server := rpc.NewServer()
	server.Register(new(KVNode))
	listener, err := net.Listen("tcp", listenKVNodeIpPort)
	checkError("Error in listenNodeRPCs(), net.Listen()", err, true)
	fmt.Println("Listening for node RPC calls on:", listenKVNodeIpPort)
	for {
		conn, err := listener.Accept()
		checkError("Error in listenNodeRPCs(), l.Accept()", err, true)
		go server.ServeConn(conn)
	}
}
// listenClientRPCs registers the KVServer RPC service and accepts client
// connections forever.
//
// FIX: the original called ServeConn synchronously, so a single connected
// client blocked every other client until it disconnected. Each connection
// is now served on its own goroutine, consistent with listenNodeRPCs.
func listenClientRPCs() {
	kvServer := rpc.NewServer()
	kv := new(KVServer)
	kvServer.Register(kv)
	l, err := net.Listen("tcp", listenClientIpPort)
	checkError("Error in listenClientRPCs(), net.Listen()", err, true)
	fmt.Println("Listening for client RPC calls on:", listenClientIpPort)
	for {
		conn, err := l.Accept()
		checkError("Error in listenClientRPCs(), l.Accept()", err, true)
		go kvServer.ServeConn(conn)
	}
}
// ParseArguments parses the command line arguments of kvnode.go and stores
// them in the corresponding global variables. It returns a usage error when
// the argument count is wrong; individual parse failures exit via checkError.
//
// FIX: the usage string was passed to fmt.Errorf as a non-constant format
// string (flagged by go vet); it is now passed as a value via "%s".
func ParseArguments() (err error) {
	arguments := os.Args[1:]
	if len(arguments) != 6 {
		usage := "Usage: {go run kvnode.go [ghash] [num-zeroes] [nodesFile] [nodeID]" +
			" [listen-node-in IP:port] [listen-client-in IP:port]}"
		err = fmt.Errorf("%s", usage)
		return
	}
	genesisHash = arguments[0]
	numLeadingZeroes, err = strconv.Atoi(arguments[1])
	checkError("Error in ParseArguments(), strconv.Atoi(arguments[1]):", err, true)
	nodeIpAndStatuses = parseNodeFile(arguments[2])
	myNodeID, err = strconv.Atoi(arguments[3])
	checkError("Error in ParseArguments(), strconv.Atoi(arguments[3]):", err, true)
	listenKVNodeIpPort = arguments[4]
	listenClientIpPort = arguments[5]
	return
}
// parseNodeFile reads nodeFile (one "ip:port" per line) and returns a
// NodeIpPortStatus for each entry, all initially marked alive.
//
// FIX: the original unconditionally dropped the last split element, which
// panicked on an empty file (nodeIPs[:len(nodeIPs)-1] with len 0) and
// silently ate a real entry when the file lacked a trailing newline. Blank
// lines are now skipped instead.
func parseNodeFile(nodeFile string) (nodeIpNStatuses []NodeIpPortStatus) {
	nodeContent, err := ioutil.ReadFile(nodeFile)
	checkError("Failed to parse Nodefile: ", err, true)
	nodeIPs := strings.Split(string(nodeContent), "\n")
	fmt.Printf(" Nodes = %v, length = %v\n", nodeIPs, len(nodeIPs))
	for _, nodeIp := range nodeIPs {
		// Ignore empty/whitespace-only lines (including the trailing one).
		if strings.TrimSpace(nodeIp) == "" {
			continue
		}
		nodeIpNStatuses = append(nodeIpNStatuses, NodeIpPortStatus{nodeIp, true})
	}
	return
}
// checkError logs msg together with err when err is non-nil, and terminates
// the process with exit code -1 when exit is true. A nil err is a no-op.
func checkError(msg string, err error, exit bool) {
	if err == nil {
		return
	}
	log.Println(msg, err)
	if exit {
		os.Exit(-1)
	}
}
|
package main
import (
"bytes"
"io"
"io/ioutil"
"os"
"path/filepath"
"reflect"
"testing"
)
const (
	// tmpdir is the scratch directory recreated for every test run.
	tmpdir = "tmp"
)

var (
	// testWALPath is the write-ahead log file used by the tests.
	testWALPath = filepath.Join(tmpdir, "test.log")
	// testDBPath is the checkpoint database file.
	testDBPath = filepath.Join(tmpdir, "test.db")
	// testTmpPath is the temporary file used while writing a checkpoint.
	testTmpPath = filepath.Join(tmpdir, "test.tmp")
)
// createTestStorage recreates the scratch directory and returns a fresh
// Storage whose WAL file is opened for append. It fails the test on error.
//
// FIX: added t.Helper() so failures are reported at the caller's line.
func createTestStorage(t *testing.T) *Storage {
	t.Helper()
	// Best-effort cleanup/creation; a real problem surfaces in OpenFile below.
	_ = os.RemoveAll(tmpdir)
	_ = os.MkdirAll(tmpdir, 0777)
	file, err := os.OpenFile(testWALPath, os.O_CREATE|os.O_APPEND|os.O_RDWR, 0600)
	if err != nil {
		t.Fatal(err)
	}
	return NewStorage(file, testDBPath, testTmpPath)
}
// TestTxn_Insert verifies Insert semantics: duplicate keys are rejected both
// within a transaction and after commit, and a deleted key can be
// re-inserted.
func TestTxn_Insert(t *testing.T) {
	var (
		value1 = []byte("value1")
		value2 = []byte("value2")
		value3 = []byte("value3")
	)
	t.Run("normal case", func(t *testing.T) {
		storage := createTestStorage(t)
		defer storage.wal.Close()
		txn := storage.NewTxn()
		if err := txn.Insert("key1", value1); err != nil {
			t.Errorf("failed to insert key1 : %v", err)
		}
		// Re-inserting an existing key must fail with ErrExist.
		if err := txn.Insert("key1", value1); err != ErrExist {
			t.Errorf("unexpectedly success to insert duplicate key : %v", err)
		}
		if err := txn.Insert("key2", value2); err != nil {
			t.Errorf("failed to insert key2 : %v", err)
		}
		if err := txn.Commit(); err != nil {
			t.Errorf("failed to commit : %v", err)
		}
		// insert after commit: committed keys still count as existing
		if err := txn.Insert("key1", value3); err != ErrExist {
			t.Errorf("unexpectedly success to insert duplicate key after commit : %v", err)
		}
	})
	t.Run("insert after delete", func(t *testing.T) {
		storage := createTestStorage(t)
		defer storage.wal.Close()
		txn := storage.NewTxn()
		if err := txn.Insert("key1", value1); err != nil {
			t.Errorf("failed to insert key1 : %v", err)
		}
		if err := txn.Delete("key1"); err != nil {
			t.Errorf("failed to delete key1 : %v", err)
		}
		// Deleting frees the key for re-insertion within the same txn.
		if err := txn.Insert("key1", value2); err != nil {
			t.Errorf("failed to insert key1 after delete : %v", err)
		}
		if err := txn.Commit(); err != nil {
			t.Errorf("failed to commit : %v", err)
		}
		// The same delete-then-insert cycle must also work after commit.
		if err := txn.Delete("key1"); err != nil {
			t.Errorf("failed to delete key1 after commit : %v", err)
		}
		if err := txn.Insert("key1", value3); err != nil {
			t.Errorf("failed to insert key1 after commit and delete : %v", err)
		}
	})
}
// TestTxn_Read verifies Read: a missing key yields ErrNotExist and an
// inserted key reads back the value that was written.
func TestTxn_Read(t *testing.T) {
	storage := createTestStorage(t)
	defer storage.wal.Close()
	txn := storage.NewTxn()
	value1 := []byte("value1")
	// Reading before any insert must report ErrNotExist.
	if _, err := txn.Read("key1"); err != ErrNotExist {
		t.Errorf("key1 is not (not exist) : %v", err)
	}
	if err := txn.Insert("key1", value1); err != nil {
		t.Errorf("failed to insert key1 : %v", err)
	}
	// An uncommitted insert must already be visible to Read.
	if v, err := txn.Read("key1"); err != nil {
		t.Errorf("failed to read key1 : %v", err)
	} else if !bytes.Equal(v, value1) {
		t.Errorf("value is not match %v : %v", v, value1)
	}
}
// TestTxn_Update verifies Update: updating a missing key fails with
// ErrNotExist, and updates are visible both before and after commit.
func TestTxn_Update(t *testing.T) {
	storage := createTestStorage(t)
	defer storage.wal.Close()
	txn := storage.NewTxn()
	var (
		value1 = []byte("value1")
		value2 = []byte("value2")
		value3 = []byte("value3")
	)
	// Updating a key that was never inserted must report ErrNotExist.
	if err := txn.Update("key1", value1); err != ErrNotExist {
		t.Errorf("key1 is not (not exist) : %v", err)
	}
	if err := txn.Insert("key1", value1); err != nil {
		t.Errorf("failed to insert key1 : %v", err)
	}
	if err := txn.Update("key1", value2); err != nil {
		t.Errorf("failed to update key1 : %v", err)
	}
	// Read must observe the updated (not the originally inserted) value.
	if v, err := txn.Read("key1"); err != nil {
		t.Errorf("failed to read key1 : %v", err)
	} else if !bytes.Equal(v, value2) {
		t.Errorf("value is not match %v : %v", v, value2)
	}
	if err := txn.Commit(); err != nil {
		t.Errorf("failed to commit : %v", err)
	}
	// update after commit
	if err := txn.Update("key1", value3); err != nil {
		t.Errorf("failed to update key1 : %v", err)
	}
	if v, err := txn.Read("key1"); err != nil {
		t.Errorf("failed to read key1 after commit : %v", err)
	} else if !bytes.Equal(v, value3) {
		t.Errorf("value is not match after commit %v : %v", v, value3)
	}
}
// TestTxn_Delete verifies Delete: deleting a missing key fails with
// ErrNotExist, a deleted key becomes unreadable, and deletion also works on
// keys that were committed earlier.
func TestTxn_Delete(t *testing.T) {
	var (
		value1 = []byte("value1")
	)
	t.Run("normal case", func(t *testing.T) {
		storage := createTestStorage(t)
		defer storage.wal.Close()
		txn := storage.NewTxn()
		// Deleting a key that was never inserted must report ErrNotExist.
		if err := txn.Delete("key1"); err != ErrNotExist {
			t.Errorf("key1 is not (not exist) : %v", err)
		}
		if err := txn.Insert("key1", value1); err != nil {
			t.Errorf("failed to insert key1 : %v", err)
		}
		if err := txn.Delete("key1"); err != nil {
			t.Errorf("failed to delete key1 : %v", err)
		}
		// After deletion the key must be gone for both Read and Delete.
		if _, err := txn.Read("key1"); err != ErrNotExist {
			t.Errorf("key1 is not (not exist) after delete : %v", err)
		}
		if err := txn.Delete("key1"); err != ErrNotExist {
			t.Errorf("deleted key1 must not exist : %v", err)
		}
	})
	t.Run("delete after commit", func(t *testing.T) {
		storage := createTestStorage(t)
		defer storage.wal.Close()
		txn := storage.NewTxn()
		if err := txn.Insert("key1", value1); err != nil {
			t.Errorf("failed to insert key1 : %v", err)
		}
		if err := txn.Commit(); err != nil {
			t.Errorf("failed to commit : %v", err)
		}
		// delete after commit
		if err := txn.Delete("key1"); err != nil {
			t.Errorf("failed to delete key1 : %v", err)
		}
		if _, err := txn.Read("key1"); err != ErrNotExist {
			t.Errorf("key1 is not (not exist) after delete : %v", err)
		}
	})
}
// TestTxn_Commit verifies that Commit clears the write set and that plain
// inserts, updated keys and deleted keys all land in their final state.
//
// FIX: the commit-failure message dropped the error value
// (t.Errorf("failed to commit")); it now includes err like every sibling
// test in this file.
func TestTxn_Commit(t *testing.T) {
	storage := createTestStorage(t)
	defer storage.wal.Close()
	txn := storage.NewTxn()
	var (
		value1 = []byte("value1")
		value2 = []byte("value2")
		value3 = []byte("value3")
	)
	// just insert
	if err := txn.Insert("key1", value1); err != nil {
		t.Errorf("failed to insert key1 : %v", err)
	}
	// updated key
	if err := txn.Insert("key2", value2); err != nil {
		t.Errorf("failed to insert key2 : %v", err)
	}
	if err := txn.Update("key2", value3); err != nil {
		t.Errorf("failed to update key2 : %v", err)
	}
	// deleted key
	if err := txn.Insert("key3", value3); err != nil {
		t.Errorf("failed to insert key3 : %v", err)
	}
	if err := txn.Delete("key3"); err != nil {
		t.Errorf("failed to delete key3 : %v", err)
	}
	// commit
	if err := txn.Commit(); err != nil {
		t.Errorf("failed to commit : %v", err)
	}
	// The write set must be flushed by a successful commit.
	if len(txn.writeSet) != 0 {
		t.Errorf("writeSet is not cleared after commit : len == %v", len(txn.writeSet))
	}
	if v, err := txn.Read("key1"); err != nil {
		t.Errorf("failed to read key1 : %v", err)
	} else if !bytes.Equal(v, value1) {
		t.Errorf("value1 is not match %v : %v", v, value1)
	}
	// key2 must hold its last update, not its initial insert.
	if v, err := txn.Read("key2"); err != nil {
		t.Errorf("failed to read key2 : %v", err)
	} else if !bytes.Equal(v, value3) {
		t.Errorf("value2 is not match %v : %v", v, value3)
	}
	if _, err := txn.Read("key3"); err != ErrNotExist {
		t.Errorf("key3 is not (not exist) : %v", err)
	}
}
// TestTxn_Abort verifies that Abort discards uncommitted inserts, updates
// and deletes, restoring the last committed state.
//
// FIX: both commit-failure messages dropped the error value
// (t.Errorf("failed to commit")); they now include err like the other
// tests in this file.
func TestTxn_Abort(t *testing.T) {
	var (
		value1 = []byte("value1")
		value2 = []byte("value2")
	)
	t.Run("abort insert", func(t *testing.T) {
		storage := createTestStorage(t)
		defer storage.wal.Close()
		txn := storage.NewTxn()
		if err := txn.Insert("key1", value1); err != nil {
			t.Errorf("failed to insert key1 : %v", err)
		}
		txn.Abort()
		// The aborted insert must be invisible.
		if _, err := txn.Read("key1"); err != ErrNotExist {
			t.Errorf("key1 is not (not exist) : %v", err)
		}
	})
	t.Run("abort update", func(t *testing.T) {
		storage := createTestStorage(t)
		defer storage.wal.Close()
		txn := storage.NewTxn()
		if err := txn.Insert("key1", value1); err != nil {
			t.Errorf("failed to insert key1 : %v", err)
		}
		if err := txn.Commit(); err != nil {
			t.Errorf("failed to commit : %v", err)
		}
		if err := txn.Update("key1", value2); err != nil {
			t.Errorf("failed to update key1 : %v", err)
		}
		txn.Abort()
		// The committed value must survive the aborted update.
		if v, err := txn.Read("key1"); err != nil {
			t.Errorf("failed to read key1 : %v", err)
		} else if !bytes.Equal(v, value1) {
			t.Errorf("value1 is not match %v : %v", v, value1)
		}
	})
	t.Run("abort delete", func(t *testing.T) {
		storage := createTestStorage(t)
		defer storage.wal.Close()
		txn := storage.NewTxn()
		if err := txn.Insert("key1", value1); err != nil {
			t.Errorf("failed to insert key1 : %v", err)
		}
		if err := txn.Commit(); err != nil {
			t.Errorf("failed to commit : %v", err)
		}
		if err := txn.Delete("key1"); err != nil {
			t.Errorf("failed to delete key1 : %v", err)
		}
		txn.Abort()
		// The committed value must survive the aborted delete.
		if v, err := txn.Read("key1"); err != nil {
			t.Errorf("failed to read key1 : %v", err)
		} else if !bytes.Equal(v, value1) {
			t.Errorf("value1 is not match %v : %v", v, value1)
		}
	})
}
// assertValue fails the test unless key reads back exactly value.
//
// FIX: added t.Helper() so failures point at the calling test line.
func assertValue(t *testing.T, txn *Txn, key string, value []byte) {
	t.Helper()
	if v, err := txn.Read(key); err != nil {
		t.Errorf("failed to read %q : %v", key, err)
	} else if !bytes.Equal(v, value) {
		t.Errorf("read value for %q is not match %v, expected %v", key, v, value)
	}
}
// assertNotExist fails the test unless reading key yields ErrNotExist.
//
// FIX: added t.Helper() so failures point at the calling test line.
func assertNotExist(t *testing.T, txn *Txn, key string) {
	t.Helper()
	if v, err := txn.Read(key); err != ErrNotExist {
		if err == nil {
			t.Errorf("unexpectedly value for %q exists : %v", key, v)
		} else {
			t.Errorf("failed to read %q expected not exist : %v", key, err)
		}
	}
}
// clearFile rewinds file to the start and truncates it to zero length so a
// test can rewrite the WAL from scratch.
//
// FIX: added t.Helper() so failures point at the calling test line.
func clearFile(t *testing.T, file *os.File) {
	t.Helper()
	if _, err := file.Seek(0, io.SeekStart); err != nil {
		t.Errorf("failed to seek : %v", err)
	} else if err = file.Truncate(0); err != nil {
		t.Errorf("failed to truncate : %v", err)
	}
}
// writeLogs serializes each RecordLog into a scratch buffer and appends it
// to file.
//
// FIXES: the serialization failure message wrongly said "deserialize", and
// t.Helper() was missing.
func writeLogs(t *testing.T, file *os.File, logs []RecordLog) {
	t.Helper()
	// 4096 bytes is comfortably larger than any record used in these tests.
	var buf [4096]byte
	for i, rlog := range logs {
		if n, err := rlog.Serialize(buf[:]); err != nil {
			t.Errorf("failed to serialize %v : %v", i, err)
		} else if _, err = file.Write(buf[:n]); err != nil {
			t.Errorf("failed to write log %v : %v", i, err)
		}
	}
}
// readLogs decodes every complete RecordLog in filename and returns the
// unconsumed tail bytes together with the decoded logs.
//
// FIX: a failed ReadFile previously only called t.Errorf and then went on
// to decode a nil buffer; it now aborts immediately with t.Fatalf. Also
// added t.Helper().
func readLogs(t *testing.T, filename string) ([]byte, []RecordLog) {
	t.Helper()
	buf, err := ioutil.ReadFile(filename)
	if err != nil {
		t.Fatalf("failed to read WAL file : %v", err)
	}
	var logsInFile []RecordLog
	for i := 0; ; i++ {
		var rlog RecordLog
		n, err := rlog.Deserialize(buf)
		if err == ErrBufferShort {
			// No complete record left; whatever remains is the tail.
			break
		} else if err != nil {
			t.Fatalf("failed to deserialize log : n == %v : %v : buffer = %v", i, err, buf)
		}
		logsInFile = append(logsInFile, rlog)
		buf = buf[n:]
	}
	return buf, logsInFile
}
// applyLogs replays a slice of RecordLogs through txn, mapping each log
// action onto the corresponding Txn operation. Unknown actions abort the
// test.
//
// FIX: added t.Helper() so failures point at the calling test line.
func applyLogs(t *testing.T, txn *Txn, logs []RecordLog) {
	t.Helper()
	for _, rlog := range logs {
		switch rlog.Action {
		case LInsert:
			if err := txn.Insert(rlog.Key, rlog.Value); err != nil {
				t.Errorf("failed to insert %v : %v", rlog, err)
			}
		case LUpdate:
			if err := txn.Update(rlog.Key, rlog.Value); err != nil {
				t.Errorf("failed to update %v : %v", rlog, err)
			}
		case LDelete:
			if err := txn.Delete(rlog.Key); err != nil {
				t.Errorf("failed to delete %v : %v", rlog, err)
			}
		case LCommit:
			if err := txn.Commit(); err != nil {
				t.Errorf("failed to commit %v : %v", rlog, err)
			}
		default:
			t.Fatalf("unexpected log %v", rlog)
		}
	}
}
// TestWAL verifies that applying a sequence of operations through a Txn
// writes exactly one matching RecordLog per operation to the WAL file, in
// order, with no trailing bytes.
func TestWAL(t *testing.T) {
	storage := createTestStorage(t)
	defer storage.wal.Close()
	txn := storage.NewTxn()
	// The expected WAL contents, mirroring the operations applied below.
	logs := []RecordLog{
		{Action: LCommit},
		{Action: LInsert, Record: Record{Key: "key1", Value: []byte("value1")}},
		{Action: LInsert, Record: Record{Key: "key2", Value: []byte("value2")}},
		{Action: LInsert, Record: Record{Key: "key3", Value: []byte("value3")}},
		{Action: LInsert, Record: Record{Key: "key4", Value: []byte("value4")}},
		{Action: LUpdate, Record: Record{Key: "key2", Value: []byte("value5")}},
		{Action: LDelete, Record: Record{Key: "key3", Value: []byte("")}}, // TODO: delete log not need to have value
		{Action: LUpdate, Record: Record{Key: "key4", Value: []byte("value6")}},
		{Action: LUpdate, Record: Record{Key: "key4", Value: []byte("value7")}},
		{Action: LCommit},
		{Action: LUpdate, Record: Record{Key: "key1", Value: []byte("value8")}},
		{Action: LDelete, Record: Record{Key: "key2", Value: []byte("")}}, // TODO: delete log not need to have value
		{Action: LInsert, Record: Record{Key: "key3", Value: []byte("value8")}},
		{Action: LCommit},
	}
	applyLogs(t, txn, logs)
	// Read back the WAL and compare record-by-record.
	rest, logsInFile := readLogs(t, storage.wal.Name())
	if len(rest) != 0 {
		t.Fatalf("log file is bigger than expected : %v", rest)
	} else if len(logsInFile) != len(logs) {
		t.Fatalf("count of log not match %v, expected %v", len(logsInFile), len(logs))
	}
	for i := 0; i < len(logs); i++ {
		if !reflect.DeepEqual(logsInFile[i], logs[i]) {
			t.Errorf("log not match : index == %v, %v, %v", i, logsInFile[i], logs[i])
		}
	}
}
// TestStorage_LoadWAL verifies WAL recovery: an empty WAL loads zero logs,
// a fully committed WAL replays every record, an uncommitted tail is rolled
// back to the last commit, and loading is idempotent.
func TestStorage_LoadWAL(t *testing.T) {
	t.Run("empty WAL", func(t *testing.T) {
		storage := createTestStorage(t)
		defer storage.wal.Close()
		if n, err := storage.LoadWAL(); err != nil {
			t.Errorf("failed to load : %v", err)
		} else if n != 0 {
			t.Errorf("load wal %v logs, expected 0", n)
		}
	})
	t.Run("normal case", func(t *testing.T) {
		// Three committed batches; all effects must be visible after load.
		logs := []RecordLog{
			{Action: LCommit},
			{Action: LInsert, Record: Record{Key: "key1", Value: []byte("value1")}},
			{Action: LInsert, Record: Record{Key: "key2", Value: []byte("value2")}},
			{Action: LInsert, Record: Record{Key: "key3", Value: []byte("value3")}},
			{Action: LInsert, Record: Record{Key: "key4", Value: []byte("value4")}},
			{Action: LUpdate, Record: Record{Key: "key2", Value: []byte("value5")}},
			{Action: LDelete, Record: Record{Key: "key3", Value: []byte("")}}, // TODO: delete log not need to have value
			{Action: LUpdate, Record: Record{Key: "key4", Value: []byte("value6")}},
			{Action: LUpdate, Record: Record{Key: "key4", Value: []byte("value7")}},
			{Action: LCommit},
			{Action: LUpdate, Record: Record{Key: "key1", Value: []byte("value8")}},
			{Action: LDelete, Record: Record{Key: "key2", Value: []byte("")}}, // TODO: delete log not need to have value
			{Action: LInsert, Record: Record{Key: "key3", Value: []byte("value9")}},
			{Action: LCommit},
		}
		storage := createTestStorage(t)
		defer storage.wal.Close()
		txn := storage.NewTxn()
		// write log to WAL file
		writeLogs(t, storage.wal, logs)
		if n, err := storage.LoadWAL(); err != nil {
			t.Errorf("failed to load : %v", err)
		} else if n != len(logs) {
			t.Errorf("load wal %v logs, expected %v", n, len(logs))
		}
		assertValue(t, txn, "key1", []byte("value8"))
		assertNotExist(t, txn, "key2")
		assertValue(t, txn, "key3", []byte("value9"))
		assertValue(t, txn, "key4", []byte("value7"))
		// check idempotency: reloading the same WAL must not change state
		clearFile(t, storage.wal)
		writeLogs(t, storage.wal, logs)
		if n, err := storage.LoadWAL(); err != nil {
			t.Errorf("failed to load : %v", err)
		} else if n != len(logs) {
			t.Errorf("load wal %v logs, expected %v", n, len(logs))
		}
		assertValue(t, txn, "key1", []byte("value8"))
		assertNotExist(t, txn, "key2")
		assertValue(t, txn, "key3", []byte("value9"))
		assertValue(t, txn, "key4", []byte("value7"))
	})
	t.Run("log is not completed", func(t *testing.T) {
		// Same log stream but the final batch is missing its LCommit, so
		// only the state as of the second commit may be recovered.
		logs := []RecordLog{
			{Action: LCommit},
			{Action: LInsert, Record: Record{Key: "key1", Value: []byte("value1")}},
			{Action: LInsert, Record: Record{Key: "key2", Value: []byte("value2")}},
			{Action: LInsert, Record: Record{Key: "key3", Value: []byte("value3")}},
			{Action: LInsert, Record: Record{Key: "key4", Value: []byte("value4")}},
			{Action: LUpdate, Record: Record{Key: "key2", Value: []byte("value5")}},
			{Action: LDelete, Record: Record{Key: "key3", Value: []byte("")}}, // TODO: delete log not need to have value
			{Action: LUpdate, Record: Record{Key: "key4", Value: []byte("value6")}},
			{Action: LUpdate, Record: Record{Key: "key4", Value: []byte("value7")}},
			{Action: LCommit},
			{Action: LUpdate, Record: Record{Key: "key1", Value: []byte("value8")}},
			{Action: LDelete, Record: Record{Key: "key2", Value: []byte("")}}, // TODO: delete log not need to have value
			{Action: LInsert, Record: Record{Key: "key3", Value: []byte("value9")}},
		}
		storage := createTestStorage(t)
		defer storage.wal.Close()
		txn := storage.NewTxn()
		// write log to WAL file
		writeLogs(t, storage.wal, logs)
		if n, err := storage.LoadWAL(); err != nil {
			t.Errorf("failed to load : %v", err)
		} else if n != len(logs) {
			t.Errorf("load wal %v logs, expected %v", n, len(logs))
		}
		assertValue(t, txn, "key1", []byte("value1"))
		assertValue(t, txn, "key2", []byte("value5"))
		assertNotExist(t, txn, "key3")
		assertValue(t, txn, "key4", []byte("value7"))
		// check idempotency: reloading the same WAL must not change state
		clearFile(t, storage.wal)
		writeLogs(t, storage.wal, logs)
		if n, err := storage.LoadWAL(); err != nil {
			t.Errorf("failed to load : %v", err)
		} else if n != len(logs) {
			t.Errorf("load wal %v logs, expected %v", n, len(logs))
		}
		assertValue(t, txn, "key1", []byte("value1"))
		assertValue(t, txn, "key2", []byte("value5"))
		assertNotExist(t, txn, "key3")
		assertValue(t, txn, "key4", []byte("value7"))
	})
}
// TestStorage_ClearWAL verifies that ClearWAL empties the WAL file and
// resets the write offset to 0 so subsequent writes start at the beginning.
func TestStorage_ClearWAL(t *testing.T) {
	logs := []RecordLog{
		{Action: LInsert, Record: Record{Key: "key1", Value: []byte("value1")}},
		{Action: LInsert, Record: Record{Key: "key2", Value: []byte("value2")}},
		{Action: LInsert, Record: Record{Key: "key3", Value: []byte("value3")}},
		{Action: LCommit},
	}
	storage := createTestStorage(t)
	defer storage.wal.Close()
	writeLogs(t, storage.wal, logs)
	if err := storage.ClearWAL(); err != nil {
		t.Errorf("failed to clearWAL : %v", err)
	}
	// rewrite from the offset ClearWAL set (must be 0)
	writeLogs(t, storage.wal, logs)
	// If ClearWAL worked, the file holds exactly one copy of logs.
	rest, logsInFile := readLogs(t, storage.wal.Name())
	if len(rest) != 0 {
		t.Fatalf("log file is bigger than expected : %v", rest)
	} else if len(logsInFile) != len(logs) {
		t.Fatalf("count of log not match %v, expected %v", len(logsInFile), len(logs))
	}
	for i := 0; i < len(logs); i++ {
		if !reflect.DeepEqual(logsInFile[i], logs[i]) {
			t.Errorf("log not match : index == %v, %v, %v", i, logsInFile[i], logs[i])
		}
	}
}
// TestStorage_SaveCheckPoint applies a committed log sequence, saves a
// checkpoint, loads it into a second Storage, and compares the databases.
//
// BUG FIXES: the final comparison was
// reflect.DeepEqual(storage2.db, storage2.db) — comparing a value with
// itself, which is always true, so the test could never fail; it now
// compares storage.db against storage2.db. The reopened WAL file was also
// never closed, and a failed os.Open previously fell through to a nil
// file handle (now t.Fatalf).
func TestStorage_SaveCheckPoint(t *testing.T) {
	logs := []RecordLog{
		{Action: LCommit},
		{Action: LInsert, Record: Record{Key: "key1", Value: []byte("value1")}},
		{Action: LInsert, Record: Record{Key: "key2", Value: []byte("value2")}},
		{Action: LInsert, Record: Record{Key: "key3", Value: []byte("value3")}},
		{Action: LInsert, Record: Record{Key: "key4", Value: []byte("value4")}},
		{Action: LUpdate, Record: Record{Key: "key2", Value: []byte("value5")}},
		{Action: LDelete, Record: Record{Key: "key3", Value: []byte("")}}, // TODO: delete log not need to have value
		{Action: LUpdate, Record: Record{Key: "key4", Value: []byte("value6")}},
		{Action: LUpdate, Record: Record{Key: "key4", Value: []byte("value7")}},
		{Action: LCommit},
		{Action: LUpdate, Record: Record{Key: "key1", Value: []byte("value8")}},
		{Action: LDelete, Record: Record{Key: "key2", Value: []byte("")}}, // TODO: delete log not need to have value
		{Action: LInsert, Record: Record{Key: "key3", Value: []byte("value9")}},
		{Action: LCommit},
	}
	storage := createTestStorage(t)
	defer storage.wal.Close()
	txn := storage.NewTxn()
	applyLogs(t, txn, logs)
	if err := storage.SaveCheckPoint(); err != nil {
		t.Errorf("failed to save checkpoint : %v", err)
	}
	// load checkpoint file by new Txn
	wal2, err := os.Open(testWALPath)
	if err != nil {
		t.Fatalf("failed to open wal file : %v", err)
	}
	defer wal2.Close()
	storage2 := NewStorage(wal2, testDBPath, testTmpPath)
	if err = storage2.LoadCheckPoint(); err != nil {
		t.Errorf("failed to load checkpoint : %v", err)
	}
	if !reflect.DeepEqual(storage.db, storage2.db) {
		t.Errorf("loaded records not match")
	}
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apimachinery
import (
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/apiserver/pkg/endpoints/discovery"
"k8s.io/kubernetes/test/e2e/framework"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/utils/crd"
"github.com/onsi/ginkgo"
)
// storageVersionServerVersion is the minimum server version that reports
// storageVersionHash in the discovery document.
var storageVersionServerVersion = utilversion.MustParseSemantic("v1.13.99")

var _ = SIGDescribe("Discovery", func() {
	f := framework.NewDefaultFramework("discovery")
	var namespaceName string
	ginkgo.BeforeEach(func() {
		namespaceName = f.Namespace.Name
		e2eskipper.SkipUnlessServerVersionGTE(storageVersionServerVersion, f.ClientSet.Discovery())
		ginkgo.By("Setting up server cert")
		setupServerCert(namespaceName, serviceName)
	})
	ginkgo.It("Custom resource should have storage version hash", func() {
		testcrd, err := crd.CreateTestCRD(f)
		if err != nil {
			// FIX: the original silently returned here, letting the test
			// pass vacuously whenever CRD creation failed.
			framework.Failf("failed to create test CRD: %v", err)
			return
		}
		defer testcrd.CleanUp()
		spec := testcrd.Crd.Spec
		resources, err := testcrd.APIExtensionClient.Discovery().ServerResourcesForGroupVersion(spec.Group + "/" + spec.Versions[0].Name)
		if err != nil {
			framework.Failf("failed to find the discovery doc for %v: %v", resources, err)
		}
		found := false
		// Determine which version of the CRD is the storage version.
		var storageVersion string
		for _, v := range spec.Versions {
			if v.Storage {
				storageVersion = v.Name
			}
		}
		// DISCLAIMER: the algorithm of deriving the storageVersionHash
		// is an implementation detail, which shouldn't be relied on by
		// the clients. The following calculation is for test purpose
		// only.
		expected := discovery.StorageVersionHash(spec.Group, storageVersion, spec.Names.Kind)
		for _, r := range resources.APIResources {
			if r.Name == spec.Names.Plural {
				found = true
				if r.StorageVersionHash != expected {
					framework.Failf("expected storageVersionHash of %s/%s/%s to be %s, got %s", r.Group, r.Version, r.Name, expected, r.StorageVersionHash)
				}
			}
		}
		if !found {
			framework.Failf("didn't find resource %s in the discovery doc", spec.Names.Plural)
		}
	})
})
|
package chat
// Create inserts a new chat row with the given id and returns the created
// Chat. If a chat with that id already exists, ErrChatAlreadyExist is
// returned.
func (m *Model) Create(id int64) (*Chat, error) {
	row := m.getInsertBuilder().Columns("id").Values(id).RunWith(m.db).QueryRow()
	chat, err := scanRow(row)
	if err == nil {
		return chat, nil
	}
	// SQLite reports duplicate primary keys with this exact message; map it
	// to the package-level sentinel error.
	if err.Error() == "UNIQUE constraint failed: chats.id" {
		return nil, ErrChatAlreadyExist
	}
	return nil, err
}
|
package pretty_poly
// validateSolveArguments sanity-checks the inputs to Solve. Currently only
// the polynomial order is validated; extreme and filename are accepted
// as-is.
func validateSolveArguments(order int, extreme int, filename string) error {
	if order > 0 {
		return nil
	}
	return ErrOrderArgumentSize
}
// Solve validates its arguments and then generates the polynomial
// solutions, writing them out via SolvePolynomials.
func Solve(order int, extreme int, precision int8, filename string) error {
	if err := validateSolveArguments(order, extreme, filename); err != nil {
		return err
	}
	SolvePolynomials(extreme, order, filename, precision)
	return nil
}
// validateDrawArguments sanity-checks the inputs to Draw.
// Currently a placeholder: no validation is performed and it always
// returns nil.
func validateDrawArguments (filename string, precision int8) error {
	return nil
}
func Draw (filename string, precision int8) error {
return DrawImage(filename, float64(precision))
}
|
// okaq web server
// wbegl 2.0 vectors
// aq@okaq.com
// 2020-03-17
package main
import (
"fmt"
"math/rand"
"net/http"
"sync"
"sync/atomic"
"time"
)
const (
	// INDEX is the HTML page served at "/".
	INDEX = "mazu.html"
	// THREE is the three.js bundle served at "/b".
	THREE = "js/three.min.js"
)

var (
	// R is the process-wide PRNG, seeded in rng().
	R *rand.Rand
	// C counts requests to "/"; incremented in MazuHandler.
	C uint64
	// M is an in-memory cache, initialized in cache().
	M *sync.Map
)
// motd prints the startup banner followed by the current wall-clock time.
func motd() {
	fmt.Println("serving now on $PUBLIC_IP:8080")
	// fmt.Println renders time.Time via its String() method, so the output
	// is identical to printing time.Now().String() explicitly.
	fmt.Println(time.Now())
}
// MazuHandler serves the index page at "/" and counts the hit.
func MazuHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Println(r)
	// increment the request counter
	atomic.AddUint64(&C, 1)
	http.ServeFile(w, r, INDEX)
}
// StatHandler reports the request counter at "/s" as plain text.
//
// BUG FIXES: the Content-Type was "plain/text", which is not a valid MIME
// type (the correct value is "text/plain"); and C was read without
// atomic.LoadUint64 even though MazuHandler increments it with
// atomic.AddUint64 — a data race under concurrent requests.
func StatHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Println(r)
	w.Header().Set("Content-type", "text/plain")
	s0 := fmt.Sprintf("Count: %d", atomic.LoadUint64(&C))
	w.Write([]byte(s0))
}
// rng seeds the package-level PRNG from the wall clock and prints one
// sample so startup logs show it is live.
func rng() {
	src := rand.NewSource(time.Now().UnixNano())
	R = rand.New(src)
	fmt.Printf("rng: %f\n", R.Float32())
}
// cache initializes the in-memory map with a single sentinel entry.
func cache() {
	M = &sync.Map{}
	M.Store("0", "0|nil")
}
// ThreeHandler serves the bundled three.js library at "/b".
func ThreeHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Println(r)
	http.ServeFile(w, r, THREE)
}
// main initializes global state, registers the HTTP handlers and serves
// on :8080.
//
// FIX: the error returned by http.ListenAndServe (e.g. port already in
// use) was silently discarded; it is now printed before the process exits.
func main() {
	motd()
	rng()
	cache()
	C = 0
	http.HandleFunc("/", MazuHandler)
	http.HandleFunc("/s", StatHandler)
	http.HandleFunc("/b", ThreeHandler)
	if err := http.ListenAndServe(":8080", nil); err != nil {
		fmt.Println("server error:", err)
	}
}
|
package blob
import (
"fmt"
"net/http"
"github.com/iotaledger/wasp/packages/hashing"
"github.com/iotaledger/wasp/packages/webapi/httperrors"
"github.com/iotaledger/wasp/packages/webapi/model"
"github.com/iotaledger/wasp/packages/webapi/routes"
"github.com/iotaledger/wasp/plugins/registry"
"github.com/labstack/echo/v4"
"github.com/pangpanglabs/echoswagger/v2"
)
// AddEndpoints registers the blob-related routes on the given router.
//
// FIX: the upload route was registered with GET even though handlePutBlob
// binds a request body; uploads must use POST (GET requests are not
// expected to carry bodies).
func AddEndpoints(server echoswagger.ApiRouter) {
	example := model.NewBlobInfo(true, hashing.RandomHash(nil))

	server.POST(routes.PutBlob(), handlePutBlob).
		SetSummary("Upload a blob to the registry").
		AddResponse(http.StatusOK, "Blob properties", example, nil)

	server.GET(routes.GetBlob(":hash"), handleGetBlob).
		AddParamPath("", "hash", "Blob hash (base64)").
		SetSummary("Fetch a blob by its hash").
		AddResponse(http.StatusOK, "Blob data", model.NewBlobData([]byte("blob content")), nil).
		AddResponse(http.StatusNotFound, "Not found", httperrors.NotFound("Not found"), nil)

	server.GET(routes.HasBlob(":hash"), handleHasBlob).
		AddParamPath("", "hash", "Blob hash (base64)").
		SetSummary("Find out if a blob exists in the registry").
		AddResponse(http.StatusOK, "Blob properties", example, nil)
}
// handlePutBlob stores the posted blob in the default registry and responds
// with its hash.
func handlePutBlob(c echo.Context) error {
	var blobData model.BlobData
	if err := c.Bind(&blobData); err != nil {
		return httperrors.BadRequest(err.Error())
	}
	reg := registry.DefaultRegistry()
	hash, err := reg.PutBlob(blobData.Data.Bytes())
	if err != nil {
		return err
	}
	return c.JSON(http.StatusOK, model.NewBlobInfo(true, hash))
}
// handleGetBlob looks up a blob by its base58 hash path parameter and
// responds with its data, or 404 when the registry has no such blob.
func handleGetBlob(c echo.Context) error {
	hash, err := hashing.HashValueFromBase58(c.Param("hash"))
	if err != nil {
		return httperrors.BadRequest("Invalid hash")
	}
	data, found, err := registry.DefaultRegistry().GetBlob(hash)
	switch {
	case err != nil:
		return err
	case !found:
		return httperrors.NotFound(fmt.Sprintf("Blob not found: %s", hash.String()))
	}
	return c.JSON(http.StatusOK, model.NewBlobData(data))
}
// handleHasBlob reports whether a blob with the given base58 hash exists in
// the registry.
func handleHasBlob(c echo.Context) error {
	hash, err := hashing.HashValueFromBase58(c.Param("hash"))
	if err != nil {
		return httperrors.BadRequest("Invalid hash")
	}
	exists, err := registry.DefaultRegistry().HasBlob(hash)
	if err != nil {
		return err
	}
	return c.JSON(http.StatusOK, model.NewBlobInfo(exists, hash))
}
|
package oidc
import (
"context"
"github.com/brigadecore/brigade/v2/apiserver/internal/api"
"github.com/coreos/go-oidc"
"github.com/pkg/errors"
"golang.org/x/oauth2"
)
// OAuth2Config is an interface for the subset of *oauth2.Config functions used
// for Brigade Session management. Dependence on this interface instead of
// directly upon the *oauth2.Config allows for the possibility of utilizing a
// mock implementation for testing purposes. Adding only the subset of functions
// that we actually use limits the effort involved in creating such mocks.
//
// Method signatures intentionally mirror those of *oauth2.Config so it
// satisfies this interface without any adapter.
type OAuth2Config interface {
	// AuthCodeURL given an OAuth 2 state code and oauth2.AuthCodeOption returns
	// the URL that a user may visit with their web browser in order to complete
	// authentication using OpenID Connect.
	AuthCodeURL(
		state string,
		opts ...oauth2.AuthCodeOption,
	) string
	// Exchange exchanges the given OAuth 2 code for an *oauth2.Token.
	Exchange(
		ctx context.Context,
		code string,
		opts ...oauth2.AuthCodeOption,
	) (*oauth2.Token, error)
}
// IDTokenVerifier is an interface for the subset of *oidc.IDTokenVerifier used
// for Brigade Session management. Dependence on this interface instead of
// directly upon the *oidc.IDTokenVerifier allows for the possibility of
// utilizing a mock implementation for testing purposes. Adding only the subset
// of functions that we actually use limits the effort involved in creating such
// mocks.
type IDTokenVerifier interface {
	// Verify checks the raw (compact, serialized) ID token and, when valid,
	// returns the parsed *oidc.IDToken.
	Verify(ctx context.Context, rawIDToken string) (*oidc.IDToken, error)
}
// thirdPartyAuthHelper implements api.ThirdPartyAuthHelper using a standard
// OAuth2 + OpenID Connect flow.
type thirdPartyAuthHelper struct {
	oauth2Config    OAuth2Config    // builds auth URLs and exchanges auth codes
	idTokenVerifier IDTokenVerifier // verifies the returned OIDC identity token
}
// NewThirdPartyAuthHelper returns an api.ThirdPartyAuthHelper backed by the
// given OAuth2 configuration and OpenID Connect token verifier.
func NewThirdPartyAuthHelper(
	oauth2Config OAuth2Config,
	idTokenVerifier IDTokenVerifier,
) api.ThirdPartyAuthHelper {
	helper := &thirdPartyAuthHelper{}
	helper.oauth2Config = oauth2Config
	helper.idTokenVerifier = idTokenVerifier
	return helper
}
// AuthURL returns the third-party URL a user should visit in their browser to
// begin authentication, embedding the provided OAuth2 state.
func (t *thirdPartyAuthHelper) AuthURL(oauth2State string) string {
	url := t.oauth2Config.AuthCodeURL(oauth2State)
	return url
}
// Exchange completes the OAuth2/OIDC flow: it trades the authorization code
// for an OAuth2 token, verifies the embedded OpenID Connect identity token,
// and maps its email/name claims onto an api.ThirdPartyIdentity. The second
// parameter (the OAuth2 state) is unused by this implementation.
func (t *thirdPartyAuthHelper) Exchange(
	ctx context.Context,
	_ string,
	oauth2Code string,
) (api.ThirdPartyIdentity, error) {
	var identity api.ThirdPartyIdentity
	oauth2Token, err := t.oauth2Config.Exchange(ctx, oauth2Code)
	if err != nil {
		return identity, errors.Wrap(err, "error exchanging code for OAuth2 token")
	}
	// The OIDC identity token rides along in the token's "id_token" extra.
	rawIDToken, ok := oauth2Token.Extra("id_token").(string)
	if !ok {
		return identity, errors.New(
			"OAuth2 token did not include an OpenID Connect identity token",
		)
	}
	idToken, err := t.idTokenVerifier.Verify(ctx, rawIDToken)
	if err != nil {
		return identity,
			errors.Wrap(err, "error verifying OpenID Connect identity token")
	}
	var claims struct {
		Email string `json:"email"`
		Name  string `json:"name"`
	}
	if err = idToken.Claims(&claims); err != nil {
		return identity, errors.Wrap(
			err,
			"error decoding OpenID Connect identity token claims",
		)
	}
	identity.ID = claims.Email
	identity.Name = claims.Name
	return identity, nil
}
|
package main
import (
"errors"
"github.com/caos/orbos/internal/operator/orbiter/kinds/clusters/core/infra"
"github.com/spf13/cobra"
)
// RebootCommand builds the "reboot" cobra command, which gracefully reboots
// machines managed by orbiter. Machine ids may be passed as arguments; with
// no arguments, machines are selected interactively. Requires --gitops mode.
func RebootCommand(getRv GetRootValues) *cobra.Command {
	cmd := &cobra.Command{
		Use:   "reboot",
		Short: "Gracefully reboot machines",
		Long:  "Pass machine ids as arguments, omit arguments for selecting machines interactively",
	}
	cmd.RunE = func(_ *cobra.Command, args []string) (err error) {
		rv, err := getRv()
		if err != nil {
			return err
		}
		// ErrFunc post-processes whatever error this command ends up returning.
		defer func() {
			err = rv.ErrFunc(err)
		}()
		if !rv.Gitops {
			return errors.New("reboot command is only supported with the --gitops flag and a committed orbiter.yml")
		}
		return requireMachines(rv.Monitor, rv.GitClient, rv.OrbConfig, args, func(machine infra.Machine) (required bool, require func(), unrequire func()) {
			return machine.RebootRequired()
		})
	}
	return cmd
}
|
package aoc2016
import (
"testing"
aoc "github.com/janreggie/aoc/internal"
"github.com/stretchr/testify/assert"
)
// Test_littleScreen_rect checks that rect lights a width×height block of
// pixels in the top-left corner and rejects out-of-bounds dimensions
// (width > 50, height > 6).
func Test_littleScreen_rect(t *testing.T) {
	assert := assert.New(t)
	testCases := []struct {
		width, height uint
		want          littleScreen
		wantErr       bool
	}{
		{
			width:  3,
			height: 2,
			want: littleScreen{
				lcd: [6][50]bool{
					{true, true, true},
					{true, true, true},
				}, // the rest is false!
			},
		},
		{
			width:   51,
			height:  1,
			wantErr: true,
		},
		{
			width:   1,
			height:  7,
			wantErr: true,
		},
	}
	for _, tt := range testCases {
		var ls littleScreen
		err := ls.rect(tt.width, tt.height)
		if tt.wantErr {
			assert.Error(err, err)
			// BUG FIX: this was `return`, which ended the whole test after the
			// first error case and silently skipped the remaining test cases.
			continue
		}
		assert.NoError(err, err)
		assert.Equal(tt.want, ls)
	}
}
func Test_littleScreen_rotateRow(t *testing.T) {
assert := assert.New(t)
var ls littleScreen
assert.NoError(ls.rect(3, 2), "created 3x2 rectangle")
assert.Equal(
littleScreen{
lcd: [6][50]bool{
{true, true, true},
{true, true, true},
},
},
ls,
"created 3x2 rectangle",
)
// rotate row y=0 by 4
assert.NoError(ls.rotateRow(0, 4), "rotate y=0 by 4")
assert.Equal(
littleScreen{
lcd: [6][50]bool{
{false, false, false, false, true, true, true},
{true, true, true},
},
},
ls,
"rotate y=0 by 4",
)
// rotate row y=6 by 4
assert.Error(ls.rotateRow(6, 4), "should not be able to rotate y=6")
}
func Test_littleScreen_rotateColumn(t *testing.T) {
assert := assert.New(t)
var ls littleScreen
assert.NoError(ls.rect(3, 2), "created 3x2 rectangle")
assert.Equal(
littleScreen{
lcd: [6][50]bool{
{true, true, true},
{true, true, true},
},
},
ls,
"created 3x2 rectangle",
)
// rotate col x=1 by 1
assert.NoError(ls.rotateColumn(1, 1), "rotate col x=1 by 1")
assert.Equal(
littleScreen{
lcd: [6][50]bool{
{true, false, true},
{true, true, true},
{false, true, false},
},
},
ls,
"rotate col x=1 by 1",
)
// rotate col x=50 by 1
assert.Error(ls.rotateColumn(50, 1), "should not be able to rotate x=50")
}
func Test_littleScreen_parseInstruction(t *testing.T) {
assert := assert.New(t)
var ls littleScreen
// rect 3x2
instr := "rect 3x2"
assert.NoError(ls.parseInstruction(instr), "should be able to create 3x2")
assert.Equal(
littleScreen{
lcd: [6][50]bool{
{true, true, true},
{true, true, true},
},
},
ls,
instr,
)
// rotate column x=1 by 1
instr = "rotate column x=1 by 1"
assert.NoError(ls.parseInstruction(instr), "should be able to ", instr)
assert.Equal(
littleScreen{
lcd: [6][50]bool{
{true, false, true},
{true, true, true},
{false, true},
},
},
ls,
instr,
)
// rotate row y=0 by 4
instr = "rotate row y=0 by 4"
assert.NoError(ls.parseInstruction(instr), "should be able to ", instr)
assert.Equal(
littleScreen{
lcd: [6][50]bool{
{false, false, false, false, true, false, true},
{true, true, true},
{false, true, false},
},
},
ls,
instr,
)
}
func TestDay08(t *testing.T) {
assert := assert.New(t)
testCase := aoc.TestCase{
Details: "Y2016D08 my input",
Input: day08myInput,
Result1: `110`,
// Result2 difficult to formalize
}
testCase.Test(Day08, assert)
}
// BenchmarkDay08 measures Day08 on my puzzle input.
func BenchmarkDay08(b *testing.B) {
	aoc.Benchmark(Day08, b, day08myInput)
}
|
package article
import (
"github.com/hardstylez72/bblog/internal/storage/article"
"time"
)
// ArticleWithBody is the API representation of an article including its full
// body text.
type ArticleWithBody struct {
	Article
	Body string `json:"body" validate:"required"`
}

// Article is the API representation of an article's metadata (without body).
type Article struct {
	Id        string     `json:"id"`
	Title     string     `json:"title" validate:"required"`
	UserId    string     `json:"userId" validate:"required"`
	Preface   string     `json:"preface" validate:"required"`
	CreatedAt time.Time  `json:"createdAt"`
	UpdatedAt *time.Time `json:"updatedAt"` // set only when the stored timestamp is valid
	DeletedAt *time.Time `json:"deletedAt"` // set only when the stored timestamp is valid
}
// NewGetArticleByIdResponse converts a storage-layer article into its API
// representation, translating the nullable Updated/Deleted timestamps into
// optional pointers.
func NewGetArticleByIdResponse(in *article.Article) *ArticleWithBody {
	meta := Article{
		Id:        in.Id,
		Title:     in.Title,
		UserId:    in.UserId,
		Preface:   in.Preface,
		CreatedAt: in.CreatedAt,
	}
	if in.UpdatedAt.Valid {
		meta.UpdatedAt = &in.UpdatedAt.Time
	}
	if in.DeletedAt.Valid {
		meta.DeletedAt = &in.DeletedAt.Time
	}
	return &ArticleWithBody{Article: meta, Body: in.Body}
}
|
/*
Copyright 2022 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package auth
import (
"context"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"io"
"os"
"time"
"github.com/pkg/errors"
authenticationv1 "k8s.io/api/authentication/v1"
certificatesv1 "k8s.io/api/certificates/v1"
certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/utils/pointer"
"github.com/oam-dev/kubevela/pkg/utils"
)
// DefaultExpireTime is the default expiry (one year) applied to both X.509
// certificate and ServiceAccount token based KubeConfig generation.
const DefaultExpireTime = time.Hour * 24 * 365
// KubeConfigGenerateOptions options for create KubeConfig. Exactly one of the
// two fields is expected to be set; GenerateKubeConfig prefers X509 when both
// are non-nil.
type KubeConfigGenerateOptions struct {
	X509           *KubeConfigGenerateX509Options           // client-certificate flow
	ServiceAccount *KubeConfigGenerateServiceAccountOptions // service-account token flow
}
// KubeConfigGenerateX509Options options for create X509 based KubeConfig.
type KubeConfigGenerateX509Options struct {
	User           string        // certificate CommonName
	Groups         []string      // certificate Organization entries
	ExpireTime     time.Duration // requested certificate lifetime
	PrivateKeyBits int           // RSA key size in bits
}
// KubeConfigGenerateServiceAccountOptions options for create ServiceAccount based KubeConfig.
type KubeConfigGenerateServiceAccountOptions struct {
	ServiceAccountName      string
	ServiceAccountNamespace string
	ExpireTime              time.Duration // token lifetime when a token must be requested
}
// KubeConfigWithUserGenerateOption option for setting user (the X.509
// certificate CommonName) in KubeConfig.
type KubeConfigWithUserGenerateOption string

// ApplyToOptions sets the X509 user. Assumes options.X509 is non-nil (true
// for the defaults built by newKubeConfigGenerateOptions).
func (opt KubeConfigWithUserGenerateOption) ApplyToOptions(options *KubeConfigGenerateOptions) {
	options.X509.User = string(opt)
}
// KubeConfigWithGroupGenerateOption option for setting group in KubeConfig.
// Duplicate groups are ignored.
type KubeConfigWithGroupGenerateOption string

// ApplyToOptions appends the group to options.X509.Groups unless it is
// already present.
func (opt KubeConfigWithGroupGenerateOption) ApplyToOptions(options *KubeConfigGenerateOptions) {
	group := string(opt)
	for i := range options.X509.Groups {
		if options.X509.Groups[i] == group {
			return
		}
	}
	options.X509.Groups = append(options.X509.Groups, group)
}
// KubeConfigWithServiceAccountGenerateOption option for setting service account in KubeConfig.
type KubeConfigWithServiceAccountGenerateOption types.NamespacedName

// ApplyToOptions switches generation to the ServiceAccount token flow by
// clearing X509 and configuring the service account with the default expiry.
func (opt KubeConfigWithServiceAccountGenerateOption) ApplyToOptions(options *KubeConfigGenerateOptions) {
	options.X509 = nil
	options.ServiceAccount = &KubeConfigGenerateServiceAccountOptions{
		ServiceAccountName:      opt.Name,
		ServiceAccountNamespace: opt.Namespace,
		ExpireTime:              DefaultExpireTime,
	}
}
// KubeConfigWithIdentityGenerateOption option for setting identity in KubeConfig.
type KubeConfigWithIdentityGenerateOption Identity

// ApplyToOptions applies the identity's user and groups as X.509 options;
// when a service account is set it takes over and switches the flow to
// ServiceAccount token generation (clearing X509).
func (opt KubeConfigWithIdentityGenerateOption) ApplyToOptions(options *KubeConfigGenerateOptions) {
	if opt.User != "" {
		KubeConfigWithUserGenerateOption(opt.User).ApplyToOptions(options)
	}
	for _, group := range opt.Groups {
		KubeConfigWithGroupGenerateOption(group).ApplyToOptions(options)
	}
	if opt.ServiceAccount != "" {
		(KubeConfigWithServiceAccountGenerateOption{
			Name:      opt.ServiceAccount,
			Namespace: opt.ServiceAccountNamespace,
		}).ApplyToOptions(options)
	}
}
// KubeConfigGenerateOption option for create KubeConfig.
type KubeConfigGenerateOption interface {
	// ApplyToOptions mutates the options being assembled.
	ApplyToOptions(options *KubeConfigGenerateOptions)
}
// newKubeConfigGenerateOptions builds the effective generation options: X.509
// defaults (anonymous user, kubevela:client group, one-year expiry, 2048-bit
// key) overridden in order by the supplied options. An option may switch the
// mode to ServiceAccount by clearing X509.
func newKubeConfigGenerateOptions(options ...KubeConfigGenerateOption) *KubeConfigGenerateOptions {
	x509Defaults := &KubeConfigGenerateX509Options{
		User:           user.Anonymous,
		Groups:         []string{KubeVelaClientGroup},
		ExpireTime:     DefaultExpireTime,
		PrivateKeyBits: 2048,
	}
	opts := &KubeConfigGenerateOptions{X509: x509Defaults, ServiceAccount: nil}
	for _, option := range options {
		option.ApplyToOptions(opts)
	}
	return opts
}
const (
	// KubeVelaClientGroup the default group to be added to the generated X509 KubeConfig
	KubeVelaClientGroup = "kubevela:client"
	// CSRNamePrefix the prefix of the CSR name (see makeCSRName)
	CSRNamePrefix = "kubevela-csr"
)
// GenerateKubeConfig generate KubeConfig for users with given options:
// X.509 client-certificate based when opts.X509 is set, ServiceAccount token
// based when opts.ServiceAccount is set. Progress messages go to writer.
func GenerateKubeConfig(ctx context.Context, cli kubernetes.Interface, cfg *clientcmdapi.Config, writer io.Writer, options ...KubeConfigGenerateOption) (*clientcmdapi.Config, error) {
	opts := newKubeConfigGenerateOptions(options...)
	switch {
	case opts.X509 != nil:
		return generateX509KubeConfig(ctx, cli, cfg, writer, opts.X509)
	case opts.ServiceAccount != nil:
		return generateServiceAccountKubeConfig(ctx, cli, cfg, writer, opts.ServiceAccount)
	default:
		return nil, errors.New("either x509 or serviceaccount must be set for creating KubeConfig")
	}
}
// genKubeConfig trims cfg down to a single-context config that authenticates
// with authInfo. When cfg has contexts, the current context is kept; otherwise
// a "local" context is synthesized from a cluster entry (with several
// clusters, map iteration picks an arbitrary one — preserved behavior).
// caData, when non-nil, replaces the cluster's CA data.
func genKubeConfig(cfg *clientcmdapi.Config, authInfo *clientcmdapi.AuthInfo, caData []byte) (*clientcmdapi.Config, error) {
	if len(cfg.Clusters) == 0 {
		return nil, fmt.Errorf("there is no clusters in the cluster config")
	}
	exportCfg := cfg.DeepCopy()
	var exportContext *clientcmdapi.Context
	if len(cfg.Contexts) > 0 {
		current, exists := cfg.Contexts[cfg.CurrentContext]
		if !exists {
			// BUG FIX: previously a missing current-context yielded a nil
			// *Context and a nil-pointer panic when reading exportContext.Cluster.
			return nil, fmt.Errorf("cannot find current-context %s in the cluster config", cfg.CurrentContext)
		}
		exportContext = current.DeepCopy()
		exportCfg.Contexts = map[string]*clientcmdapi.Context{cfg.CurrentContext: exportContext}
	} else {
		exportCfg.Contexts = map[string]*clientcmdapi.Context{}
		for name := range cfg.Clusters {
			exportContext = &clientcmdapi.Context{
				Cluster:  name,
				AuthInfo: authInfo.Username,
			}
			exportCfg.Contexts["local"] = exportContext
		}
		exportCfg.CurrentContext = "local"
	}
	cluster, exists := cfg.Clusters[exportContext.Cluster]
	if !exists || cluster == nil {
		// BUG FIX: guard against a context referencing a non-existent cluster,
		// which previously panicked on the nil DeepCopy result.
		return nil, fmt.Errorf("cannot find cluster %s in the cluster config", exportContext.Cluster)
	}
	exportCluster := cluster.DeepCopy()
	if caData != nil {
		exportCluster.CertificateAuthorityData = caData
	}
	exportCfg.Clusters = map[string]*clientcmdapi.Cluster{exportContext.Cluster: exportCluster}
	exportCfg.AuthInfos = map[string]*clientcmdapi.AuthInfo{exportContext.AuthInfo: authInfo}
	return exportCfg, nil
}
// makeCertAndKey generates a fresh RSA private key and a PEM-encoded X.509
// certificate signing request whose CommonName is the configured user and
// whose Organization holds the configured groups. It returns
// (csrPEM, keyPEM, err) and logs progress to writer.
func makeCertAndKey(writer io.Writer, opts *KubeConfigGenerateX509Options) ([]byte, []byte, error) {
	// generate private key
	key, err := rsa.GenerateKey(rand.Reader, opts.PrivateKeyBits)
	if err != nil {
		return nil, nil, err
	}
	keyPem := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})
	_, _ = fmt.Fprintf(writer, "Private key generated.\n")
	// build and self-sign the certificate request
	req := &x509.CertificateRequest{
		Subject: pkix.Name{
			CommonName:   opts.User,
			Organization: opts.Groups,
		},
		SignatureAlgorithm: x509.SHA256WithRSA,
	}
	der, err := x509.CreateCertificateRequest(rand.Reader, req, key)
	if err != nil {
		return nil, nil, err
	}
	csrPem := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE REQUEST", Bytes: der})
	_, _ = fmt.Fprintf(writer, "Certificate request generated.\n")
	return csrPem, keyPem, nil
}
func makeCSRName(user string) string {
return fmt.Sprintf("%s-%s", CSRNamePrefix, user)
}
// generateX509KubeConfig dispatches to the certificates.k8s.io/v1 CSR flow on
// Kubernetes >= v1.19, falling back to certificates/v1beta1 otherwise.
func generateX509KubeConfig(ctx context.Context, cli kubernetes.Interface, cfg *clientcmdapi.Config, writer io.Writer, options *KubeConfigGenerateX509Options) (*clientcmdapi.Config, error) {
	// Discovery errors are deliberately ignored; when the server version is
	// unknown (info == nil) the v1 flow is assumed.
	info, _ := cli.Discovery().ServerVersion()
	if info == nil || version.MustParseGeneric(info.String()).AtLeast(version.MustParseSemantic("v1.19.0")) {
		return generateX509KubeConfigV1(ctx, cli, cfg, writer, options)
	}
	return generateX509KubeConfigV1Beta(ctx, cli, cfg, writer, options)
}
// generateX509KubeConfigV1 creates a client-certificate KubeConfig via the
// certificates.k8s.io/v1 CSR API: generate key+CSR, submit the CSR,
// self-approve it, poll (1s interval, 1m timeout) until the signed
// certificate appears, then embed key and certificate into the KubeConfig.
// The CSR object is deleted on exit regardless of outcome.
func generateX509KubeConfigV1(ctx context.Context, cli kubernetes.Interface, cfg *clientcmdapi.Config, writer io.Writer, opts *KubeConfigGenerateX509Options) (*clientcmdapi.Config, error) {
	csrPemBytes, keyBytes, err := makeCertAndKey(writer, opts)
	if err != nil {
		return nil, err
	}
	// Build and submit the CSR object.
	csr := &certificatesv1.CertificateSigningRequest{}
	csr.Name = makeCSRName(opts.User)
	csr.Spec.SignerName = certificatesv1.KubeAPIServerClientSignerName
	csr.Spec.Usages = []certificatesv1.KeyUsage{certificatesv1.UsageClientAuth}
	csr.Spec.Request = csrPemBytes
	csr.Spec.ExpirationSeconds = pointer.Int32(int32(opts.ExpireTime.Seconds()))
	if _, err := cli.CertificatesV1().CertificateSigningRequests().Create(ctx, csr, metav1.CreateOptions{}); err != nil {
		return nil, err
	}
	_, _ = fmt.Fprintf(writer, "Certificate signing request %s generated.\n", csr.Name)
	// Clean up the CSR object; the signed certificate is extracted below.
	defer func() {
		_ = cli.CertificatesV1().CertificateSigningRequests().Delete(ctx, csr.Name, metav1.DeleteOptions{})
	}()
	// Self-approve the request.
	csr.Status.Conditions = append(csr.Status.Conditions, certificatesv1.CertificateSigningRequestCondition{
		Type:           certificatesv1.CertificateApproved,
		Status:         corev1.ConditionTrue,
		Reason:         "Self-generated and auto-approved by KubeVela",
		Message:        "This CSR was approved by KubeVela",
		LastUpdateTime: metav1.Now(),
	})
	if csr, err = cli.CertificatesV1().CertificateSigningRequests().UpdateApproval(ctx, csr.Name, csr, metav1.UpdateOptions{}); err != nil {
		return nil, err
	}
	_, _ = fmt.Fprintf(writer, "Certificate signing request %s approved.\n", csr.Name)
	// Wait for the controller to issue the signed certificate.
	if err := wait.Poll(time.Second, time.Minute, func() (done bool, err error) {
		if csr, err = cli.CertificatesV1().CertificateSigningRequests().Get(ctx, csr.Name, metav1.GetOptions{}); err != nil {
			return false, err
		}
		if csr.Status.Certificate == nil {
			return false, nil
		}
		return true, nil
	}); err != nil {
		return nil, err
	}
	_, _ = fmt.Fprintf(writer, "Signed certificate retrieved.\n")
	return genKubeConfig(cfg, &clientcmdapi.AuthInfo{
		ClientKeyData:         keyBytes,
		ClientCertificateData: csr.Status.Certificate,
	}, nil)
}
// generateX509KubeConfigV1Beta is the certificates.k8s.io/v1beta1 twin of
// generateX509KubeConfigV1, used for Kubernetes < v1.19. Same flow: submit
// CSR, self-approve, poll for the signed certificate, embed it into the
// KubeConfig, and delete the CSR object on exit.
func generateX509KubeConfigV1Beta(ctx context.Context, cli kubernetes.Interface, cfg *clientcmdapi.Config, writer io.Writer, opts *KubeConfigGenerateX509Options) (*clientcmdapi.Config, error) {
	csrPemBytes, keyBytes, err := makeCertAndKey(writer, opts)
	if err != nil {
		return nil, err
	}
	csr := &certificatesv1beta1.CertificateSigningRequest{}
	csr.Name = makeCSRName(opts.User)
	// v1beta1 takes the signer name as a pointer, unlike v1.
	var name = certificatesv1beta1.KubeAPIServerClientSignerName
	csr.Spec.SignerName = &name
	csr.Spec.Usages = []certificatesv1beta1.KeyUsage{certificatesv1beta1.UsageClientAuth}
	csr.Spec.Request = csrPemBytes
	csr.Spec.ExpirationSeconds = pointer.Int32(int32(opts.ExpireTime.Seconds()))
	// create
	if _, err = cli.CertificatesV1beta1().CertificateSigningRequests().Create(ctx, csr, metav1.CreateOptions{}); err != nil {
		return nil, err
	}
	_, _ = fmt.Fprintf(writer, "Certificate signing request %s generated.\n", csr.Name)
	defer func() {
		_ = cli.CertificatesV1beta1().CertificateSigningRequests().Delete(ctx, csr.Name, metav1.DeleteOptions{})
	}()
	// approval
	csr.Status.Conditions = append(csr.Status.Conditions, certificatesv1beta1.CertificateSigningRequestCondition{
		Type:           certificatesv1beta1.CertificateApproved,
		Status:         corev1.ConditionTrue,
		Reason:         "Self-generated and auto-approved by KubeVela",
		Message:        "This CSR was approved by KubeVela",
		LastUpdateTime: metav1.Now(),
	})
	if csr, err = cli.CertificatesV1beta1().CertificateSigningRequests().UpdateApproval(ctx, csr, metav1.UpdateOptions{}); err != nil {
		return nil, err
	}
	_, _ = fmt.Fprintf(writer, "Certificate signing request %s approved.\n", csr.Name)
	// waiting and get the status
	if err = wait.Poll(time.Second, time.Minute, func() (done bool, err error) {
		if csr, err = cli.CertificatesV1beta1().CertificateSigningRequests().Get(ctx, csr.Name, metav1.GetOptions{}); err != nil {
			return false, err
		}
		if csr.Status.Certificate == nil {
			return false, nil
		}
		return true, nil
	}); err != nil {
		return nil, err
	}
	_, _ = fmt.Fprintf(writer, "Signed certificate retrieved.\n")
	return genKubeConfig(cfg, &clientcmdapi.AuthInfo{
		ClientKeyData:         keyBytes,
		ClientCertificateData: csr.Status.Certificate,
	}, nil)
}
// generateServiceAccountKubeConfig builds a token-based KubeConfig for the
// configured service account. If the SA has no bound secret (Kubernetes >=
// 1.24 behavior), a token is requested via the TokenRequest API and the
// cluster CA is read from the kube-root-ca.crt ConfigMap; otherwise the token
// and CA are taken from the SA's first secret.
func generateServiceAccountKubeConfig(ctx context.Context, cli kubernetes.Interface, cfg *clientcmdapi.Config, writer io.Writer, opts *KubeConfigGenerateServiceAccountOptions) (*clientcmdapi.Config, error) {
	var (
		token string
		CA    []byte
	)
	sa, err := cli.CoreV1().ServiceAccounts(opts.ServiceAccountNamespace).Get(ctx, opts.ServiceAccountName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	_, _ = fmt.Fprintf(writer, "ServiceAccount %s/%s found.\n", opts.ServiceAccountNamespace, opts.ServiceAccountName)
	if len(sa.Secrets) == 0 {
		// NOTE(review): this message lacks a trailing "\n", unlike the others.
		_, _ = fmt.Fprintf(writer, "ServiceAccount %s/%s has no secret. Requesting token", opts.ServiceAccountNamespace, opts.ServiceAccountName)
		request := authenticationv1.TokenRequest{
			Spec: authenticationv1.TokenRequestSpec{
				Audiences:         []string{},
				ExpirationSeconds: pointer.Int64(int64(opts.ExpireTime.Seconds())),
			},
		}
		tokenRequest, err := cli.CoreV1().ServiceAccounts(opts.ServiceAccountNamespace).CreateToken(ctx, opts.ServiceAccountName, &request, metav1.CreateOptions{})
		if err != nil {
			return nil, errors.Wrap(err, "failed to request token")
		}
		token = tokenRequest.Status.Token
		CAConfigMap, err := cli.CoreV1().ConfigMaps(sa.Namespace).Get(ctx, "kube-root-ca.crt", metav1.GetOptions{})
		if err != nil {
			return nil, errors.Wrap(err, "failed to get root CA secret")
		}
		CA = []byte(CAConfigMap.Data["ca.crt"])
	} else {
		// Legacy path: token and CA come from the SA's first bound secret.
		secretKey := sa.Secrets[0]
		if secretKey.Namespace == "" {
			secretKey.Namespace = sa.Namespace
		}
		secret, err := cli.CoreV1().Secrets(secretKey.Namespace).Get(ctx, secretKey.Name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		_, _ = fmt.Fprintf(writer, "ServiceAccount secret %s/%s found.\n", secretKey.Namespace, secret.Name)
		if len(secret.Data["token"]) == 0 {
			return nil, errors.Errorf("no token found in secret %s/%s", secret.Namespace, secret.Name)
		}
		_, _ = fmt.Fprintf(writer, "ServiceAccount token found.\n")
		token = string(secret.Data["token"])
		CA = secret.Data["ca.crt"]
	}
	return genKubeConfig(cfg, &clientcmdapi.AuthInfo{
		Token: token,
	}, CA)
}
// ReadIdentityFromKubeConfig extract identity from kubeconfig. Token-based
// auth (inline or token file) is interpreted as a service account identity;
// otherwise the client certificate's CommonName/Organization are used as
// user/groups. Errors if neither a token nor a certificate is present.
func ReadIdentityFromKubeConfig(kubeconfigPath string) (*Identity, error) {
	cfg, err := clientcmd.LoadFromFile(kubeconfigPath)
	if err != nil {
		return nil, err
	}
	ctx, exists := cfg.Contexts[cfg.CurrentContext]
	if !exists {
		return nil, fmt.Errorf("cannot find current-context %s", cfg.CurrentContext)
	}
	authInfo, exists := cfg.AuthInfos[ctx.AuthInfo]
	if !exists {
		return nil, fmt.Errorf("cannot find auth-info %s", ctx.AuthInfo)
	}
	identity := &Identity{}
	// Prefer token auth: inline token first, then the token file.
	token := authInfo.Token
	if token == "" && authInfo.TokenFile != "" {
		bs, err := os.ReadFile(authInfo.TokenFile)
		if err != nil {
			return nil, fmt.Errorf("failed to read token file %s: %w", authInfo.TokenFile, err)
		}
		token = string(bs)
	}
	if token != "" {
		// The token subject is expected to name a service account
		// (system:serviceaccount:<ns>:<name>).
		sub, err := utils.GetTokenSubject(token)
		if err != nil {
			return nil, fmt.Errorf("failed to recognize serviceaccount: %w", err)
		}
		identity.ServiceAccountNamespace, identity.ServiceAccount, err = serviceaccount.SplitUsername(sub)
		if err != nil {
			return nil, fmt.Errorf("cannot parse serviceaccount from %s: %w", sub, err)
		}
		return identity, nil
	}
	// Fall back to client certificate auth: inline data first, then the file.
	certData := authInfo.ClientCertificateData
	if len(certData) == 0 && authInfo.ClientCertificate != "" {
		certData, err = os.ReadFile(authInfo.ClientCertificate)
		if err != nil {
			return nil, fmt.Errorf("failed to read cert file %s: %w", authInfo.ClientCertificate, err)
		}
	}
	if len(certData) > 0 {
		name, err := utils.GetCertificateSubject(certData)
		if err != nil {
			return nil, fmt.Errorf("failed to get subject from certificate data: %w", err)
		}
		identity.User = name.CommonName
		identity.Groups = name.Organization
		return identity, nil
	}
	return nil, fmt.Errorf("cannot find client certificate or serviceaccount token in kubeconfig")
}
|
package middleware
import (
"log"
myerror "my-app/error"
"my-app/interface/response"
"net/http"
"github.com/gin-gonic/gin"
)
// ErrorMiddleware converts errors attached to the gin context into HTTP
// responses. A myerror.GeneralError is rendered as structured JSON with its
// mapped HTTP status; any other error is logged and answered with a bare 500.
func ErrorMiddleware() gin.HandlerFunc {
	return func(c *gin.Context) {
		c.Next()
		err := c.Errors.Last()
		if err == nil {
			return
		}
		if ge, ok := err.Err.(myerror.GeneralError); ok {
			httpStatus := myerror.GetHTTPStatus(ge.Code())
			response := response.ErrorResponse{
				Code:     ge.Code(),
				Messages: ge.Messages(),
			}
			c.AbortWithStatusJSON(httpStatus, response)
			// BUG FIX: without this return, a handled GeneralError fell
			// through to the 500 fallback below and aborted a second time.
			return
		}
		log.Println(err)
		c.AbortWithStatus(http.StatusInternalServerError)
	}
}
|
/*
* Strava API v3
*
* The [Swagger Playground](https://developers.strava.com/playground) is the easiest way to familiarize yourself with the Strava API by submitting HTTP requests and observing the responses before you write any client code. It will show what a response will look like with different endpoints depending on the authorization scope you receive from your athletes. To use the Playground, go to https://www.strava.com/settings/api and change your “Authorization Callback Domain” to developers.strava.com. Please note, we only support Swagger 2.0. There is a known issue where you can only select one scope at a time. For more information, please check the section “client code” at https://developers.strava.com/docs.
*
* API version: 3.0.0
* Generated by: Swagger Codegen (https://github.com/swagger-api/swagger-codegen.git)
*/
package swagger
import (
"time"
)
// SummaryAthlete is the Strava "summary" representation of an athlete,
// generated from the Swagger/OpenAPI definition.
type SummaryAthlete struct {
	// The unique identifier of the athlete
	Id int32 `json:"id,omitempty"`
	// Resource state, indicates level of detail. Possible values: 1 -> \"meta\", 2 -> \"summary\", 3 -> \"detail\"
	ResourceState int32 `json:"resource_state,omitempty"`
	// The athlete's first name.
	Firstname string `json:"firstname,omitempty"`
	// The athlete's last name.
	Lastname string `json:"lastname,omitempty"`
	// URL to a 62x62 pixel profile picture.
	ProfileMedium string `json:"profile_medium,omitempty"`
	// URL to a 124x124 pixel profile picture.
	Profile string `json:"profile,omitempty"`
	// The athlete's city.
	City string `json:"city,omitempty"`
	// The athlete's state or geographical region.
	State string `json:"state,omitempty"`
	// The athlete's country.
	Country string `json:"country,omitempty"`
	// The athlete's sex.
	Sex string `json:"sex,omitempty"`
	// Deprecated: use the Summit field instead. Whether the athlete has any Summit subscription.
	Premium bool `json:"premium,omitempty"`
	// Whether the athlete has any Summit subscription.
	Summit bool `json:"summit,omitempty"`
	// The time at which the athlete was created.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// The time at which the athlete was last updated.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
}
|
package patcher
// Config represents a set of options that can be passed into an Apply action.
type Config struct {
// AllowCreate specifies wether or not we should be able to create the
// object or not. If this is disabled, when an object does not exist on the
// server and a patch is requested, Kubekit will return an error.
// Disabling this could be useful to ensure the `OnUpdate` CRD Handler
// function only performs updates, not creates.
// This does not count towards the `Force` option, where we'll delete and
// re-create an object if there is an error on updating it.
// Defaults to `true`
AllowCreate bool
// AllowUpdate specifies wether or not we should be able to perform updates.
// If this is disabled and, when an object already exists on the server and
// a patch is sent, Kubekit will return an error.
// Disabling this could be useful to ensure the `OnCreate` CRD Handler
// function only performs create actions.
// Defaults to `true`
AllowUpdate bool
// DeleteFirst enforces us to delete the resource on the server first before
// trying to patch it. This enforces creating a new resource.
// This option is provided to enable replacing specific resources like
// PodDisruptionBudget. These resources can't be updated and need to be
// recreated to reconfigure.
// Defaults to `false`
DeleteFirst bool
// Force allows Kubekit to delete and re-create the object when there is an
// error applying the patch.
// This can come in handy for objects that don't allow updating, like
// PodDisruptionBudget.
// Defaults to `false`
Force bool
// Validation enables the schema validation before sending it off to the
// server.
// Defaults to `false`
Validation bool
// Retries resembles the amount of retries we'll execute when we encounter
// an error applying a patch.
// Defaults to `5`
Retries int
name string
}
// defaultOptions is the baseline configuration used by NewConfig. Note that
// DeleteFirst and name take their zero values, and Validation is enabled.
var defaultOptions = &Config{
	AllowCreate: true,
	AllowUpdate: true,
	Force:       false,
	Validation:  true,
	Retries:     5,
}
// OptionFunc represents a function that can be used to set options for the
// Apply command.
type OptionFunc func(c *Config)
// NewConfig creates a new configuration. Any options passed in will overwrite
// the defaults.
func NewConfig(opts ...OptionFunc) *Config {
	cfg := NewFromConfig(defaultOptions, opts...)
	return cfg
}
// NewFromConfig creates a new configuration based off of the given
// configuration and options. The given options will overwrite the specified
// config; the input config itself is never mutated.
func NewFromConfig(c *Config, opts ...OptionFunc) *Config {
	cfg := c.DeepCopy()
	for i := range opts {
		opts[i](cfg)
	}
	return cfg
}
// DeepCopy copies the entire config object to a new struct. (All fields are
// value types, so a shallow struct copy suffices.)
func (c *Config) DeepCopy() *Config {
	copied := *c
	return &copied
}
// DisableValidation disables the schema validation.
func DisableValidation() OptionFunc {
	return func(c *Config) { c.Validation = false }
}
// DisableCreate disables creating objects. They can only be updated.
func DisableCreate() OptionFunc {
	return func(c *Config) { c.AllowCreate = false }
}
// DisableUpdate disables updating objects. They can only be created.
func DisableUpdate() OptionFunc {
	return func(c *Config) { c.AllowUpdate = false }
}
// WithForce Delete and re-create the specified object when there is an error
// and we've retried several times.
// This can come in handy for objects that don't allow updating, like
// PodDisruptionBudget.
func WithForce() OptionFunc {
	return func(c *Config) { c.Force = true }
}
// WithRetries sets the amount of retries we should execute when encountering
// an error before backing off.
func WithRetries(i int) OptionFunc {
	return func(c *Config) { c.Retries = i }
}
// WithDeleteFirst will enforce deleting the resource on the server first before
// attempting to update it. This option is provided to enable replacing specific
// resources like PodDisruptionBudget. These resources can't be updated and need
// to be recreated to reconfigure.
func WithDeleteFirst() OptionFunc {
	return func(c *Config) { c.DeleteFirst = true }
}
// withName sets the internal name identifier on the config.
func withName(n string) OptionFunc {
	return func(c *Config) { c.name = n }
}
|
package api
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"sync"
"sync/atomic"
"time"
"github.com/gholt/flog"
"github.com/gholt/ring"
"github.com/gholt/store"
"github.com/pandemicsyn/ftls"
"github.com/pandemicsyn/oort/oort"
synpb "github.com/pandemicsyn/syndicate/api/proto"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
// ReplValueStore is a client for a ring-replicated set of value stores: each
// key maps to the ring's responsible nodes, and one gRPC-backed store client
// is kept per node address.
type ReplValueStore struct {
	logError                   func(string, ...interface{}) // never nil after NewReplValueStore
	logDebug                   func(string, ...interface{}) // never nil after NewReplValueStore
	logDebugOn                 bool
	addressIndex               int // which of a node's addresses to dial
	valueCap                   int
	concurrentRequestsPerStore int
	failedConnectRetryDelay    int
	ftlsConfig                 *ftls.Config
	grpcOpts                   []grpc.DialOption

	// ringLock guards ring; ringCachePath optionally persists it on updates.
	ringLock      sync.RWMutex
	ring          ring.Ring
	ringCachePath string

	ringServer         string
	ringServerGRPCOpts []grpc.DialOption
	ringServerExitChan chan struct{}
	ringClientID       string

	// storesLock guards stores, keyed by node address.
	storesLock sync.RWMutex
	stores     map[string]*replValueStoreAndTicketChan
}
// replValueStoreAndTicketChan pairs a per-address value store with a ticket
// channel used to bound concurrent requests against that store.
type replValueStoreAndTicketChan struct {
	store      store.ValueStore
	ticketChan chan struct{} // one ticket per allowed in-flight request
}
// NewReplValueStore builds a replicated value store client from the given
// configuration, substituting default loggers where none are configured and,
// when a ring cache path is set, best-effort loading a previously cached ring.
func NewReplValueStore(c *ReplValueStoreConfig) *ReplValueStore {
	cfg := resolveReplValueStoreConfig(c)
	rs := &ReplValueStore{
		logError:                   cfg.LogError,
		logDebug:                   cfg.LogDebug,
		logDebugOn:                 cfg.LogDebug != nil,
		addressIndex:               cfg.AddressIndex,
		valueCap:                   int(cfg.ValueCap),
		concurrentRequestsPerStore: cfg.ConcurrentRequestsPerStore,
		failedConnectRetryDelay:    cfg.FailedConnectRetryDelay,
		ftlsConfig:                 cfg.StoreFTLSConfig,
		grpcOpts:                   cfg.GRPCOpts,
		stores:                     make(map[string]*replValueStoreAndTicketChan),
		ringServer:                 cfg.RingServer,
		ringServerGRPCOpts:         cfg.RingServerGRPCOpts,
		ringCachePath:              cfg.RingCachePath,
		ringClientID:               cfg.RingClientID,
	}
	if rs.logError == nil {
		rs.logError = flog.Default.ErrorPrintf
	}
	if rs.logDebug == nil {
		rs.logDebug = func(string, ...interface{}) {}
	}
	// Best effort: failure to load the cached ring is only debug-logged.
	if rs.ringCachePath != "" {
		fp, err := os.Open(rs.ringCachePath)
		if err != nil {
			rs.logDebug("replValueStore: error loading cached ring %q: %s", rs.ringCachePath, err)
		} else {
			r, err := ring.LoadRing(fp)
			fp.Close()
			if err != nil {
				rs.logDebug("replValueStore: error loading cached ring %q: %s", rs.ringCachePath, err)
			} else {
				rs.ring = r
			}
		}
	}
	return rs
}
// Ring returns the current ring, polling every 250ms until one becomes
// available; it returns nil if ctx is done first.
func (rs *ReplValueStore) Ring(ctx context.Context) ring.Ring {
	for {
		rs.ringLock.RLock()
		r := rs.ring
		rs.ringLock.RUnlock()
		if r != nil {
			return r
		}
		select {
		case <-time.After(250 * time.Millisecond):
		case <-ctx.Done():
			return nil
		}
	}
}
// SetRing replaces the current ring, persists it to the configured ring cache
// path (best effort, via a temp file + rename), and shuts down store clients
// whose addresses are no longer part of the ring.
func (rs *ReplValueStore) SetRing(r ring.Ring) {
	if r == nil {
		return
	}
	rs.ringLock.Lock()
	defer rs.ringLock.Unlock()
	if rs.ringCachePath != "" {
		dir, name := path.Split(rs.ringCachePath)
		_ = os.MkdirAll(dir, 0755)
		fp, err := ioutil.TempFile(dir, name)
		if err != nil {
			rs.logDebug("replValueStore: error caching ring %q: %s", rs.ringCachePath, err)
		} else if err := r.Persist(fp); err != nil {
			fp.Close()
			os.Remove(fp.Name())
			rs.logDebug("replValueStore: error caching ring %q: %s", rs.ringCachePath, err)
		} else {
			fp.Close()
			if err := os.Rename(fp.Name(), rs.ringCachePath); err != nil {
				os.Remove(fp.Name())
				rs.logDebug("replValueStore: error caching ring %q: %s", rs.ringCachePath, err)
			}
		}
	}
	rs.ring = r
	// Collect the set of addresses still present in the new ring.
	nodes := r.Nodes()
	currentAddrs := make(map[string]struct{}, len(nodes))
	for _, n := range nodes {
		currentAddrs[n.Address(rs.addressIndex)] = struct{}{}
	}
	var shutdownAddrs []string
	rs.storesLock.RLock()
	for a := range rs.stores {
		if _, ok := currentAddrs[a]; !ok {
			shutdownAddrs = append(shutdownAddrs, a)
		}
	}
	rs.storesLock.RUnlock()
	if len(shutdownAddrs) > 0 {
		shutdownStores := make([]*replValueStoreAndTicketChan, len(shutdownAddrs))
		rs.storesLock.Lock()
		for i, a := range shutdownAddrs {
			shutdownStores[i] = rs.stores[a]
			// BUG FIX: entries were previously set to nil but left in the map;
			// a later SetRing would re-collect them and panic dereferencing
			// the nil store below. Remove them entirely (storesFor treats a
			// missing key the same as a nil entry).
			delete(rs.stores, a)
		}
		rs.storesLock.Unlock()
		for i, s := range shutdownStores {
			// BUG FIX: guard against nil entries to avoid a nil-pointer panic.
			if s == nil || s.store == nil {
				continue
			}
			if err := s.store.Shutdown(context.Background()); err != nil {
				rs.logDebug("replValueStore: error during shutdown of store %s: %s", shutdownAddrs[i], err)
			}
		}
	}
}
// storesFor returns the client connections for the backend stores
// responsible for keyA, creating and caching connections as needed. It
// blocks until a ring is available or ctx is done.
func (rs *ReplValueStore) storesFor(ctx context.Context, keyA uint64) ([]*replValueStoreAndTicketChan, error) {
	r := rs.Ring(ctx)
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}
	if r == nil {
		return nil, noRingErr
	}
	// The top PartitionBitCount bits of keyA select the partition.
	ns := r.ResponsibleNodes(uint32(keyA >> (64 - r.PartitionBitCount())))
	as := make([]string, len(ns))
	for i, n := range ns {
		as[i] = n.Address(rs.addressIndex)
	}
	// First pass under the read lock: collect whatever connections already
	// exist and note whether any are missing.
	ss := make([]*replValueStoreAndTicketChan, len(ns))
	var someNil bool
	rs.storesLock.RLock()
	for i := len(ss) - 1; i >= 0; i-- {
		ss[i] = rs.stores[as[i]]
		if ss[i] == nil {
			someNil = true
		}
	}
	rs.storesLock.RUnlock()
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	default:
	}
	if someNil {
		// Second pass under the write lock: create any missing connections.
		rs.storesLock.Lock()
		select {
		case <-ctx.Done():
			rs.storesLock.Unlock()
			return nil, ctx.Err()
		default:
		}
		for i := len(ss) - 1; i >= 0; i-- {
			if ss[i] == nil {
				// Re-check under the write lock; another goroutine may have
				// created this store since the read-locked pass.
				ss[i] = rs.stores[as[i]]
				if ss[i] == nil {
					var err error
					// Pre-fill the ticket channel; each ticket permits one
					// concurrent request to this store.
					tc := make(chan struct{}, rs.concurrentRequestsPerStore)
					// NOTE: this loop's i intentionally shadows the outer i
					// and goes out of scope when the loop ends.
					for i := cap(tc); i > 0; i-- {
						tc <- struct{}{}
					}
					ss[i] = &replValueStoreAndTicketChan{ticketChan: tc}
					ss[i].store, err = NewValueStore(as[i], rs.concurrentRequestsPerStore, rs.ftlsConfig, rs.grpcOpts...)
					if err != nil {
						// Cache a placeholder that fails every call rather
						// than retrying the connect on every request.
						ss[i].store = errorValueStore(fmt.Sprintf("could not create store for %s: %s", as[i], err))
						// Launch goroutine to clear out the error store after
						// some time so a retry will occur.
						go func(addr string) {
							time.Sleep(time.Duration(rs.failedConnectRetryDelay) * time.Second)
							rs.storesLock.Lock()
							s := rs.stores[addr]
							if s != nil {
								if _, ok := s.store.(errorValueStore); ok {
									rs.stores[addr] = nil
								}
							}
							rs.storesLock.Unlock()
						}(as[i])
					}
					rs.stores[as[i]] = ss[i]
					select {
					case <-ctx.Done():
						rs.storesLock.Unlock()
						return nil, ctx.Err()
					default:
					}
				}
			}
		}
		rs.storesLock.Unlock()
	}
	return ss, nil
}
// ringServerConnector maintains a streaming connection to the ring service,
// applying each received ring update via SetRing. It runs until exitChan is
// closed, retrying failed connections with exponential backoff (capped at
// 60 seconds) and recycling connections that have been idle for fifteen
// minutes.
func (rs *ReplValueStore) ringServerConnector(exitChan chan struct{}) {
	sleeperTicks := 2
	sleeperTicker := time.NewTicker(time.Second)
	defer sleeperTicker.Stop()
	// sleeper pauses sleeperTicks seconds, doubling the backoff for next
	// time, but returns immediately once exitChan is closed.
	sleeper := func() {
		for i := sleeperTicks; i > 0; i-- {
			select {
			case <-exitChan:
				// A bare break here would only exit the select, not the
				// loop; return so shutdown is immediate.
				return
			case <-sleeperTicker.C:
			}
		}
		if sleeperTicks < 60 {
			sleeperTicks *= 2
		}
	}
	for {
		select {
		case <-exitChan:
			// return, not break: a bare break inside a select only exits the
			// select statement, so the previous code never actually stopped
			// this connector when exitChan closed.
			return
		default:
		}
		ringServer := rs.ringServer
		if ringServer == "" {
			var err error
			ringServer, err = oort.GetRingServer("value")
			if err != nil {
				rs.logError("replValueStore: error resolving ring service: %s", err)
				sleeper()
				continue
			}
		}
		conn, err := grpc.Dial(ringServer, rs.ringServerGRPCOpts...)
		if err != nil {
			rs.logError("replValueStore: error connecting to ring service %q: %s", ringServer, err)
			sleeper()
			continue
		}
		stream, err := synpb.NewSyndicateClient(conn).GetRingStream(context.Background(), &synpb.SubscriberID{Id: rs.ringClientID})
		if err != nil {
			rs.logError("replValueStore: error creating stream with ring service %q: %s", ringServer, err)
			sleeper()
			continue
		}
		connDoneChan := make(chan struct{})
		somethingICanTakeAnAddressOf := int32(0)
		activity := &somethingICanTakeAnAddressOf
		// This goroutine will detect when the exitChan is closed so it can
		// close the conn so that the blocking stream.Recv will get an error
		// and everything will unwind properly.
		// However, if the conn errors out on its own and exitChan isn't
		// closed, we're going to loop back around and try a new conn, but we
		// need to clear out this goroutine, which is what the connDoneChan is
		// for.
		// One last thing is that if nothing happens for fifteen minutes, we
		// can assume the conn has gone stale and close it, causing a loop
		// around to try a new conn.
		// It would be so much easier if Recv could use a timeout Context...
		go func(c *grpc.ClientConn, a *int32, cdc chan struct{}) {
			for {
				select {
				case <-exitChan:
				case <-cdc:
				case <-time.After(15 * time.Minute):
					// I'm comfortable with time.After here since it's just
					// once per fifteen minutes or new conn.
					v := atomic.LoadInt32(a)
					if v != 0 {
						atomic.AddInt32(a, -v)
						continue
					}
				}
				break
			}
			c.Close()
		}(conn, activity, connDoneChan)
	recv:
		for {
			select {
			case <-exitChan:
				// Labeled break: a bare break would only exit the select and
				// the stream would keep being read after shutdown.
				break recv
			default:
			}
			res, err := stream.Recv()
			if err != nil {
				rs.logDebug("replValueStore: error with stream to ring service %q: %s", ringServer, err)
				break
			}
			atomic.AddInt32(activity, 1)
			if res != nil {
				if r, err := ring.LoadRing(bytes.NewBuffer(res.Ring)); err != nil {
					rs.logDebug("replValueStore: error with ring received from stream to ring service %q: %s", ringServer, err)
				} else {
					// This will cache the ring if ringCachePath is not empty.
					rs.SetRing(r)
					// Resets the exponential sleeper since we had success.
					sleeperTicks = 2
					rs.logDebug("replValueStore: got new ring from stream to ring service %q: %d", ringServer, res.Version)
				}
			}
		}
		close(connDoneChan)
		sleeper()
	}
}
// Startup is not required to use the ReplValueStore; it will automatically
// connect to backend stores as needed. However, if you'd like to use the ring
// service to receive ring updates and have the ReplValueStore automatically
// update itself accordingly, Startup will launch a connector to that service.
// Otherwise, you will need to call SetRing yourself to inform the
// ReplValueStore of which backends to connect to.
func (rs *ReplValueStore) Startup(ctx context.Context) error {
	rs.ringLock.Lock()
	defer rs.ringLock.Unlock()
	// Already running; nothing to do.
	if rs.ringServerExitChan != nil {
		return nil
	}
	rs.ringServerExitChan = make(chan struct{})
	go rs.ringServerConnector(rs.ringServerExitChan)
	return nil
}
// Shutdown will close all connections to backend stores and shutdown any
// running ring service connector. Note that the ReplValueStore can still be
// used after Shutdown, it will just start reconnecting to backends again. To
// relaunch the ring service connector, you will need to call Startup.
func (rs *ReplValueStore) Shutdown(ctx context.Context) error {
	rs.ringLock.Lock()
	defer rs.ringLock.Unlock()
	if rs.ringServerExitChan != nil {
		close(rs.ringServerExitChan)
		rs.ringServerExitChan = nil
	}
	rs.storesLock.Lock()
	defer rs.storesLock.Unlock()
	for addr, stc := range rs.stores {
		// Map entries may hold nil (cleared by SetRing or the failed-connect
		// retry goroutine); skip them rather than dereferencing nil.
		if stc != nil {
			if err := stc.store.Shutdown(ctx); err != nil {
				rs.logDebug("replValueStore: error during shutdown of store %s: %s", addr, err)
			}
		}
		delete(rs.stores, addr)
		// Honor cancellation between per-store shutdowns.
		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
		}
	}
	return nil
}
// EnableWrites is a no-op for this client; writes are always enabled.
func (rs *ReplValueStore) EnableWrites(ctx context.Context) error {
	return nil
}

// DisableWrites is not supported by this client.
func (rs *ReplValueStore) DisableWrites(ctx context.Context) error {
	return errors.New("cannot disable writes with this client at this time")
}

// Flush is a no-op for this client; writes are sent to the backends
// immediately.
func (rs *ReplValueStore) Flush(ctx context.Context) error {
	return nil
}

// AuditPass is not supported by this client.
func (rs *ReplValueStore) AuditPass(ctx context.Context) error {
	return errors.New("audit passes not available with this client at this time")
}

// Stats returns a placeholder; this client does not aggregate per-store
// statistics.
func (rs *ReplValueStore) Stats(ctx context.Context, debug bool) (fmt.Stringer, error) {
	return noStats, nil
}

// ValueCap returns the maximum value size accepted by Write.
func (rs *ReplValueStore) ValueCap(ctx context.Context) (uint32, error) {
	return uint32(rs.valueCap), nil
}
// Lookup asks every responsible backend store for the key's timestamp and
// value length, returning the data from the newest timestamp seen. If the
// newest response was a not-found, a ReplValueStoreErrorNotFound is
// returned so callers can distinguish it; errors from only a minority of
// stores are logged and dropped.
func (rs *ReplValueStore) Lookup(ctx context.Context, keyA, keyB uint64) (int64, uint32, error) {
	type rettype struct {
		timestampMicro int64
		length         uint32
		err            ReplValueStoreError
	}
	ec := make(chan *rettype)
	stores, err := rs.storesFor(ctx, keyA)
	if err != nil {
		return 0, 0, err
	}
	for _, s := range stores {
		go func(s *replValueStoreAndTicketChan) {
			ret := &rettype{}
			var err error
			// The ticket channel bounds concurrent requests per store.
			select {
			case <-s.ticketChan:
				ret.timestampMicro, ret.length, err = s.store.Lookup(ctx, keyA, keyB)
				s.ticketChan <- struct{}{}
			case <-ctx.Done():
				err = ctx.Err()
			}
			if err != nil {
				ret.err = &replValueStoreError{store: s.store, err: err}
			}
			ec <- ret
		}(s)
	}
	var timestampMicro int64
	var length uint32
	var hadNotFoundErr bool
	var errs ReplValueStoreErrorSlice
	// "for range" (no loop variable) replaces the unidiomatic "for _ = range".
	for range stores {
		ret := <-ec
		if ret.timestampMicro > timestampMicro || timestampMicro == 0 {
			timestampMicro = ret.timestampMicro
			length = ret.length
			hadNotFoundErr = ret.err != nil && store.IsNotFound(ret.err.Err())
		}
		if ret.err != nil {
			errs = append(errs, ret.err)
		}
	}
	if hadNotFoundErr {
		nferrs := make(ReplValueStoreErrorNotFound, len(errs))
		for i, v := range errs {
			nferrs[i] = v
		}
		return timestampMicro, length, nferrs
	}
	// At least one store succeeded; log and drop the minority errors.
	if len(errs) < len(stores) {
		for _, err := range errs {
			rs.logDebug("replValueStore: error during lookup: %s", err)
		}
		errs = nil
	}
	if errs == nil {
		return timestampMicro, length, nil
	}
	return timestampMicro, length, errs
}
// Read retrieves the value for the key from every responsible backend
// store, keeping the value with the newest timestamp seen. If value is
// non-nil the result is appended to it. A not-found from the newest store
// is returned as a ReplValueStoreErrorNotFound; minority errors are logged
// and dropped.
func (rs *ReplValueStore) Read(ctx context.Context, keyA uint64, keyB uint64, value []byte) (int64, []byte, error) {
	type rettype struct {
		timestampMicro int64
		value          []byte
		err            ReplValueStoreError
	}
	ec := make(chan *rettype)
	stores, err := rs.storesFor(ctx, keyA)
	if err != nil {
		rs.logDebug("replValueStore Read %x %x: error from storesFor: %s", keyA, keyB, err)
		return 0, nil, err
	}
	for _, s := range stores {
		go func(s *replValueStoreAndTicketChan) {
			ret := &rettype{}
			var err error
			// The ticket channel bounds concurrent requests per store.
			select {
			case <-s.ticketChan:
				ret.timestampMicro, ret.value, err = s.store.Read(ctx, keyA, keyB, nil)
				s.ticketChan <- struct{}{}
			case <-ctx.Done():
				err = ctx.Err()
			}
			if err != nil {
				ret.err = &replValueStoreError{store: s.store, err: err}
			}
			ec <- ret
		}(s)
	}
	var timestampMicro int64
	var rvalue []byte
	var hadNotFoundErr bool
	var errs ReplValueStoreErrorSlice
	// "for range" (no loop variable) replaces the unidiomatic "for _ = range".
	for range stores {
		ret := <-ec
		if ret.timestampMicro > timestampMicro || timestampMicro == 0 {
			timestampMicro = ret.timestampMicro
			rvalue = ret.value
			hadNotFoundErr = ret.err != nil && store.IsNotFound(ret.err.Err())
		}
		if ret.err != nil {
			errs = append(errs, ret.err)
		}
	}
	if value != nil && rvalue != nil {
		rvalue = append(value, rvalue...)
	}
	for _, err := range errs {
		rs.logDebug("replValueStore Read %x %x: error during read: %s", keyA, keyB, err)
	}
	if hadNotFoundErr {
		nferrs := make(ReplValueStoreErrorNotFound, len(errs))
		for i, v := range errs {
			nferrs[i] = v
		}
		rs.logDebug("replValueStore Read %x %x: returning at point1: %d %d %v", keyA, keyB, timestampMicro, len(rvalue), nferrs)
		return timestampMicro, rvalue, nferrs
	}
	// At least one store succeeded; drop the minority errors (already logged).
	if len(errs) < len(stores) {
		errs = nil
	}
	if errs == nil {
		rs.logDebug("replValueStore Read %x %x: returning at point2: %d %d", keyA, keyB, timestampMicro, len(rvalue))
		return timestampMicro, rvalue, nil
	}
	rs.logDebug("replValueStore Read %x %x: returning at point3: %d %d %v", keyA, keyB, timestampMicro, len(rvalue), errs)
	return timestampMicro, rvalue, errs
}
// Write stores value at timestampMicro on every responsible backend store
// and returns the highest timestamp previously stored for the key. Errors
// from fewer than half the stores are logged and dropped, since a majority
// of replicas accepted the write.
//
// The leftover debug panics on zero-length values (marked REMOVEME in the
// original) have been removed; the backend stores report their own errors
// for invalid values.
func (rs *ReplValueStore) Write(ctx context.Context, keyA uint64, keyB uint64, timestampMicro int64, value []byte) (int64, error) {
	if len(value) > rs.valueCap {
		return 0, fmt.Errorf("value length of %d > %d", len(value), rs.valueCap)
	}
	type rettype struct {
		oldTimestampMicro int64
		err               ReplValueStoreError
	}
	ec := make(chan *rettype)
	stores, err := rs.storesFor(ctx, keyA)
	if err != nil {
		return 0, err
	}
	for _, s := range stores {
		go func(s *replValueStoreAndTicketChan) {
			ret := &rettype{}
			var err error
			// The ticket channel bounds concurrent requests per store.
			select {
			case <-s.ticketChan:
				ret.oldTimestampMicro, err = s.store.Write(ctx, keyA, keyB, timestampMicro, value)
				s.ticketChan <- struct{}{}
			case <-ctx.Done():
				err = ctx.Err()
			}
			if err != nil {
				ret.err = &replValueStoreError{store: s.store, err: err}
			}
			ec <- ret
		}(s)
	}
	var oldTimestampMicro int64
	var errs ReplValueStoreErrorSlice
	for range stores {
		ret := <-ec
		if ret.err != nil {
			errs = append(errs, ret.err)
		} else if ret.oldTimestampMicro > oldTimestampMicro {
			oldTimestampMicro = ret.oldTimestampMicro
		}
	}
	// Tolerate a minority of failures: log and drop them.
	if len(errs) < (len(stores)+1)/2 {
		for _, err := range errs {
			rs.logDebug("replValueStore: error during write: %s", err)
		}
		errs = nil
	}
	if errs == nil {
		return oldTimestampMicro, nil
	}
	return oldTimestampMicro, errs
}
// Delete removes the key at timestampMicro on every responsible backend
// store and returns the highest timestamp previously stored for it. Errors
// from fewer than half the stores are logged and dropped, since a majority
// of replicas accepted the delete.
func (rs *ReplValueStore) Delete(ctx context.Context, keyA uint64, keyB uint64, timestampMicro int64) (int64, error) {
	type rettype struct {
		oldTimestampMicro int64
		err               ReplValueStoreError
	}
	ec := make(chan *rettype)
	stores, err := rs.storesFor(ctx, keyA)
	if err != nil {
		return 0, err
	}
	for _, s := range stores {
		go func(s *replValueStoreAndTicketChan) {
			ret := &rettype{}
			var err error
			// The ticket channel bounds concurrent requests per store.
			select {
			case <-s.ticketChan:
				ret.oldTimestampMicro, err = s.store.Delete(ctx, keyA, keyB, timestampMicro)
				s.ticketChan <- struct{}{}
			case <-ctx.Done():
				err = ctx.Err()
			}
			if err != nil {
				ret.err = &replValueStoreError{store: s.store, err: err}
			}
			ec <- ret
		}(s)
	}
	var oldTimestampMicro int64
	var errs ReplValueStoreErrorSlice
	// "for range" (no loop variable) replaces the unidiomatic "for _ = range".
	for range stores {
		ret := <-ec
		if ret.err != nil {
			errs = append(errs, ret.err)
		} else if ret.oldTimestampMicro > oldTimestampMicro {
			oldTimestampMicro = ret.oldTimestampMicro
		}
	}
	// Tolerate a minority of failures: log and drop them.
	if len(errs) < (len(stores)+1)/2 {
		for _, err := range errs {
			rs.logDebug("replValueStore: error during delete: %s", err)
		}
		errs = nil
	}
	if errs == nil {
		return oldTimestampMicro, nil
	}
	return oldTimestampMicro, errs
}
// ReplValueStoreError is an error from a specific backend store, exposing
// both the originating store and the underlying error.
type ReplValueStoreError interface {
	error
	Store() store.ValueStore
	Err() error
}
// ReplValueStoreErrorSlice collects errors from multiple backend stores and
// is itself usable as an error.
type ReplValueStoreErrorSlice []ReplValueStoreError

// Error summarizes the collection: a placeholder when empty, the sole
// error's text when there is one, otherwise a count plus the first error.
func (es ReplValueStoreErrorSlice) Error() string {
	switch len(es) {
	case 0:
		return "unknown error"
	case 1:
		return es[0].Error()
	default:
		return fmt.Sprintf("%d errors, first is: %s", len(es), es[0])
	}
}
// ReplValueStoreErrorNotFound is an error slice indicating the newest
// result across the backend stores was a not-found.
type ReplValueStoreErrorNotFound ReplValueStoreErrorSlice

// Error summarizes the collection: "not found" when empty, the sole
// error's text when there is one, otherwise a count plus the first error.
func (e ReplValueStoreErrorNotFound) Error() string {
	switch len(e) {
	case 0:
		return "not found"
	case 1:
		return e[0].Error()
	default:
		return fmt.Sprintf("%d errors, first is: %s", len(e), e[0])
	}
}
// ErrNotFound marks this type as a not-found condition; it returns the
// same text as Error.
func (e ReplValueStoreErrorNotFound) ErrNotFound() string {
	return e.Error()
}
// replValueStoreError pairs an error with the backend store that produced
// it; it implements ReplValueStoreError.
type replValueStoreError struct {
	store store.ValueStore
	err   error
}

// Error returns the wrapped error's text, or a placeholder when none is
// set.
func (e *replValueStoreError) Error() string {
	if e.err != nil {
		return e.err.Error()
	}
	return "unknown error"
}

// Store returns the backend store the error came from.
func (e *replValueStoreError) Store() store.ValueStore {
	return e.store
}

// Err returns the underlying error.
func (e *replValueStoreError) Err() error {
	return e.err
}
|
package command
import (
"context"
"github.com/quintans/go-clean-ddd/internal/app"
"github.com/quintans/go-clean-ddd/internal/domain"
)
// SendEmailHandler handles SendEmailCommand. This command handler would
// belong to a separate microservice responsible for sending emails.
type SendEmailHandler interface {
	Handle(context.Context, SendEmailCommand) error
}

// SendEmailCommand carries the data needed to send a confirmation email.
type SendEmailCommand struct {
	ID string // confirmation ID, appended to the root URL to build the link
	Email domain.Email
}

// SendEmail sends confirmation emails through an EmailSender port; it
// implements SendEmailHandler.
type SendEmail struct {
	sender app.EmailSender
	rootUrl string
}
// NewSendEmail builds a SendEmail handler from the link root URL and the
// email sending port.
func NewSendEmail(rootUrl string, sender app.EmailSender) SendEmail {
	h := SendEmail{}
	h.rootUrl = rootUrl
	h.sender = sender
	return h
}
// Handle sends the confirmation email, building the link from the
// configured root URL plus the command's ID.
func (h SendEmail) Handle(ctx context.Context, e SendEmailCommand) error {
	link := h.rootUrl + e.ID
	return h.sender.Send(ctx, e.Email, link)
}
|
package testutils
import (
"bytes"
"io/ioutil"
"net/http"
"testing"
"github.com/dnaeon/go-vcr/cassette"
"github.com/dnaeon/go-vcr/recorder"
)
// RecordHTTP wraps tests and records all http requests made with the default
// http transport in a file with the given name. If the file exists,
// requests are replayed.
func RecordHTTP(m *testing.M, filename string) int {
	origTransport := http.DefaultTransport
	rec, err := recorder.New(filename)
	if err != nil {
		panic(err)
	}
	// Match on the default criteria plus, when the request has a body, an
	// exact body comparison (an empty body matches any recorded body).
	rec.SetMatcher(func(req *http.Request, c cassette.Request) bool {
		if !cassette.DefaultMatcher(req, c) {
			return false
		}
		if req.Body == nil {
			return true
		}
		var buf bytes.Buffer
		if _, err := buf.ReadFrom(req.Body); err != nil {
			return false
		}
		// Reading consumed the body; restore it for the actual request.
		req.Body = ioutil.NopCloser(&buf)
		return buf.String() == "" || buf.String() == c.Body
	})
	defer func() {
		http.DefaultTransport = origTransport
		rec.Stop()
	}()
	http.DefaultTransport = rec
	return m.Run()
}
|
// auth
package auth
import (
"io"
"net/http"
"aliyun/oss/common"
"strings"
"bytes"
"crypto/hmac"
"encoding/base64"
"crypto/sha1"
"sort"
"hash"
)
// Sign builds the OSS request signature: it canonicalizes the OSS-prefixed
// headers, assembles the string-to-sign (verb, Content-MD5, Content-Type,
// Date, canonicalized headers, resource) and returns the base64-encoded
// HMAC-SHA1 of it keyed with accessKeySecret.
func Sign(accessKeySecret string, verb string, header http.Header, resource string) string {
	hs := make([]string, 0, len(header))
	for k := range header {
		if strings.HasPrefix(k, common.HeaderOssPrefix) { //canonical format
			hs = append(hs, strings.ToLower(k))
		}
	}
	sort.Strings(hs)
	var signBuf bytes.Buffer
	signBuf.WriteString(verb)
	signBuf.WriteByte('\n')
	signBuf.WriteString(header.Get(common.HeaderContentMd5))
	signBuf.WriteByte('\n')
	signBuf.WriteString(header.Get(common.HeaderContentType))
	signBuf.WriteByte('\n')
	signBuf.WriteString(header.Get(common.HeaderDate))
	signBuf.WriteByte('\n')
	// CanonicalizedOSSHeaders: lowercased key, colon, trimmed value, newline,
	// in sorted key order.
	for _, h := range hs {
		signBuf.WriteString(h)
		signBuf.WriteByte(':')
		signBuf.WriteString(strings.TrimSpace(header.Get(h)))
		signBuf.WriteByte('\n')
	}
	signBuf.WriteString(resource)
	// sha1.New already has the func() hash.Hash signature hmac.New expects;
	// no wrapper closure is needed.
	var newHash func() hash.Hash = sha1.New
	hm := hmac.New(newHash, []byte(accessKeySecret))
	io.WriteString(hm, signBuf.String())
	return base64.StdEncoding.EncodeToString(hm.Sum(nil))
}
|
package main
import (
"bytes"
"flag"
"io/ioutil"
"net/http"
"os/exec"
"regexp"
"strconv"
"sync"
"gopkg.in/yaml.v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/log"
)
// Command-line flags controlling the exporter.
var (
	showVersion   = flag.Bool("version", false, "Print version information.")
	configFile    = flag.String("config.file", "config.yml", "shell exporter configuration file.")
	listenAddress = flag.String("web.listen-address", ":9191", "The address to listen on for HTTP requests.")
	metricsPath   = flag.String("web.telemetry-path", "/metrics", "Path under which to expose metrics.")
)
// Config is the root of the YAML configuration: a list of shell-backed
// metrics.
type Config struct {
	Shells []*Shell `yaml:"shells"`
}

// Shell describes one metric produced by running a shell command and
// parsing its output with a named-capture-group regexp. The first five
// fields come from YAML; the rest are populated at init/collect time.
type Shell struct {
	Name string `yaml:"name"`
	Help string `yaml:"help"`
	Cmd string `yaml:"cmd"`
	ConstLabels map[string]string `yaml:"const_labels"`
	LabelsRegexp string `yaml:"labels_regexp"`
	Bin string `yaml:"bin"`
	// VariableLabels holds the named groups of LabelsRegexp except "value".
	VariableLabels []string
	// Metrics is rebuilt on every collection cycle.
	Metrics []prometheus.Metric
	// Output is the stdout of the last command run.
	Output string
	// MatchMaps holds one named-group capture map per regexp match.
	MatchMaps []map[string]string
	Desc *prometheus.Desc
}
// ShellManger holds the parsed config and the live shells; it implements
// prometheus.Collector.
// NOTE(review): "Manger" looks like a typo for "Manager", but renaming
// would touch every caller, so it is kept as-is.
type ShellManger struct {
	Config Config
	Shells []*Shell
}
func findStringSubmatchMaps(re *regexp.Regexp, s string) (matchMaps []map[string]string) {
matchMaps = make([]map[string]string, 0)
matchs := re.FindAllStringSubmatch(s, -1)
labels := re.SubexpNames()[1:]
for _, match := range matchs {
matchMap := make(map[string]string)
for index, name := range labels {
matchMap[name] = match[index+1]
}
matchMaps = append(matchMaps, matchMap)
}
return
}
// NewShellManger reads and parses the YAML config file named by the
// -config.file flag and wraps it in a ShellManger.
func NewShellManger() (*ShellManger, error) {
	raw, err := ioutil.ReadFile(*configFile)
	if err != nil {
		log.Errorln("read config fail", err, *configFile)
		return nil, err
	}
	var config Config
	if err := yaml.Unmarshal(raw, &config); err != nil {
		log.Errorln("parse yaml fail", err, *configFile)
		return nil, err
	}
	return &ShellManger{Config: config}, nil
}
// initShellManger copies the parsed shells out of the config and
// initializes each one's label set and prometheus descriptor.
func (s *ShellManger) initShellManger() {
	s.Shells = s.Config.Shells
	for i := range s.Shells {
		s.Shells[i].init()
	}
}
// Describe implements prometheus.Collector by emitting each shell's
// descriptor.
func (s *ShellManger) Describe(ch chan<- *prometheus.Desc) {
	for _, shell := range s.Shells {
		ch <- shell.Desc
	}
}
// Collect implements prometheus.Collector by running every shell and
// forwarding the resulting metrics.
func (s *ShellManger) Collect(ch chan<- prometheus.Metric) {
	s.runShells(ch)
}
// runShells executes every configured shell command concurrently, turning
// each one's output into metrics sent on ch. Blocks until all shells have
// finished.
func (s *ShellManger) runShells(ch chan<- prometheus.Metric) {
	var wg sync.WaitGroup
	for _, shell := range s.Shells {
		shell.Metrics = make([]prometheus.Metric, 0)
		wg.Add(1)
		// shell is passed as an argument so each goroutine gets its own copy
		// of the loop variable.
		go func(sh *Shell) {
			defer wg.Done()
			sh.run()
			sh.match()
			sh.collect()
			for _, m := range sh.Metrics {
				ch <- m
			}
		}(shell)
	}
	wg.Wait()
}
// init derives the variable label names from the named capture groups of
// LabelsRegexp (excluding the special "value" group, which carries the
// sample value) and builds the prometheus descriptor. Exits the process if
// any capture group is unnamed.
func (s *Shell) init() {
	lRe := regexp.MustCompile(s.LabelsRegexp)
	labels := lRe.SubexpNames()
	s.VariableLabels = make([]string, 0)
	for _, v := range labels[1:] {
		if v == "" {
			// Every capture group must be named so it can map to a label.
			log.Fatalf("ERROR labels_regexp: '%s', '%s'", s.LabelsRegexp, labels)
		}
		if v != "value" {
			s.VariableLabels = append(s.VariableLabels, v)
		}
	}
	desc := prometheus.NewDesc(
		s.Name,
		s.Help,
		s.VariableLabels,
		s.ConstLabels,
	)
	s.Desc = desc
}
// run executes the shell command (Bin -c Cmd) and stores its stdout in
// s.Output. A failing command is logged along with its stderr instead of
// being silently ignored; Output still receives whatever stdout produced.
func (s *Shell) run() {
	var stdout, stderr bytes.Buffer
	cmd := exec.Command(s.Bin, "-c", s.Cmd)
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr
	if err := cmd.Run(); err != nil {
		log.Errorf("Shell Run failed: '%s', %s, stderr: '%s'", s.Cmd, err, stderr.String())
	}
	s.Output = stdout.String()
}
// match parses the command output with LabelsRegexp, storing one map of
// named-group captures per match in MatchMaps.
func (s *Shell) match() {
	re := regexp.MustCompile(s.LabelsRegexp)
	s.MatchMaps = findStringSubmatchMaps(re, s.Output)
	log.Debugf("Shell Run: '%s', '%s', '%s'", s.Cmd, s.Output, s.MatchMaps)
}
// collect converts each regexp match into a gauge metric. The named group
// "value" supplies the sample value; all other named groups supply label
// values in VariableLabels order.
func (s *Shell) collect() {
	for _, matchMap := range s.MatchMaps {
		labelValues := make([]string, 0, len(s.VariableLabels))
		for _, name := range s.VariableLabels {
			labelValues = append(labelValues, matchMap[name])
		}
		valueStr := matchMap["value"]
		// Surface unparseable values instead of silently recording 0. The
		// metric is still emitted with value 0, preserving the previous
		// behavior of keeping the series present.
		value, err := strconv.ParseFloat(valueStr, 64)
		if err != nil {
			log.Errorf("Shell collect: cannot parse value '%s' for metric %s: %s", valueStr, s.Name, err)
		}
		metric := prometheus.MustNewConstMetric(
			s.Desc,
			prometheus.GaugeValue,
			value,
			labelValues...,
		)
		s.Metrics = append(s.Metrics, metric)
	}
}
// main parses flags, loads the shell config, registers the collector, and
// serves the metrics endpoint until the server fails.
func main() {
	flag.Parse()
	newShellManger, err := NewShellManger()
	if err != nil {
		log.Fatalf("new shellmanager fail, %s", err)
	}
	newShellManger.initShellManger()
	prometheus.MustRegister(newShellManger)
	http.Handle("/metrics", promhttp.Handler())
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte(`<html>
             <head><title>Shell Exporter</title></head>
             <body>
             <h1>Shell Exporter</h1>
             <p><a href="` + *metricsPath + `">Metrics</a></p>
             </body>
             </html>`))
	})
	log.Infoln("Start Server and Listening on", *listenAddress)
	// ListenAndServe only returns on failure; surface the error instead of
	// silently exiting with status 0.
	if err := http.ListenAndServe(*listenAddress, nil); err != nil {
		log.Fatalln("http server failed:", err)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.