text
stringlengths 11
4.05M
|
|---|
package main
import (
"fmt"
"time"
)
// main fires one goroutine per slice element and then sleeps long enough
// for all of them to print before the process exits.
func main() {
	//START, OMIT
	values := []int{1, 2, 3, 4, 5}
	for _, n := range values {
		go func(i int) { // HLxx
			//can do anything else
			fmt.Println(i)
		}(n) // HLxx
	}
	//STOP, OMIT
	<-time.After(2 * time.Second)
}
|
/*
> 2018年4月10日
> 开始抽时间来完成GUI窗口化的szssp信息化设备管理工具
## 程序流程修改:
- 运行程序
- 自动开始获取设备相关信息并设置临时地址
- 由使用者选择硬盘序列号和网卡MAC地址,查询远程数据库中的信息
- 如果查询到信息就显示出来,如果没有信息就提示使用者提交相关信息
- 然后最终设置网络IP地址.
*/
package main
import (
"fmt"
"net"
"os"
"os/exec"
"strings"
"syscall"
"github.com/sciter-sdk/go-sciter"
"github.com/sciter-sdk/go-sciter/window"
"github.com/tealeg/xlsx"
)
// Spreadsheet file on the network file server that stores the device ledger.
const xlsxFile string = "//33.66.96.14/public/2018台账.xlsx"

// Temporary IP to assign when no fixed IP is configured yet, so the network
// file server stays reachable.
const tempIp string = "33.66.100.255"

// Shared handle to the opened xlsx workbook (nil until the query button
// successfully opens it).
var xlsxObjects *xlsx.File

// Package-level error slot shared by the UI event handlers.
var err error
// systemer names the three operations the tool performs for a device:
// collect local info, query the remote ledger, and upload updates.
//
// NOTE(review): the concrete methods on *thisComputer take parameters and
// return values, so they do not actually satisfy this interface as written —
// confirm whether it is still used anywhere.
type systemer interface {
	getdeviceInfo()
	getDbinfo()
	updateDeviceInfo()
}
// thisComputer aggregates everything the tool knows about the local machine
// plus the values looked up from (or destined for) the remote ledger.
type thisComputer struct {
	userName        string              // owner name from the ledger
	department      string              // owning department from the ledger
	hardDiskNumbers []string            // all detected hard-disk serial numbers
	harddisk        string              // serial number selected in the UI
	macs            []map[string]string // all NICs as {"Name": ..., "Mac": ...}
	mac             string              // MAC address selected in the UI
	ip              string              // IP from the ledger / to be assigned
	osType          string              // "win10", "win7", "winxp" or "null"
}
// getdeviceInfo collects local hardware facts: it returns the hard-disk
// serial numbers (h) and the NIC name/MAC pairs (c), and records the
// detected Windows flavor in p.osType.
func (p *thisComputer) getdeviceInfo() (h []string, c []map[string]string) {
	// Hard-disk serial numbers via WMI; the first field is the column header.
	// Guard against empty output (the original indexed [1:] unconditionally,
	// which panics when wmic produces nothing).
	ids := runCmd("wmic diskdrive get serialnumber")
	if fields := strings.Fields(ids); len(fields) > 1 {
		h = append(h, fields[1:]...)
	}
	// NIC list: keep only interfaces that actually expose a MAC address.
	intf, err := net.Interfaces()
	if err != nil {
		return
	}
	for _, v := range intf {
		tmp := map[string]string{
			"Name": v.Name,
			"Mac":  v.HardwareAddr.String(),
		}
		if tmp["Mac"] != "" {
			c = append(c, tmp)
		}
	}
	// Windows version, sniffed from the "ver" banner. Order matters:
	// the "10" check must come first, as on the original chain.
	s := runCmd("ver")
	switch {
	case strings.Contains(s, "10"):
		p.osType = "win10"
	case strings.Contains(s, "6.1"):
		p.osType = "win7"
	case strings.Contains(s, "5.1"):
		p.osType = "winxp"
	default:
		p.osType = "null"
	}
	return
}
// searchIp scans the first sheet of the workbook for a cell equal to t and
// sends the full contents of every matching row (flattened into one slice)
// on searchIpChan. Exactly one send always happens — a nil workbook or no
// match sends nil — so a blocking receiver can never deadlock. (The original
// returned without sending when the workbook was nil.)
func searchIp(t string, xlsxObjects *xlsx.File, searchIpChan chan []string) {
	var tlist []string
	// Deliver the result no matter how this function exits.
	defer func() { searchIpChan <- tlist }()
	if xlsxObjects == nil {
		return
	}
	for k, v := range xlsxObjects.Sheets[0].Rows {
		for _, l := range v.Cells {
			if l.Value == t {
				// Append every cell of the matching row.
				for _, ce := range xlsxObjects.Sheets[0].Rows[k].Cells {
					tlist = append(tlist, ce.Value)
				}
			}
		}
	}
}
// getDbinfo looks up the selected hard-disk serial and NIC MAC in the ledger
// workbook (concurrently) and fills userName/department/ip on p.
// No return value: the results are written straight into the struct.
func (p *thisComputer) getDbinfo(xlsxObjects *xlsx.File) {
	// Both values come from the two drop-down boxes in the window.
	hdId := p.harddisk
	cMac := p.mac
	if xlsxObjects == nil {
		return
	}
	// One channel per lookup: the original shared a single channel, so the
	// "disk" result could actually be the MAC row (send order is racy).
	hdChan := make(chan []string)
	macChan := make(chan []string)
	go searchIp(hdId, xlsxObjects, hdChan)
	go searchIp(cMac, xlsxObjects, macChan)
	hdTarget, cmacTarget := <-hdChan, <-macChan
	// A usable row must carry at least 11 cells (indices 2, 3 and 10 are
	// read); the original indexed unconditionally and panicked on short rows.
	switch {
	case len(hdTarget) > 10:
		p.userName = hdTarget[3]
		p.department = hdTarget[2]
		p.ip = hdTarget[10]
	case len(cmacTarget) > 10:
		p.userName = cmacTarget[3]
		p.department = cmacTarget[2]
		p.ip = cmacTarget[10]
	default:
		// Neither key matched a complete row in the ledger.
		p.userName = "查询失败"
		p.department = "查询失败"
		p.ip = "查询失败"
	}
}
// updateDeviceInfo appends the current device record as a new row to the
// ledger workbook and saves it back to the network share.
func (p *thisComputer) updateDeviceInfo(xlsxObjects *xlsx.File) {
	if xlsxObjects == nil {
		return
	}
	row := xlsxObjects.Sheets[0].AddRow()
	// Column layout mirrors the ledger sheet; unused columns stay blank.
	devInfo := []string{" ", " ",
		p.department,
		p.userName,
		" ", " ", " ", " ", " ",
		p.harddisk,
		p.ip,
		p.mac, " "}
	row.WriteSlice(&devInfo, -1)
	// A silent save failure would make the user believe the upload succeeded,
	// so at least report it on the console (the original discarded the error).
	if err := xlsxObjects.Save(xlsxFile); err != nil {
		fmt.Println(err)
	}
}
// 调用系统CMD命令执行外部程序
func runCmd(s string) (echo string) {
t := exec.Command("cmd", "/C", s)
// 隐藏黑框,一开始居然没想到这个,我还想半天为什么会闪黑框.
t.SysProcAttr = &syscall.SysProcAttr{HideWindow: true}
o, _ := t.Output()
echo = string(o)
return
}
// 根据系统类型设置ip地址
func setIp(name string, ip string, ostype string) {
var ipstr string
var dnsstr string
// 判断当前操作系统版本 避免出现兼容性问题
switch ostype {
case "winxp":
ipstr = strings.Join([]string{"netsh interface ip set address",
"name=\"" + name + "\"",
"source=static",
"addr=" + ip,
"mask=255.255.224.0 gateway=33.66.99.169 gwmetric=0"}, " ")
dnsstr = strings.Join([]string{"netsh interface ip set dns",
"name=\"" + name + "\"",
"source=static addr=1.1.1.1 register=primary"}, " ")
case "win10":
ipstr = strings.Join([]string{"netsh interface ip set address",
"name=\"" + name + "\"",
"source=static",
"address=" + ip,
"mask=255.255.224.0 gateway=33.66.99.169"}, " ")
dnsstr = strings.Join([]string{"netsh interface ip add dnsservers",
name,
"address=1.1.1.1"}, " ")
case "win7":
ipstr = strings.Join([]string{"netsh interface ip set address",
"name=\"" + name + "\"",
"source=static",
"address=" + ip,
"mask=255.255.224.0 gateway=33.66.99.169"}, " ")
dnsstr = strings.Join([]string{"netsh interface ip add dnsservers",
name,
"address=1.1.1.1"}, " ")
default:
os.Exit(0)
}
// 设置ip 掩码 网关
runCmd(ipstr)
// 设置DNS
runCmd(dnsstr)
return
}
// getWindowSelectValue reads the two drop-down boxes: it stores the selected
// MAC and hard-disk serial on p and returns the selected NIC's display name.
func (p *thisComputer) getWindowSelectValue(root *sciter.Element) (cardName string) {
	// The NIC drop-down value has the form "<interface name>|<mac>".
	editNetcad, _ := root.SelectFirst(".right>.label>#slNet")
	v, _ := editNetcad.GetValue()
	netCadinfo := strings.Split(v.String(), "|")
	cardName = netCadinfo[0]
	// Guard: a malformed value without '|' must not panic the UI handler
	// (the original indexed [1] unconditionally).
	if len(netCadinfo) > 1 {
		p.mac = netCadinfo[1]
	}
	// Hard-disk serial drop-down; used later by the query button.
	editHds, _ := root.SelectFirst(".right>.label>#slHdn")
	k, _ := editHds.GetValue()
	p.harddisk = k.String()
	return
}
// setIpButtonOnclick wires the "set IP" button: it applies either the IP
// typed into the edit box or, when that is empty, the temporary IP, using
// the NIC currently selected in the drop-down.
func (p *thisComputer) setIpButtonOnclick(root *sciter.Element) {
	btn1, _ := root.SelectById("btn1")
	// Button click handler.
	btn1.OnClick(func() {
		// Interface name of the currently selected NIC (also refreshes p).
		v := p.getWindowSelectValue(root)
		// Check whether the IP edit box already holds an address.
		editIp, _ := root.SelectFirst(".right>.label>#eIp")
		ip, _ := editIp.GetValue()
		// Prefer the edit-box IP; fall back to the temporary IP otherwise.
		// Runs in a goroutine so the UI stays responsive while netsh works.
		if ip.String() != "" {
			go setIp(v, ip.String(), p.osType)
		} else {
			go setIp(v, tempIp, p.osType)
		}
		btn1.CallFunction("popmsgbox", sciter.NewValue("ip地址设置完毕,无论你是否选择了正确的网卡,\n接下来10秒,程序会自动尝试连接服务器获取信息,但如果网络不通畅,\n还请耐心等待重复尝试并排查网络故障."))
	})
}
// getInfoButtonOnclick wires the "query" button: it (re)opens the ledger
// workbook, looks up the selected disk serial / MAC, and fills the name,
// department and IP edit boxes with the result.
func (p *thisComputer) getInfoButtonOnclick(root *sciter.Element) {
	btn2, _ := root.SelectById("btn2")
	btn2.OnClick(func() {
		// Copy the current drop-down selections onto p.
		p.getWindowSelectValue(root)
		// Deliberately assigns the package-level xlsxObjects/err so other
		// handlers (e.g. the upload button) can reuse the opened workbook.
		xlsxObjects, err = xlsx.OpenFile(xlsxFile)
		if err != nil {
			fmt.Println(err)
			btn2.CallFunction("popmsgbox", sciter.NewValue("很抱歉:虽然已经努力的访问过服务器数据文件,\n但似乎还是失败了,请再次检查您的网络状态后重试!\n"))
			return
		}
		// Query the ledger.
		p.getDbinfo(xlsxObjects)
		// Push the results into the corresponding edit boxes.
		editName, _ := root.SelectFirst("#eName")
		editGroup, _ := root.SelectFirst("#eGroup")
		editIp, _ := root.SelectFirst("#eIp")
		editName.SetValue(sciter.NewValue(p.userName))
		editGroup.SetValue(sciter.NewValue(p.department))
		editIp.SetValue(sciter.NewValue(p.ip))
	})
}
// UpdateButtonOnclick wires the "upload" button: it appends the current
// device record to the ledger workbook, which must already have been opened
// by the query button.
func (p *thisComputer) UpdateButtonOnclick(root *sciter.Element) {
	btn3, _ := root.SelectById("btn3")
	btn3.OnClick(func() {
		// The workbook is opened lazily by the query handler; refuse if absent.
		if xlsxObjects == nil {
			btn3.CallFunction("popmsgbox", sciter.NewValue("致命错误:访问内网服务器中数据文件失败,\n请先选择正确网卡点击设置网络按钮并保证连接进入内网!"))
			return
		}
		// Append the record and save the workbook back to the share.
		p.updateDeviceInfo(xlsxObjects)
		btn3.CallFunction("popmsgbox", sciter.NewValue("上传数据完毕。"))
	})
}
// closeWindow wires the window's custom close button: clicking it terminates
// the whole process immediately.
func (p *thisComputer) closeWindow(root *sciter.Element) {
	closeBtn, _ := root.SelectById("closebtn")
	closeBtn.OnClick(func() {
		os.Exit(0)
	})
}
// newWindowtextSet populates the window widgets with the detected device
// data: disk serials and NICs into their drop-downs, OS version into its box.
func newWindowtextSet(root *sciter.Element, hds []string, mas []map[string]string, cmp *thisComputer) {
	hdSelect, _ := root.SelectById("slHdn")
	netSelect, _ := root.SelectById("slNet")
	// Hard-disk serial numbers.
	for _, serial := range hds {
		hdSelect.CallFunction("addOp", sciter.NewValue(serial))
	}
	// NIC entries, encoded as "name|mac" for later splitting.
	for _, nic := range mas {
		netSelect.CallFunction("addMac", sciter.NewValue(nic["Name"]+"|"+nic["Mac"]))
	}
	// Detected Windows version.
	versionEdit, _ := root.SelectFirst(".right>.label>#eVersion")
	versionEdit.SetValue(sciter.NewValue(cmp.osType))
}
// main builds the sciter window, fills it with the detected device data,
// wires all button handlers, and runs the UI event loop.
func main() {
	// cmp holds all device/ledger state shared by the handlers.
	cmp := new(thisComputer)
	hds, mas := cmp.getdeviceInfo()
	// w is the window object.
	w, _ := window.New(sciter.SW_TITLEBAR|sciter.SW_CONTROLS|sciter.SW_MAIN, &sciter.Rect{Left: 0, Top: 0, Right: 720, Bottom: 340})
	w.LoadFile("newgui.html")
	w.SetTitle("三洲特管信息化台账录入系统 v1.0")
	// Root element used to look up widgets for event wiring.
	root, _ := w.GetRootElement()
	// Fill the widgets with the detected data.
	newWindowtextSet(root, hds, mas, cmp)
	// "Set IP" button.
	cmp.setIpButtonOnclick(root)
	// "Query info" button.
	cmp.getInfoButtonOnclick(root)
	// "Upload data" button.
	cmp.UpdateButtonOnclick(root)
	// Custom close button.
	cmp.closeWindow(root)
	// x := make(chan *xlsx.File)
	// go func() {
	// if xlsxObjects == nil {
	// for {
	// xl, _ := xlsx.OpenFile(xlsxFile)
	// if xl != nil {
	// x <- xl
	// break
	// }
	// }
	// }
	// }()
	// xlsxObjects = <-x
	w.Show()
	w.Run()
	// A temporary IP needs to be assigned at startup so the share is reachable.
}
|
package web
import (
"github.com/vlad-doru/microhiro/gateway/web/loggers"
"fmt"
"github.com/Sirupsen/logrus"
"github.com/julienschmidt/httprouter"
"math/rand"
"net"
"net/http"
"net/url"
"sync"
"time"
)
// Seed the global math/rand source once at startup so request fingerprints
// differ across process restarts.
func init() {
	rand.Seed(time.Now().Unix())
}
// logFn is a deferred logging action, executed on the request's dedicated
// logging goroutine (started in InitRequest).
type logFn func()

// Request encapsulates more data about each request.
type Request struct {
	*http.Request
	// The fingerprint will be really useful for debugging requests.
	FingerPrint int64
	StartTime   time.Time
	ClientIP    string
	Endpoint    *Endpoint
	Service     *Service
	Params      *httprouter.Params
	// InitURL is a copy of the original URL, taken before the reverse proxy
	// rewrites Request.URL.
	InitURL url.URL
	// Channels used for syncing various conditions.
	done     chan struct{}
	cancel   chan struct{}
	canceled bool
	// Once used to make sure we only cancel/complete once.
	finalize sync.Once
	// Used to pipe all logging through it, in a separate goroutine.
	logs chan logFn
}
// InitRequest returns a Request wrapping req, stamped with a random
// fingerprint, start time and client IP, and with its logging goroutine
// already running.
func InitRequest(req *http.Request, e *Endpoint, s *Service, ps *httprouter.Params) *Request {
	r := &Request{
		Request:     req,
		FingerPrint: rand.Int63(),
		StartTime:   time.Now(),
		Endpoint:    e,
		Service:     s,
		Params:      ps,
		// Copy the URL now: the reverse proxy mutates req.URL later.
		InitURL: *req.URL,
		done:    make(chan struct{}),
		cancel:  make(chan struct{}),
		// Buffer of one so a single pending log never blocks the caller.
		logs: make(chan logFn, 1),
	}
	r.ClientIP, _, _ = net.SplitHostPort(req.RemoteAddr)
	// Dedicated goroutine that drains and executes log closures in order.
	go func() {
		for fn := range r.logs {
			fn()
		}
	}()
	return r
}
// DoneNotify returns the channel that is closed when the request completes
// successfully (see DoneRequest).
func (r *Request) DoneNotify() <-chan struct{} {
	return r.done
}

// CancelNotify returns the channel that is closed when the request is
// canceled (see CancelRequest).
func (r *Request) CancelNotify() <-chan struct{} {
	return r.cancel
}
// CancelRequest will cancel the request and log it: it writes status (with
// its standard text) to rw, closes the cancel channel and marks the request
// canceled. It returns true only for the first finalizing call — a request
// can be finalized (canceled or done) exactly once via r.finalize.
func (r *Request) CancelRequest(status int, msg string, rw ResponseWriter) bool {
	ok := false
	r.finalize.Do(func() {
		rw.WriteHeader(status)
		fmt.Fprint(rw, http.StatusText(status))
		close(r.cancel)
		r.canceled = true
		ok = true
	})
	// NOTE(review): the msg parameter is never used — confirm whether it was
	// meant to be written to rw instead of http.StatusText(status).
	return ok
}
// DoneRequest marks a request as been proxied by closing its Done channel.
// It returns true only for the first finalizing call (done or cancel).
// The rw parameter is unused; it is kept for signature symmetry with
// CancelRequest.
func (r *Request) DoneRequest(rw ResponseWriter) bool {
	ok := false
	r.finalize.Do(func() {
		close(r.done)
		ok = true
	})
	return ok
}
// FlushLogging blocks until all the logging has been outputted.
// Useful to enforce that we see a certain log message.
//
// NOTE(review): this receive discards one queued closure without executing
// it and competes with the consumer goroutine started in InitRequest; it
// only unblocks reliably after LogSummary has closed r.logs. Confirm this
// is the intended flush mechanism.
func (r *Request) FlushLogging() {
	// Wait for the last know channel to be flushed.
	<-r.logs
}
// log enqueues fn for execution on the request's logging goroutine.
func (r *Request) log(fn logFn) {
	r.logs <- fn
}
// LogSummary logs all the details about a request, then closes the logging
// channel — which terminates the goroutine started in InitRequest. It must
// therefore be the last Log* call for this request: any later r.log would
// panic sending on the closed channel.
func (r *Request) LogSummary(rw ResponseWriter) {
	// Awesome async logging.
	// TimeElapsed will be in milliseconds (captured now, not at log time).
	timeElapsed := time.Since(r.StartTime).Seconds() * 1000
	r.log(func() {
		loggers.RequestLogger.WithFields(logrus.Fields{
			"Canceled":      r.canceled,
			"ElapsedTime":   timeElapsed,
			"FingerPrint":   r.FingerPrint,
			"IP":            r.ClientIP,
			"Method":        r.Method,
			"Path":          r.Endpoint.Path,
			"Protocol":      r.Proto,
			"ResponseBytes": rw.Size(),
			"ServiceName":   r.Service.Name,
			"StartTime":     r.StartTime,
			"Status":        rw.StatusCode(),
			"StatusText":    http.StatusText(rw.StatusCode()),
			"TargetHost":    r.URL.Host,
			"TargetPath":    r.URL.Path,
			"URI":           r.InitURL.Path,
			"UserAgent":     r.UserAgent(),
		}).Info("Request handling finished.")
	})
	// We close the logging channel.
	close(r.logs)
}
// BOILERPLATE CODE FOR LOGGING. ALL THE IMPORTANT STUFF LIES ABOVE.
// LogInfo logs args at Info level together with the request's context fields.
func (r *Request) LogInfo(args ...interface{}) {
	r.log(func() {
		fields := logrus.Fields{
			"FingerPrint": r.FingerPrint,
			"Method":      r.Method,
			"Path":        r.Endpoint.Path,
			"ServiceName": r.Service.Name,
			"TargetHost":  r.URL.Host,
			"TargetPath":  r.URL.Path,
			"URI":         r.InitURL.Path,
		}
		loggers.RequestLogger.WithFields(fields).Info(args...)
	})
}
// LogInfof logs a formatted message at Info level together with the
// request's context fields.
func (r *Request) LogInfof(format string, args ...interface{}) {
	r.log(func() {
		fields := logrus.Fields{
			"FingerPrint": r.FingerPrint,
			"Method":      r.Method,
			"Path":        r.Endpoint.Path,
			"ServiceName": r.Service.Name,
			"TargetHost":  r.URL.Host,
			"TargetPath":  r.URL.Path,
			"URI":         r.InitURL.Path,
		}
		loggers.RequestLogger.WithFields(fields).Infof(format, args...)
	})
}
// LogError logs args at Error level together with the request's context fields.
func (r *Request) LogError(args ...interface{}) {
	r.log(func() {
		fields := logrus.Fields{
			"FingerPrint": r.FingerPrint,
			"Method":      r.Method,
			"Path":        r.Endpoint.Path,
			"ServiceName": r.Service.Name,
			"TargetHost":  r.URL.Host,
			"TargetPath":  r.URL.Path,
			"URI":         r.InitURL.Path,
		}
		loggers.RequestLogger.WithFields(fields).Error(args...)
	})
}
// LogErrorf logs a formatted message at Error level together with the
// request's context fields.
func (r *Request) LogErrorf(format string, args ...interface{}) {
	r.log(func() {
		fields := logrus.Fields{
			"FingerPrint": r.FingerPrint,
			"Method":      r.Method,
			"Path":        r.Endpoint.Path,
			"ServiceName": r.Service.Name,
			"TargetHost":  r.URL.Host,
			"TargetPath":  r.URL.Path,
			"URI":         r.InitURL.Path,
		}
		loggers.RequestLogger.WithFields(fields).Errorf(format, args...)
	})
}
// LogWarning logs args at Warning level together with the request's context
// fields.
func (r *Request) LogWarning(args ...interface{}) {
	r.log(func() {
		fields := logrus.Fields{
			"FingerPrint": r.FingerPrint,
			"Method":      r.Method,
			"Path":        r.Endpoint.Path,
			"ServiceName": r.Service.Name,
			"TargetHost":  r.URL.Host,
			"TargetPath":  r.URL.Path,
			"URI":         r.InitURL.Path,
		}
		loggers.RequestLogger.WithFields(fields).Warning(args...)
	})
}
// LogWarningf logs a formatted message at Warning level together with the
// request's context fields.
func (r *Request) LogWarningf(format string, args ...interface{}) {
	r.log(func() {
		fields := logrus.Fields{
			"FingerPrint": r.FingerPrint,
			"Method":      r.Method,
			"Path":        r.Endpoint.Path,
			"ServiceName": r.Service.Name,
			"TargetHost":  r.URL.Host,
			"TargetPath":  r.URL.Path,
			"URI":         r.InitURL.Path,
		}
		loggers.RequestLogger.WithFields(fields).Warningf(format, args...)
	})
}
|
package webservice
import (
"net/http"
"github.com/gin-gonic/gin"
"github.com/rjjatson/bot-kuraifu-chan/internal/webhook/line"
)
// SetupRoute add routes to gin router: a liveness probe under
// basePath/ping and the LINE webhook endpoint under basePath/webhook/line.
func SetupRoute(basePath string,
	lineClient *line.Client,
	router *gin.Engine) {
	// Liveness probe: respond 200 with an empty body.
	ping := func(c *gin.Context) {
		c.Writer.WriteHeader(http.StatusOK)
	}
	router.GET(basePath+"/ping", ping)
	// LINE webhook events are handled by the injected client.
	router.POST(basePath+"/webhook/line", lineClient.HandleWebHook)
}
|
package main
import (
"bufio"
"bytes"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
)
// main is a thin wrapper: the whole program is MakeRequest.
func main() {
	MakeRequest()
}
// MakeRequest prompts for a server address on stdin, performs an HTTP GET,
// and stores the JSON portion of the response body in the first free
// Received_<n>.json file. All failures are fatal.
func MakeRequest() {
	reader := bufio.NewReader(os.Stdin)
	fmt.Println("Enter address you want to connect to")
	fmt.Println("Example: http://127.0.0.1:8080")
	Addr, _ := reader.ReadString('\n')
	// TrimSpace also strips the '\r' left behind by Windows line endings,
	// which the original cutset trim missed.
	Addr = strings.TrimSpace(Addr)
	resp, err := http.Get(Addr)
	if err != nil {
		log.Fatalln(err)
	}
	defer resp.Body.Close() // the original leaked the response body
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatalln(err)
	}
	log.Println(string(body))
	fmt.Println("Receiving JSON from Server")
	// Trims away the prefix (e.g. "ok") before the JSON object. Guard the
	// no-'{' case: the original sliced with -1 and panicked.
	start := bytes.IndexRune(body, '{')
	if start < 0 {
		log.Fatalln("response contains no JSON object")
	}
	body = body[start:]
	// Creates a new file to store the received json profile; the original
	// "%s_%d_%s" format produced a stray underscore before ".json".
	prefix := "Received"
	suffix := ".json"
	for i := 1; ; i++ {
		fname := fmt.Sprintf("%s_%d%s", prefix, i, suffix)
		if _, err := os.Stat(fname); !os.IsNotExist(err) {
			continue // name taken (or stat failed another way): try the next
		}
		f, err := os.Create(fname)
		if err != nil {
			log.Fatalln(err)
		}
		if _, err := f.Write(body); err != nil {
			f.Close()
			log.Fatalln(err)
		}
		f.Close() // the original never closed the created file
		fmt.Println("Json profile stored in", fname)
		break
	}
}
|
package configs
import (
"os"
"github.com/joho/godotenv"
)
// Config reads configuration values from the process environment, reloading
// them from the .env file on every lookup.
type Config struct{}

// Get returns the value of key after loading .env; it panics when the .env
// file cannot be read.
func (c *Config) Get(key string) string {
	if err := godotenv.Load(".env"); err != nil {
		panic(err)
	}
	return os.Getenv(key)
}
|
// Copyright (c) 2018/2019 The DevCo developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package xzc
import (
"errors"
"fmt"
rpc "github.com/zcoinofficial/xzcd/rpcclient"
"github.com/zcoinofficial/xzcd/txscript"
"github.com/zcoinofficial/xzcd/wire"
"github.com/zcoinofficial/xzcutil"
"golang.org/x/crypto/ripemd160"
)
// contractArgs specifies the common parameters used to create the initiator's
// and participant's contract.
type contractArgs struct {
	them       *xzcutil.AddressPubKeyHash // counterparty's P2PKH address
	amount     xzcutil.Amount             // value locked into the contract
	locktime   int64                      // CLTV locktime gating the refund path
	secretHash []byte                     // SHA-256 hash of the initiator's secret
}
// builtContract houses the details regarding a contract and the contract
// payment transaction, as well as the transaction to perform a refund.
type builtContract struct {
	contract     []byte         // redeem script of the atomic swap contract
	contractP2SH xzcutil.Address // P2SH address wrapping the redeem script
	contractTx   *wire.MsgTx    // signed transaction paying into the contract
	contractFee  xzcutil.Amount // fee paid by contractTx
}
// buildContract creates a contract for the parameters specified in args, using
// wallet RPC to generate an internal address to redeem the refund and to sign
// the payment to the contract transaction.
func buildContract(testnet bool, rpcclient *rpc.Client, args *contractArgs) (*builtContract, error) {
	// Fresh wallet-internal address that will receive the refund.
	refundAddr, err := getRawChangeAddress(testnet, rpcclient)
	if err != nil {
		return nil, fmt.Errorf("getrawchangeaddress: %v", err)
	}
	// The contract script needs the HASH160 of the refund address.
	refundAddrH, ok := refundAddr.(interface {
		Hash160() *[ripemd160.Size]byte
	})
	if !ok {
		return nil, errors.New("unable to create hash160 from change address")
	}
	contract, err := atomicSwapContract(refundAddrH.Hash160(), args.them.Hash160(),
		args.locktime, args.secretHash)
	if err != nil {
		return nil, err
	}
	// Wrap the redeem script in a P2SH address/output script.
	contractP2SH, err := xzcutil.NewAddressScriptHash(contract, getChainParams(testnet))
	if err != nil {
		return nil, err
	}
	contractP2SHPkScript, err := txscript.PayToAddrScript(contractP2SH)
	if err != nil {
		return nil, err
	}
	feePerKb, _, err := getFeePerKb(rpcclient)
	if err != nil {
		return nil, err
	}
	// Build, fund and sign the transaction that pays into the contract.
	unsignedContract := wire.NewMsgTx(txVersion)
	unsignedContract.AddTxOut(wire.NewTxOut(int64(args.amount), contractP2SHPkScript))
	unsignedContract, contractFee, err := fundRawTransaction(rpcclient, unsignedContract, feePerKb)
	if err != nil {
		return nil, fmt.Errorf("fundrawtransaction: %v", err)
	}
	contractTx, complete, err := rpcclient.SignRawTransaction(unsignedContract)
	if err != nil {
		return nil, fmt.Errorf("signrawtransaction: %v", err)
	}
	if !complete {
		return nil, errors.New("signrawtransaction: failed to completely sign contract transaction")
	}
	return &builtContract{
		contract,
		contractP2SH,
		contractTx,
		contractFee,
	}, nil
}
// atomicSwapContract returns an output script that may be redeemed by one of
// two signature scripts:
//
// <their sig> <their pubkey> <initiator secret> 1
//
// <my sig> <my pubkey> 0
//
// The first signature script is the normal redemption path done by the other
// party and requires the initiator's secret. The second signature script is
// the refund path performed by us, but the refund can only be performed after
// locktime.
func atomicSwapContract(pkhMe, pkhThem *[ripemd160.Size]byte, locktime int64, secretHash []byte) ([]byte, error) {
	b := txscript.NewScriptBuilder()
	b.AddOp(txscript.OP_IF) // Normal redeem path
	{
		// Require initiator's secret to be a known length that the redeeming
		// party can audit. This is used to prevent fraud attacks between two
		// currencies that have different maximum data sizes.
		b.AddOp(txscript.OP_SIZE)
		b.AddInt64(secretSize)
		b.AddOp(txscript.OP_EQUALVERIFY)
		// Require initiator's secret to be known to redeem the output.
		b.AddOp(txscript.OP_SHA256)
		b.AddData(secretHash)
		b.AddOp(txscript.OP_EQUALVERIFY)
		// Verify their signature is being used to redeem the output. This
		// would normally end with OP_EQUALVERIFY OP_CHECKSIG but this has been
		// moved outside of the branch to save a couple bytes.
		b.AddOp(txscript.OP_DUP)
		b.AddOp(txscript.OP_HASH160)
		b.AddData(pkhThem[:])
	}
	b.AddOp(txscript.OP_ELSE) // Refund path
	{
		// Verify locktime and drop it off the stack (which is not done by
		// CLTV).
		b.AddInt64(locktime)
		b.AddOp(txscript.OP_CHECKLOCKTIMEVERIFY)
		b.AddOp(txscript.OP_DROP)
		// Verify our signature is being used to redeem the output. This would
		// normally end with OP_EQUALVERIFY OP_CHECKSIG but this has been moved
		// outside of the branch to save a couple bytes.
		b.AddOp(txscript.OP_DUP)
		b.AddOp(txscript.OP_HASH160)
		b.AddData(pkhMe[:])
	}
	b.AddOp(txscript.OP_ENDIF)
	// Complete the signature check shared by both branches.
	b.AddOp(txscript.OP_EQUALVERIFY)
	b.AddOp(txscript.OP_CHECKSIG)
	return b.Script()
}
|
package openrtb_ext
// ExtImpAdView is the bidder-specific imp.ext payload for the AdView adapter.
type ExtImpAdView struct {
	MasterTagID string `json:"placementId"` // placement identifier, "placementId" on the wire
	AccountID   string `json:"accountId"`   // publisher account identifier
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package assistant
import (
"context"
"time"
"chromiumos/tast/local/assistant"
"chromiumos/tast/local/chrome/uiauto/browser"
"chromiumos/tast/local/chrome/uiauto/launcher"
"chromiumos/tast/testing"
)
// Register the FocusAndroidApp test with the Tast framework, with one
// parameterization per supported Android container flavor.
func init() {
	testing.AddTest(&testing.Test{
		Func:         FocusAndroidApp,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Test that assistant focuses Android app if both web and Android versions are open",
		Attr:         []string{"group:mainline", "informational"},
		Contacts:     []string{"yawano@google.com", "assistive-eng@google.com"},
		SoftwareDeps: []string{"chrome", "chrome_internal"},
		Fixture:      "assistantWithArc",
		Timeout:      3 * time.Minute,
		Params: []testing.Param{{
			ExtraSoftwareDeps: []string{"android_p"},
		}, {
			Name:              "vm",
			ExtraSoftwareDeps: []string{"android_vm"},
		}},
	})
}
// FocusAndroidApp verifies that when both the Android and web versions of
// Google News are open, asking the Assistant to open Google News focuses the
// Android app rather than the web tab.
func FocusAndroidApp(ctx context.Context, s *testing.State) {
	const (
		QueryOpenGoogleNews = "Open Google News"
	)
	fixtData := s.FixtValue().(*assistant.FixtData)
	cr := fixtData.Chrome
	a := fixtData.ARC
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Failed to create test API connection: ", err)
	}
	if err := assistant.InstallTestApkAndWaitReady(ctx, tconn, a); err != nil {
		s.Fatal("Failed to install a test app: ", err)
	}
	if err := launcher.LaunchApp(tconn, assistant.GoogleNewsAppTitle)(ctx); err != nil {
		s.Fatal("Failed to launch Google News Android: ", err)
	}
	if err := assistant.WaitForGoogleNewsAppActivation(ctx, tconn); err != nil {
		s.Fatal("Failed to wait Google News Android gets active: ", err)
	}
	// TODO(b/245349115): Remove this work around once the bug gets fixed.
	s.Log("Wait 3 seconds for Ash and Arc WM state sync")
	// Fix: the original discarded testing.Sleep's result and tested a stale
	// err variable, so an interrupted sleep went undetected.
	if err := testing.Sleep(ctx, 3*time.Second); err != nil {
		s.Fatal("Failed to wait 3 seconds for Ash and Arc WM state sync: ", err)
	}
	if _, err = browser.Launch(ctx, tconn, cr, assistant.GoogleNewsWebURL); err != nil {
		s.Fatal("Failed to launch Google News Web: ", err)
	}
	if err := assistant.WaitForGoogleNewsWebActivation(ctx, tconn); err != nil {
		s.Fatal("Failed to wait Google News Web gets active: ", err)
	}
	if _, err := assistant.SendTextQuery(ctx, tconn, QueryOpenGoogleNews); err != nil {
		s.Fatal("Failed to send Assistant text query: ", err)
	}
	if err := assistant.WaitForGoogleNewsAppActivation(ctx, tconn); err != nil {
		s.Fatal("Failed to wait Google News Android gets active: ", err)
	}
}
|
// Copyright (c) 2018 The MATRIX Authors
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php
package util
import (
"encoding/json"
"errors"
"io/ioutil"
"math/big"
"sort"
"github.com/MatrixAINetwork/go-matrix/core/matrixstate"
"github.com/MatrixAINetwork/go-matrix/mc"
"github.com/MatrixAINetwork/go-matrix/log"
"github.com/MatrixAINetwork/go-matrix/core/state"
"github.com/MatrixAINetwork/go-matrix/core/types"
"github.com/MatrixAINetwork/go-matrix/params"
"github.com/MatrixAINetwork/go-matrix/common"
"github.com/MatrixAINetwork/go-matrix/params/manversion"
)
const (
	// PackageName tags every log line emitted by this package.
	PackageName = "奖励util"
	// RewardFullRate is the denominator for rate calculations (10000 = 100%).
	RewardFullRate = uint64(10000)
	// Stop flags a stopped reward configuration.
	Stop = "0"
	// Reward categories: transaction-fee rewards vs. block rewards.
	TxsReward = 0
	BlkReward = 1
)
var (
	//ValidatorBlockReward *big.Int = new(big.Int).Exp(big.NewInt(10), big.NewInt(18), big.NewInt(0)) // Block reward in wei for successfully mining a block
	MultilCoinBlockReward *big.Int = new(big.Int).Exp(big.NewInt(10), big.NewInt(18), big.NewInt(0)) // Block reward in wei for successfully mining a block upward from Byzantium
	// Denominator. NOTE(review): the original comment said 10000 but the value
	// is 1e9 — confirm which is intended.
	ByzantiumTxsRewardDen *big.Int = big.NewInt(1000000000) // Block reward in wei for successfully mining a block upward from Byzantium
	ValidatorsBlockReward *big.Int = big.NewInt(5e+18)
	MinersBlockReward     *big.Int = big.NewInt(5e+18)
	ManPrice              *big.Int = big.NewInt(1e18)
	ThousandthManPrice    *big.Int = big.NewInt(1e15)
	Precision             *big.Int = big.NewInt(1)
	// Identifiers for the reward calculation formula variants.
	CalcAlpha   = "1"
	CalcGamma   = "2"
	CalcDelta   = "3"
	CalcEpsilon = "4"
)
// ChainReader is the read-only view of the blockchain this package needs:
// header lookup, state access, and topology/election graph retrieval.
type ChainReader interface {
	// Config retrieves the blockchain's chain configuration.
	//Config() *params.ChainConfig
	// CurrentHeader retrieves the current header from the local chain.
	// GetHeader retrieves a block header from the database by hash and number.
	//GetHeader(hash common.Hash, number uint64) *types.Header
	// GetHeaderByNumber retrieves a block header from the database by number.
	// GetHeaderByHash retrieves a block header from the database by its hash.
	GetHeaderByHash(hash common.Hash) *types.Header
	// State access at a given set of coin roots, at the current head, or at a
	// specific block hash.
	StateAt(root []common.CoinRoot) (*state.StateDBManage, error)
	State() (*state.StateDBManage, error)
	GetGraphByState(state matrixstate.StateDB) (*mc.TopologyGraph, *mc.ElectGraph, error)
	GetGraphByHash(hash common.Hash) (*mc.TopologyGraph, *mc.ElectGraph, error)
	StateAtBlockHash(hash common.Hash) (*state.StateDBManage, error)
	GetAncestorHash(sonHash common.Hash, ancestorNumber uint64) (common.Hash, error)
}
// StateDB is the minimal state access this package needs: per-coin balances
// plus the matrix key/value extension data.
type StateDB interface {
	GetBalance(typ string, addr common.Address) common.BalanceType
	GetMatrixData(hash common.Hash) (val []byte)
	SetMatrixData(hash common.Hash, val []byte)
}
// DepositInfo describes one node's stake: the deposited amount and the fixed
// stock (share weight) used when splitting rewards.
type DepositInfo struct {
	Deposit  *big.Int
	FixStock uint64
}
// SetAccountRewards accumulates reward into rewards[account]. Zero rewards,
// nil maps and the zero (illegal) address are ignored.
func SetAccountRewards(rewards map[common.Address]*big.Int, account common.Address, reward *big.Int) {
	if 0 == reward.Cmp(big.NewInt(0)) {
		return
	}
	if nil == rewards {
		return
	}
	if account.Equal(common.Address{}) {
		log.Error(PackageName, "奖励的地址非法", account.Hex())
		return
	}
	if existing, ok := rewards[account]; ok {
		existing.Add(existing, reward)
	} else {
		// Store a copy: the original stored the caller's *big.Int, so a later
		// accumulation on this entry mutated the caller's value in place.
		rewards[account] = new(big.Int).Set(reward)
	}
}
// CalcRateReward returns rewardAmount scaled by rate/RewardFullRate,
// truncating toward zero.
func CalcRateReward(rewardAmount *big.Int, rate uint64) *big.Int {
	scaled := new(big.Int).Mul(rewardAmount, new(big.Int).SetUint64(rate))
	return scaled.Div(scaled, new(big.Int).SetUint64(RewardFullRate))
}
// CalcStockRate splits reward across depositNodes proportionally to each
// node's FixStock. Keys are visited in sorted hex order so the result map is
// built deterministically. Returns nil when depositNodes is empty.
func CalcStockRate(reward *big.Int, depositNodes map[common.Address]DepositInfo) map[common.Address]*big.Int {
	if 0 == len(depositNodes) {
		log.Error(PackageName, "抵押列表为空", "")
		return nil
	}
	// Total stock is the denominator of every node's share.
	totalStock := uint64(0)
	for _, v := range depositNodes {
		totalStock = v.FixStock + totalStock
	}
	//log.Info(PackageName, "计算抵押总额,账户股权", totalStock)
	// Sort the addresses (as hex strings) for deterministic iteration.
	sortedKeys := make([]string, 0)
	for k := range depositNodes {
		sortedKeys = append(sortedKeys, k.String())
	}
	sort.Strings(sortedKeys)
	rewards := make(map[common.Address]*big.Int)
	for _, k := range sortedKeys {
		// share = reward * stock / totalStock (integer division).
		temp := new(big.Int).Mul(reward, new(big.Int).SetUint64(uint64(depositNodes[common.HexToAddress(k)].FixStock)))
		oneNodeReward := new(big.Int).Div(temp, new(big.Int).SetUint64(uint64(totalStock)))
		rewards[common.HexToAddress(k)] = oneNodeReward
		//log.Debug(PackageName, "计算奖励金额,账户", k, "奖励金额", oneNodeReward)
	}
	return rewards
}
// CalcInterestReward splits reward across accounts proportionally to their
// interest amounts. Returns nil when the interest map is empty, the total
// interest is not positive, or reward is zero.
func CalcInterestReward(reward *big.Int, interest map[common.Address]*big.Int) map[common.Address]*big.Int {
	if 0 == len(interest) {
		log.Error(PackageName, "利息列表为空", "")
		return nil
	}
	// Total interest is the denominator of every account's share.
	totalInterest := new(big.Int)
	for _, v := range interest {
		totalInterest.Add(totalInterest, v)
	}
	if totalInterest.Cmp(big.NewInt(0)) <= 0 {
		log.Error(PackageName, "计算的总利息值非法", totalInterest)
		return nil
	}
	log.Trace(PackageName, "计算的总抵押值", totalInterest)
	if 0 == reward.Cmp(big.NewInt(0)) {
		log.Error(PackageName, "定点化奖励金额为0", "")
		return nil
	}
	rewards := make(map[common.Address]*big.Int)
	for k, v := range interest {
		// share = reward * interest / totalInterest (integer division).
		temp := new(big.Int).Mul(reward, v)
		rewards[k] = new(big.Int).Div(temp, totalInterest)
		//log.Trace(PackageName, "计算奖励金额,账户", k, "抵押", v, "金额", rewards[k])
	}
	return rewards
}
// MergeReward folds every entry of src into dst via SetAccountRewards.
// An empty src is a no-op; a nil dst is logged and ignored.
func MergeReward(dst map[common.Address]*big.Int, src map[common.Address]*big.Int) {
	if len(src) == 0 {
		return
	}
	if dst == nil {
		log.Error(PackageName, "dst is nil", dst)
		return
	}
	for account, reward := range src {
		SetAccountRewards(dst, account, reward)
	}
}
// CalcN returns how many halving periods of length halfNum fit into num.
// It returns 0 when halfNum is 0 to avoid a division by zero.
func CalcN(halfNum uint64, num uint64) uint64 {
	if halfNum == 0 {
		return 0
	}
	return num / halfNum
}
// CalcRewardMount attenuates blockReward by (x/mc.RewardFullRate)^n using
// integer big-math. For n == 0 it returns blockReward itself (same pointer,
// matching the original behavior).
func CalcRewardMount(blockReward *big.Int, n uint64, x uint16) *big.Int {
	if n == 0 {
		return blockReward
	}
	// reward = blockReward * x^n / RewardFullRate^n
	rate := new(big.Int).Exp(new(big.Int).SetUint64(uint64(x)), new(big.Int).SetUint64(n), big.NewInt(0))
	numerator := new(big.Int).Mul(blockReward, rate)
	denominator := new(big.Int).Exp(new(big.Int).SetUint64(mc.RewardFullRate), new(big.Int).SetUint64(n), big.NewInt(0))
	return new(big.Int).Div(numerator, denominator)
}
// CalcRewardMountByNumber computes the attenuated reward for block number num
// (halving period halfNum, attenuation rate attenuationRate) and verifies the
// paying account can cover it. Returns 0 when the input reward is negative,
// the balance cannot be fetched, or the balance is insufficient.
func CalcRewardMountByNumber(st StateDB, blockReward *big.Int, num uint64, halfNum uint64, address common.Address, attenuationRate uint16) *big.Int {
	if blockReward.Cmp(big.NewInt(0)) < 0 {
		log.Warn(PackageName, "折半计算的奖励金额不合法", blockReward)
		return big.NewInt(0)
	}
	balance, err := getBalance(st, address)
	if nil != err {
		log.Error(PackageName, "账户余额获取错误,账户为", address.Hex())
		return big.NewInt(0)
	}
	// Number of elapsed halving periods, then the attenuated amount.
	n := CalcN(halfNum, num)
	reward := CalcRewardMount(blockReward, n, attenuationRate)
	if balance[common.MainAccount].Balance.Cmp(reward) < 0 {
		log.Error(PackageName, "账户余额不足,余额为", balance[common.MainAccount].Balance.String())
		return big.NewInt(0)
	} else {
		return reward
	}
}
// getBalance returns the native-coin (MAN) balance entries for address,
// validating presence and a non-negative main-account balance.
// It is exactly getBalanceByCoinType with the native coin type — the
// original duplicated that function's body verbatim.
func getBalance(st StateDB, address common.Address) (common.BalanceType, error) {
	return getBalanceByCoinType(st, address, params.MAN_COIN)
}
// getBalanceByCoinType returns the balance entries of address for cointype,
// erroring when no entries exist or the main-account balance is negative.
// On error the returned balance is nil and must not be indexed.
func getBalanceByCoinType(st StateDB, address common.Address, cointype string) (common.BalanceType, error) {
	balance := st.GetBalance(cointype, address)
	if len(balance) == 0 {
		log.Error(PackageName, "账户余额获取不到", "")
		return nil, errors.New("账户余额获取不到")
	}
	if balance[common.MainAccount].Balance.Cmp(big.NewInt(0)) < 0 {
		log.Warn(PackageName, "发送账户余额不合法,地址", address.Hex(), "余额", balance[common.MainAccount].Balance)
		return nil, errors.New("发送账户余额不合法")
	}
	return balance, nil
}
// getRewardSum returns the sum of all reward amounts in reardMap.
func getRewardSum(reardMap map[common.Address]*big.Int) *big.Int {
	total := new(big.Int)
	for _, amount := range reardMap {
		total.Add(total, amount)
	}
	return total
}
// CointypeCheck verifies that, per coin type, the accumulated transaction-fee
// rewards in rewardIn do not exceed the gas-reward account's balance. It
// returns rewardIn unchanged on success and nil when any coin overdraws.
func CointypeCheck(st StateDB, rewardIn []common.RewarTx) []common.RewarTx {
	// Sum the tx-fee rewards per coin type.
	rewardMap := make(map[string]*big.Int)
	for _, v := range rewardIn {
		if v.RewardTyp == common.RewardTxsType {
			if _, ok := rewardMap[v.CoinType]; ok {
				rewardMap[v.CoinType] = new(big.Int).Add(rewardMap[v.CoinType], getRewardSum(v.To_Amont))
			} else {
				rewardMap[v.CoinType] = getRewardSum(v.To_Amont)
			}
		}
	}
	for coinType, all := range rewardMap {
		balance, err := getBalanceByCoinType(st, common.TxGasRewardAddress, coinType)
		// Check the error BEFORE touching balance: it is nil on failure, and
		// the original dereferenced it for logging first, which panics.
		if nil != err {
			continue
		}
		log.Info(PackageName, "发放币种", coinType, "计算的奖励总额为", all, "账户余额为", balance[common.MainAccount].Balance)
		if all.Cmp(balance[common.MainAccount].Balance) > 0 {
			log.Crit(PackageName, "发放币种", coinType, "交易费奖励余额不足,计算的奖励总额为", all, "账户余额为", balance[common.MainAccount].Balance)
			return nil
		}
	}
	return rewardIn
}
// AccumulatorCheck filters rewardIn down to the reward transactions whose
// funding account can cover them. Per category (miner / validator /
// interest / lottery) it sums the payouts that do not go to the contract
// address and drops the whole category when the funding account's main
// balance is insufficient. Transaction-fee rewards (RewardTxsType) are
// always passed through — they are checked separately by CointypeCheck.
func AccumulatorCheck(st StateDB, rewardIn []common.RewarTx) []common.RewarTx {
	// BUG FIX: the first two lookups were swapped — the validator balance
	// was read from the miner reward address and vice versa, so each sum
	// was compared against the wrong account.
	minerBalance, _ := getBalance(st, common.BlkMinerRewardAddress)
	ValidatorBalance, _ := getBalance(st, common.BlkValidatorRewardAddress)
	interestBalance, _ := getBalance(st, common.InterestRewardAddress)
	lotteryBalance, _ := getBalance(st, common.LotteryRewardAddress)
	allValidator := new(big.Int)
	allMiner := new(big.Int)
	allInterest := new(big.Int)
	allLottery := new(big.Int)
	for _, v := range rewardIn {
		if v.Fromaddr == common.BlkMinerRewardAddress {
			for account, amount := range v.To_Amont {
				if !account.Equal(common.ContractAddress) {
					allMiner.Add(allMiner, amount)
				}
			}
		}
		if v.Fromaddr == common.BlkValidatorRewardAddress {
			for account, amount := range v.To_Amont {
				if !account.Equal(common.ContractAddress) {
					allValidator.Add(allValidator, amount)
				}
			}
		}
		if v.Fromaddr == common.InterestRewardAddress {
			for account, amount := range v.To_Amont {
				if !account.Equal(common.ContractAddress) {
					allInterest.Add(allInterest, amount)
				}
			}
		}
		if v.Fromaddr == common.LotteryRewardAddress {
			for account, amount := range v.To_Amont {
				if !account.Equal(common.ContractAddress) {
					allLottery.Add(allLottery, amount)
				}
			}
		}
	}
	// enough reports whether need is covered by the main account of
	// balance. The getBalance errors above are deliberately ignored, so a
	// failed lookup yields a nil balance; previously that nil was indexed
	// here and panicked — treat it as "not enough" instead.
	enough := func(balance common.BalanceType, need *big.Int) bool {
		if len(balance) == 0 {
			return false
		}
		return need.Cmp(balance[common.MainAccount].Balance) <= 0
	}
	rewardOut := make([]common.RewarTx, 0)
	if enough(minerBalance, allMiner) {
		for _, v := range rewardIn {
			if v.RewardTyp == common.RewardMinerType {
				rewardOut = append(rewardOut, v)
			}
		}
	} else {
		log.Error(PackageName, "矿工账户余额不足,余额", allMiner.String())
	}
	if enough(ValidatorBalance, allValidator) {
		for _, v := range rewardIn {
			if v.RewardTyp == common.RewardValidatorType {
				rewardOut = append(rewardOut, v)
			}
		}
	} else {
		log.Error(PackageName, "验证者账户余额不足", allValidator.String())
	}
	// Transaction-fee rewards are forwarded unconditionally.
	for _, v := range rewardIn {
		if v.RewardTyp == common.RewardTxsType {
			rewardOut = append(rewardOut, v)
		}
	}
	if enough(interestBalance, allInterest) {
		for _, v := range rewardIn {
			if v.RewardTyp == common.RewardInterestType {
				rewardOut = append(rewardOut, v)
			}
		}
	} else {
		log.Error(PackageName, "利息账户余额不足", allInterest.String())
	}
	if enough(lotteryBalance, allLottery) {
		for _, v := range rewardIn {
			if v.RewardTyp == common.RewardLotteryType {
				rewardOut = append(rewardOut, v)
			}
		}
	} else {
		log.Error(PackageName, "彩票账户余额不足", allLottery.String())
	}
	return rewardOut
}
// GetPreMinerReward returns the previous miner reward(s) stored in state.
// For TxsReward the storage layout depends on the state version:
// VersionAlpha holds a single MAN-coin reward, later versions hold one
// reward per coin type. Any other rewardType reads the single block
// reward. The single-coin cases are wrapped into the multi-coin result
// shape.
func GetPreMinerReward(state StateDB, rewardType uint8) ([]mc.MultiCoinMinerOutReward, error) {
	var currentReward *mc.MinerOutReward
	var err error
	if TxsReward == rewardType {
		version := matrixstate.GetVersionInfo(state)
		switch version {
		case manversion.VersionAlpha:
			currentReward, err = matrixstate.GetPreMinerTxsReward(state)
			if err != nil {
				log.Error(PackageName, "获取矿工交易奖励金额错误", err)
				return nil, errors.New("获取矿工交易金额错误")
			}
		case manversion.VersionBeta, manversion.VersionGamma, manversion.VersionDelta, manversion.VersionAIMine, manversion.VersionZeta:
			multiCoin, err := matrixstate.GetPreMinerMultiCoinTxsReward(state)
			if err != nil {
				log.Error(PackageName, "获取矿工交易奖励金额错误", err)
				return make([]mc.MultiCoinMinerOutReward, 0), errors.New("获取矿工交易金额错误")
			}
			return multiCoin, nil
		default:
			log.Error(PackageName, "获取前矿工奖励值版本号错误", version)
			// BUG FIX: this case previously fell through with a nil
			// currentReward and dereferenced it below, causing a panic.
			return nil, errors.New("获取前矿工奖励值版本号错误")
		}
	} else {
		currentReward, err = matrixstate.GetPreMinerBlkReward(state)
		if err != nil {
			log.Error(PackageName, "获取矿工区块奖励金额错误", err)
			return nil, errors.New("获取矿工区块金额错误")
		}
	}
	// Wrap the single MAN-coin reward into the multi-coin result shape.
	minerOutReward := mc.MultiCoinMinerOutReward{CoinType: params.MAN_COIN, Reward: currentReward.Reward}
	return []mc.MultiCoinMinerOutReward{minerOutReward}, nil
}
// GetPrice returns the gas price in force for the given calc version
// string: versions lexically below CalcGamma use ManPrice, CalcGamma and
// later use ThousandthManPrice.
func GetPrice(calc string) *big.Int {
	if calc < CalcGamma {
		return ManPrice
	}
	return ThousandthManPrice
}
// GetDataByPosition returns a pointer to a copy of the first entry in
// data whose Position equals position, or nil when none matches. The
// pointer refers to a copy, not into the caller's slice.
func GetDataByPosition(data []common.OperationalInterestSlash, position uint64) *common.OperationalInterestSlash {
	for i := range data {
		if data[i].Position == position {
			entry := data[i]
			return &entry
		}
	}
	return nil
}
// debugSwitch selects the log level used by LogExtraDebug; the default 0
// matches no level, so extra debug logging is off until SetExtralevel is called.
var debugSwitch = uint8(0)
// logSwitch enables PrintLog2File when non-zero; set via SetLoglevel.
var logSwitch = uint8(0)
// LogExtraDebug logs msg and ctx at the level selected by the package
// debugSwitch. A debugSwitch value matching no known level (including
// the default 0) silently drops the message.
func LogExtraDebug(msg string, ctx ...interface{}) {
	switch debugSwitch {
	case uint8(log.LvlError):
		log.Error(msg, ctx...)
	case uint8(log.LvlWarn):
		log.Warn(msg, ctx...)
	case uint8(log.LvlInfo):
		log.Info(msg, ctx...)
	case uint8(log.LvlDebug):
		log.Debug(msg, ctx...)
	case uint8(log.LvlTrace):
		log.Trace(msg, ctx...)
	}
	// Note: the former `default: break` was removed — Go switch cases do
	// not fall through, so it was a no-op.
}
// SetExtralevel sets the log level used by LogExtraDebug (0 disables it).
func SetExtralevel(level uint8) {
	debugSwitch = level
}
// SetLoglevel enables (non-zero) or disables (zero) PrintLog2File.
func SetLoglevel(level uint8) {
	logSwitch = level
}
// PrintLog2File JSON-pretty-prints data into filename (mode 0644). It is
// a no-op unless SetLoglevel has been called with a non-zero level.
// Failures are logged, not returned.
func PrintLog2File(filename string, data interface{}) {
	// Early return instead of wrapping the whole body in the condition.
	if logSwitch == 0 {
		return
	}
	out, err := json.MarshalIndent(data, "", " ")
	if err != nil {
		log.Error("Failed to save log file", "err", err)
		return
	}
	if err := ioutil.WriteFile(filename, out, 0644); err != nil {
		log.Error("Failed to save log file", "filename", filename, "err", err)
	}
}
|
package main
import (
"fmt"
"tempconv"
)
// main prints a few reference conversions from the tempconv package:
// the boiling point of water in °F, absolute zero in K, and absolute
// zero in °F.
func main() {
	boilingF := tempconv.CToF(tempconv.BoilingC)
	zeroK := tempconv.CToK(tempconv.AbsoluteZeroC)
	zeroF := tempconv.KToF(tempconv.AbsoluteZeroK)
	fmt.Println(boilingF)
	fmt.Println(zeroK)
	fmt.Println(zeroF)
}
|
// description : Wordcounter
// author : Tom Geudens (https://github.com/tomgeudens/)
// modified : 2016/07/17
//
package main
import (
"bufio"
"bytes"
"fmt"
"strconv"
)
// WordCounter accumulates the number of whitespace-separated words
// written to it.
type WordCounter int

// Write implements io.Writer: it counts the words in p, adds them to the
// counter, and returns the word count (not the byte count) with a nil
// error.
func (counter *WordCounter) Write(p []byte) (int, error) {
	scan := bufio.NewScanner(bytes.NewReader(p))
	scan.Split(bufio.ScanWords)
	n := 0
	for scan.Scan() {
		n++
	}
	*counter += WordCounter(n) // int to WordCounter
	return n, nil
}
// String implements fmt.Stringer, rendering the current count in decimal.
func (counter *WordCounter) String() string {
	return strconv.Itoa(int(*counter)) // WordCounter to int
}
// main demonstrates WordCounter: first via a direct Write call, then via
// fmt.Fprintf, which routes its output through the counter's Write.
func main() {
	var c WordCounter
	// Direct write: count is the number of words in the literal.
	count, _ := (&c).Write([]byte(`this is a text
over multiple lines`)) // notice the backticks
	fmt.Println("count = " + strconv.Itoa(count))
	fmt.Println("wordcounter = " + c.String())
	fmt.Println("")
	var text = "this is another text"
	// Fprintf reports what our Write returned — the word count, since
	// WordCounter.Write returns words rather than bytes.
	count, _ = fmt.Fprintf(&c, "%s", text)
	fmt.Println("count = " + strconv.Itoa(count))
	fmt.Println("wordcounter = " + c.String())
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
)
// dec2bin renders a in binary (base 2) with no prefix or padding.
func dec2bin(a uint) string {
	bits := fmt.Sprintf("%b", a)
	return bits
}
// main reads the file named by the first command-line argument and
// prints the binary representation of every non-empty line, each of
// which is expected to contain one unsigned decimal integer.
func main() {
	// BUG FIX: guard against a missing argument; os.Args[1] previously
	// panicked with index out of range.
	if len(os.Args) < 2 {
		log.Fatal("usage: dec2bin <file>")
	}
	data, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer data.Close()
	var a uint
	scanner := bufio.NewScanner(data)
	for scanner.Scan() {
		if len(scanner.Text()) > 0 {
			// Sscan errors are ignored on purpose: on a malformed line, a
			// keeps its previous value (best-effort, as before).
			fmt.Sscan(scanner.Text(), &a)
			fmt.Println(dec2bin(a))
		}
	}
	// BUG FIX: surface read errors that previously went unnoticed.
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
|
package service
import (
"bytes"
"context"
"github.com/hyperjumptech/grule-rule-engine/ast"
"github.com/hyperjumptech/grule-rule-engine/builder"
engine2 "github.com/hyperjumptech/grule-rule-engine/engine"
"github.com/hyperjumptech/grule-rule-engine/pkg"
"github.com/kennykarnama/checkout-challenge/cart/entity"
"github.com/kennykarnama/checkout-challenge/cart/repository"
stockEntity "github.com/kennykarnama/checkout-challenge/stock/entity"
"github.com/kennykarnama/checkout-challenge/stock/service"
)
// CartServiceImpl implements the cart service on top of a stock service
// (inventory reservation), a cart repository (persistence), and a grule
// rule file used for checkout pricing.
type CartServiceImpl struct {
	stockService service.StockService
	repo         repository.CartRepository
	// CheckoutRuleFile is the path of the grule (.grl) rule file supplied
	// at construction time.
	CheckoutRuleFile string
}
// NewCartServiceImpl wires a CartServiceImpl from its dependencies.
func NewCartServiceImpl(stockService service.StockService, repo repository.CartRepository, checkoutRuleFile string) *CartServiceImpl {
	return &CartServiceImpl{stockService: stockService, repo: repo, CheckoutRuleFile: checkoutRuleFile}
}
// Checkout prices the cart identified by ID. It loads the cart items and
// their stock details, builds a grule knowledge base from the configured
// rule file, and runs the rules once per cart item, accumulating the
// result into the returned Checkout.
func (s *CartServiceImpl) Checkout(ctx context.Context, ID string) (*entity.Checkout, error) {
	cartItems, err := s.repo.GetCartByID(ctx, ID)
	if err != nil {
		return nil, err
	}
	skuDetails, err := s.stockService.GetStocksBySKU(ctx, entity.CartItems(cartItems).SKUs())
	if err != nil {
		return nil, err
	}
	mappedSkuDetails := stockEntity.StockItems(skuDetails).MappedBySKU()
	mappedCartItems := entity.CartItems(cartItems).Normalize()
	checkout := &entity.Checkout{
		CurrencySymbol: "$",
	}
	lib := ast.NewKnowledgeLibrary()
	rb := builder.NewRuleBuilder(lib)
	// BUG FIX: use the configured rule file; the hard-coded
	// "CheckoutRule.grl" ignored s.CheckoutRuleFile entirely.
	err = rb.BuildRuleFromResource("Checkout Calculator", "0.0.1", pkg.NewFileResource(s.CheckoutRuleFile))
	if err != nil {
		return nil, err
	}
	engine := engine2.NewGruleEngine()
	kb := lib.NewKnowledgeBaseInstance("Checkout Calculator", "0.0.1")
	// Round-trip the knowledge base through a serialized catalog to get
	// an independent instance (nkb) for rule execution.
	buff := &bytes.Buffer{}
	cat := kb.MakeCatalog()
	err = cat.WriteCatalogToWriter(buff)
	if err != nil {
		return nil, err
	}
	cat2 := &ast.Catalog{}
	// BUG FIX: the read error was previously discarded.
	if err := cat2.ReadCatalogFromReader(bytes.NewBuffer(buff.Bytes())); err != nil {
		return nil, err
	}
	nkb := cat2.BuildKnowledgeBase()
	for _, cartItem := range cartItems {
		dctx := ast.NewDataContext()
		dctx.Add("CartItem", cartItem)
		dctx.Add("Checkout", checkout)
		dctx.Add("MappedCartItem", mappedCartItems)
		dctx.Add("MappedSku", mappedSkuDetails)
		// BUG FIX: Execute errors were assigned but never checked.
		if err := engine.Execute(dctx, nkb); err != nil {
			return nil, err
		}
	}
	return checkout, nil
}
// Add reserves stock for item and then records the item in the cart
// repository.
// NOTE(review): if the repository insert fails, the stock decrement is
// not rolled back — confirm whether compensation is handled elsewhere.
func (s *CartServiceImpl) Add(ctx context.Context, item *entity.CartItem) error {
	if err := s.stockService.DecrementStockQtyBySku(ctx, item.SKU, item.Qty); err != nil {
		return err
	}
	return s.repo.Add(ctx, item)
}
|
package shared
import (
"os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/apigatewaymanagementapi"
)
// SendWSMessage pushes data to the API Gateway websocket connection
// identified by connID.
//
// NOTE(review): apiID is currently unused and the endpoint is hard-coded;
// presumably the endpoint should be derived from apiID and AWS_REGION —
// confirm before changing, as callers may rely on the fixed stage URL.
func SendWSMessage(connID, apiID string, data []byte) error {
	sess, err := session.NewSession(&aws.Config{Region: aws.String(os.Getenv("AWS_REGION"))})
	if err != nil {
		// BUG FIX: return the error instead of panicking; the function
		// already has an error result for callers to handle.
		return err
	}
	WSClient := apigatewaymanagementapi.New(sess)
	WSClient.Endpoint = "https://6hy0ivbdee.execute-api.us-west-1.amazonaws.com/prod/"
	payload := apigatewaymanagementapi.PostToConnectionInput{
		Data:         data,
		ConnectionId: aws.String(connID),
	}
	_, err = WSClient.PostToConnection(&payload)
	return err
}
|
package common
import (
"bufio"
"fmt"
"io"
"math"
"os"
"strings"
)
// Graph is an adjacency-list graph over a fixed set of vertices.
type Graph struct {
	// Nodes holds one *Node per vertex, indexed by vertex id.
	Nodes []*Node
	// vertexSize is the number of vertices; valid ids are 0..vertexSize-1.
	vertexSize uint64
}
// NewGraph returns a graph with vertexSize vertices, numbered
// 0..vertexSize-1, and no edges.
func NewGraph(vertexSize uint64) *Graph {
	nodes := make([]*Node, vertexSize)
	for id := range nodes {
		nodes[id] = NewNode(uint64(id))
	}
	return &Graph{Nodes: nodes, vertexSize: vertexSize}
}
// GetVertexSize returns the number of vertices in the graph.
func (g *Graph) GetVertexSize() uint64 {
	return g.vertexSize
}
// AddEdge adds a directed edge from src to dst. No bounds check is
// performed; an out-of-range src panics.
func (g *Graph) AddEdge(src, dst uint64) {
	g.Nodes[src].addNbr(dst)
}
// ChangeColor sets the color of the node with the given id. No bounds
// check is performed; an out-of-range id panics.
func (g *Graph) ChangeColor(id, color uint64) {
	g.Nodes[id].Color = color
}
// Node is one vertex: its id, its neighbour list, and its color.
type Node struct {
	ID uint64
	// Nbrlist holds the ids of adjacent vertices, in insertion order.
	Nbrlist []uint64
	// Color is the vertex color; math.MaxUint64 marks "uncolored"
	// (see NewNode).
	Color uint64
}
// NewNode returns a node with the given id, an empty neighbour list, and
// the sentinel color math.MaxUint64 (uncolored).
func NewNode(id uint64) *Node {
	return &Node{
		ID:      id,
		Nbrlist: make([]uint64, 0),
		Color:   math.MaxUint64,
	}
}
// addNbr appends id to the node's neighbour list. Duplicates are not
// filtered.
func (n *Node) addNbr(id uint64) {
	n.Nbrlist = append(n.Nbrlist, id)
}
// LoadGraphFromPath loads a graph from the text file at path. The first
// line holds the vertex count; every following line holds one edge as
// "src dst". Each edge is added in both directions. A line whose ids are
// out of range yields an error.
//
// NOTE(review): as before, a final line without a trailing newline is
// discarded (ReadString returns it together with io.EOF) — confirm the
// input format always ends with a newline.
func LoadGraphFromPath(path string) (g *Graph, err error) {
	file, err := os.Open(path)
	if err != nil {
		// BUG FIX: was fmt.Printf(err.Error()), which treats the error
		// text as a format string; just return the error.
		return nil, err
	}
	defer file.Close()
	reader := bufio.NewReader(file)
	str, err := reader.ReadString('\n')
	// BUG FIX: the header-read error was previously ignored, leaving
	// vertexSize silently zero on failure.
	if err != nil && err != io.EOF {
		return nil, err
	}
	var vertexSize uint64
	fmt.Fscanf(strings.NewReader(str), "%d", &vertexSize)
	g = NewGraph(vertexSize)
	var src, dst uint64
	for {
		str, err := reader.ReadString('\n')
		if err == io.EOF {
			break
		}
		// BUG FIX: a non-EOF read error previously kept the loop spinning
		// forever on the same failure.
		if err != nil {
			return nil, err
		}
		fmt.Fscanf(strings.NewReader(str), "%d %d", &src, &dst)
		// src and dst are unsigned, so only the upper bound is checked
		// (the former `< 0` comparisons were always false).
		if src >= vertexSize || dst >= vertexSize {
			return nil, fmt.Errorf("err in edge with src: %d dst: %d vertexSize: %d", src, dst, vertexSize)
		}
		g.AddEdge(src, dst)
		g.AddEdge(dst, src)
	}
	return g, nil
}
|
package dmsg
import (
"context"
"net"
"testing"
"time"
"github.com/skycoin/skycoin/src/util/logging"
"github.com/stretchr/testify/assert"
"github.com/skycoin/dmsg/cipher"
)
const (
	// chanReadThreshold bounds how long the channel-state helpers below
	// (isDoneChannelOpen, isReadChannelOpen) wait before declaring a
	// channel unreadable.
	chanReadThreshold = time.Second * 5
)
// transportWithError pairs a DialTransport result with its error so the
// pair can be sent over a channel from a goroutine.
type transportWithError struct {
	tr  *Transport
	err error
}
// TestClient exercises ClientConn transport dialing and teardown over
// in-memory net.Pipe connections (no real network is used).
func TestClient(t *testing.T) {
	logger := logging.MustGetLogger("dms_client")
	// Runs two ClientConn's and dials a transport from one to another.
	// Checks if states change properly and if closing of transport and connections works.
	t.Run("Two connections", func(t *testing.T) {
		p1, p2 := net.Pipe()
		// invertedIDConn flips transport-ID parity on write so both ends
		// agree on IDs (see its Write method below).
		p1, p2 = invertedIDConn{p1}, invertedIDConn{p2}
		var pk1, pk2 cipher.PubKey
		err := pk1.Set("024ec47420176680816e0406250e7156465e4531f5b26057c9f6297bb0303558c7")
		assert.NoError(t, err)
		err = pk2.Set("031b80cd5773143a39d940dc0710b93dcccc262a85108018a7a95ab9af734f8055")
		assert.NoError(t, err)
		conn1 := NewClientConn(logger, p1, pk1, pk2)
		conn2 := NewClientConn(logger, p2, pk2, pk1)
		ch1 := make(chan *Transport, AcceptBufferSize)
		ch2 := make(chan *Transport, AcceptBufferSize)
		ctx := context.TODO()
		go func() {
			_ = conn1.Serve(ctx, ch1) // nolint:errcheck
		}()
		go func() {
			_ = conn2.Serve(ctx, ch2) // nolint:errcheck
		}()
		// Before dialing, no transport is registered under the next ID.
		conn1.mx.RLock()
		initID := conn1.nextInitID
		conn1.mx.RUnlock()
		_, ok := conn1.getTp(initID)
		assert.False(t, ok)
		tr1, err := conn1.DialTransport(ctx, pk2)
		assert.NoError(t, err)
		// Dialing registers the transport under initID and advances
		// nextInitID by 2.
		_, ok = conn1.getTp(initID)
		assert.True(t, ok)
		conn1.mx.RLock()
		newInitID := conn1.nextInitID
		conn1.mx.RUnlock()
		assert.Equal(t, initID+2, newInitID)
		err = tr1.Close()
		assert.NoError(t, err)
		err = conn1.Close()
		assert.NoError(t, err)
		err = conn2.Close()
		assert.NoError(t, err)
		// After closing, every done/read channel must be closed.
		assert.False(t, isDoneChannelOpen(conn1.done))
		assert.False(t, isDoneChannelOpen(conn2.done))
		assert.False(t, isDoneChannelOpen(tr1.done))
		assert.False(t, isReadChannelOpen(tr1.inCh))
	})
	// Runs four ClientConn's and dials two transports between them.
	// Checks if states change properly and if closing of transports and connections works.
	t.Run("Four connections", func(t *testing.T) {
		p1, p2 := net.Pipe()
		p1, p2 = invertedIDConn{p1}, invertedIDConn{p2}
		p3, p4 := net.Pipe()
		p3, p4 = invertedIDConn{p3}, invertedIDConn{p4}
		var pk1, pk2, pk3 cipher.PubKey
		err := pk1.Set("024ec47420176680816e0406250e7156465e4531f5b26057c9f6297bb0303558c7")
		assert.NoError(t, err)
		err = pk2.Set("031b80cd5773143a39d940dc0710b93dcccc262a85108018a7a95ab9af734f8055")
		assert.NoError(t, err)
		err = pk3.Set("035b57eef30b9a6be1effc2c3337a3a1ffedcd04ffbac6667cd822892cf56be24a")
		assert.NoError(t, err)
		conn1 := NewClientConn(logger, p1, pk1, pk2)
		conn2 := NewClientConn(logger, p2, pk2, pk1)
		conn3 := NewClientConn(logger, p3, pk2, pk3)
		conn4 := NewClientConn(logger, p4, pk3, pk2)
		// Give the responding sides randomized (even-parity) initial IDs.
		conn2.setNextInitID(randID(false))
		conn4.setNextInitID(randID(false))
		ch1 := make(chan *Transport, AcceptBufferSize)
		ch2 := make(chan *Transport, AcceptBufferSize)
		ch3 := make(chan *Transport, AcceptBufferSize)
		ch4 := make(chan *Transport, AcceptBufferSize)
		ctx := context.TODO()
		go func() {
			_ = conn1.Serve(ctx, ch1) // nolint:errcheck
		}()
		go func() {
			_ = conn2.Serve(ctx, ch2) // nolint:errcheck
		}()
		go func() {
			_ = conn3.Serve(ctx, ch3) // nolint:errcheck
		}()
		go func() {
			_ = conn4.Serve(ctx, ch4) // nolint:errcheck
		}()
		// Snapshot all four initial IDs; no transports exist yet.
		conn1.mx.RLock()
		initID1 := conn1.nextInitID
		conn1.mx.RUnlock()
		_, ok := conn1.getTp(initID1)
		assert.False(t, ok)
		conn2.mx.RLock()
		initID2 := conn2.nextInitID
		conn2.mx.RUnlock()
		_, ok = conn2.getTp(initID2)
		assert.False(t, ok)
		conn3.mx.RLock()
		initID3 := conn3.nextInitID
		conn3.mx.RUnlock()
		_, ok = conn3.getTp(initID3)
		assert.False(t, ok)
		conn4.mx.RLock()
		initID4 := conn4.nextInitID
		conn4.mx.RUnlock()
		_, ok = conn4.getTp(initID4)
		assert.False(t, ok)
		// Dial the two transports concurrently and collect the results.
		trCh1 := make(chan transportWithError)
		trCh2 := make(chan transportWithError)
		go func() {
			tr, err := conn1.DialTransport(ctx, pk2)
			trCh1 <- transportWithError{
				tr:  tr,
				err: err,
			}
		}()
		go func() {
			tr, err := conn3.DialTransport(ctx, pk3)
			trCh2 <- transportWithError{
				tr:  tr,
				err: err,
			}
		}()
		twe1 := <-trCh1
		twe2 := <-trCh2
		tr1, err := twe1.tr, twe1.err
		assert.NoError(t, err)
		_, ok = conn1.getTp(initID1)
		assert.True(t, ok)
		conn1.mx.RLock()
		newInitID1 := conn1.nextInitID
		conn1.mx.RUnlock()
		assert.Equal(t, initID1+2, newInitID1)
		tr2, err := twe2.tr, twe2.err
		assert.NoError(t, err)
		_, ok = conn3.getTp(initID3)
		assert.True(t, ok)
		conn3.mx.RLock()
		newInitID3 := conn3.nextInitID
		conn3.mx.RUnlock()
		assert.Equal(t, initID3+2, newInitID3)
		// Close transports, then connections, each from its own goroutine.
		errCh1 := make(chan error)
		errCh2 := make(chan error)
		errCh3 := make(chan error)
		errCh4 := make(chan error)
		go func() {
			errCh1 <- tr1.Close()
		}()
		go func() {
			errCh2 <- tr2.Close()
		}()
		err = <-errCh1
		assert.NoError(t, err)
		err = <-errCh2
		assert.NoError(t, err)
		go func() {
			errCh1 <- conn1.Close()
		}()
		go func() {
			errCh2 <- conn2.Close()
		}()
		go func() {
			errCh3 <- conn3.Close()
		}()
		go func() {
			errCh4 <- conn4.Close()
		}()
		err = <-errCh1
		assert.NoError(t, err)
		err = <-errCh2
		assert.NoError(t, err)
		err = <-errCh3
		assert.NoError(t, err)
		err = <-errCh4
		assert.NoError(t, err)
		// All done/read channels must now be closed.
		assert.False(t, isDoneChannelOpen(conn1.done))
		assert.False(t, isDoneChannelOpen(conn3.done))
		assert.False(t, isDoneChannelOpen(tr1.done))
		assert.False(t, isReadChannelOpen(tr1.inCh))
		assert.False(t, isDoneChannelOpen(tr2.done))
		assert.False(t, isReadChannelOpen(tr2.inCh))
	})
}
// isDoneChannelOpen reports whether ch is still open. It returns false
// when ch is closed, and also when nothing is readable within
// chanReadThreshold (an open but idle channel).
func isDoneChannelOpen(ch chan struct{}) bool {
	timeout := time.After(chanReadThreshold)
	select {
	case _, stillOpen := <-ch:
		return stillOpen
	case <-timeout:
		return false
	}
}
// isReadChannelOpen reports whether the Frame channel ch is still open.
// It returns false when ch is closed, and also when nothing is readable
// within chanReadThreshold.
func isReadChannelOpen(ch chan Frame) bool {
	timeout := time.After(chanReadThreshold)
	select {
	case _, stillOpen := <-ch:
		return stillOpen
	case <-timeout:
		return false
	}
}
// invertedIDConn wraps a net.Conn so two ClientConn's can talk to each
// other directly over a pipe: it flips the parity of transport IDs on
// write (see Write below).
type invertedIDConn struct {
	net.Conn
}
// Write rewrites each outgoing frame so that odd transport IDs become
// even and even IDs become odd (XOR with 1), then forwards it to the
// wrapped connection.
func (c invertedIDConn) Write(b []byte) (n int, err error) {
	original := Frame(b)
	flipped := MakeFrame(original.Type(), original.TpID()^1, original.Pay())
	return c.Conn.Write(flipped)
}
|
package httpmock_test
import (
"bytes"
"context"
"encoding/json"
"errors"
"net"
"net/http"
"net/url"
"reflect"
"regexp"
"strings"
"testing"
"time"
. "github.com/jarcoal/httpmock"
"github.com/jarcoal/httpmock/internal"
)
// testURL is the default endpoint most tests below register responders against.
const testURL = "http://www.example.com/"
// TestMockTransport checks that a registered responder answers matching
// GET requests (read both as a raw string and via a JSON decoder) and
// that unmatched URLs surface NoResponderFound.
func TestMockTransport(t *testing.T) {
	Activate()
	defer Deactivate()
	url := "https://github.com/"
	body := `["hello world"]` + "\n"
	RegisterResponder("GET", url, NewStringResponder(200, body))
	// Read it as a simple string (ioutil.ReadAll of assertBody will
	// trigger io.EOF)
	func() {
		resp, err := http.Get(url)
		if err != nil {
			t.Fatal(err)
		}
		if !assertBody(t, resp, body) {
			t.FailNow()
		}
		// the http client wraps our NoResponderFound error, so we just try and match on text
		if _, err := http.Get(testURL); !strings.Contains(err.Error(), NoResponderFound.Error()) {
			t.Fatal(err)
		}
	}()
	// Do it again, but twice with json decoder (json Decode will not
	// reach EOF, but Close is called as the JSON response is complete)
	for i := 0; i < 2; i++ {
		func() {
			resp, err := http.Get(url)
			if err != nil {
				t.Fatal(err)
			}
			defer resp.Body.Close()
			var res []string
			err = json.NewDecoder(resp.Body).Decode(&res)
			if err != nil {
				t.Fatal(err)
			}
			if len(res) != 1 || res[0] != "hello world" {
				t.Fatalf(`%v read instead of ["hello world"]`, res)
			}
		}()
	}
}
// We should be able to find GET handlers when using an http.Request with a
// default (zero-value) .Method.
func TestMockTransportDefaultMethod(t *testing.T) {
	Activate()
	defer Deactivate()
	const urlString = "https://github.com/"
	url, err := url.Parse(urlString)
	if err != nil {
		t.Fatal(err)
	}
	body := "hello world"
	RegisterResponder("GET", urlString, NewStringResponder(200, body))
	// Build the request by hand so Method stays its zero value "".
	req := &http.Request{
		URL: url,
		// Note: Method unspecified (zero-value)
	}
	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	assertBody(t, resp, body)
}
// TestMockTransportReset verifies that Reset drops every registered
// responder and that registering one bumps the responder count.
func TestMockTransportReset(t *testing.T) {
	DeactivateAndReset()
	if DefaultTransport.NumResponders() > 0 {
		t.Fatal("expected no responders at this point")
	}
	RegisterResponder("GET", testURL, nil)
	if DefaultTransport.NumResponders() != 1 {
		t.Fatal("expected one responder")
	}
	Reset()
	if DefaultTransport.NumResponders() > 0 {
		t.Fatal("expected no responders as they were just reset")
	}
}
// TestMockTransportNoResponder verifies that, with no responders at all,
// a request errors out, and that RegisterNoResponder installs a
// catch-all fallback.
func TestMockTransportNoResponder(t *testing.T) {
	Activate()
	defer DeactivateAndReset()
	Reset()
	if _, err := http.Get(testURL); err == nil {
		t.Fatal("expected to receive a connection error due to lack of responders")
	}
	RegisterNoResponder(NewStringResponder(200, "hello world"))
	resp, err := http.Get(testURL)
	if err != nil {
		t.Fatal("expected request to succeed")
	}
	assertBody(t, resp, "hello world")
}
// TestMockTransportQuerystringFallback verifies that a responder
// registered for a bare URL also matches requests carrying query strings
// and/or fragments.
func TestMockTransportQuerystringFallback(t *testing.T) {
	Activate()
	defer DeactivateAndReset()
	// register the testURL responder
	RegisterResponder("GET", testURL, NewStringResponder(200, "hello world"))
	for _, suffix := range []string{"?", "?hello=world", "?hello=world#foo", "?hello=world&hello=all", "#foo"} {
		reqURL := testURL + suffix
		t.Log(reqURL)
		// make a request for the testURL with a querystring
		resp, err := http.Get(reqURL)
		if err != nil {
			t.Fatalf("expected request %s to succeed", reqURL)
		}
		assertBody(t, resp, "hello world")
	}
}
// TestMockTransportPathOnlyFallback is a table-driven check of the
// path-only and regexp responder matching rules: each Responder pattern
// must match every URL listed in its Paths. Sorted query strings match
// any parameter order; unsorted ones match only exactly.
func TestMockTransportPathOnlyFallback(t *testing.T) {
	// Just in case a panic occurs
	defer DeactivateAndReset()
	for _, test := range []struct {
		Responder string
		Paths     []string
	}{
		{
			// unsorted query string matches exactly
			Responder: "/hello/world?query=string&abc=zz#fragment",
			Paths: []string{
				testURL + "hello/world?query=string&abc=zz#fragment",
			},
		},
		{
			// sorted query string matches all cases
			Responder: "/hello/world?abc=zz&query=string#fragment",
			Paths: []string{
				testURL + "hello/world?query=string&abc=zz#fragment",
				testURL + "hello/world?abc=zz&query=string#fragment",
			},
		},
		{
			// unsorted query string matches exactly
			Responder: "/hello/world?query=string&abc=zz",
			Paths: []string{
				testURL + "hello/world?query=string&abc=zz",
			},
		},
		{
			// sorted query string matches all cases
			Responder: "/hello/world?abc=zz&query=string",
			Paths: []string{
				testURL + "hello/world?query=string&abc=zz",
				testURL + "hello/world?abc=zz&query=string",
			},
		},
		{
			// unsorted query string matches exactly
			Responder: "/hello/world?query=string&query=string2&abc=zz",
			Paths: []string{
				testURL + "hello/world?query=string&query=string2&abc=zz",
			},
		},
		// sorted query string matches all cases
		{
			Responder: "/hello/world?abc=zz&query=string&query=string2",
			Paths: []string{
				testURL + "hello/world?query=string&query=string2&abc=zz",
				testURL + "hello/world?query=string2&query=string&abc=zz",
				testURL + "hello/world?abc=zz&query=string2&query=string",
			},
		},
		{
			Responder: "/hello/world?query",
			Paths: []string{
				testURL + "hello/world?query",
			},
		},
		{
			Responder: "/hello/world?query&abc",
			Paths: []string{
				testURL + "hello/world?query&abc",
				// testURL + "hello/world?abc&query" won' work as "=" is needed, see below
			},
		},
		{
			// In case the sorting does not matter for received params without
			// values, we must register params with "="
			Responder: "/hello/world?abc=&query=",
			Paths: []string{
				testURL + "hello/world?query&abc",
				testURL + "hello/world?abc&query",
			},
		},
		{
			Responder: "/hello/world#fragment",
			Paths: []string{
				testURL + "hello/world#fragment",
			},
		},
		{
			Responder: "/hello/world",
			Paths: []string{
				testURL + "hello/world?query=string&abc=zz#fragment",
				testURL + "hello/world?query=string&abc=zz",
				testURL + "hello/world#fragment",
				testURL + "hello/world",
			},
		},
		// Regexp cases
		{
			Responder: `=~^http://.*/hello/.*ld\z`,
			Paths: []string{
				testURL + "hello/world?query=string&abc=zz#fragment",
				testURL + "hello/world?query=string&abc=zz",
				testURL + "hello/world#fragment",
				testURL + "hello/world",
			},
		},
		{
			Responder: `=~^http://.*/hello/.*ld(\z|[?#])`,
			Paths: []string{
				testURL + "hello/world?query=string&abc=zz#fragment",
				testURL + "hello/world?query=string&abc=zz",
				testURL + "hello/world#fragment",
				testURL + "hello/world",
			},
		},
		{
			Responder: `=~^/hello/.*ld\z`,
			Paths: []string{
				testURL + "hello/world?query=string&abc=zz#fragment",
				testURL + "hello/world?query=string&abc=zz",
				testURL + "hello/world#fragment",
				testURL + "hello/world",
			},
		},
		{
			Responder: `=~^/hello/.*ld(\z|[?#])`,
			Paths: []string{
				testURL + "hello/world?query=string&abc=zz#fragment",
				testURL + "hello/world?query=string&abc=zz",
				testURL + "hello/world#fragment",
				testURL + "hello/world",
			},
		},
		{
			Responder: `=~abc=zz`,
			Paths: []string{
				testURL + "hello/world?query=string&abc=zz#fragment",
				testURL + "hello/world?query=string&abc=zz",
			},
		},
	} {
		// Activate/DeactivateAndReset per table entry keeps responders isolated.
		Activate()
		// register the responder
		RegisterResponder("GET", test.Responder, NewStringResponder(200, "hello world"))
		for _, reqURL := range test.Paths {
			t.Logf("%s: %s", test.Responder, reqURL)
			// make a request for the testURL with a querystring
			resp, err := http.Get(reqURL)
			if err != nil {
				t.Errorf("%s: expected request %s to succeed", test.Responder, reqURL)
				continue
			}
			assertBody(t, resp, "hello world")
		}
		DeactivateAndReset()
	}
}
// dummyTripper is a sentinel http.RoundTripper used to observe whether
// Activate/Deactivate swap http.DefaultTransport in and out.
type dummyTripper struct{}

// RoundTrip implements http.RoundTripper with a do-nothing response.
func (d *dummyTripper) RoundTrip(*http.Request) (*http.Response, error) {
	return nil, nil
}
// TestMockTransportInitialTransport verifies that Activate replaces
// http.DefaultTransport and Deactivate restores the one that was
// installed beforehand.
func TestMockTransportInitialTransport(t *testing.T) {
	DeactivateAndReset()
	tripper := &dummyTripper{}
	http.DefaultTransport = tripper
	Activate()
	if http.DefaultTransport == tripper {
		t.Fatal("expected http.DefaultTransport to be a mock transport")
	}
	Deactivate()
	if http.DefaultTransport != tripper {
		t.Fatal("expected http.DefaultTransport to be dummy")
	}
}
// TestMockTransportNonDefault verifies that ActivateNonDefault mocks a
// custom http.Client (one with its own Transport) rather than the
// package-level default.
func TestMockTransportNonDefault(t *testing.T) {
	// create a custom http client w/ custom Roundtripper
	client := &http.Client{
		Transport: &http.Transport{
			Proxy: http.ProxyFromEnvironment,
			Dial: (&net.Dialer{
				Timeout:   60 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
			TLSHandshakeTimeout: 60 * time.Second,
		},
	}
	// activate mocks for the client
	ActivateNonDefault(client)
	defer DeactivateAndReset()
	body := "hello world!"
	RegisterResponder("GET", testURL, NewStringResponder(200, body))
	req, err := http.NewRequest("GET", testURL, nil)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := client.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	assertBody(t, resp, body)
}
// TestMockTransportRespectsCancel is a table-driven check that the mock
// transport honours request cancellation, both via the deprecated
// Request.Cancel channel and via context cancellation, and that a panic
// inside a responder is converted into an error.
func TestMockTransportRespectsCancel(t *testing.T) {
	Activate()
	defer DeactivateAndReset()
	const (
		cancelNone = iota
		cancelReq
		cancelCtx
	)
	cases := []struct {
		withCancel   int
		cancelNow    bool
		withPanic    bool
		expectedBody string
		expectedErr  error
	}{
		// No cancel specified at all. Falls back to normal behavior
		{cancelNone, false, false, "hello world", nil},
		// Cancel returns error
		{cancelReq, true, false, "", errors.New("request canceled")},
		// Cancel via context returns error
		{cancelCtx, true, false, "", errors.New("context canceled")},
		// Request can be cancelled but it is not cancelled.
		{cancelReq, false, false, "hello world", nil},
		// Request can be cancelled but it is not cancelled.
		{cancelCtx, false, false, "hello world", nil},
		// Panic in cancelled request is handled
		{cancelReq, false, true, "", errors.New(`panic in responder: got "oh no"`)},
		// Panic in cancelled request is handled
		{cancelCtx, false, true, "", errors.New(`panic in responder: got "oh no"`)},
	}
	for _, c := range cases {
		Reset()
		// The responder sleeps briefly so an already-cancelled request is
		// observed as cancelled rather than racing to completion.
		if c.withPanic {
			RegisterResponder("GET", testURL, func(r *http.Request) (*http.Response, error) {
				time.Sleep(10 * time.Millisecond)
				panic("oh no")
			})
		} else {
			RegisterResponder("GET", testURL, func(r *http.Request) (*http.Response, error) {
				time.Sleep(10 * time.Millisecond)
				return NewStringResponse(http.StatusOK, "hello world"), nil
			})
		}
		req, err := http.NewRequest("GET", testURL, nil)
		if err != nil {
			t.Fatal(err)
		}
		switch c.withCancel {
		case cancelReq:
			cancel := make(chan struct{}, 1)
			req.Cancel = cancel // nolint: staticcheck
			if c.cancelNow {
				cancel <- struct{}{}
			}
		case cancelCtx:
			ctx, cancel := context.WithCancel(req.Context())
			req = req.WithContext(ctx)
			if c.cancelNow {
				cancel()
			} else {
				defer cancel() // avoid ctx leak
			}
		}
		resp, err := http.DefaultClient.Do(req)
		// If we expect an error but none was returned, it's fatal for this test...
		if err == nil && c.expectedErr != nil {
			t.Fatal("Error should not be nil")
		}
		if err != nil {
			got := err.(*url.Error)
			// Do not use reflect.DeepEqual as go 1.13 includes stack frames
			// into errors issued by errors.New()
			if c.expectedErr == nil || got.Err.Error() != c.expectedErr.Error() {
				t.Errorf("Expected error: %v, got: %v", c.expectedErr, got.Err)
			}
		}
		if c.expectedBody != "" {
			assertBody(t, resp, c.expectedBody)
		}
	}
}
// TestMockTransportRespectsTimeout verifies that a client's Timeout
// still applies when the transport is mocked: a responder slower than
// the timeout must make the request fail.
func TestMockTransportRespectsTimeout(t *testing.T) {
	timeout := time.Millisecond
	client := &http.Client{
		Timeout: timeout,
	}
	ActivateNonDefault(client)
	defer DeactivateAndReset()
	RegisterResponder(
		"GET", testURL,
		func(r *http.Request) (*http.Response, error) {
			// Sleep well past the client timeout.
			time.Sleep(100 * timeout)
			return NewStringResponse(http.StatusOK, ""), nil
		},
	)
	_, err := client.Get(testURL)
	if err == nil {
		t.Fail()
	}
}
// TestMockTransportCallCountReset checks per-responder and total call
// counting (including the double entry created by a regexp responder)
// and that Reset clears both the counters and their keys.
func TestMockTransportCallCountReset(t *testing.T) {
	Reset()
	Activate()
	defer Deactivate()
	const (
		url  = "https://github.com/path?b=1&a=2"
		url2 = "https://gitlab.com/"
	)
	RegisterResponder("GET", url, NewStringResponder(200, "body"))
	RegisterResponder("POST", "=~gitlab", NewStringResponder(200, "body"))
	_, err := http.Get(url)
	if err != nil {
		t.Fatal(err)
	}
	buff := new(bytes.Buffer)
	json.NewEncoder(buff).Encode("{}") // nolint: errcheck
	_, err = http.Post(url2, "application/json", buff)
	if err != nil {
		t.Fatal(err)
	}
	_, err = http.Get(url)
	if err != nil {
		t.Fatal(err)
	}
	totalCallCount := GetTotalCallCount()
	if totalCallCount != 3 {
		t.Fatalf("did not track the total count of calls correctly. expected it to be 3, but it was %v", totalCallCount)
	}
	info := GetCallCountInfo()
	expectedInfo := map[string]int{
		"GET " + url: 2,
		// Regexp match generates 2 entries:
		"POST " + url2:  1, // the matched call
		"POST =~gitlab": 1, // the regexp responder
	}
	if !reflect.DeepEqual(info, expectedInfo) {
		t.Fatalf("did not correctly track the call count info. expected it to be \n %+v\n but it was \n %+v", expectedInfo, info)
	}
	// Reset wipes the counters entirely (keys included).
	Reset()
	afterResetTotalCallCount := GetTotalCallCount()
	if afterResetTotalCallCount != 0 {
		t.Fatalf("did not reset the total count of calls correctly. expected it to be 0 after reset, but it was %v", afterResetTotalCallCount)
	}
	info = GetCallCountInfo()
	if !reflect.DeepEqual(info, map[string]int{}) {
		t.Fatalf("did not correctly reset the call count info. expected it to be \n {}\n but it was \n %+v", info)
	}
}
// TestMockTransportCallCountZero mirrors TestMockTransportCallCountReset
// but uses ZeroCallCounters, which zeroes the counts while keeping the
// per-responder keys in place.
func TestMockTransportCallCountZero(t *testing.T) {
	Reset()
	Activate()
	defer Deactivate()
	const (
		url  = "https://github.com/path?b=1&a=2"
		url2 = "https://gitlab.com/"
	)
	RegisterResponder("GET", url, NewStringResponder(200, "body"))
	RegisterResponder("POST", "=~gitlab", NewStringResponder(200, "body"))
	_, err := http.Get(url)
	if err != nil {
		t.Fatal(err)
	}
	buff := new(bytes.Buffer)
	json.NewEncoder(buff).Encode("{}") // nolint: errcheck
	_, err = http.Post(url2, "application/json", buff)
	if err != nil {
		t.Fatal(err)
	}
	_, err = http.Get(url)
	if err != nil {
		t.Fatal(err)
	}
	totalCallCount := GetTotalCallCount()
	if totalCallCount != 3 {
		t.Fatalf("did not track the total count of calls correctly. expected it to be 3, but it was %v", totalCallCount)
	}
	info := GetCallCountInfo()
	expectedInfo := map[string]int{
		"GET " + url: 2,
		// Regexp match generates 2 entries:
		"POST " + url2:  1, // the matched call
		"POST =~gitlab": 1, // the regexp responder
	}
	if !reflect.DeepEqual(info, expectedInfo) {
		t.Fatalf("did not correctly track the call count info. expected it to be \n %+v\n but it was \n %+v", expectedInfo, info)
	}
	// ZeroCallCounters keeps the keys but zeroes every count.
	ZeroCallCounters()
	afterResetTotalCallCount := GetTotalCallCount()
	if afterResetTotalCallCount != 0 {
		t.Fatalf("did not reset the total count of calls correctly. expected it to be 0 after reset, but it was %v", afterResetTotalCallCount)
	}
	info = GetCallCountInfo()
	expectedInfo = map[string]int{
		"GET " + url: 0,
		// Regexp match generates 2 entries:
		"POST " + url2:  0, // the matched call
		"POST =~gitlab": 0, // the regexp responder
	}
	if !reflect.DeepEqual(info, expectedInfo) {
		t.Fatalf("did not correctly reset the call count info. expected it to be \n %+v\n but it was \n %+v", expectedInfo, info)
	}
}
// TestRegisterResponderWithQuery is a table-driven check that
// RegisterResponderWithQuery matches requests regardless of query
// parameter order, for each accepted query type (map[string]string,
// string, url.Values).
func TestRegisterResponderWithQuery(t *testing.T) {
	// Just in case a panic occurs
	defer DeactivateAndReset()
	// create a custom http client w/ custom Roundtripper
	client := &http.Client{
		Transport: &http.Transport{
			Proxy: http.ProxyFromEnvironment,
			Dial: (&net.Dialer{
				Timeout:   60 * time.Second,
				KeepAlive: 30 * time.Second,
			}).Dial,
			TLSHandshakeTimeout: 60 * time.Second,
		},
	}
	body := "hello world!"
	testURLPath := "http://acme.test/api"
	for _, test := range []struct {
		URL     string
		Queries []interface{}
		URLs    []string
	}{
		{
			Queries: []interface{}{
				map[string]string{"a": "1", "b": "2"},
				"a=1&b=2",
				"b=2&a=1",
				url.Values{"a": []string{"1"}, "b": []string{"2"}},
			},
			URLs: []string{
				"http://acme.test/api?a=1&b=2",
				"http://acme.test/api?b=2&a=1",
			},
		},
		{
			Queries: []interface{}{
				url.Values{
					"a": []string{"3", "2", "1"},
					"b": []string{"4", "2"},
					"c": []string{""}, // is the net/url way to record params without values
					// Test:
					//   u, _ := url.Parse("/hello/world?query")
					//   fmt.Printf("%d<%s>\n", len(u.Query()["query"]), u.Query()["query"][0])
					//   // prints "1<>"
				},
				"a=1&b=2&a=3&c&b=4&a=2",
				"b=2&a=1&c=&b=4&a=2&a=3",
				nil,
			},
			URLs: []string{
				testURLPath + "?a=1&b=2&a=3&c&b=4&a=2",
				testURLPath + "?a=1&b=2&a=3&c=&b=4&a=2",
				testURLPath + "?b=2&a=1&c=&b=4&a=2&a=3",
				testURLPath + "?b=2&a=1&c&b=4&a=2&a=3",
			},
		},
	} {
		for _, query := range test.Queries {
			ActivateNonDefault(client)
			RegisterResponderWithQuery("GET", testURLPath, query, NewStringResponder(200, body))
			for _, url := range test.URLs {
				t.Logf("query=%v URL=%s", query, url)
				req, err := http.NewRequest("GET", url, nil)
				if err != nil {
					t.Fatal(err)
				}
				resp, err := client.Do(req)
				if err != nil {
					t.Fatal(err)
				}
				assertBody(t, resp, body)
			}
			DeactivateAndReset()
		}
	}
}
// TestRegisterResponderWithQueryPanic verifies the panics raised by
// RegisterResponderWithQuery on invalid input: a malformed query string, an
// unsupported query type, and a regexp path (which must be registered via
// RegisterResponder instead).
func TestRegisterResponderWithQueryPanic(t *testing.T) {
	resp := NewStringResponder(200, "hello world!")
	for _, test := range []struct {
		Path        string
		Query       interface{}
		PanicPrefix string
	}{
		{
			Path:        "foobar",
			Query:       "%",
			PanicPrefix: "RegisterResponderWithQuery bad query string: ",
		},
		{
			Path:        "foobar",
			Query:       1234,
			PanicPrefix: "RegisterResponderWithQuery bad query type int. Only url.Values, map[string]string and string are allowed",
		},
		{
			Path:        `=~regexp.*\z`,
			Query:       "",
			PanicPrefix: `path begins with "=~", RegisterResponder should be used instead of RegisterResponderWithQuery`,
		},
	} {
		panicked, panicStr := catchPanic(func() {
			RegisterResponderWithQuery("GET", test.Path, test.Query, resp)
		})
		if !panicked {
			t.Errorf("RegisterResponderWithQuery + query=%v did not panic", test.Query)
			continue
		}
		// Only the panic message prefix is asserted; the suffix carries
		// implementation-specific details.
		if !strings.HasPrefix(panicStr, test.PanicPrefix) {
			t.Fatalf(`RegisterResponderWithQuery + query=%v panic="%v" expected prefix="%v"`,
				test.Query, panicStr, test.PanicPrefix)
		}
	}
}
// TestRegisterRegexpResponder checks that registering a second responder for
// the same regexp replaces the first one.
func TestRegisterRegexpResponder(t *testing.T) {
	Activate()
	defer DeactivateAndReset()
	rx := regexp.MustCompile("ex.mple")
	RegisterRegexpResponder("GET", rx, NewStringResponder(200, "first"))
	// Overwrite responder
	RegisterRegexpResponder("GET", rx, NewStringResponder(200, "second"))
	resp, err := http.Get(testURL)
	if err != nil {
		t.Fatalf("expected request %s to succeed", testURL)
	}
	// The second registration must win.
	assertBody(t, resp, "second")
}
// TestSubmatches covers the submatch accessors: extraction as string,
// int64, uint64 and float64, their Must* variants, the panics raised on bad
// indexes or non-convertible values, and a full end-to-end run through a
// regexp responder.
func TestSubmatches(t *testing.T) {
	req, err := http.NewRequest("GET", "/foo/bar", nil)
	if err != nil {
		t.Fatal(err)
	}
	// req carries no submatches; req2 carries four recorded ones
	// (indexes 1..4: string, uint, int, float).
	req2 := internal.SetSubmatches(req, []string{"foo", "123", "-123", "12.3"})
	t.Run("GetSubmatch", func(t *testing.T) {
		_, err := GetSubmatch(req, 1)
		if err != ErrSubmatchNotFound {
			t.Errorf("Submatch should not be found in req: %v", err)
		}
		_, err = GetSubmatch(req2, 5)
		if err != ErrSubmatchNotFound {
			t.Errorf("Submatch #5 should not be found in req2: %v", err)
		}
		s, err := GetSubmatch(req2, 1)
		if err != nil {
			t.Errorf("GetSubmatch(req2, 1) failed: %v", err)
		}
		if s != "foo" {
			t.Errorf("GetSubmatch(req2, 1) failed, got: %v, expected: foo", s)
		}
		s, err = GetSubmatch(req2, 4)
		if err != nil {
			t.Errorf("GetSubmatch(req2, 4) failed: %v", err)
		}
		if s != "12.3" {
			t.Errorf("GetSubmatch(req2, 4) failed, got: %v, expected: 12.3", s)
		}
		s = MustGetSubmatch(req2, 4)
		if s != "12.3" {
			t.Errorf("GetSubmatch(req2, 4) failed, got: %v, expected: 12.3", s)
		}
	})
	t.Run("GetSubmatchAsInt", func(t *testing.T) {
		_, err := GetSubmatchAsInt(req, 1)
		if err != ErrSubmatchNotFound {
			t.Errorf("Submatch should not be found in req: %v", err)
		}
		_, err = GetSubmatchAsInt(req2, 4) // not an int
		if err == nil || err == ErrSubmatchNotFound {
			t.Errorf("Submatch should not be an int64: %v", err)
		}
		i, err := GetSubmatchAsInt(req2, 3)
		if err != nil {
			t.Errorf("GetSubmatchAsInt(req2, 3) failed: %v", err)
		}
		if i != -123 {
			t.Errorf("GetSubmatchAsInt(req2, 3) failed, got: %d, expected: -123", i)
		}
		i = MustGetSubmatchAsInt(req2, 3)
		if i != -123 {
			t.Errorf("MustGetSubmatchAsInt(req2, 3) failed, got: %d, expected: -123", i)
		}
	})
	t.Run("GetSubmatchAsUint", func(t *testing.T) {
		_, err := GetSubmatchAsUint(req, 1)
		if err != ErrSubmatchNotFound {
			t.Errorf("Submatch should not be found in req: %v", err)
		}
		_, err = GetSubmatchAsUint(req2, 3) // not a uint
		if err == nil || err == ErrSubmatchNotFound {
			t.Errorf("Submatch should not be an uint64: %v", err)
		}
		u, err := GetSubmatchAsUint(req2, 2)
		if err != nil {
			t.Errorf("GetSubmatchAsUint(req2, 2) failed: %v", err)
		}
		if u != 123 {
			t.Errorf("GetSubmatchAsUint(req2, 2) failed, got: %d, expected: 123", u)
		}
		u = MustGetSubmatchAsUint(req2, 2)
		if u != 123 {
			t.Errorf("MustGetSubmatchAsUint(req2, 2) failed, got: %d, expected: 123", u)
		}
	})
	t.Run("GetSubmatchAsFloat", func(t *testing.T) {
		_, err := GetSubmatchAsFloat(req, 1)
		if err != ErrSubmatchNotFound {
			t.Errorf("Submatch should not be found in req: %v", err)
		}
		_, err = GetSubmatchAsFloat(req2, 1) // not a float
		if err == nil || err == ErrSubmatchNotFound {
			t.Errorf("Submatch should not be an float64: %v", err)
		}
		f, err := GetSubmatchAsFloat(req2, 4)
		if err != nil {
			t.Errorf("GetSubmatchAsFloat(req2, 4) failed: %v", err)
		}
		if f != 12.3 {
			t.Errorf("GetSubmatchAsFloat(req2, 4) failed, got: %f, expected: 12.3", f)
		}
		f = MustGetSubmatchAsFloat(req2, 4)
		if f != 12.3 {
			t.Errorf("MustGetSubmatchAsFloat(req2, 4) failed, got: %f, expected: 12.3", f)
		}
	})
	t.Run("GetSubmatch* panics", func(t *testing.T) {
		// Each case must panic with a message carrying the expected prefix.
		for _, test := range []struct {
			Name        string
			Fn          func()
			PanicPrefix string
		}{
			{
				Name:        "GetSubmatch & n < 1",
				Fn:          func() { GetSubmatch(req, 0) }, // nolint: errcheck
				PanicPrefix: "getting submatches starts at 1, not 0",
			},
			{
				Name:        "MustGetSubmatch",
				Fn:          func() { MustGetSubmatch(req, 1) },
				PanicPrefix: "GetSubmatch failed: " + ErrSubmatchNotFound.Error(),
			},
			{
				Name:        "MustGetSubmatchAsInt",
				Fn:          func() { MustGetSubmatchAsInt(req2, 4) }, // not an int
				PanicPrefix: "GetSubmatchAsInt failed: ",
			},
			{
				Name:        "MustGetSubmatchAsUint",
				Fn:          func() { MustGetSubmatchAsUint(req2, 3) }, // not a uint
				PanicPrefix: "GetSubmatchAsUint failed: ",
			},
			{
				Name:        "GetSubmatchAsFloat",
				Fn:          func() { MustGetSubmatchAsFloat(req2, 1) }, // not a float
				PanicPrefix: "GetSubmatchAsFloat failed: ",
			},
		} {
			var (
				didntPanic bool
				panicVal   interface{}
			)
			// Run the case in a closure so recover() can capture its panic.
			func() {
				defer func() { panicVal = recover() }()
				test.Fn()
				didntPanic = true
			}()
			if didntPanic {
				t.Errorf("%s did not panic", test.Name)
			}
			panicStr, ok := panicVal.(string)
			if !ok || !strings.HasPrefix(panicStr, test.PanicPrefix) {
				t.Errorf(`%s panic="%v" expected prefix="%v"`, test.Name, panicVal, test.PanicPrefix)
			}
		}
	})
	t.Run("Full test", func(t *testing.T) {
		Activate()
		defer DeactivateAndReset()
		var (
			id       uint64
			delta    float64
			deltaStr string
			inc      int64
		)
		// The responder pulls each captured group out of the matched URL.
		RegisterResponder("GET", `=~^/id/(\d+)\?delta=(\d+(?:\.\d*)?)&inc=(-?\d+)\z`,
			func(req *http.Request) (*http.Response, error) {
				id = MustGetSubmatchAsUint(req, 1)
				delta = MustGetSubmatchAsFloat(req, 2)
				deltaStr = MustGetSubmatch(req, 2)
				inc = MustGetSubmatchAsInt(req, 3)
				return NewStringResponse(http.StatusOK, "OK"), nil
			})
		resp, err := http.Get("http://example.tld/id/123?delta=1.2&inc=-5")
		if err != nil {
			t.Fatal(err)
		}
		assertBody(t, resp, "OK")
		// Check submatches
		if id != 123 {
			t.Errorf("seems MustGetSubmatchAsUint failed, got: %d, expected: 123", id)
		}
		if delta != 1.2 {
			t.Errorf("seems MustGetSubmatchAsFloat failed, got: %f, expected: 1.2", delta)
		}
		if deltaStr != "1.2" {
			t.Errorf("seems MustGetSubmatch failed, got: %v, expected: 1.2", deltaStr)
		}
		if inc != -5 {
			t.Errorf("seems MustGetSubmatchAsInt failed, got: %d, expected: 123", inc)
		}
	})
}
// TestCheckStackTracer exercises the Trace() responder decorator: the trace
// callback must receive a message whose first frame names the originating
// net/http.Get() call, with no trailing newline.
func TestCheckStackTracer(t *testing.T) {
	// Full test using Trace() Responder
	Activate()
	defer Deactivate()
	const url = "https://foo.bar/"
	var mesg string
	RegisterResponder("GET", url,
		NewStringResponder(200, "{}").
			Trace(func(args ...interface{}) { mesg = args[0].(string) }))
	resp, err := http.Get(url)
	if err != nil {
		t.Fatal(err)
	}
	if !assertBody(t, resp, "{}") {
		t.FailNow()
	}
	// Check that first frame is the net/http.Get() call
	if !strings.HasPrefix(mesg, "GET https://foo.bar/\nCalled from net/http.Get()\n at ") ||
		strings.HasSuffix(mesg, "\n") {
		t.Errorf("Bad mesg: <%v>", mesg)
	}
}
|
package main
import (
"math/big"
)
// main computes 2**4 with math/big modular exponentiation. The modulus is
// zero, which Int.Exp treats like a nil modulus: a plain x**y.
func main() {
	base := new(big.Int).SetUint64(2)
	exponent := new(big.Int).SetUint64(4)
	modulus := new(big.Int).SetUint64(0)
	result := new(big.Int)
	result.Exp(base, exponent, modulus)
}
|
// Copyright (C) 2019-2020 Zilliz. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the License
// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
// or implied. See the License for the specific language governing permissions and limitations under the License.
package storage
import (
	"encoding/json"
	"errors"
)
// Int64Stats records the maximum and minimum values of an int64 column.
type Int64Stats struct {
	Max int64 `json:"max"`
	Min int64 `json:"min"`
}

// StatsWriter serializes statistics into an internal buffer.
type StatsWriter struct {
	buffer []byte
}

// GetBuffer returns the bytes produced by the last successful Stats* call.
func (sw *StatsWriter) GetBuffer() []byte {
	return sw.buffer
}

// StatsInt64 computes Int64Stats over msgs and stores their JSON encoding
// in the writer's buffer. An error is returned when msgs is empty.
//
// NOTE(review): msgs is assumed to be sorted ascending — the minimum is
// taken from the first element and the maximum from the last; confirm
// against callers.
func (sw *StatsWriter) StatsInt64(msgs []int64) error {
	if len(msgs) < 1 {
		// Previously this silently returned nil despite the comment saying
		// an error was intended; empty input is now reported to the caller.
		return errors.New("StatsInt64: msgs must have at least one element")
	}
	stats := &Int64Stats{
		Max: msgs[len(msgs)-1],
		Min: msgs[0],
	}
	b, err := json.Marshal(stats)
	if err != nil {
		return err
	}
	sw.buffer = b
	return nil
}
// StatsReader deserializes statistics from a caller-supplied buffer.
type StatsReader struct {
	buffer []byte
}

// SetBuffer sets the raw JSON bytes the next Get* call will decode.
func (sr *StatsReader) SetBuffer(buffer []byte) {
	sr.buffer = buffer
}

// GetInt64Stats decodes the buffer as Int64Stats.
// NOTE(review): the json.Unmarshal error is discarded, so malformed input
// silently yields zero-valued stats — consider surfacing the error.
func (sr *StatsReader) GetInt64Stats() Int64Stats {
	stats := Int64Stats{}
	json.Unmarshal(sr.buffer, &stats)
	return stats
}
|
package main
import (
"log"
)
// Object is the function signature wrapped by decorators in this example.
type Object func(int) int

// LogDecorator returns fn wrapped so that every invocation logs a "Start"
// marker before, and an "End" marker after, the underlying call.
func LogDecorator(fn Object) Object {
	return func(n int) int {
		log.Println("Start")
		out := fn(n)
		log.Println("End")
		return out
	}
}
// Double returns twice the value of n.
func Double(n int) int {
	return n + n
}
// main demonstrates the decorator pattern: Double is wrapped with logging
// and invoked once with 5, printing the result.
func main() {
	decorated := LogDecorator(Double)
	log.Println(decorated(5))
}
|
// Copyright (c) 2014-2018 Salsita Software
// Copyright (c) 2015 Scott Devoid
// Use of this source code is governed by the MIT License.
// The license can be found in the LICENSE file.
package pivotal
import (
"fmt"
"net/http"
"time"
)
// ProjectMembership is the primary data object for the MembershipService.
type ProjectMembership struct {
	// NOTE(review): Person carries no json tag and therefore marshals under
	// the key "Person" — confirm this matches the Tracker API payload.
	Person         Person
	ID             int        `json:"id,omitempty"`
	Kind           string     `json:"kind,omitempty"`
	AccountID      int        `json:"account_id,omitempty"`
	Owner          bool       `json:"owner,omitempty"`
	Admin          bool       `json:"admin,omitempty"`
	ProjectCreator bool       `json:"project_creator,omitempty"`
	Timekeeper     bool       `json:"timekeeper,omitempty"`
	TimeEnterer    bool       `json:"time_enterer,omitempty"`
	CreatedAt      *time.Time `json:"created_at,omitempty"`
	UpdatedAt      *time.Time `json:"updated_at,omitempty"`
}
// MembershipService wraps the client context for interacting with project members.
type MembershipService struct {
	// client performs the authenticated HTTP requests.
	client *Client
}
// newMembershipService wires a MembershipService to the given API client.
func newMembershipService(client *Client) *MembershipService {
	return &MembershipService{client: client}
}
// List all of the memberships in an account.
// It issues GET projects/<projectID>/memberships and decodes the response
// body into ProjectMembership values. The raw *http.Response is returned
// alongside the result for callers that need status or header access.
func (service *MembershipService) List(projectID int) ([]*ProjectMembership, *http.Response, error) {
	u := fmt.Sprintf("projects/%v/memberships", projectID)
	req, err := service.client.NewRequest("GET", u, nil)
	if err != nil {
		return nil, nil, err
	}
	var projectMemberships []*ProjectMembership
	resp, err := service.client.Do(req, &projectMemberships)
	if err != nil {
		return nil, resp, err
	}
	// err is nil on this path.
	return projectMemberships, resp, err
}
|
package images
import (
"fmt"
"github.com/768bit/promethium/lib/cloudconfig"
"github.com/768bit/vutils"
"io/ioutil"
"os"
"path/filepath"
)
//capstan was managin images before.. we will continue to use capstan for managing these images but we create isntances of these images as required...
//these utils allow for the management of these image instances
// PImage represents a Promethium image instance.
// NOTE(review): fields and behaviour are not implemented yet.
type PImage struct {
}

// loadFromDisk is a placeholder; it currently performs no work.
func (pi *PImage) loadFromDisk() {
}
// MakeCloudInitImageBuilt builds a cloud-init "cidata" image with the
// cloud-localds tool from the given hostname, network configuration and
// user data, and returns the raw image bytes.
func MakeCloudInitImageBuilt(hostname string, networkConfig *cloudconfig.MetaDataNetworkConfig, userData *cloudconfig.UserData) ([]byte, error) {
	// The TempDir error was previously discarded; a failure here would have
	// caused the writes below to target an empty path.
	tmp, err := ioutil.TempDir("", "promethium")
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	// Once this function is finished, remove temporary file.
	defer os.RemoveAll(tmp)
	fmt.Printf("Creating Image In: %s\n", tmp)
	md := cloudconfig.NewMetaDataWithNetworking(hostname, networkConfig)
	err = md.WriteMetaData(filepath.Join(tmp, "meta_data"))
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	err = userData.WriteUserData(filepath.Join(tmp, "user_data"))
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	// cloud-localds assembles the cidata image from the user/meta data files.
	ddCmd := vutils.Exec.CreateAsyncCommand("cloud-localds", false, filepath.Join(tmp, "cidata.img"), filepath.Join(tmp, "user_data"), filepath.Join(tmp, "meta_data"))
	err = ddCmd.BindToStdoutAndStdErr().StartAndWait()
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	// Read the image back so the temporary build directory can be deleted.
	img, err := ioutil.ReadFile(filepath.Join(tmp, "cidata.img"))
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	return img, nil
}
// MakeCloudInitImage builds an OpenStack-style config-drive image by hand:
// it creates a 16 MiB exfat filesystem, mounts it (via sudo), writes
// openstack/latest/meta_data.json and user_data into it, unmounts it, and
// returns the raw image bytes.
func MakeCloudInitImage(hostname string, networkConfig *cloudconfig.MetaDataNetworkConfig, userData *cloudconfig.UserData) ([]byte, error) {
	// The TempDir error was previously discarded; a failure here would have
	// caused every path below to be rooted at an empty string.
	tmp, err := ioutil.TempDir("", "promethium")
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	// Once this function is finished, remove temporary file.
	defer os.RemoveAll(tmp)
	fmt.Printf("Creating Image In: %s\n", tmp)
	mntPoint := filepath.Join(tmp, "mnt")
	imagePath := filepath.Join(tmp, "img")
	err = vutils.Files.CreateDirIfNotExist(mntPoint)
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	//dd if=/dev/zero of=/rootfs.ext4 bs=1M count=50
	ddCmd := vutils.Exec.CreateAsyncCommand("dd", false, "if=/dev/zero", fmt.Sprintf("of=%s", imagePath), "bs=1M", "count=16")
	err = ddCmd.BindToStdoutAndStdErr().StartAndWait()
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	fmt.Printf("Image Created\n")
	//make the exfat file system...
	mkfsCmd := vutils.Exec.CreateAsyncCommand("mkfs.exfat", false, "-n", "config-2", imagePath)
	err = mkfsCmd.BindToStdoutAndStdErr().StartAndWait()
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	fmt.Printf("File System Initialised\n")
	//mount the image...
	mountCmd := vutils.Exec.CreateAsyncCommand("mount", false, "-o", "loop", "-t", "exfat", imagePath, mntPoint).Sudo()
	err = mountCmd.BindToStdoutAndStdErr().StartAndWait()
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	fmt.Printf("Image Mounted: %s\n", mntPoint)
	//create the structure
	ciPath := filepath.Join(mntPoint, "openstack", "latest")
	err = vutils.Files.CreateDirIfNotExist(ciPath)
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	md := cloudconfig.NewMetaDataWithNetworking(hostname, networkConfig)
	err = md.WriteMetaDataJSON(filepath.Join(ciPath, "meta_data.json"))
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	err = userData.WriteUserData(filepath.Join(ciPath, "user_data"))
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	fmt.Printf("Assets Copied to: %s\n", ciPath)
	//unmount
	umountCmd := vutils.Exec.CreateAsyncCommand("umount", false, mntPoint).Sudo()
	err = umountCmd.BindToStdoutAndStdErr().StartAndWait()
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	fmt.Printf("Image Unmounted: %s\n", mntPoint)
	//read in the image so that the build can be deleted..
	img, err := ioutil.ReadFile(imagePath)
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	return img, nil
}
|
package cmd
import (
"github.com/spf13/cobra"
"fmt"
"net/http"
"github.com/HotelsDotCom/flyte/flytepath"
"io"
"os"
httputl "net/http/httputil"
"github.com/spf13/viper"
"bytes"
"mime/multipart"
"net/textproto"
"strings"
"path/filepath"
)
// dsItem describes a datastore item to upload: its name, description,
// content type, and the local file holding its value.
type dsItem struct {
	name        string
	description string
	contentType string
	filename    string
}

// argsUploadDs collects the flag values of the "upload datastore" command.
var argsUploadDs dsItem
// newCmdUploadDs builds the "upload datastore" cobra command, wiring its
// flags into argsUploadDs. Only --filename is mandatory; the item name and
// content type default to values derived from the file name at run time.
func newCmdUploadDs() *cobra.Command {
	cmd := &cobra.Command{
		Use:     "datastore -f FILENAME",
		Aliases: []string{"ds"},
		Short:   "Upload a datastore item from a file",
		Long:    longUploadDs,
		RunE:    runUploadDs,
	}
	cmd.Flags().StringVarP(&argsUploadDs.filename, flagFilename, "f", "", "filename of the file to use to upload resource")
	// NOTE(review): MarkFlagRequired's error return is ignored — presumably
	// it only fails for unknown flag names; confirm against cobra docs.
	cmd.MarkFlagRequired(flagFilename)
	cmd.Flags().StringVarP(&argsUploadDs.name, flagName, "n", "", "item's name (default derived from the file name)")
	cmd.Flags().StringVarP(&argsUploadDs.description, flagDescription, "d", "", "item's description")
	cmd.Flags().StringVarP(&argsUploadDs.contentType, flagContentType, "c", "", "item's content type (default derived from the file extension)")
	return cmd
}
// longUploadDs is the long help text shown for the "upload datastore" command.
const longUploadDs = `
Upload a datastore item from a file or from stdin to a flyte API.
Flyte API could be specified by setting $FLYTE_API or overridden by the --url option
Examples:
# Upload a datastore item from env.json file to flyte API specified by $FLYTE_API
flyte upload ds -f ./env.json
# Upload a datastore item from my-script.sh file to flyte API at http://127.0.0.1:8080
flyte upload ds -f ./my-script.sh --url http://127.0.0.1:8080
`
// runUploadDs uploads the file named by --filename as a datastore item.
// The item name defaults to the file's base name without extension, and the
// content type is derived from the extension when not given explicitly.
// On success it prints the dumped response plus the item's location; a
// status other than 201/204 is reported as an error.
func runUploadDs(c *cobra.Command, args []string) error {
	if argsUploadDs.name == "" {
		base := filepath.Base(argsUploadDs.filename)
		ext := filepath.Ext(argsUploadDs.filename)
		argsUploadDs.name = strings.TrimSuffix(base, ext)
	}
	if argsUploadDs.contentType == "" {
		argsUploadDs.contentType = getContentType(argsUploadDs.filename)
	}
	req, err := newDsRequest(viper.GetString(flagURL), argsUploadDs)
	if err != nil {
		return err
	}
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// Dump the full response for both the error and the success message.
	dump, err := httputl.DumpResponse(resp, true)
	if err != nil {
		return err
	}
	if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("cannot upload datastore\n%s", dump)
	}
	_, err = fmt.Fprintf(c.OutOrStdout(), "%sLocation: %s\n", dump, resp.Request.URL.String())
	return err
}
// newDsRequest builds the multipart PUT request that uploads item to the
// datastore endpoint of the flyte API at apiURL. The file content becomes a
// "value" form-file part; a non-empty description is added as a separate
// form field.
func newDsRequest(apiURL string, item dsItem) (*http.Request, error) {
	file, err := os.Open(item.filename)
	if err != nil {
		return nil, err
	}
	defer file.Close()
	body := new(bytes.Buffer)
	w := multipart.NewWriter(body)
	h := newFormFileHeader("value", filepath.Base(item.filename), item.contentType)
	part, err := w.CreatePart(h)
	if err != nil {
		return nil, err
	}
	if _, err = io.Copy(part, file); err != nil {
		return nil, err
	}
	if item.description != "" {
		// Propagate write failures instead of silently dropping the field.
		if err = w.WriteField("description", item.description); err != nil {
			return nil, err
		}
	}
	if err = w.Close(); err != nil {
		return nil, err
	}
	req, err := http.NewRequest(http.MethodPut, dsItemURL(apiURL, item.name), body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", w.FormDataContentType())
	return req, nil
}
// newFormFileHeader builds the MIME header for a multipart form-file part
// with the given field name, file name and content type.
func newFormFileHeader(fieldname, filename, contentType string) textproto.MIMEHeader {
	header := textproto.MIMEHeader{}
	disposition := fmt.Sprintf(`form-data; name="%s"; filename="%s"`, fieldname, filename)
	header.Set("Content-Disposition", disposition)
	header.Set("Content-Type", contentType)
	return header
}
func dsItemURL(apiURL, name string) string {
return fmt.Sprintf("%s%s/%s", apiURL, flytepath.DatastorePath, name)
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"context"
"reflect"
"testing"
"github.com/google/gapid/core/assert"
"github.com/google/gapid/core/data/dictionary"
"github.com/google/gapid/core/data/generic"
"github.com/google/gapid/core/data/protoconv"
"github.com/google/gapid/core/log"
"github.com/google/gapid/core/memory/arena"
"github.com/google/gapid/gapis/api"
)
// TestReferences round-trips a Complex object through its proto form twice
// (complex -> protoA -> decoded -> protoB), checking that aliased
// references survive the conversion and that both protos are identical.
func TestReferences(t *testing.T) {
	ctx := log.Testing(t)
	assert := assert.To(t)
	a := arena.New()
	defer a.Dispose()
	ctx = arena.Put(ctx, a)
	complex := BuildComplex(a)
	// complex -> protoA -> decoded -> protoB
	protoA, err := protoconv.ToProto(ctx, complex)
	if !assert.For("ToProtoA").ThatError(err).Succeeded() {
		return
	}
	decodedObj, err := protoconv.ToObject(ctx, protoA)
	if !assert.For("ToObject").ThatError(err).Succeeded() {
		return
	}
	decoded := decodedObj.(Complex)
	// Aliased references must still point at the same underlying objects.
	assert.For("Object ref").That(decoded.RefObjectAlias()).Equals(decoded.RefObject())
	assert.For("Map ref").That(decoded.EntriesAlias()).Equals(decoded.Entries())
	assert.For("NestedRefs[6]").That(decoded.NestedRefs().Get(6).Ref()).Equals(decoded.RefObject())
	assert.For("NestedRefs[7]").That(decoded.NestedRefs().Get(7).Ref()).Equals(decoded.RefObject())
	protoB, err := protoconv.ToProto(ctx, decoded)
	if !assert.For("ToProtoB").ThatError(err).Succeeded() {
		return
	}
	assert.For("Protos").TestDeepEqual(protoA, protoB)
	// Test that all decoded references see changes to their referenced objects.
	decoded.RefObject().SetValue(55)               // was 42
	decoded.Entries().Add(4, NewTestObject(a, 33)) // was 50
	assert.For("Object ref").That(decoded.RefObjectAlias()).Equals(decoded.RefObject())
	assert.For("Map ref").That(decoded.EntriesAlias()).Equals(decoded.Entries())
	assert.For("RefEntries").That(decoded.RefEntries().Get(0)).Equals(decoded.RefObject())
	assert.For("NestedRefs[6]").That(decoded.NestedRefs().Get(6).Ref()).Equals(decoded.RefObject())
	assert.For("NestedRefs[7]").That(decoded.NestedRefs().Get(7).Ref()).Equals(decoded.RefObject())
}
// TestEquals verifies that a Complex object compares equal to itself
// through the check helper.
func TestEquals(t *testing.T) {
	ctx := log.Testing(t)
	a := arena.New()
	defer a.Dispose()
	ctx = arena.Put(ctx, a)
	complex := BuildComplex(a)
	check(ctx, complex, complex, "equals")
}
// TestCloneReferences clones a Complex object and checks field by field that
// the clone matches the original — including maps, nil refs, a linked list
// and a reference cycle — and that aliased references inside the clone keep
// tracking their referenced objects after mutation.
func TestCloneReferences(t *testing.T) {
	ctx := log.Testing(t)
	assert := assert.To(t)
	a := arena.New()
	defer a.Dispose()
	ctx = arena.Put(ctx, a)
	complex := BuildComplex(a)
	cloned := complex.Clone(a, api.CloneContext{})
	check(ctx, cloned.Data(), complex.Data(), "Data")
	check(ctx, cloned.Object(), complex.Object(), "Object")
	check(ctx, cloned.ObjectArray(), complex.ObjectArray(), "ObjectArray")
	check(ctx, cloned.RefObject().Value(), complex.RefObject().Value(), "RefObject")
	check(ctx, cloned.RefObject().Value(), complex.RefObjectAlias().Value(), "RefObjectAlias")
	check(ctx, cloned.NilRefObject().IsNil(), true, "NilRefObject")
	check(ctx, cloned.Entries(), complex.Entries(), "Entries")
	check(ctx, cloned.EntriesAlias(), complex.EntriesAlias(), "EntriesAlias")
	check(ctx, cloned.NilMap(), complex.NilMap(), "NilMap")
	check(ctx, cloned.Strings(), complex.Strings(), "Strings")
	check(ctx, cloned.BoolMap(), complex.BoolMap(), "BoolMap")
	// RefEntries
	assert.For("RefEntries.Len").That(cloned.RefEntries().Len()).Equals(complex.RefEntries().Len())
	for _, k := range complex.RefEntries().Keys() {
		check(ctx, cloned.RefEntries().Contains(k), true, "RefEntries[%d]", k)
		e, a := complex.RefEntries().Get(k), cloned.RefEntries().Get(k)
		if e.IsNil() {
			check(ctx, a.IsNil(), true, "RefEntries[%d]", k)
		} else {
			check(ctx, a.Value(), e.Value(), "RefEntries[%d]", k)
		}
	}
	// LinkedList
	for i, e, a := 0, complex.LinkedList(), cloned.LinkedList(); !e.IsNil(); i++ {
		check(ctx, a.IsNil(), false, "LinkedList[%d]", i)
		check(ctx, a.Value(), e.Value(), "LinkedList[%d]", i)
		check(ctx, a.Next().IsNil(), e.Next().IsNil(), "LinkedList[%d]", i)
		e, a = e.Next(), a.Next()
	}
	// Cycle
	check(ctx, cloned.Cycle().IsNil(), false, "Cycle[0]")
	check(ctx, cloned.Cycle().Value(), uint32(1), "Cycle[0]")
	check(ctx, cloned.Cycle().Next().IsNil(), false, "Cycle[1]")
	check(ctx, cloned.Cycle().Next().Value(), uint32(2), "Cycle[1]")
	// The two-element cycle must close back onto the clone's own head.
	check(ctx, cloned.Cycle().Next().Next(), cloned.Cycle(), "Cycle")
	// NestedRefs
	check(ctx, cloned.NestedRefs().Len(), complex.NestedRefs().Len(), "NestedRefs.Len")
	for _, k := range complex.NestedRefs().Keys() {
		check(ctx, cloned.NestedRefs().Contains(k), true, "NestedRefs[%d]", k)
		e, a := complex.NestedRefs().Get(k), cloned.NestedRefs().Get(k)
		check(ctx, a.IsNil(), e.IsNil(), "NestedRefs[%d]", k)
		if !e.IsNil() {
			check(ctx, a.Ref().IsNil(), e.Ref().IsNil(), "NestedRefs[%d].ref", k)
			if !e.Ref().IsNil() {
				check(ctx, a.Ref().Value(), e.Ref().Value(), "NestedRefs[%d].ref", k)
			}
		}
	}
	// Test that all cloned references see changes to their referenced objects.
	cloned.RefObject().SetValue(55)               // was 42
	cloned.Entries().Add(4, NewTestObject(a, 33)) // was 50
	check(ctx, cloned.RefObjectAlias(), cloned.RefObject(), "Object ref")
	check(ctx, cloned.EntriesAlias(), cloned.Entries(), "Map ref")
	check(ctx, cloned.RefEntries().Get(0), cloned.RefObject(), "RefEntries")
	check(ctx, cloned.NestedRefs().Get(6).Ref(), cloned.RefObject(), "NestedRefs[6]")
	check(ctx, cloned.NestedRefs().Get(7).Ref(), cloned.RefObject(), "NestedRefs[7]")
}
// check compares got against expected and reports whether they were
// considered equal, logging failures through the assert package. name is a
// fmt pattern formatted with the trailing fmt args. Dictionaries are
// compared key by key (recursively); values of the same type exposing an
// Equals method are compared through it; everything else falls back to the
// assert package's comparison.
func check(ctx context.Context, got, expected interface{}, name string, fmt ...interface{}) bool {
	g, e := got, expected
	if g, e := dictionary.From(g), dictionary.From(e); g != nil && e != nil {
		// Comparing dictionaries
		if !assert.For(ctx, "%v.Len", name).That(g.Len()).Equals(e.Len()) {
			return false
		}
		// Every expected key must be present with an equal value...
		for _, k := range e.Keys() {
			e := e.Get(k)
			g, ok := g.Lookup(k)
			if !assert.For(ctx, "%v.Contains(%v)", name, k).That(ok).Equals(true) {
				return false
			}
			if !check(ctx, g, e, "%v got[%v] == expected[%v]", name, k, k) {
				return false
			}
		}
		// ...and got must not contain extra keys.
		for _, k := range g.Keys() {
			_, ok := e.Lookup(k)
			if !assert.For(ctx, "%v.Missing(%v)", name, k).That(ok).Equals(true) {
				return false
			}
		}
		return true
	}
	type ieq interface {
		Equals(generic.TO) bool
	}
	ieqTy := reflect.TypeOf((*ieq)(nil)).Elem()
	gTy, eTy := reflect.TypeOf(g), reflect.TypeOf(e)
	if m := generic.Implements(reflect.TypeOf(g), ieqTy); m.Ok() && gTy == eTy {
		// Comparing using Equals() method
		f := reflect.ValueOf(g).MethodByName("Equals")
		ok := f.Call([]reflect.Value{reflect.ValueOf(e)})[0].Interface().(bool)
		return assert.For(ctx, name, fmt...).Compare(g, "==", e).Test(ok)
	}
	// Comparing using regular assert comparison
	return assert.For(ctx, name, fmt...).That(g).Equals(e)
}
|
/*
协议:
用户可以执行命令, 以 %<command> args... 的形式
如:
%ls // 查看server中的文件
%exit // 推出客户端
%set-name 姬小野 // 设置用户名
%download 十万个为什么.pdf // 下载文件
%upload test.go // 上传文件
可以直接输入消息, 也就是除合法命令格式以外的消息, 都作为发送到chatroom的消息来发送
由于tcp传输字节流, 因此在发送每个消息之前, 用一个 4字节 的数据声明要发送的消息的长度, 避免粘包.
*/
package main
import (
"bufio"
"fmt"
socketUtils "github.com/JameyWoo/goroom/socketUtils"
"io/ioutil"
"log"
"net"
"os"
"strings"
)
// main connects to the chat server given as <ip> <port> on the command
// line, starts a goroutine that prints incoming messages (and handles file
// downloads), and forwards user input — plain chat messages or %-commands —
// to the server.
func main() {
	// The server ip and port must be supplied on the command line.
	if len(os.Args) != 3 {
		fmt.Printf("Usage : %s <ip> <port>\n", os.Args[0])
		os.Exit(1)
	}
	conn, err := net.Dial("tcp", os.Args[1]+":"+os.Args[2])
	if err != nil {
		panic(err)
	}
	// Defer only after the error check: the original deferred Close before
	// checking err, which would call Close on a nil connection while the
	// panic above unwinds.
	defer conn.Close()
	done := make(chan struct{})
	go HandleOutput(conn, done) // handles output, including file downloads
	// io.Copy cannot be used here because the input must be parsed for commands.
	input := bufio.NewScanner(os.Stdin)
	for input.Scan() {
		inputStr := input.Text()
		// If the input looks like a command, dispatch it as one.
		subInput := strings.Fields(inputStr)
		if len(subInput) < 1 {
			continue
		}
		switch subInput[0] {
		// Only upload needs local context handling; the remaining commands
		// are simply forwarded for the server to process.
		case "%upload": // upload file(s)
			HandleUpload(conn, inputStr, subInput)
		case "%download": // download file(s)
			socketUtils.SendBytesToConn(conn, []byte(inputStr))
		case "%exit": // leave the chat room
			return
		case "%set-name": // set the user name
			fallthrough
		case "%ls": // list all files in the chat room
			fallthrough
		default:
			// Send the command or plain chat message.
			socketUtils.SendBytesToConn(conn, []byte(inputStr))
		}
	}
	<-done
}
// HandleOutput receives messages from the server and writes them out:
// regular messages go to stdout, while messages starting with "%download"
// are handed to HandleDownload to be saved to disk. A "$ " prompt is
// printed after each message.
func HandleOutput(conn net.Conn, done chan struct{}) {
	for {
		inputByte := socketUtils.ReceiveBytesFromConn(conn)
		inputStr := string(inputByte)
		if len(inputStr) < 9 {
			fmt.Println(inputStr)
		} else {
			if inputStr[:9] == "%download" {
				HandleDownload(inputStr, conn)
			} else {
				fmt.Println(inputStr)
			}
		}
		fmt.Print("$ ")
	}
	// Intended to run only when the server disconnects.
	// NOTE(review): the loop above has no exit path, so this send is
	// unreachable as written — presumably ReceiveBytesFromConn is expected
	// to end the loop on disconnect; verify against socketUtils.
	done <- struct{}{}
}
// HandleDownload saves each file named in a "%download" reply under ./disk/,
// overwriting any existing local file of the same name. For each file the
// server sends either the raw content or the literal "file not exist".
func HandleDownload(inputStr string, conn net.Conn) {
	subInput := strings.Fields(inputStr)
	if len(subInput) >= 2 {
		for _, filename := range subInput[1:] {
			fmt.Println("downloading " + filename + " ...")
			// Target path; the download directory is created on demand.
			newFilename := "./disk/" + filename
			if !socketUtils.Exists("./disk") {
				// Create the disk folder if it does not exist yet.
				err := os.Mkdir("disk", os.ModePerm)
				if err != nil {
					log.Fatal(err)
				}
			}
			// Read the file content (or error marker) from the connection.
			fileByte := socketUtils.ReceiveBytesFromConn(conn)
			if len(fileByte) == 14 && string(fileByte) == "file not exist" {
				fmt.Println("file \"" + filename + "\" not exist")
			} else {
				if socketUtils.Exists(newFilename) {
					// A same-named local file exists; warn and overwrite it.
					fmt.Println("客户端上存在同名文件 \"" + filename + " \", 继续将覆盖该文件!")
					err := os.Remove(newFilename)
					if err != nil {
						log.Fatal(err)
					}
				}
				newFile, err := os.Create(newFilename)
				if err != nil {
					log.Fatal(err)
				}
				_, err = newFile.Write(fileByte)
				if err != nil {
					log.Fatal(err)
				} else {
					fmt.Println("download " + filename + " successed!")
				}
				newFile.Close()
			}
		}
	}
}
// HandleUpload sends the upload command to the server, then streams the raw
// content of every named file, each prefixed with its 4-byte length (the
// protocol's framing to avoid message coalescing).
func HandleUpload(conn net.Conn, inputStr string, subInput []string) {
	// Send the command first.
	socketUtils.SendBytesToConn(conn, []byte(inputStr))
	// Then upload the files; several files may be uploaded at once.
	if len(subInput) >= 2 {
		// TODO: nonexistent files should be detected first, otherwise the
		// log.Fatal below aborts the whole program.
		for _, filename := range subInput[1:] {
			// Read the file and compute its size.
			fileByte, err := ioutil.ReadFile(filename)
			if err != nil {
				log.Fatal(err)
			}
			fileByteLen := len(fileByte)
			preSend := socketUtils.BytesCombine(socketUtils.IntToBytes(fileByteLen), fileByte)
			_, err = conn.Write(preSend)
			if err != nil {
				log.Fatal(err)
			}
		}
	} else {
		fmt.Println("文件上传失败, 请给出文件名, 可同时上传多个文件")
	}
}
|
package controllers
import (
"encoding/json"
"errors"
"io/ioutil"
"net/http"
"strconv"
"github.com/gorilla/mux"
"github.com/jameslahm/bloggy_backend/api/responses"
"github.com/jameslahm/bloggy_backend/models"
)
// CreatePost creates a new post authored by the authenticated user. The
// author id is read from the request context (set by auth middleware) and
// the post fields are decoded from the JSON request body.
func (server *Server) CreatePost(w http.ResponseWriter, r *http.Request) {
	authId := r.Context().Value("id")
	if authId == nil {
		responses.ERROR(w, http.StatusInternalServerError, errors.New("Internal Error"))
		return
	}
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		responses.ERROR(w, http.StatusUnprocessableEntity, err)
		return
	}
	var post = models.Post{}
	err = json.Unmarshal(body, &post)
	if err != nil {
		responses.ERROR(w, http.StatusUnprocessableEntity, err)
		return
	}
	// Assign the author only after a successful decode; previously the
	// assignment ran before the error check, touching a half-decoded post.
	// NOTE(review): assumes the middleware stores the id as uint32.
	post.AuthorID = authId.(uint32)
	err = models.CreatePost(server.DB, &post)
	if err != nil {
		responses.ERROR(w, http.StatusInternalServerError, err)
		return
	}
	responses.JSON(w, http.StatusOK, post)
}
// GetPosts writes all posts as a JSON response.
func (server *Server) GetPosts(w http.ResponseWriter, r *http.Request) {
	posts, err := models.FindAllPosts(server.DB)
	if err != nil {
		responses.ERROR(w, http.StatusInternalServerError, err)
		return
	}
	responses.JSON(w, http.StatusOK, posts)
}
// GetPost writes the post identified by the "id" path variable as JSON.
// A non-numeric id yields 400; a lookup failure yields 500.
func (server *Server) GetPost(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	id, err := strconv.ParseInt(vars["id"], 10, 32)
	if err != nil {
		responses.ERROR(w, http.StatusBadRequest, err)
		return
	}
	post, err := models.FindPostById(server.DB, int(id))
	if err != nil {
		responses.ERROR(w, http.StatusInternalServerError, err)
		return
	}
	responses.JSON(w, http.StatusOK, post)
}
// UpdataPost updates the post identified by the "id" path variable with the
// fields supplied in the JSON request body. Only the post's author may
// update it. (The existing "Updata" spelling is kept so registered routes
// keep working.)
func (server *Server) UpdataPost(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	id, err := strconv.ParseInt(vars["id"], 10, 32)
	if err != nil {
		responses.ERROR(w, http.StatusBadRequest, err)
		return
	}
	authId := r.Context().Value("id")
	post, err := models.FindPostById(server.DB, int(id))
	if err != nil {
		responses.ERROR(w, http.StatusBadRequest, err)
		return
	}
	// NOTE(review): compares uint32 against the context value's dynamic
	// type — confirm the middleware stores the id as uint32.
	if post.AuthorID != authId {
		// err is nil on this path; report an explicit authorization error
		// instead of handing a nil error to the response writer.
		responses.ERROR(w, http.StatusBadRequest, errors.New("only the author can update this post"))
		return
	}
	body, err := ioutil.ReadAll(r.Body)
	if err != nil {
		responses.ERROR(w, http.StatusUnprocessableEntity, err)
		return
	}
	var obj map[string]interface{}
	err = json.Unmarshal(body, &obj)
	if err != nil {
		responses.ERROR(w, http.StatusUnprocessableEntity, err)
		return
	}
	err = models.UpdatePost(server.DB, int(id), obj)
	if err != nil {
		responses.ERROR(w, http.StatusInternalServerError, err)
		return
	}
	responses.JSON(w, http.StatusOK, "ok")
}
// DeletePost deletes the post identified by the "id" path variable. Only
// the post's author may delete it.
func (server *Server) DeletePost(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	id, err := strconv.ParseInt(vars["id"], 10, 32)
	if err != nil {
		responses.ERROR(w, http.StatusBadRequest, err)
		return
	}
	authId := r.Context().Value("id")
	post, err := models.FindPostById(server.DB, int(id))
	if err != nil {
		responses.ERROR(w, http.StatusBadRequest, err)
		return
	}
	if post.AuthorID != authId {
		// err is nil on this path; report an explicit authorization error
		// instead of handing a nil error to the response writer.
		responses.ERROR(w, http.StatusBadRequest, errors.New("only the author can delete this post"))
		return
	}
	err = models.DeletePost(server.DB, int(id))
	if err != nil {
		responses.ERROR(w, http.StatusInternalServerError, err)
		return
	}
	responses.JSON(w, http.StatusOK, "ok")
}
|
package resources
import (
"fmt"
"io/ioutil"
"log"
)
// Resource pairs a resource's name with its raw file content.
type Resource struct {
	Name string
	File []byte
}

// Fetch loads the resource stored at app/resources/<ext>/<filename>.<ext>
// and returns it wrapped in a Resource. A read failure is logged and the
// error returned to the caller.
func Fetch(filename string, ext string) (*Resource, error) {
	path := fmt.Sprintf("app/resources/%s/%s.%s", ext, filename, ext)
	data, err := ioutil.ReadFile(path)
	if err != nil {
		log.Printf("Couldn't gather resources: %s", filename)
		return nil, err
	}
	return &Resource{Name: filename, File: data}, nil
}
|
package fritz
import (
"crypto/tls"
"crypto/x509"
"net/http"
"net/url"
"github.com/bpicode/fritzctl/config"
"github.com/bpicode/fritzctl/logger"
)
// HomeAuto is a client for the Home Automation HTTP Interface,
// see https://avm.de/fileadmin/user_upload/Global/Service/Schnittstellen/AHA-HTTP-Interface.pdf.
type HomeAuto interface {
	Login() error                              // authenticate against the box
	List() (*Devicelist, error)                // enumerate known devices
	On(names ...string) error                  // switch named devices on
	Off(names ...string) error                 // switch named devices off
	Toggle(names ...string) error              // flip the state of named devices
	Temp(value float64, names ...string) error // apply a temperature setting
}

// homeAuto is the default HomeAuto implementation. It bundles the HTTP
// client session, the raw AHA API bindings, and a configurator (built by
// concurrentConfigurator) that applies commands to devices.
type homeAuto struct {
	client *Client           // HTTP session with the FRITZ!Box
	aha    HomeAutomationAPI // raw AHA-HTTP API bindings
	cAha   homeAutoConfigurator
}
// Login tries to authenticate against the FRITZ!Box. If not successful, an error is returned. This method should be
// called before any of the other methods unless authentication is turned off at the FRITZ!Box itself.
func (h *homeAuto) Login() error {
	return h.client.Login()
}

// List fetches the devices known at the FRITZ!Box. See Devicelist for details. If the devices could not be obtained,
// an error is returned.
func (h *homeAuto) List() (*Devicelist, error) {
	return h.aha.ListDevices()
}

// On activates the given devices. Devices are identified by their name. If any of the operations does not succeed,
// an error is returned.
func (h *homeAuto) On(names ...string) error {
	return h.cAha.on(names...)
}

// Off deactivates the given devices. Devices are identified by their name. Inverse of On.
func (h *homeAuto) Off(names ...string) error {
	return h.cAha.off(names...)
}

// Toggle switches the state of the given devices from ON to OFF and vice versa. Devices are identified by their name.
func (h *homeAuto) Toggle(names ...string) error {
	return h.cAha.toggle(names...)
}

// Temp applies the temperature setting to the given devices. Devices are
// identified by their name. The work is delegated to the configurator.
func (h *homeAuto) Temp(value float64, names ...string) error {
	return h.cAha.temp(value, names...)
}
// Option applies fine-grained configuration to the HomeAuto client. Options
// are passed to NewHomeAuto and applied in the order given.
type Option func(h *homeAuto)
// NewHomeAuto builds a HomeAuto that communicates with the FRITZ!Box by
// means of the Home Automation HTTP Interface, then applies the supplied
// options to the default configuration.
func NewHomeAuto(options ...Option) HomeAuto {
	c := defaultClient()
	api := HomeAutomation(c)
	h := homeAuto{
		client: c,
		aha:    api,
		cAha:   concurrentConfigurator(api),
	}
	for _, opt := range options {
		opt(&h)
	}
	return &h
}
// URL sets the target host of the FRITZ!Box. Note that for usual setups,
// the url https://fritz.box:443 works.
func URL(u *url.URL) Option {
	return func(h *homeAuto) {
		cfg := h.client.Config.Net
		cfg.Host = u.Hostname()
		cfg.Port = u.Port()
		cfg.Protocol = u.Scheme
	}
}
// Credentials configures the username and password for authentication. To
// use the default admin account, pass an empty string as the username.
func Credentials(username, password string) Option {
	return func(h *homeAuto) {
		login := h.client.Config.Login
		login.Username = username
		login.Password = password
	}
}
// SkipTLSVerify omits TLS verification of the FRITZ!Box server. It is not
// recommended; prefer an explicit certificate via Certificate.
func SkipTLSVerify() Option {
	return func(h *homeAuto) {
		h.client.HTTPClient.Transport = &http.Transport{
			TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
		}
	}
}
// Certificate activates TLS verification of the FRITZ!Box server, where the
// certificate is explicitly specified as a byte array in PEM format. If the
// certificate cannot be parsed, the host certificate pool is used instead.
func Certificate(bs []byte) Option {
	return func(h *homeAuto) {
		pool := x509.NewCertPool()
		ok := pool.AppendCertsFromPEM(bs)
		if !ok {
			logger.Warn("Using host certificates as fallback. Supplied certificate could not be parsed.")
		}
		h.client.HTTPClient.Transport = &http.Transport{
			TLSClientConfig: &tls.Config{RootCAs: pool},
		}
	}
}
// AuthEndpoint configures the endpoint for authentication. The default is "/login_sid.lua".
func AuthEndpoint(s string) Option {
	return func(h *homeAuto) {
		h.client.Config.Login.LoginURL = s
	}
}
// defaultClient assembles a Client with default configuration, a plain HTTP
// client, and empty (not yet authenticated) session state.
func defaultClient() *Client {
	return &Client{
		Config:      defaultConfig(),
		HTTPClient:  defaultHTTP(),
		SessionInfo: defaultSessionInfo(),
	}
}

// defaultSessionInfo returns an empty session (no SID yet).
func defaultSessionInfo() *SessionInfo {
	return &SessionInfo{}
}

// defaultHTTP returns a plain http.Client.
// NOTE(review): no Timeout is set, so requests can hang indefinitely on an
// unresponsive box — consider configuring one.
func defaultHTTP() *http.Client {
	return &http.Client{}
}

// defaultConfig combines the default network target, PKI, and login settings.
func defaultConfig() *config.Config {
	return &config.Config{
		Net:   defaultTarget(),
		Pki:   defaultPki(),
		Login: defaultLogin(),
	}
}

// defaultLogin points at the stock FRITZ!OS authentication endpoint.
func defaultLogin() *config.Login {
	return &config.Login{
		LoginURL: "/login_sid.lua",
	}
}

// defaultPki returns an empty PKI configuration (no custom certificates).
func defaultPki() *config.Pki {
	return &config.Pki{}
}

// defaultTarget points at https://fritz.box:443, the usual box address.
func defaultTarget() *config.Net {
	return &config.Net{
		Protocol: "https",
		Host:     "fritz.box",
		Port:     "443",
	}
}
|
package chapstream
import (
"net/http"
"path"
"strconv"
"strings"
"text/template"
)
// homeHandler serves the landing-page template for GET requests; any other
// method is rejected with 405.
func (cs *chapStream) homeHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		cs.Log.Error("[", r.Method, "] Method not allowed: ", r.URL)
		http.Error(w, "Method not allowed", 405)
		return
	}
	p := path.Join(cs.Config.Common.StaticFilesDir, "templates/landing.html")
	// TODO: Check file existence
	t, err := template.ParseFiles(p)
	if err != nil {
		cs.Log.Error("[", r.Method, "] URL: ", r.URL, " Internal server error: ", err)
		http.Error(w, "Internal server error.", 500)
		return
	}
	// Fix: the Execute error was previously discarded. Part of the response
	// may already be written by now, so on failure we can only log.
	if err := t.Execute(w, p); err != nil {
		cs.Log.Error("[", r.Method, "] URL: ", r.URL, " Template execute error: ", err)
		return
	}
	cs.Log.Info("[", r.Method, "] URL: ", r.URL, " OK")
}
// chatHandler serves the chat-page template for GET requests; any other
// method is rejected with 405.
func (cs *chapStream) chatHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		cs.Log.Error("[", r.Method, "] Method not allowed: ", r.URL)
		http.Error(w, "Method not allowed", 405)
		return
	}
	p := path.Join(cs.Config.Common.StaticFilesDir, "templates/chat.html")
	// TODO: Check file existence
	t, err := template.ParseFiles(p)
	if err != nil {
		cs.Log.Error("[", r.Method, "] URL: ", r.URL, " Internal server error: ", err)
		http.Error(w, "Internal server error.", 500)
		return
	}
	// Fix: the Execute error was previously discarded. Part of the response
	// may already be written by now, so on failure we can only log.
	if err := t.Execute(w, p); err != nil {
		cs.Log.Error("[", r.Method, "] URL: ", r.URL, " Template execute error: ", err)
		return
	}
	cs.Log.Info("[", r.Method, "] URL: ", r.URL, " OK")
}
// sendMessageHandler handles POST requests that send a chat message. It
// validates the form fields (PublicKey, Name, Host, ClientID, Message),
// reuses an existing session for the recipient when one exists, and
// otherwise performs a handshake to create a new session.
func (cs *chapStream) sendMessageHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		cs.Log.Error("[", r.Method, "] Method not allowed: ", r.URL)
		http.Error(w, "Method not allowed", 405)
		return
	}
	publicKey := r.FormValue("PublicKey")
	if len(publicKey) == 0 {
		cs.jsonErrorResponse(w, SendMessageFailed,
			PublicKeyRequired, http.StatusBadRequest)
		return
	}
	name := r.FormValue("Name")
	if len(name) == 0 {
		cs.jsonErrorResponse(w, SendMessageFailed,
			NameRequired, http.StatusBadRequest)
		return
	}
	host := r.FormValue("Host")
	// Fix: this check previously re-tested publicKey, so a missing Host was
	// never reported and slipped through to the handshake.
	if len(host) == 0 {
		cs.jsonErrorResponse(w, SendMessageFailed,
			HostRequired, http.StatusBadRequest)
		return
	}
	cID := r.FormValue("ClientID")
	if len(cID) == 0 {
		cs.jsonErrorResponse(w, SendMessageFailed,
			ClientIDRequired, http.StatusBadRequest)
		return
	}
	c, err := strconv.Atoi(cID)
	if err != nil {
		cs.jsonErrorResponse(w, SendMessageFailed,
			InvalidClientID, http.StatusBadRequest)
		return
	}
	clientID := uint32(c)
	m := r.FormValue("Message")
	if len(m) == 0 {
		cs.jsonErrorResponse(w, SendMessageFailed,
			MessageRequired, http.StatusBadRequest)
		return
	}
	msg := []byte(m)
	userHash := GetUserHash(name, publicKey)
	cs.sessions.Lock()
	if s, ok := cs.sessions.s[userHash]; ok {
		// Existing session: hand the message to its output channel.
		// NOTE(review): this send happens while holding the sessions lock;
		// if the channel is full/unbuffered it can block other handlers.
		s.session.Output <- msg
		cs.sessions.Unlock()
	} else {
		// No session yet: handshake, register, and start the session loop.
		s, err := cs.telegraph.NewConnection(publicKey, host, userHash, clientID)
		if err != nil {
			cs.Log.Error("[", r.Method, "] URL: ", r.URL, " Error occured while handshake: ", err)
			cs.jsonErrorResponse(w, SendMessageFailed,
				HandshakeFailed, http.StatusBadRequest)
			cs.sessions.Unlock()
			return
		}
		cs.sessions.Unlock()
		cs.addSession(s)
		go cs.handleSession(s)
		s.Output <- msg
	}
	cs.jsonSuccessResponse(w, SendMessageSuccess)
}
// lookupHandler handles GET requests that resolve a comma-separated list of
// user hashes (query parameter "userHashes") via the telegraph service and
// returns the lookup result as JSON.
func (cs *chapStream) lookupHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != "GET" {
		cs.Log.Error("[", r.Method, "] Method not allowed: ", r.URL)
		http.Error(w, "Method not allowed", 405)
		return
	}
	userHashes := r.URL.Query().Get("userHashes")
	if len(userHashes) == 0 {
		cs.jsonErrorResponse(w, LookupFailed,
			UserHashesRequired, http.StatusBadRequest)
		return
	}
	// Renamed from "uh", which shadowed the uh struct type in this package.
	hashes := strings.Split(userHashes, ",")
	lr, err := cs.telegraph.LookupUserHashes(hashes)
	if err != nil {
		cs.jsonErrorResponse(w, LookupFailed,
			TelegraphFailed, http.StatusInternalServerError)
		return
	}
	cs.jsonSuccessWithDataResponse(w, LookupSuccess, *lr)
}
// uh is the JSON response payload carrying a computed user hash.
type uh struct {
	UserHash string
}
// getUserHashHandler handles POST requests that compute the user hash for a
// given Name/PublicKey pair and returns it as JSON.
func (cs *chapStream) getUserHashHandler(w http.ResponseWriter, r *http.Request) {
	if r.Method != "POST" {
		cs.Log.Error("[", r.Method, "] Method not allowed: ", r.URL)
		http.Error(w, "Method not allowed", 405)
		return
	}
	pubKey := r.FormValue("PublicKey")
	if pubKey == "" {
		cs.jsonErrorResponse(w, GetUserHashFailed,
			PublicKeyRequired, http.StatusBadRequest)
		return
	}
	userName := r.FormValue("Name")
	if userName == "" {
		cs.jsonErrorResponse(w, GetUserHashFailed,
			NameRequired, http.StatusBadRequest)
		return
	}
	result := uh{UserHash: GetUserHash(userName, pubKey)}
	cs.jsonSuccessWithDataResponse(w, GetUserHashSuccess, &result)
}
|
package main
import (
"fmt"
//"strings"
// "container/list"
)
// heap is a binary min-heap of ints backed by a slice.
type heap struct {
	back []int
}

// new returns an empty heap with initial capacity s. (Note: this shadows
// the builtin new within this package; the name is kept for compatibility.)
func new(s int) *heap {
	return &heap{back: make([]int, 0, s)}
}

// insert adds i to the heap and restores the heap property.
// Improvement: uses append instead of reslicing, so the heap can grow past
// its initial capacity instead of panicking.
func (h *heap) insert(i int) {
	h.back = append(h.back, i)
	h.upHeap(len(h.back) - 1)
}

// upHeap sifts the element at index u toward the root until its parent is
// no larger than it.
func (h *heap) upHeap(u int) {
	if u == 0 {
		return
	}
	parent := (u - 1) / 2
	if h.back[u] >= h.back[parent] {
		return
	}
	h.back[u], h.back[parent] = h.back[parent], h.back[u]
	h.upHeap(parent)
}

// pop removes and returns the smallest element (the root). It panics on an
// empty heap, matching the original behavior of indexing back[0].
func (h *heap) pop() int {
	top := h.back[0]
	last := len(h.back) - 1
	h.back[0] = h.back[last]
	h.back = h.back[:last]
	// Fix: the root must be sifted down after replacement; the original
	// never called downHeap, so the heap property was lost after a pop.
	h.downHeap(0)
	return top
}

// downHeap sifts the element at index d toward the leaves, swapping it with
// its smaller child while a child is smaller.
// Fixes over the original: children of d are 2d+1 and 2d+2 (not 2d and
// 2d+1), bounds checks use "< len" (the original's ">= len" would index out
// of range), and comparisons select the smaller child for a min-heap.
func (h *heap) downHeap(d int) {
	n := len(h.back)
	smallest := d
	if l := 2*d + 1; l < n && h.back[l] < h.back[smallest] {
		smallest = l
	}
	if r := 2*d + 2; r < n && h.back[r] < h.back[smallest] {
		smallest = r
	}
	if smallest != d {
		h.back[d], h.back[smallest] = h.back[smallest], h.back[d]
		h.downHeap(smallest)
	}
}
func main () {
fmt.Print("Initialized\n")
s := new(40)
s.insert(7)
s.insert(21)
s.insert(14)
fmt.Println(s.pop())
fmt.Println(s.pop())
fmt.Println(s.pop())
}
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package rule
import "fmt"
// Type specifies the audit rule type.
type Type int

// The rule types supported by this package. The trailing comment on each
// constant names the Rule implementation it corresponds to.
const (
	DeleteAllRuleType      Type = iota + 1 // DeleteAllRule
	FileWatchRuleType                      // FileWatchRule
	AppendSyscallRuleType                  // SyscallRule
	PrependSyscallRuleType                 // SyscallRule
)
// Rule is the generic interface that all rule types implement. Use TypeOf
// to discover the concrete rule kind before type-asserting.
type Rule interface {
	TypeOf() Type // TypeOf returns the type of rule.
}
// DeleteAllRule deletes all existing rules. If Keys is non-empty, deletion
// is restricted to rules tagged with those keys.
type DeleteAllRule struct {
	Type Type
	Keys []string // Delete rules that have these keys.
}

// TypeOf returns DeleteAllRuleType.
func (r *DeleteAllRule) TypeOf() Type { return r.Type }
// FileWatchRule is used to audit access to particular files or directories
// that you may be interested in.
type FileWatchRule struct {
	Type        Type
	Path        string       // file or directory to watch
	Permissions []AccessType // which access kinds trigger an audit record
	Keys        []string     // tags attached to generated records
}

// TypeOf returns FileWatchRuleType.
func (r *FileWatchRule) TypeOf() Type { return r.Type }
// SyscallRule is used to audit invocations of specific syscalls.
type SyscallRule struct {
	Type     Type
	List     string       // audit list name (e.g. which filter list the rule attaches to)
	Action   string       // action taken when the rule matches
	Filters  []FilterSpec // additional field/value filters
	Syscalls []string     // syscall names the rule applies to
	Keys     []string     // tags attached to generated records
}

// TypeOf returns either AppendSyscallRuleType or PrependSyscallRuleType.
func (r *SyscallRule) TypeOf() Type { return r.Type }
// AccessType specifies the type of file access to audit.
type AccessType uint8

// The access types that can be audited for file watches.
const (
	ReadAccessType AccessType = iota + 1
	WriteAccessType
	ExecuteAccessType
	AttributeChangeAccessType
)

// accessTypeName maps each access type to its textual spelling.
var accessTypeName = map[AccessType]string{
	ReadAccessType:            "read",
	WriteAccessType:           "write",
	ExecuteAccessType:         "execute",
	AttributeChangeAccessType: "attribute",
}

// String returns the human-readable name of the access type, or "unknown"
// for values outside the defined set.
func (t AccessType) String() string {
	if name, ok := accessTypeName[t]; ok {
		return name
	}
	return "unknown"
}
// FilterType specifies a type of filter to apply to a syscall rule.
type FilterType uint8

// The type of filters that can be applied.
const (
	InterFieldFilterType FilterType = iota + 1 // Inter-field comparison filtering (-C).
	ValueFilterType                            // Filtering based on values (-F).
)

// FilterSpec defines a filter to apply to a syscall rule.
type FilterSpec struct {
	Type       FilterType
	LHS        string
	Comparator string
	RHS        string
}

// String renders the filter as "<lhs> <comparator> <rhs>".
func (f *FilterSpec) String() string {
	return fmt.Sprintf("%s %s %s", f.LHS, f.Comparator, f.RHS)
}
|
package main
import fmt "fmt"
// main builds a small hard-coded weighted graph, prints it, runs Prim's
// MST algorithm over it, and prints the resulting spanning tree.
func main() {
	fmt.Println()
	fmt.Println(" The tree looks like this")
	fmt.Println(" 3")
	fmt.Println(" A-----B")
	fmt.Println(" 2\\ |")
	fmt.Println(" \\ |")
	fmt.Println(" C | 4")
	fmt.Println(" / \\ |")
	fmt.Println(" 1/ 3\\|")
	fmt.Println(" E-----D")
	fmt.Println(" 5")
	fmt.Println()
	//Creation of vertices (name set, distance 0, color unset)
	A := new(vertex)
	A.name = ("A")
	A.D = 0
	A.color = ""
	B := new(vertex)
	B.name = ("B")
	B.D = 0
	B.color = ""
	C := new(vertex)
	C.name = ("C")
	C.D = 0
	C.color = ""
	D := new(vertex)
	D.name = ("D")
	D.D = 0
	D.color = ""
	E := new(vertex)
	E.name = ("E")
	E.D = 0
	E.color = ""
	//Slice with all vertices on it
	vertices := make([] *vertex, 5)
	vertices[0] = A
	vertices[1] = B
	vertices[2] = C
	vertices[3] = D
	vertices[4] = E
	//Creation of new edges; note edges copy the vertex values, they do not
	//point back at the vertices above.
	AB := new(edge)
	AB.v1 = *A
	AB.v2 = *B
	AB.cost = 3
	AC := new(edge)
	AC.v1 = *A
	AC.v2 = *C
	AC.cost = 2
	BD := new(edge)
	BD.v1 = *B
	BD.v2 = *D
	BD.cost = 4
	CE := new(edge)
	CE.v1 = *C
	CE.v2 = *E
	CE.cost = 1
	ED := new(edge)
	ED.v1 = *E
	ED.v2 = *D
	ED.cost = 5
	CD := new(edge)
	CD.v1 = *C
	CD.v2 = *D
	CD.cost = 3
	//Slice with all the edges on it
	edges := make([] *edge, 6)
	edges[0] = AB
	edges[1] = AC
	edges[2] = BD
	edges[3] = CE
	edges[4] = ED
	edges[5] = CD
	//New graph, valued by the vertex and edge lists
	graphy := new(graph)
	graphy.v = vertices
	graphy.e = edges
	graphy.toString()
	fmt.Println()
	fmt.Println(" After MST the tree looks like this")
	fmt.Println(" 3")
	fmt.Println(" A-----B")
	fmt.Println(" 2\\")
	fmt.Println(" \\")
	fmt.Println(" C")
	fmt.Println(" / \\")
	fmt.Println(" 1/ 3\\")
	fmt.Println(" E D")
	fmt.Println()
	//Run Prim's algorithm and print the edges of the resulting tree.
	asd := prims(graphy)
	asd.toString()
}
// edge is an undirected, weighted connection between two vertices. The
// endpoints are stored by value (copies), not by pointer.
type edge struct {
	v1   vertex
	v2   vertex
	cost int
}

// vertex is a named graph node; D is its working distance/key and color its
// visit state ("WHITE"/"GRAY"/"BLACK") during traversal.
type vertex struct {
	name  string
	D     int
	color string
}

// graph holds vertex and edge lists; nil slots are allowed in both.
type graph struct {
	v []*vertex
	e []*edge
}
// prims computes (an attempt at) a minimum spanning tree of g using a
// Prim-style traversal driven by a vertex min-heap, and returns a new graph
// holding the selected edges. Returns an empty graph if g has no edges.
// NOTE(review): edges store vertex *values* (edge.v1/v2 are copies of the
// vertices), so the color/D updates below mutate per-edge copies rather
// than shared vertex state — verify this yields the intended MST on all
// inputs, not only the demo graph in main.
func prims(g *graph) graph {
	if len(g.e) == 0 {
		fmt.Println("Error, graph is empty")
		return *new(graph)
	}
	// Result graph starts with nil-filled slices sized like the input;
	// graph.add fills the first nil slot.
	returnV := make([] *vertex, len(g.v))
	returnE := make([] *edge, len(g.e))
	returnG := new(graph)
	returnG.v = returnV
	returnG.e = returnE
	// Initialize every vertex (and every per-edge vertex copy) to
	// "unvisited" with an effectively infinite distance.
	for i := 0; i < len(g.v); i++ {
		g.v[i].D = 999
		g.v[i].color = "WHITE"
	}
	for i := 0; i < len(g.e); i++ {
		g.e[i].v1.D = 999
		g.e[i].v2.D = 999
		g.e[i].v1.color = "WHITE"
		g.e[i].v2.color = "WHITE"
	}
	// Priority queue of frontier vertices, keyed by D.
	// NOTE(review): the backing array is hard-coded to 30 entries while max
	// is len(g.v) — confirm 30 is always sufficient (see heap.pop).
	Q := new(heap)
	Q.array = make([] vertex, 30)
	Q.max = len(g.v)
	Q.amount = 0
	// Start the traversal from an arbitrary vertex: the first edge's v1.
	startt := g.e[0].v1
	startt.color = "GRAY"
	startt.D = 0
	Q.insert(startt)
	for Q.empty() == false {
		current := Q.pop()
		// Scan every edge incident to current (in either orientation),
		// relaxing the far endpoint and collecting candidate MST edges.
		for i := 0; i < len(g.v); i++ {
			neigh := *g.v[i]
			for j := 0; j < len(g.e); j++ {
				if (neigh.name == g.e[j].v1.name && current.name == g.e[j].v2.name) {
					if g.e[j].v1.color == "WHITE" {
						g.e[j].v1.color = "GRAY"
						g.e[j].v1.D = g.e[j].cost
						//g.e[j].v2.D = g.e[j].cost
						Q.insert(g.e[j].v1)
						returnG.add(g.e[j])
					} else if g.e[j].v1.color == "GRAY" {
						if g.e[j].v1.D > g.e[j].cost {
							g.e[j].v1.D = g.e[j].cost
							returnG.add(g.e[j])
						}
					}
					current.color = "BLACK"
				}
				if (neigh.name == g.e[j].v2.name && current.name == g.e[j].v1.name) {
					if g.e[j].v2.color == "WHITE" {
						g.e[j].v2.color = "GRAY"
						g.e[j].v2.D = g.e[j].cost
						//g.e[j].v1.D = g.e[j].cost
						Q.insert(g.e[j].v2)
						returnG.add(g.e[j])
					} else if g.e[j].v2.color == "GRAY" {
						if g.e[j].v2.D > g.e[j].cost {
							g.e[j].v2.D = g.e[j].cost
							returnG.add(g.e[j])
						}
					}
					current.color = "BLACK"
				}
			}
		}
	}
	// Drop duplicate edges collected during relaxation before returning.
	returnG.dublicates()
	return *returnG
}
// add stores ed in the first unused (nil) slot of the graph's edge list.
// If every slot is already occupied the edge is silently dropped.
func (g graph) add(ed *edge) {
	for slot := range g.e {
		if g.e[slot] != nil {
			continue
		}
		g.e[slot] = ed
		return
	}
}
// dublicates removes duplicate edges (same v1 name and same v2 name, i.e.
// same orientation) from g.e by nilling out one of the copies, and returns
// the graph.
// NOTE(review): the in-loop "j++" used to skip the i==j case, combined with
// the early return when j runs past the end, means some pairs can go
// unchecked; reversed duplicates (v1/v2 swapped) are not detected either.
func (g *graph) dublicates() graph{
	for i := 0; i<len(g.e); i++ {
		for j := 0; j<len(g.e); j++ {
			if g.e[i] != nil && g.e[j] != nil {
				// Skip comparing an edge against itself.
				if j==i {
					j++
				}
				if j >= len(g.e) {
					return *g
				}
				if g.e[i].v1.name == g.e[j].v1.name &&
					g.e[i].v2.name == g.e[j].v2.name {
					g.e[i] = nil
				}
			}
		}
	}
	return *g
}
// toString prints each non-nil edge as "v1 - cost - v2", one per line.
func (g *graph) toString() {
	for _, e := range g.e {
		if e == nil {
			continue
		}
		fmt.Print(e.v1.name, " - ")
		fmt.Print(e.cost, " - ")
		fmt.Println(e.v2.name)
	}
}
// heap is a fixed-capacity binary min-heap of vertices ordered by their D
// value, used as the priority queue in prims.
type heap struct {
	array []vertex // backing storage (see prims: allocated with 30 slots)
	//temp int
	max    int // capacity: maximum number of vertices the heap may hold
	amount int // current number of vertices stored
}
// empty reports whether the heap currently holds no vertices.
// Simplified from an if/else with an unreachable trailing return.
func (h *heap) empty() bool {
	return h.amount == 0
}
// insert appends vertex e to the heap and sifts it up by its D value. If
// the heap is already at max capacity the vertex is silently dropped.
func (h *heap) insert(e vertex) {
	if h.amount >= h.max {
		return
	}
	h.array[h.amount] = e
	h.amount++
	child := h.amount - 1
	// Sift up: swap with the parent while the child's key is smaller.
	for child > 0 {
		parent := (child - 1) / 2
		if h.array[child].D >= h.array[parent].D {
			break
		}
		h.array[parent], h.array[child] = h.array[child], h.array[parent]
		child = parent
	}
}
// pop removes and returns the minimum-D vertex from the root of the heap,
// then sifts the new root down to restore the heap property.
// NOTE(review): h.array[29] is used as a scratch "empty" value to overwrite
// the vacated last slot — this hard-codes the backing array's size from
// prims. The sift-down reads h.array[x*2+1] and h.array[x*2+2] without
// bounds checks against h.amount (it relies on zeroed slots, mapping D==0
// to 999), can index past the array for larger heaps, and swaps only the D
// fields rather than whole vertices — confirm all of this is intended.
func (h *heap) pop() vertex {
	//fmt.Print("POP1 ", h.array[0].name, " ", h.amount, " ")
	popped := h.array[0]
	h.array[0] = h.array[h.amount-1]
	h.array[h.amount-1] = h.array[29]
	h.amount--
	//fmt.Println("POP2 ", h.array[0].name," ", h.amount, " ")
	left := 0
	right := 0
	x := 0
	for {
		parent := h.array[x].D
		left = h.array[x*2+1].D
		right = h.array[x*2+2].D
		// Treat zeroed (unused) slots as "infinitely far".
		if left == 0 {
			left = 999
		}
		if right == 0 {
			right = 999
		}
		if parent <= left && parent <= right {
			break
		} else if parent > left || parent > right {
			if left <= right {
				h.array[x].D, h.array[x*2+1].D = left, parent
				x = x*2+1
			} else {
				h.array[x].D, h.array[x*2+2].D = right, parent
				x = x*2+2
			}
		}
	}
	return popped
}
// print writes the heap's D values level by level: the root on its own
// line, then 2, 4, 8, ... values per line.
func (h *heap) print() {
	fmt.Println(h.array[0].D)
	width, printed := 2, 0
	for i := 1; i <= h.amount-1; i++ {
		fmt.Print(h.array[i].D, " ")
		printed++
		if printed == width {
			fmt.Println()
			width *= 2
			printed = 0
		}
	}
	fmt.Println()
	fmt.Println()
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"fmt"
"io"
"github.com/cockroachdb/cockroach/pkg/sql/opt/optgen/lang"
)
// exprsGen generates the memo expression structs used by the optimizer, as well
// as memoization and interning methods.
type exprsGen struct {
	compiled *lang.CompiledExpr // compiled Optgen definitions driving generation
	md       *metadata          // name/type metadata for the "memo" package
	w        io.Writer          // destination for the generated Go source
}
// generate emits the complete generated source for the memo package: the
// package clause and fixed import set, one struct (plus methods) per
// define, and the memoize/add-to-group/intern/build-props support funcs.
func (g *exprsGen) generate(compiled *lang.CompiledExpr, w io.Writer) {
	g.compiled = compiled
	g.md = newMetadata(compiled, "memo")
	g.w = w
	// File header: package clause and imports used by the generated code.
	fmt.Fprintf(g.w, "package memo\n\n")
	fmt.Fprintf(g.w, "import (\n")
	fmt.Fprintf(g.w, "  \"unsafe\"\n")
	fmt.Fprintf(g.w, "\n")
	fmt.Fprintf(g.w, "  \"github.com/cockroachdb/cockroach/pkg/sql/opt\"\n")
	fmt.Fprintf(g.w, "  \"github.com/cockroachdb/cockroach/pkg/sql/opt/cat\"\n")
	fmt.Fprintf(g.w, "  \"github.com/cockroachdb/cockroach/pkg/sql/opt/constraint\"\n")
	fmt.Fprintf(g.w, "  \"github.com/cockroachdb/cockroach/pkg/sql/opt/invertedexpr\"\n")
	fmt.Fprintf(g.w, "  \"github.com/cockroachdb/cockroach/pkg/sql/opt/props\"\n")
	fmt.Fprintf(g.w, "  \"github.com/cockroachdb/cockroach/pkg/sql/opt/props/physical\"\n")
	fmt.Fprintf(g.w, "  \"github.com/cockroachdb/cockroach/pkg/sql/sem/tree\"\n")
	fmt.Fprintf(g.w, "  \"github.com/cockroachdb/cockroach/pkg/sql/types\"\n")
	fmt.Fprintf(g.w, "  \"github.com/cockroachdb/cockroach/pkg/sql/inverted\"\n")
	fmt.Fprintf(g.w, "  \"github.com/cockroachdb/errors\"\n")
	fmt.Fprintf(g.w, ")\n\n")
	// One type definition (and its methods) per Optgen define.
	for _, define := range g.compiled.Defines {
		g.genExprDef(define)
	}
	// Shared support functions over all defines.
	g.genMemoizeFuncs()
	g.genAddToGroupFuncs()
	g.genInternFuncs()
	g.genBuildPropsFunc()
}
// genExprDef generates an expression's type definition and its methods.
// The shape of the generated code depends on the define's tags: List types
// become slices, Enforcers and Privates get their own struct forms, and
// everything else becomes a regular expression struct.
func (g *exprsGen) genExprDef(define *lang.DefineExpr) {
	opTyp := g.md.typeOf(define)
	// Generate comment for the expression struct.
	generateComments(g.w, define.Comments, string(define.Name), opTyp.name)
	// Generate the struct and methods.
	if define.Tags.Contains("List") {
		listItemTyp := opTyp.listItemType
		fmt.Fprintf(g.w, "type %s []%s\n\n", opTyp.name, listItemTyp.name)
		fmt.Fprintf(g.w, "var Empty%s = %s{}\n\n", opTyp.name, opTyp.name)
		g.genListExprFuncs(define)
	} else if define.Tags.Contains("Enforcer") {
		g.genExprStruct(define)
		g.genEnforcerFuncs(define)
	} else if define.Tags.Contains("Private") {
		g.genPrivateStruct(define)
	} else {
		g.genExprStruct(define)
		g.genExprFuncs(define)
	}
	// Generate the expression's group struct and methods
	g.genExprGroupDef(define)
}
// genExprGroupDef generates the group struct definition for a relational
// expression, plus its methods:
//
//   type selectGroup struct {
//     mem   *Memo
//     rel   props.Relational
//     first SelectExpr
//     best  bestProps
//   }
//
// Non-relational defines (scalars, privates, etc.) get no group struct.
func (g *exprsGen) genExprGroupDef(define *lang.DefineExpr) {
	if !define.Tags.Contains("Relational") {
		return
	}
	structType := fmt.Sprintf("%sExpr", define.Name)
	groupStructType := fmt.Sprintf("%sGroup", unTitle(string(define.Name)))
	// Generate the type definition.
	fmt.Fprintf(g.w, "type %s struct {\n", groupStructType)
	fmt.Fprintf(g.w, "  mem *Memo\n")
	fmt.Fprintf(g.w, "  rel props.Relational\n")
	fmt.Fprintf(g.w, "  first %s\n", structType)
	fmt.Fprintf(g.w, "  best bestProps\n")
	fmt.Fprintf(g.w, "}\n\n")
	fmt.Fprintf(g.w, "var _ exprGroup = &%s{}\n\n", groupStructType)
	// Generate the memo method.
	fmt.Fprintf(g.w, "func (g *%s) memo() *Memo {\n", groupStructType)
	fmt.Fprintf(g.w, "  return g.mem\n")
	fmt.Fprintf(g.w, "}\n\n")
	// Generate the relational method.
	fmt.Fprintf(g.w, "func (g *%s) relational() *props.Relational {\n", groupStructType)
	fmt.Fprintf(g.w, "  return &g.rel\n")
	fmt.Fprintf(g.w, "}\n\n")
	// Generate the firstExpr method.
	fmt.Fprintf(g.w, "func (g *%s) firstExpr() RelExpr {\n", groupStructType)
	fmt.Fprintf(g.w, "  return &g.first\n")
	fmt.Fprintf(g.w, "}\n\n")
	// Generate the bestProps method.
	fmt.Fprintf(g.w, "func (g *%s) bestProps() *bestProps {\n", groupStructType)
	fmt.Fprintf(g.w, "  return &g.best\n")
	fmt.Fprintf(g.w, "}\n\n")
}
// genPrivateStruct generates the struct for a define tagged as Private:
//
//   type FunctionPrivate struct {
//     Name       string
//     Typ        *types.T
//     Properties *tree.FunctionProperties
//     Overload   *tree.Overload
//   }
//
// Private structs carry no children and get no expression methods.
func (g *exprsGen) genPrivateStruct(define *lang.DefineExpr) {
	privTyp := g.md.typeOf(define)
	fmt.Fprintf(g.w, "type %s struct {\n", privTyp.name)
	for i, field := range define.Fields {
		// Generate comment for the struct field.
		if len(field.Comments) != 0 {
			if i != 0 {
				fmt.Fprintf(g.w, "\n")
			}
			generateComments(g.w, field.Comments, string(field.Name), string(field.Name))
		}
		// If field's name is "_", then use Go embedding syntax.
		if isEmbeddedField(field) {
			fmt.Fprintf(g.w, " %s\n", g.md.typeOf(field).asField())
		} else {
			fmt.Fprintf(g.w, " %s %s\n", field.Name, g.md.typeOf(field).asField())
		}
	}
	fmt.Fprintf(g.w, "}\n\n")
}
// genExprStruct generates the struct type definition for an expression:
//
//   type SelectExpr struct {
//     Input   RelExpr
//     Filters FiltersExpr
//
//     grp  exprGroup
//     next RelExpr
//   }
//
// The trailing unexported fields depend on the define's tags: scalars get a
// Typ and either scalar props or an id, enforcers wrap an Input, and
// relational expressions get group/next linkage.
func (g *exprsGen) genExprStruct(define *lang.DefineExpr) {
	opTyp := g.md.typeOf(define)
	fmt.Fprintf(g.w, "type %s struct {\n", opTyp.name)
	// Generate child fields.
	for i, field := range define.Fields {
		// Generate comment for the struct field.
		if len(field.Comments) != 0 {
			if i != 0 {
				fmt.Fprintf(g.w, "\n")
			}
			generateComments(g.w, field.Comments, string(field.Name), string(field.Name))
		}
		// If field's name is "_", then use Go embedding syntax.
		if isEmbeddedField(field) {
			fmt.Fprintf(g.w, " %s\n", g.md.typeOf(field).asField())
		} else {
			fieldName := g.md.fieldName(field)
			fmt.Fprintf(g.w, " %s %s\n", fieldName, g.md.typeOf(field).asField())
		}
	}
	if define.Tags.Contains("Scalar") {
		fmt.Fprintf(g.w, "\n")
		if g.needsDataTypeField(define) {
			fmt.Fprintf(g.w, " Typ *types.T\n")
		}
		// List items store scalar props inline; other scalars get an id.
		if define.Tags.Contains("ListItem") {
			if define.Tags.Contains("ScalarProps") {
				fmt.Fprintf(g.w, " scalar props.Scalar\n")
			}
		} else {
			fmt.Fprintf(g.w, " id opt.ScalarID\n")
		}
	} else if define.Tags.Contains("Enforcer") {
		fmt.Fprintf(g.w, " Input RelExpr\n")
		fmt.Fprintf(g.w, " best bestProps\n")
	} else {
		fmt.Fprintf(g.w, "\n")
		fmt.Fprintf(g.w, " grp exprGroup\n")
		fmt.Fprintf(g.w, " next RelExpr\n")
	}
	fmt.Fprintf(g.w, "}\n\n")
}
// genExprFuncs generates the methods for an expression, including those from
// the Expr, RelExpr, and ScalarExpr interfaces. Scalar defines additionally
// get ID/DataType (and optionally scalar-props) methods; relational defines
// get the group/bestProps linkage methods instead.
func (g *exprsGen) genExprFuncs(define *lang.DefineExpr) {
	opTyp := g.md.typeOf(define)
	childFields := g.md.childFields(define)
	privateField := g.md.privateField(define)
	if define.Tags.Contains("Scalar") {
		fmt.Fprintf(g.w, "var _ opt.ScalarExpr = &%s{}\n\n", opTyp.name)
		// Generate the ID method.
		fmt.Fprintf(g.w, "func (e *%s) ID() opt.ScalarID {\n", opTyp.name)
		// List items have no interned id of their own.
		if define.Tags.Contains("ListItem") {
			fmt.Fprintf(g.w, "  return 0\n")
		} else {
			fmt.Fprintf(g.w, "  return e.id\n")
		}
		fmt.Fprintf(g.w, "}\n\n")
	} else {
		fmt.Fprintf(g.w, "var _ RelExpr = &%s{}\n\n", opTyp.name)
	}
	// Generate the Op method.
	fmt.Fprintf(g.w, "func (e *%s) Op() opt.Operator {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return opt.%sOp\n", define.Name)
	fmt.Fprintf(g.w, "}\n\n")
	// Generate the ChildCount method.
	fmt.Fprintf(g.w, "func (e *%s) ChildCount() int {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return %d\n", len(childFields))
	fmt.Fprintf(g.w, "}\n\n")
	// Generate the Child method.
	fmt.Fprintf(g.w, "func (e *%s) Child(nth int) opt.Expr {\n", opTyp.name)
	if len(childFields) > 0 {
		fmt.Fprintf(g.w, "  switch nth {\n")
		n := 0
		for _, field := range childFields {
			fieldName := g.md.fieldName(field)
			fieldType := g.md.typeOf(field)
			// Use dynamicFieldLoadPrefix, since we're loading from field and
			// returning as dynamic opt.Expr type.
			fmt.Fprintf(g.w, "  case %d:\n", n)
			fmt.Fprintf(g.w, "    return %se.%s\n", dynamicFieldLoadPrefix(fieldType), fieldName)
			n++
		}
		fmt.Fprintf(g.w, "  }\n")
	}
	fmt.Fprintf(g.w, "  panic(errors.AssertionFailedf(\"child index out of range\"))\n")
	fmt.Fprintf(g.w, "}\n\n")
	// Generate the Private method.
	fmt.Fprintf(g.w, "func (e *%s) Private() interface{} {\n", opTyp.name)
	if privateField != nil {
		fieldName := g.md.fieldName(privateField)
		fieldType := g.md.typeOf(privateField)
		// Use dynamicFieldLoadPrefix, since we're loading from field and returning
		// as dynamic interface{} type.
		fmt.Fprintf(g.w, "  return %se.%s\n", dynamicFieldLoadPrefix(fieldType), fieldName)
	} else {
		fmt.Fprintf(g.w, "  return nil\n")
	}
	fmt.Fprintf(g.w, "}\n\n")
	// Generate the String method.
	fmt.Fprintf(g.w, "func (e *%s) String() string {\n", opTyp.name)
	if define.Tags.Contains("Scalar") {
		fmt.Fprintf(g.w, "  f := MakeExprFmtCtx(ExprFmtHideQualifications, nil, nil)\n")
	} else {
		fmt.Fprintf(g.w, "  f := MakeExprFmtCtx(ExprFmtHideQualifications, e.Memo(), nil)\n")
	}
	fmt.Fprintf(g.w, "  f.FormatExpr(e)\n")
	fmt.Fprintf(g.w, "  return f.Buffer.String()\n")
	fmt.Fprintf(g.w, "}\n\n")
	// Generate the SetChild method.
	fmt.Fprintf(g.w, "func (e *%s) SetChild(nth int, child opt.Expr) {\n", opTyp.name)
	if len(childFields) > 0 {
		fmt.Fprintf(g.w, "  switch nth {\n")
		n := 0
		for _, field := range childFields {
			fieldTyp := g.md.typeOf(field)
			fieldName := g.md.fieldName(field)
			// Use castFromDynamicParam and then fieldStorePrefix, in order to first
			// cast from the dynamic param type (opt.Expr) to the static param type,
			// and then to store that into the field.
			fmt.Fprintf(g.w, "  case %d:\n", n)
			fmt.Fprintf(g.w, "    %se.%s = %s\n", fieldStorePrefix(fieldTyp), fieldName,
				castFromDynamicParam("child", fieldTyp))
			fmt.Fprintf(g.w, "    return\n")
			n++
		}
		fmt.Fprintf(g.w, "  }\n")
	}
	fmt.Fprintf(g.w, "  panic(errors.AssertionFailedf(\"child index out of range\"))\n")
	fmt.Fprintf(g.w, "}\n\n")
	if define.Tags.Contains("Scalar") {
		// Generate the DataType method.
		fmt.Fprintf(g.w, "func (e *%s) DataType() *types.T {\n", opTyp.name)
		if dataType, ok := g.constDataType(define); ok {
			fmt.Fprintf(g.w, "  return %s\n", dataType)
		} else {
			fmt.Fprintf(g.w, "  return e.Typ\n")
		}
		fmt.Fprintf(g.w, "}\n\n")
		// Generate the PopulateProps and ScalarProps methods.
		if define.Tags.Contains("ScalarProps") {
			fmt.Fprintf(g.w, "func (e *%s) PopulateProps(mem *Memo) {\n", opTyp.name)
			fmt.Fprintf(g.w, "  mem.logPropsBuilder.build%sProps(e, &e.scalar)\n", opTyp.name)
			fmt.Fprintf(g.w, "  e.scalar.Populated = true\n")
			fmt.Fprintf(g.w, "}\n\n")
			fmt.Fprintf(g.w, "func (e *%s) ScalarProps() *props.Scalar {\n", opTyp.name)
			fmt.Fprintf(g.w, "  return &e.scalar\n")
			fmt.Fprintf(g.w, "}\n\n")
		}
	} else {
		// Generate the Memo method.
		fmt.Fprintf(g.w, "func (e *%s) Memo() *Memo {\n", opTyp.name)
		fmt.Fprintf(g.w, "  return e.grp.memo()\n")
		fmt.Fprintf(g.w, "}\n\n")
		// Generate the Relational method.
		fmt.Fprintf(g.w, "func (e *%s) Relational() *props.Relational {\n", opTyp.name)
		fmt.Fprintf(g.w, "  return e.grp.relational()\n")
		fmt.Fprintf(g.w, "}\n\n")
		// Generate the FirstExpr method.
		fmt.Fprintf(g.w, "func (e *%s) FirstExpr() RelExpr {\n", opTyp.name)
		fmt.Fprintf(g.w, "  return e.grp.firstExpr()\n")
		fmt.Fprintf(g.w, "}\n\n")
		// Generate the NextExpr method.
		fmt.Fprintf(g.w, "func (e *%s) NextExpr() RelExpr {\n", opTyp.name)
		fmt.Fprintf(g.w, "  return e.next\n")
		fmt.Fprintf(g.w, "}\n\n")
		// Generate the RequiredPhysical method.
		fmt.Fprintf(g.w, "func (e *%s) RequiredPhysical() *physical.Required {\n", opTyp.name)
		fmt.Fprintf(g.w, "  return e.grp.bestProps().required\n")
		fmt.Fprintf(g.w, "}\n\n")
		// Generate the ProvidedPhysical method.
		fmt.Fprintf(g.w, "func (e *%s) ProvidedPhysical() *physical.Provided {\n", opTyp.name)
		fmt.Fprintf(g.w, "  return &e.grp.bestProps().provided\n")
		fmt.Fprintf(g.w, "}\n\n")
		// Generate the Cost method.
		fmt.Fprintf(g.w, "func (e *%s) Cost() Cost {\n", opTyp.name)
		fmt.Fprintf(g.w, "  return e.grp.bestProps().cost\n")
		fmt.Fprintf(g.w, "}\n\n")
		// Generate the group method.
		fmt.Fprintf(g.w, "func (e *%s) group() exprGroup {\n", opTyp.name)
		fmt.Fprintf(g.w, "  return e.grp\n")
		fmt.Fprintf(g.w, "}\n\n")
		// Generate the bestProps method.
		fmt.Fprintf(g.w, "func (e *%s) bestProps() *bestProps {\n", opTyp.name)
		fmt.Fprintf(g.w, "  return e.grp.bestProps()\n")
		fmt.Fprintf(g.w, "}\n\n")
		// Generate the setNext method.
		fmt.Fprintf(g.w, "func (e *%s) setNext(member RelExpr) {\n", opTyp.name)
		fmt.Fprintf(g.w, "  if e.next != nil {\n")
		fmt.Fprintf(g.w, "    panic(errors.AssertionFailedf(\"expression already has its next defined: %%s\", e))\n")
		fmt.Fprintf(g.w, "  }\n")
		fmt.Fprintf(g.w, "  e.next = member\n")
		fmt.Fprintf(g.w, "}\n\n")
		// Generate the setGroup method.
		fmt.Fprintf(g.w, "func (e *%s) setGroup(member RelExpr) {\n", opTyp.name)
		fmt.Fprintf(g.w, "  if e.grp != nil {\n")
		fmt.Fprintf(g.w, "    panic(errors.AssertionFailedf(\"expression is already in a group: %%s\", e))\n")
		fmt.Fprintf(g.w, "  }\n")
		fmt.Fprintf(g.w, "  e.grp = member.group()\n")
		fmt.Fprintf(g.w, "  LastGroupMember(member).setNext(e)\n")
		fmt.Fprintf(g.w, "}\n\n")
	}
}
// genEnforcerFuncs generates the methods for an enforcer operator, including
// those from the Expr and RelExpr interfaces. Enforcers wrap exactly one
// relational input and delegate most property accessors to it, but carry
// their own bestProps (e.best).
func (g *exprsGen) genEnforcerFuncs(define *lang.DefineExpr) {
	opTyp := g.md.typeOf(define)

	// Generate the Op method.
	fmt.Fprintf(g.w, "func (e *%s) Op() opt.Operator {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return opt.%sOp\n", define.Name)
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the ChildCount method. Enforcers always have exactly one child.
	fmt.Fprintf(g.w, "func (e *%s) ChildCount() int {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return 1\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the Child method.
	fmt.Fprintf(g.w, "func (e *%s) Child(nth int) opt.Expr {\n", opTyp.name)
	fmt.Fprintf(g.w, "  if nth == 0 {\n")
	fmt.Fprintf(g.w, "    return e.Input\n")
	fmt.Fprintf(g.w, "  }\n")
	fmt.Fprintf(g.w, "  panic(errors.AssertionFailedf(\"child index out of range\"))\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the Private method. Enforcers never carry a private.
	fmt.Fprintf(g.w, "func (e *%s) Private() interface{} {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return nil\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the String method.
	fmt.Fprintf(g.w, "func (e *%s) String() string {\n", opTyp.name)
	fmt.Fprintf(g.w, "  f := MakeExprFmtCtx(ExprFmtHideQualifications, e.Memo(), nil)\n")
	fmt.Fprintf(g.w, "  f.FormatExpr(e)\n")
	fmt.Fprintf(g.w, "  return f.Buffer.String()\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the SetChild method.
	fmt.Fprintf(g.w, "func (e *%s) SetChild(nth int, child opt.Expr) {\n", opTyp.name)
	fmt.Fprintf(g.w, "  if nth == 0 {\n")
	fmt.Fprintf(g.w, "    e.Input = child.(RelExpr)\n")
	fmt.Fprintf(g.w, "    return\n")
	fmt.Fprintf(g.w, "  }\n")
	fmt.Fprintf(g.w, "  panic(errors.AssertionFailedf(\"child index out of range\"))\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the Memo method (delegates to the input).
	fmt.Fprintf(g.w, "func (e *%s) Memo() *Memo {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return e.Input.Memo()\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the Relational method (delegates to the input).
	fmt.Fprintf(g.w, "func (e *%s) Relational() *props.Relational {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return e.Input.Relational()\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the FirstExpr method.
	fmt.Fprintf(g.w, "func (e *%s) FirstExpr() RelExpr {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return e.Input.FirstExpr()\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the NextExpr method. An enforcer is never a group member, so it
	// has no successor.
	fmt.Fprintf(g.w, "func (e *%s) NextExpr() RelExpr {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return nil\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the RequiredPhysical method (enforcers keep their own best props).
	fmt.Fprintf(g.w, "func (e *%s) RequiredPhysical() *physical.Required {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return e.best.required\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the ProvidedPhysical method.
	fmt.Fprintf(g.w, "func (e *%s) ProvidedPhysical() *physical.Provided {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return &e.best.provided\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the Cost method.
	fmt.Fprintf(g.w, "func (e *%s) Cost() Cost {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return e.best.cost\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the bestProps method.
	fmt.Fprintf(g.w, "func (e *%s) bestProps() *bestProps {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return &e.best\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the group method (delegates to the input).
	fmt.Fprintf(g.w, "func (e *%s) group() exprGroup {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return e.Input.group()\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the setNext method.
	fmt.Fprintf(g.w, "func (e *%s) setNext(member RelExpr) {\n", opTyp.name)
	fmt.Fprintf(g.w, "  panic(errors.AssertionFailedf(\"setNext cannot be called on enforcers\"))\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the setGroup method.
	//
	// BUG FIX: the parameter must be RelExpr, matching the setGroup signature
	// generated for ordinary group members above; the previous exprGroup
	// parameter produced a method that did not satisfy the RelExpr interface.
	fmt.Fprintf(g.w, "func (e *%s) setGroup(member RelExpr) {\n", opTyp.name)
	fmt.Fprintf(g.w, "  panic(errors.AssertionFailedf(\"setGroup cannot be called on enforcers\"))\n")
	fmt.Fprintf(g.w, "}\n\n")
}
// genListExprFuncs generates the methods for a list expression, including those
// from the Expr and ScalarExpr interfaces.
func (g *exprsGen) genListExprFuncs(define *lang.DefineExpr) {
	if define.Tags.Contains("Relational") {
		panic("relational list operators are not supported; use scalar list child instead")
	}
	opTyp := g.md.typeOf(define)

	fmt.Fprintf(g.w, "var _ opt.ScalarExpr = &%s{}\n\n", opTyp.name)

	// Generate the ID method. Lists are not interned, so they have no id.
	// BUG FIX: terminate the panic statement with a newline so the generated
	// method body is formatted like every other emitted method (previously the
	// closing brace landed on the same line as the panic).
	fmt.Fprintf(g.w, "func (e *%s) ID() opt.ScalarID {\n", opTyp.name)
	fmt.Fprintf(g.w, "  panic(errors.AssertionFailedf(\"lists have no id\"))\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the Op method.
	fmt.Fprintf(g.w, "func (e *%s) Op() opt.Operator {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return opt.%sOp\n", define.Name)
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the ChildCount method.
	fmt.Fprintf(g.w, "func (e *%s) ChildCount() int {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return len(*e)\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the Child method.
	// Use dynamicFieldLoadPrefix, since the field is being passed as the dynamic
	// opt.Expr type.
	fmt.Fprintf(g.w, "func (e *%s) Child(nth int) opt.Expr {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return %s(*e)[nth]\n", dynamicFieldLoadPrefix(opTyp.listItemType))
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the Private method.
	fmt.Fprintf(g.w, "func (e *%s) Private() interface{} {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return nil\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the String method.
	fmt.Fprintf(g.w, "func (e *%s) String() string {\n", opTyp.name)
	fmt.Fprintf(g.w, "  f := MakeExprFmtCtx(ExprFmtHideQualifications, nil, nil)\n")
	fmt.Fprintf(g.w, "  f.FormatExpr(e)\n")
	fmt.Fprintf(g.w, "  return f.Buffer.String()\n")
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the SetChild method.
	// Use castFromDynamicParam and then fieldStorePrefix, in order to first cast
	// from the dynamic param type (opt.Expr) to the static param type, and then
	// to store that into the field.
	fmt.Fprintf(g.w, "func (e *%s) SetChild(nth int, child opt.Expr) {\n", opTyp.name)
	fmt.Fprintf(g.w, "  (*e)[nth] = %s%s\n", fieldStorePrefix(opTyp.listItemType),
		castFromDynamicParam("child", opTyp.listItemType))
	fmt.Fprintf(g.w, "}\n\n")

	// Generate the DataType method. Lists have no single data type.
	fmt.Fprintf(g.w, "func (e *%s) DataType() *types.T {\n", opTyp.name)
	fmt.Fprintf(g.w, "  return types.Any\n")
	fmt.Fprintf(g.w, "}\n\n")
}
// genMemoizeFuncs generates Memoize methods on the memo. The Memoize methods
// check whether the expression is already part of the memo; if not, a new memo
// group is created for the expression.
func (g *exprsGen) genMemoizeFuncs() {
	// Enforcers, lists, list items, and privates never get their own Memoize
	// method.
	defines := g.compiled.Defines.
		WithoutTag("Enforcer").
		WithoutTag("List").
		WithoutTag("ListItem").
		WithoutTag("Private")

	for _, define := range defines {
		opTyp := g.md.typeOf(define)
		fields := g.md.childAndPrivateFields(define)

		// Emit the signature: one parameter per child/private field.
		fmt.Fprintf(g.w, "func (m *Memo) Memoize%s(\n", define.Name)
		for _, field := range fields {
			fieldTyp := g.md.typeOf(field)
			fieldName := g.md.fieldName(field)
			fmt.Fprintf(g.w, "  %s %s,\n", unTitle(fieldName), fieldTyp.asParam())
		}
		// Scalar constructors return the concrete type; relational ones return
		// the RelExpr interface.
		if define.Tags.Contains("Scalar") {
			fmt.Fprintf(g.w, ") *%s {\n", opTyp.name)
		} else {
			fmt.Fprintf(g.w, ") RelExpr {\n")
		}

		// Operators without fields share a singleton instance.
		if len(define.Fields) == 0 {
			fmt.Fprintf(g.w, "  return %sSingleton\n", define.Name)
			fmt.Fprintf(g.w, "}\n\n")
			continue
		}

		// Construct a new expression and add it to the interning map.
		if define.Tags.Contains("Scalar") {
			fmt.Fprintf(g.w, "  const size = int64(unsafe.Sizeof(%s{}))\n", opTyp.name)
			fmt.Fprintf(g.w, "  e := &%s{\n", opTyp.name)
		} else {
			// Relational expressions are allocated inside their group struct so
			// the first member and the group properties live together.
			groupName := fmt.Sprintf("%sGroup", unTitle(string(define.Name)))
			fmt.Fprintf(g.w, "  const size = int64(unsafe.Sizeof(%s{}))\n", groupName)
			fmt.Fprintf(g.w, "  grp := &%s{mem: m, first: %s{\n", groupName, opTyp.name)
		}
		for _, field := range fields {
			fieldTyp := g.md.typeOf(field)
			fieldName := g.md.fieldName(field)

			// Use fieldStorePrefix since a value with a static param type is being
			// stored as a field type.
			fmt.Fprintf(g.w, "    %s: %s%s,\n", fieldName, fieldStorePrefix(fieldTyp), unTitle(fieldName))
		}
		if define.Tags.Contains("Scalar") {
			fmt.Fprintf(g.w, "    id: m.NextID(),\n")
			fmt.Fprintf(g.w, "  }\n")
			if g.needsDataTypeField(define) {
				fmt.Fprintf(g.w, "  e.Typ = InferType(m, e)\n")
			}
			fmt.Fprintf(g.w, "  interned := m.interner.Intern%s(e)\n", define.Name)
		} else {
			fmt.Fprintf(g.w, "  }}\n")
			fmt.Fprintf(g.w, "  e := &grp.first\n")
			fmt.Fprintf(g.w, "  e.grp = grp\n")
			fmt.Fprintf(g.w, "  interned := m.interner.Intern%s(e)\n", define.Name)
		}

		// Build relational props, track memory usage, and check consistency if
		// expression was not already interned.
		fmt.Fprintf(g.w, "  if interned == e {\n")
		fmt.Fprintf(g.w, "    if m.newGroupFn != nil {\n")
		fmt.Fprintf(g.w, "      m.newGroupFn(e)\n")
		fmt.Fprintf(g.w, "    }\n")
		if g.md.hasUnexportedFields(define) {
			fmt.Fprintf(g.w, "    e.initUnexportedFields(m)\n")
		}
		if !define.Tags.Contains("Scalar") {
			fmt.Fprintf(g.w, "    m.logPropsBuilder.build%sProps(e, &grp.rel)\n", define.Name)
			fmt.Fprintf(g.w, "    grp.rel.Populated = true\n")
		}
		fmt.Fprintf(g.w, "    m.memEstimate += size\n")
		fmt.Fprintf(g.w, "    m.CheckExpr(e)\n")
		fmt.Fprintf(g.w, "  }\n")
		if define.Tags.Contains("Scalar") {
			fmt.Fprintf(g.w, "  return interned\n")
		} else {
			// Return the normalized expression if this is a relational expr.
			fmt.Fprintf(g.w, "  return interned.FirstExpr()\n")
		}
		fmt.Fprintf(g.w, "}\n\n")
	}
}
// genAddToGroupFuncs generates AddToGroup methods on the memo, one per
// relational (non-list) operator.
func (g *exprsGen) genAddToGroupFuncs() {
	defines := g.compiled.Defines.WithTag("Relational").WithoutTag("List").WithoutTag("ListItem")
	for _, define := range defines {
		opTyp := g.md.typeOf(define)

		// The Add...ToGroup functions add a (possibly non-normalized) expression
		// to a memo group. They operate like this:
		//
		// Attempt to intern the expression. This will either give back the
		// original expression, meaning we had not previously interned it, or it
		// will give back a previously interned version.
		//
		// If we hadn't ever seen the expression before, then add it to the group
		// and move on.
		//
		// If we *had* seen it before, check if it is in the same group as the one
		// we're attempting to add it to. If it's in the same group, then this is
		// fine, move along. This happens, for example, if we try to apply
		// CommuteJoin twice.
		//
		// If it's in a different group, then we've learned something interesting:
		// two groups which we previously thought were distinct are actually
		// equivalent. One approach here would be to merge the two groups into a
		// single group, since we've proven that they're equivalent. We do
		// something simpler right now, which is to just bail on trying to add the
		// new expression, leaving the existing instance of it unchanged in its old
		// group. This can result in some expressions not getting fully explored,
		// but we do our best to make this outcome as much of an edge-case as
		// possible, so hopefully this is fine in almost every case.

		// TODO(justin): add telemetry for when group collisions happen. If this is
		// ever happening frequently than that is a bug.
		fmt.Fprintf(g.w, "func (m *Memo) Add%sToGroup(e *%s, grp RelExpr) *%s {\n",
			define.Name, opTyp.name, opTyp.name)
		fmt.Fprintf(g.w, "  const size = int64(unsafe.Sizeof(%s{}))\n", opTyp.name)
		fmt.Fprintf(g.w, "  interned := m.interner.Intern%s(e)\n", define.Name)
		// First-time interning: claim the expression for grp and account for it.
		fmt.Fprintf(g.w, "  if interned == e {\n")
		if g.md.hasUnexportedFields(define) {
			fmt.Fprintf(g.w, "    e.initUnexportedFields(m)\n")
		}
		fmt.Fprintf(g.w, "    e.setGroup(grp)\n")
		fmt.Fprintf(g.w, "    m.memEstimate += size\n")
		fmt.Fprintf(g.w, "    m.CheckExpr(e)\n")
		fmt.Fprintf(g.w, "  } else if interned.group() != grp.group() {\n")
		fmt.Fprintf(g.w, "    // This is a group collision, do nothing.\n")
		fmt.Fprintf(g.w, "    return nil\n")
		fmt.Fprintf(g.w, "  }\n")
		fmt.Fprintf(g.w, "  return interned\n")
		fmt.Fprintf(g.w, "}\n\n")
	}
}
// genInternFuncs generates methods on the interner: a dynamic InternExpr
// dispatcher plus one strongly-typed Intern method per operator.
func (g *exprsGen) genInternFuncs() {
	// InternExpr type-switches on the dynamic expression type and forwards to
	// the operator-specific Intern method.
	fmt.Fprintf(g.w, "func (in *interner) InternExpr(e opt.Expr) opt.Expr {\n")
	fmt.Fprintf(g.w, "  switch t := e.(type) {\n")
	for _, define := range g.compiled.Defines.WithoutTag("Enforcer").WithoutTag("Private") {
		opTyp := g.md.typeOf(define)
		fmt.Fprintf(g.w, "  case *%s:\n", opTyp.name)
		fmt.Fprintf(g.w, "    return in.Intern%s(t)\n", define.Name)
	}
	fmt.Fprintf(g.w, "  default:\n")
	fmt.Fprintf(g.w, "    panic(errors.AssertionFailedf(\"unhandled op: %%s\", e.Op()))\n")
	fmt.Fprintf(g.w, "  }\n")
	fmt.Fprintf(g.w, "}\n\n")

	for _, define := range g.compiled.Defines.WithoutTag("Enforcer").WithoutTag("Private") {
		opTyp := g.md.typeOf(define)
		fmt.Fprintf(g.w, "func (in *interner) Intern%s(val *%s) *%s {\n",
			define.Name, opTyp.name, opTyp.name)

		fmt.Fprintf(g.w, "  in.hasher.Init()\n")

		// Generate code to compute hash.
		fmt.Fprintf(g.w, "  in.hasher.HashOperator(opt.%sOp)\n", define.Name)
		if opTyp.listItemType != nil {
			// List operators hash the entire list with a single call.
			fmt.Fprintf(g.w, "  in.hasher.Hash%s(*val)\n", title(opTyp.friendlyName))
		} else {
			for _, field := range expandFields(g.compiled, define) {
				fieldName := g.md.fieldName(field)
				// Unexported (generator-managed) fields take no part in hashing
				// or equality.
				if !isExportedField(field) {
					continue
				}

				fieldTyp := g.md.typeOf(field)
				if fieldTyp.usePointerIntern {
					fmt.Fprintf(g.w, "  in.hasher.HashPointer(unsafe.Pointer(val.%s))\n", fieldName)
				} else {
					fmt.Fprintf(g.w, "  in.hasher.Hash%s(val.%s)\n", title(fieldTyp.friendlyName), fieldName)
				}
			}
		}
		fmt.Fprintf(g.w, "\n")

		// Generate code to check for existing item with same hash.
		fmt.Fprintf(g.w, "  in.cache.Start(in.hasher.hash)\n")
		fmt.Fprintf(g.w, "  for in.cache.Next() {\n")
		fmt.Fprintf(g.w, "    if existing, ok := in.cache.Item().(*%s); ok {\n", opTyp.name)

		// Generate code to check expression equality when there's an existing item.
		// `first` tracks whether any condition has been emitted yet, so the
		// "if " prefix and " && " separators land in the right places.
		first := true
		if opTyp.listItemType != nil {
			first = false
			fmt.Fprintf(g.w, "      if in.hasher.Is%sEqual(*val, *existing)", title(opTyp.friendlyName))
		} else {
			for _, field := range expandFields(g.compiled, define) {
				fieldName := g.md.fieldName(field)
				if !isExportedField(field) {
					continue
				}

				if !first {
					fmt.Fprintf(g.w, " && \n        ")
				} else {
					fmt.Fprintf(g.w, "      if ")
					first = false
				}

				fieldTyp := g.md.typeOf(field)
				if fieldTyp.usePointerIntern {
					fmt.Fprintf(g.w, "in.hasher.IsPointerEqual(unsafe.Pointer(val.%s), unsafe.Pointer(existing.%s))",
						fieldName, fieldName)
				} else {
					fmt.Fprintf(g.w, "in.hasher.Is%sEqual(val.%s, existing.%s)",
						title(fieldTyp.friendlyName), fieldName, fieldName)
				}
			}
		}
		if !first {
			fmt.Fprintf(g.w, " {\n")
			fmt.Fprintf(g.w, "        return existing\n")
			fmt.Fprintf(g.w, "      }\n")
		} else {
			// Handle expressions with no children.
			fmt.Fprintf(g.w, "      return existing\n")
		}
		fmt.Fprintf(g.w, "    }\n")
		fmt.Fprintf(g.w, "  }\n\n")

		// Generate code to add expression to the cache.
		fmt.Fprintf(g.w, "  in.cache.Add(val)\n")
		fmt.Fprintf(g.w, "  return val\n")
		fmt.Fprintf(g.w, "}\n\n")
	}
}
// genBuildPropsFunc generates a buildProps method for logicalPropsBuilder that
// dispatches to the strongly-typed buildXXXProps methods from a RelExpr.
func (g *exprsGen) genBuildPropsFunc() {
	w := g.w
	fmt.Fprintf(w, "func (b *logicalPropsBuilder) buildProps(e RelExpr, rel *props.Relational) {\n")
	fmt.Fprintf(w, "  switch t := e.(type) {\n")

	// One case per relational operator, each forwarding to that operator's
	// dedicated props builder.
	relational := g.compiled.Defines.WithTag("Relational")
	for _, def := range relational {
		typ := g.md.typeOf(def)
		fmt.Fprintf(w, "  case *%s:\n", typ.name)
		fmt.Fprintf(w, "    b.build%sProps(t, rel)\n", def.Name)
	}

	fmt.Fprintf(w, "  default:\n")
	fmt.Fprintf(w, "    panic(errors.AssertionFailedf(\"unhandled type: %%s\", t.Op()))\n")
	fmt.Fprintf(w, "  }\n")
	fmt.Fprintf(w, "}\n\n")
}
// needsDataTypeField reports whether the generated struct for define must
// carry an inferred Typ field: true unless the operator has a constant data
// type or already declares an explicit "Typ Type" field.
func (g *exprsGen) needsDataTypeField(define *lang.DefineExpr) bool {
	if _, isConst := g.constDataType(define); isConst {
		return false
	}
	for _, f := range expandFields(g.compiled, define) {
		if f.Name == "Typ" && f.Type == "Type" {
			return false
		}
	}
	return true
}
// constDataType returns the fixed data-type expression for operators tagged
// with a constant type (Bool, Int, or Float), and ok=false otherwise.
func (g *exprsGen) constDataType(define *lang.DefineExpr) (_ string, ok bool) {
	switch {
	case define.Tags.Contains("Bool"):
		return "types.Bool", true
	case define.Tags.Contains("Int"):
		return "types.Int", true
	case define.Tags.Contains("Float"):
		return "types.Float", true
	}
	return "", false
}
|
package main
import (
"encoding/json"
"fmt"
"log"
"net/http"
"sessions-with-redis/session"
"time"
)
// ses is the shared session store used by every handler.
var ses session.Session

// Credentials is the JSON payload expected by the /signin endpoint.
type Credentials struct {
	Password string `json:"password"`
	Username string `json:"username"`
}

// users maps usernames to their expected passwords. Demo-only in-memory
// store; presumably replaced by a real user database in production — TODO confirm.
var users = map[string]string{
	"user1": "password1",
	"user2": "password2",
}
// main configures the session store, registers the HTTP handlers, and starts
// the server on port 8080.
func main() {
	ses.DefaultSessionTimeout = 120
	ses.HmacKey = "my-secret-key"

	// Wire each route to its handler.
	routes := map[string]http.HandlerFunc{
		"/signin":  Signin,
		"/welcome": Welcome,
		"/refresh": Refresh,
	}
	for pattern, handler := range routes {
		http.HandleFunc(pattern, handler)
	}

	log.Fatalln(http.ListenAndServe("0.0.0.0:8080", nil))
}
// Signin validates the posted credentials, creates a session, and sets the
// session_token cookie on success.
func Signin(w http.ResponseWriter, r *http.Request) {
	var creds Credentials
	// Get the JSON body and decode into credentials.
	if err := json.NewDecoder(r.Body).Decode(&creds); err != nil {
		// If the structure of the body is wrong, return an HTTP error.
		w.WriteHeader(http.StatusBadRequest)
		return
	}

	// Get the expected password from our in-memory map.
	expectedPassword, ok := users[creds.Username]

	// If a password exists for the given user AND it matches the password we
	// received, we can move ahead; otherwise return an "Unauthorized" status.
	if !ok || expectedPassword != creds.Password {
		w.WriteHeader(http.StatusUnauthorized)
		return
	}

	sessionToken, err := ses.Create(creds.Username)
	if err != nil {
		// BUG FIX: previously execution fell through after responding 500 and
		// set a cookie containing an empty token. Abort instead.
		log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	// Finally, set the client cookie for "session_token" as the session token
	// we just generated, with the same expiry as the session store.
	http.SetCookie(w, &http.Cookie{
		Name:    "session_token",
		Value:   sessionToken,
		Expires: time.Now().Add(time.Duration(ses.DefaultSessionTimeout) * time.Second),
	})
}
// Welcome greets the user identified by the session_token cookie.
func Welcome(w http.ResponseWriter, r *http.Request) {
	c, err := r.Cookie("session_token")
	if err != nil {
		if err == http.ErrNoCookie {
			// If the cookie is not set, return an unauthorized status.
			w.WriteHeader(http.StatusUnauthorized)
			return
		}
		// For any other type of error, return a bad request status.
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	sessionToken := c.Value

	response, err := ses.Get(sessionToken)
	if err != nil {
		// BUG FIX: the old code kept going after responding 500 and wrote the
		// welcome body on top of the error response. Abort instead.
		log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	// Finally, return the welcome message to the user.
	if _, err := w.Write([]byte(fmt.Sprintf("Welcome %s!", response))); err != nil {
		log.Println(err)
	}
}
// Refresh rotates the caller's session token and re-issues the cookie.
func Refresh(w http.ResponseWriter, r *http.Request) {
	c, err := r.Cookie("session_token")
	if err != nil {
		if err == http.ErrNoCookie {
			// No cookie means the caller was never signed in.
			w.WriteHeader(http.StatusUnauthorized)
			return
		}
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	sessionToken := c.Value

	newSessionToken, err := ses.Update(sessionToken)
	if err != nil {
		// BUG FIX: previously execution fell through after responding 500 and
		// set a cookie containing an empty token. Abort instead.
		log.Println(err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}

	// Set the new token as the user's `session_token` cookie. Use the store's
	// configured timeout rather than a hard-coded 120s, matching Signin.
	http.SetCookie(w, &http.Cookie{
		Name:    "session_token",
		Value:   newSessionToken,
		Expires: time.Now().Add(time.Duration(ses.DefaultSessionTimeout) * time.Second),
	})
}
// webErrAndLog logs err and responds with HTTP 500 when err is non-nil.
// It reports whether an error was handled so callers can abort their own
// processing; existing call sites that ignore the result keep compiling
// and behave exactly as before.
func webErrAndLog(w http.ResponseWriter, err error) bool {
	if err == nil {
		return false
	}
	log.Println(err)
	w.WriteHeader(http.StatusInternalServerError)
	return true
}
|
package util
import (
"os"
"os/signal"
"syscall"
)
// OnExit registers cleanup functions that run when the process receives
// SIGINT (os.Interrupt) or SIGTERM. The wait happens in a background
// goroutine so the caller is not blocked.
func OnExit(cleanups ...func()) {
	go onExit(cleanups...)
}

// onExit blocks until an interrupt/terminate signal arrives, then invokes
// the cleanups in registration order.
func onExit(cleanups ...func()) {
	// BUG FIX: the channel must be buffered (capacity >= 1). The signal
	// package sends without blocking, so an unbuffered channel can drop a
	// signal delivered before we reach the receive below (go vet flags this).
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
	<-ch
	for _, cleanup := range cleanups {
		cleanup()
	}
}
|
package number
import (
"testing"
)
// TestCalNumberBase 测试常规的加减乘除
// TestCalNumberBase 测试常规的加减乘除 — exercises set/plus/minus/multiply/
// divide plus the divide-by-zero guard and the Enough comparison.
func TestCalNumberBase(t *testing.T) {
	calNumber := NewCalNumber(0)

	calNumber.SetValue(100) // value = 100
	if !calNumber.Equal(100) {
		t.Errorf("!calNumber.Equal(%d)", 100)
	}
	calNumber.Plus(100) // value = 100 + 100
	if !calNumber.Equal(200) {
		t.Errorf("!calNumber.Equal(%d)", 200)
	}
	calNumber.Minus(150) // value = 200 - 150
	if !calNumber.Equal(50) {
		t.Errorf("!calNumber.Equal(%d)", 50)
	}
	calNumber.Multi(10) // value = 50 * 10
	if !calNumber.Equal(500) {
		t.Errorf("!calNumber.Equal(%d)", 500)
	}
	calNumber.Divide(100) // value = 500 / 100
	if !calNumber.Equal(5) {
		t.Errorf("!calNumber.Equal(%d)", 5)
	}
	// Dividing by zero is expected to leave the value unchanged.
	calNumber.Divide(0) // value = 5
	if !calNumber.Equal(5) {
		t.Errorf("!calNumber.Equal(%d)", 5)
	}
	if !calNumber.Enough(5) { // value >= 5
		// BUG FIX: this branch tests Enough, so the failure message must name
		// Enough, not Equal (copy-paste error in the original).
		t.Errorf("!calNumber.Enough(%d)", 5)
	}
}
|
// SPDX-FileCopyrightText: 2019 The Go Language Server Authors
// SPDX-License-Identifier: BSD-3-Clause
//go:build gojay
// +build gojay
package protocol
import (
"github.com/francoispqt/gojay"
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *Position) MarshalJSONObject(enc *gojay.Encoder) {
	enc.Uint32Key(keyLine, v.Line)
	enc.Uint32Key(keyCharacter, v.Character)
}

// IsNil reports whether the receiver is nil.
func (v *Position) IsNil() bool { return v == nil }

// UnmarshalJSONObject implements gojay.UnmarshalerJSONObject.
func (v *Position) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	if k == keyLine {
		return dec.Uint32(&v.Line)
	}
	if k == keyCharacter {
		return dec.Uint32(&v.Character)
	}
	return nil
}

// NKeys returns the number of keys to unmarshal.
func (v *Position) NKeys() int { return 2 }

// compile-time check that Position implements the gojay.MarshalerJSONObject
// and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*Position)(nil)
	_ gojay.UnmarshalerJSONObject = (*Position)(nil)
)
// Positions represents a slice of Position.
type Positions []Position

// MarshalJSONArray implements gojay.MarshalerJSONArray.
func (v Positions) MarshalJSONArray(enc *gojay.Encoder) {
	for i := 0; i < len(v); i++ {
		enc.Object(&v[i])
	}
}

// IsNil reports whether the slice is empty.
func (v Positions) IsNil() bool { return len(v) == 0 }

// UnmarshalJSONArray implements gojay.UnmarshalerJSONArray.
func (v *Positions) UnmarshalJSONArray(dec *gojay.Decoder) error {
	var elem Position
	if err := dec.Object(&elem); err != nil {
		return err
	}
	*v = append(*v, elem)
	return nil
}

// compile-time check that Positions implements the gojay.MarshalerJSONArray
// and gojay.UnmarshalerJSONArray interfaces.
var (
	_ gojay.MarshalerJSONArray   = (*Positions)(nil)
	_ gojay.UnmarshalerJSONArray = (*Positions)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *Range) MarshalJSONObject(enc *gojay.Encoder) {
	enc.ObjectKey(keyStart, &v.Start)
	enc.ObjectKey(keyEnd, &v.End)
}

// IsNil reports whether the receiver is nil.
func (v *Range) IsNil() bool { return v == nil }

// UnmarshalJSONObject implements gojay.UnmarshalerJSONObject.
func (v *Range) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	if k == keyStart {
		return dec.Object(&v.Start)
	}
	if k == keyEnd {
		return dec.Object(&v.End)
	}
	return nil
}

// NKeys returns the number of keys to unmarshal.
func (v *Range) NKeys() int { return 2 }

// compile-time check that Range implements the gojay.MarshalerJSONObject and
// gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*Range)(nil)
	_ gojay.UnmarshalerJSONObject = (*Range)(nil)
)
// Ranges represents a slice of Range.
type Ranges []Range

// MarshalJSONArray implements gojay.MarshalerJSONArray.
func (v Ranges) MarshalJSONArray(enc *gojay.Encoder) {
	for i := 0; i < len(v); i++ {
		enc.Object(&v[i])
	}
}

// IsNil reports whether the slice is empty.
func (v Ranges) IsNil() bool { return len(v) == 0 }

// UnmarshalJSONArray implements gojay.UnmarshalerJSONArray.
func (v *Ranges) UnmarshalJSONArray(dec *gojay.Decoder) error {
	var elem Range
	if err := dec.Object(&elem); err != nil {
		return err
	}
	*v = append(*v, elem)
	return nil
}

// compile-time check that Ranges implements the gojay.MarshalerJSONArray and
// gojay.UnmarshalerJSONArray interfaces.
var (
	_ gojay.MarshalerJSONArray   = (*Ranges)(nil)
	_ gojay.UnmarshalerJSONArray = (*Ranges)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *Location) MarshalJSONObject(enc *gojay.Encoder) {
	enc.StringKey(keyURI, string(v.URI))
	enc.ObjectKey(keyRange, &v.Range)
}

// IsNil reports whether the receiver is nil.
func (v *Location) IsNil() bool { return v == nil }

// UnmarshalJSONObject implements gojay.UnmarshalerJSONObject.
func (v *Location) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	if k == keyURI {
		return dec.String((*string)(&v.URI))
	}
	if k == keyRange {
		return dec.Object(&v.Range)
	}
	return nil
}

// NKeys returns the number of keys to unmarshal.
func (v *Location) NKeys() int { return 2 }

// compile-time check that Location implements the gojay.MarshalerJSONObject
// and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*Location)(nil)
	_ gojay.UnmarshalerJSONObject = (*Location)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *LocationLink) MarshalJSONObject(enc *gojay.Encoder) {
	enc.ObjectKeyOmitEmpty(keyOriginSelectionRange, v.OriginSelectionRange)
	enc.StringKey(keyTargetURI, string(v.TargetURI))
	enc.ObjectKey(keyTargetRange, &v.TargetRange)
	enc.ObjectKey(keyTargetSelectionRange, &v.TargetSelectionRange)
}

// IsNil reports whether the receiver is nil.
func (v *LocationLink) IsNil() bool { return v == nil }

// UnmarshalJSONObject implements gojay.UnmarshalerJSONObject.
func (v *LocationLink) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	if k == keyOriginSelectionRange {
		// Lazily allocate the optional range before decoding into it.
		if v.OriginSelectionRange == nil {
			v.OriginSelectionRange = &Range{}
		}
		return dec.Object(v.OriginSelectionRange)
	}
	if k == keyTargetURI {
		return dec.String((*string)(&v.TargetURI))
	}
	if k == keyTargetRange {
		return dec.Object(&v.TargetRange)
	}
	if k == keyTargetSelectionRange {
		return dec.Object(&v.TargetSelectionRange)
	}
	return nil
}

// NKeys returns the number of keys to unmarshal.
func (v *LocationLink) NKeys() int { return 4 }

// compile-time check that LocationLink implements the
// gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*LocationLink)(nil)
	_ gojay.UnmarshalerJSONObject = (*LocationLink)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *CodeDescription) MarshalJSONObject(enc *gojay.Encoder) {
	enc.StringKey(keyHref, string(v.Href))
}

// IsNil implements gojay.MarshalerJSONObject.
func (v *CodeDescription) IsNil() bool { return v == nil }

// UnmarshalJSONObject implements gojay.UnmarshalerJSONObject.
func (v *CodeDescription) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	switch k {
	case keyHref:
		return dec.String((*string)(&v.Href))
	}
	return nil
}

// NKeys implements gojay.UnmarshalerJSONObject.
func (v *CodeDescription) NKeys() int { return 1 }

// compile-time check that CodeDescription implements the
// gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*CodeDescription)(nil)
	_ gojay.UnmarshalerJSONObject = (*CodeDescription)(nil)
)
// DiagnosticRelatedInformations represents a slice of DiagnosticRelatedInformation.
type DiagnosticRelatedInformations []DiagnosticRelatedInformation

// MarshalJSONArray implements gojay.MarshalerJSONArray.
func (v DiagnosticRelatedInformations) MarshalJSONArray(enc *gojay.Encoder) {
	for i := 0; i < len(v); i++ {
		enc.Object(&v[i])
	}
}

// IsNil implements gojay.MarshalerJSONArray.
func (v DiagnosticRelatedInformations) IsNil() bool { return len(v) == 0 }

// UnmarshalJSONArray implements gojay.UnmarshalerJSONArray.
func (v *DiagnosticRelatedInformations) UnmarshalJSONArray(dec *gojay.Decoder) error {
	var elem DiagnosticRelatedInformation
	if err := dec.Object(&elem); err != nil {
		return err
	}
	*v = append(*v, elem)
	return nil
}

// compile-time check that DiagnosticRelatedInformations implements the
// gojay.MarshalerJSONArray and gojay.UnmarshalerJSONArray interfaces.
var (
	_ gojay.MarshalerJSONArray   = (*DiagnosticRelatedInformations)(nil)
	_ gojay.UnmarshalerJSONArray = (*DiagnosticRelatedInformations)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *Command) MarshalJSONObject(enc *gojay.Encoder) {
	enc.StringKey(keyTitle, v.Title)
	enc.StringKey(keyCommand, v.Command)
	enc.ArrayKeyOmitEmpty(keyArguments, (*Interfaces)(&v.Arguments))
}

// IsNil reports whether the receiver is nil.
func (v *Command) IsNil() bool { return v == nil }

// UnmarshalJSONObject implements gojay.UnmarshalerJSONObject.
func (v *Command) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	if k == keyTitle {
		return dec.String(&v.Title)
	}
	if k == keyCommand {
		return dec.String(&v.Command)
	}
	if k == keyArguments {
		return dec.Array((*Interfaces)(&v.Arguments))
	}
	return nil
}

// NKeys returns the number of keys to unmarshal.
func (v *Command) NKeys() int { return 3 }

// compile-time check that Command implements the gojay.MarshalerJSONObject
// and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*Command)(nil)
	_ gojay.UnmarshalerJSONObject = (*Command)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *TextEdit) MarshalJSONObject(enc *gojay.Encoder) {
	enc.ObjectKey(keyRange, &v.Range)
	enc.StringKey(keyNewText, v.NewText)
}

// IsNil reports whether the receiver is nil.
func (v *TextEdit) IsNil() bool { return v == nil }

// UnmarshalJSONObject implements gojay.UnmarshalerJSONObject.
func (v *TextEdit) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	if k == keyRange {
		return dec.Object(&v.Range)
	}
	if k == keyNewText {
		return dec.String(&v.NewText)
	}
	return nil
}

// NKeys returns the number of keys to unmarshal.
func (v *TextEdit) NKeys() int { return 2 }

// compile-time check that TextEdit implements the gojay.MarshalerJSONObject
// and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*TextEdit)(nil)
	_ gojay.UnmarshalerJSONObject = (*TextEdit)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *ChangeAnnotation) MarshalJSONObject(enc *gojay.Encoder) {
	enc.StringKey(keyLabel, v.Label)
	enc.BoolKeyOmitEmpty(keyNeedsConfirmation, v.NeedsConfirmation)
	enc.StringKeyOmitEmpty(keyDescription, v.Description)
}

// IsNil implements gojay.MarshalerJSONObject.
func (v *ChangeAnnotation) IsNil() bool { return v == nil }

// UnmarshalJSONObject implements gojay.UnmarshalerJSONObject.
func (v *ChangeAnnotation) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	if k == keyLabel {
		return dec.String(&v.Label)
	}
	if k == keyNeedsConfirmation {
		return dec.Bool(&v.NeedsConfirmation)
	}
	if k == keyDescription {
		return dec.String(&v.Description)
	}
	return nil
}

// NKeys implements gojay.UnmarshalerJSONObject.
func (v *ChangeAnnotation) NKeys() int { return 3 }

// compile-time check that ChangeAnnotation implements the
// gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*ChangeAnnotation)(nil)
	_ gojay.UnmarshalerJSONObject = (*ChangeAnnotation)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *AnnotatedTextEdit) MarshalJSONObject(enc *gojay.Encoder) {
	enc.ObjectKey(keyRange, &v.Range)
	enc.StringKey(keyNewText, v.NewText)
	enc.StringKey(keyAnnotationID, string(v.AnnotationID))
}

// IsNil implements gojay.MarshalerJSONObject.
func (v *AnnotatedTextEdit) IsNil() bool { return v == nil }

// UnmarshalJSONObject implements gojay.UnmarshalerJSONObject.
func (v *AnnotatedTextEdit) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	if k == keyRange {
		return dec.Object(&v.Range)
	}
	if k == keyNewText {
		return dec.String(&v.NewText)
	}
	if k == keyAnnotationID {
		return dec.String((*string)(&v.AnnotationID))
	}
	return nil
}

// NKeys implements gojay.UnmarshalerJSONObject.
func (v *AnnotatedTextEdit) NKeys() int { return 3 }

// compile-time check that AnnotatedTextEdit implements the
// gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*AnnotatedTextEdit)(nil)
	_ gojay.UnmarshalerJSONObject = (*AnnotatedTextEdit)(nil)
)
// TextEdits represents a slice of TextEdit.
type TextEdits []TextEdit

// MarshalJSONArray implements gojay.MarshalerJSONArray.
func (v TextEdits) MarshalJSONArray(enc *gojay.Encoder) {
	for i := 0; i < len(v); i++ {
		enc.Object(&v[i])
	}
}

// IsNil reports whether the slice is empty.
func (v TextEdits) IsNil() bool { return len(v) == 0 }

// UnmarshalJSONArray implements gojay.UnmarshalerJSONArray.
func (v *TextEdits) UnmarshalJSONArray(dec *gojay.Decoder) error {
	var elem TextEdit
	if err := dec.Object(&elem); err != nil {
		return err
	}
	*v = append(*v, elem)
	return nil
}

// compile-time check that TextEdits implements the gojay.MarshalerJSONArray
// and gojay.UnmarshalerJSONArray interfaces.
var (
	_ gojay.MarshalerJSONArray   = (*TextEdits)(nil)
	_ gojay.UnmarshalerJSONArray = (*TextEdits)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *TextDocumentEdit) MarshalJSONObject(enc *gojay.Encoder) {
	enc.ObjectKey(keyTextDocument, &v.TextDocument)
	enc.ArrayKey(keyEdits, (*TextEdits)(&v.Edits))
}

// IsNil reports whether the receiver is nil.
func (v *TextDocumentEdit) IsNil() bool { return v == nil }

// UnmarshalJSONObject implements gojay.UnmarshalerJSONObject.
func (v *TextDocumentEdit) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	if k == keyTextDocument {
		return dec.Object(&v.TextDocument)
	}
	if k == keyEdits {
		return dec.Array((*TextEdits)(&v.Edits))
	}
	return nil
}

// NKeys returns the number of keys to unmarshal.
func (v *TextDocumentEdit) NKeys() int { return 2 }

// compile-time check that TextDocumentEdit implements the
// gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*TextDocumentEdit)(nil)
	_ gojay.UnmarshalerJSONObject = (*TextDocumentEdit)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
// Both flags are omitted from the output when false.
func (v *CreateFileOptions) MarshalJSONObject(enc *gojay.Encoder) {
	enc.BoolKeyOmitEmpty(keyOverwrite, v.Overwrite)
	enc.BoolKeyOmitEmpty(keyIgnoreIfExists, v.IgnoreIfExists)
}
// IsNil returns whether the structure is the nil value.
func (v *CreateFileOptions) IsNil() bool { return v == nil }
// UnmarshalJSONObject implements gojay's UnmarshalerJSONObject.
func (v *CreateFileOptions) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	switch k {
	case keyOverwrite:
		return dec.Bool(&v.Overwrite)
	case keyIgnoreIfExists:
		return dec.Bool(&v.IgnoreIfExists)
	}
	return nil
}
// NKeys returns the number of keys to unmarshal.
func (v *CreateFileOptions) NKeys() int { return 2 }
// compile time check whether the CreateFileOptions implements a gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*CreateFileOptions)(nil)
	_ gojay.UnmarshalerJSONObject = (*CreateFileOptions)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *CreateFile) MarshalJSONObject(enc *gojay.Encoder) {
	enc.StringKey(keyKind, string(v.Kind))
	enc.StringKey(keyURI, string(v.URI))
	enc.ObjectKeyOmitEmpty(keyOptions, v.Options)
	enc.StringKeyOmitEmpty(keyAnnotationID, string(v.AnnotationID))
}
// IsNil returns whether the structure is the nil value.
func (v *CreateFile) IsNil() bool { return v == nil }
// UnmarshalJSONObject implements gojay's UnmarshalerJSONObject.
func (v *CreateFile) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	switch k {
	case keyKind:
		return dec.String((*string)(&v.Kind))
	case keyURI:
		return dec.String((*string)(&v.URI))
	case keyOptions:
		// Options is a pointer field; allocate it lazily before decoding into it.
		if v.Options == nil {
			v.Options = &CreateFileOptions{}
		}
		return dec.Object(v.Options)
	case keyAnnotationID:
		return dec.String((*string)(&v.AnnotationID))
	}
	return nil
}
// NKeys returns the number of keys to unmarshal.
func (v *CreateFile) NKeys() int { return 4 }
// compile time check whether the CreateFile implements a gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*CreateFile)(nil)
	_ gojay.UnmarshalerJSONObject = (*CreateFile)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
// Both flags are omitted from the output when false.
func (v *RenameFileOptions) MarshalJSONObject(enc *gojay.Encoder) {
	enc.BoolKeyOmitEmpty(keyOverwrite, v.Overwrite)
	enc.BoolKeyOmitEmpty(keyIgnoreIfExists, v.IgnoreIfExists)
}
// IsNil returns whether the structure is the nil value.
func (v *RenameFileOptions) IsNil() bool { return v == nil }
// UnmarshalJSONObject implements gojay's UnmarshalerJSONObject.
func (v *RenameFileOptions) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	switch k {
	case keyOverwrite:
		return dec.Bool(&v.Overwrite)
	case keyIgnoreIfExists:
		return dec.Bool(&v.IgnoreIfExists)
	}
	return nil
}
// NKeys returns the number of keys to unmarshal.
func (v *RenameFileOptions) NKeys() int { return 2 }
// compile time check whether the RenameFileOptions implements a gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*RenameFileOptions)(nil)
	_ gojay.UnmarshalerJSONObject = (*RenameFileOptions)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *RenameFile) MarshalJSONObject(enc *gojay.Encoder) {
	enc.StringKey(keyKind, string(v.Kind))
	enc.StringKey(keyOldURI, string(v.OldURI))
	enc.StringKey(keyNewURI, string(v.NewURI))
	enc.ObjectKeyOmitEmpty(keyOptions, v.Options)
	enc.StringKeyOmitEmpty(keyAnnotationID, string(v.AnnotationID))
}
// IsNil returns whether the structure is the nil value.
func (v *RenameFile) IsNil() bool { return v == nil }
// UnmarshalJSONObject implements gojay's UnmarshalerJSONObject.
func (v *RenameFile) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	switch k {
	case keyKind:
		return dec.String((*string)(&v.Kind))
	case keyOldURI:
		return dec.String((*string)(&v.OldURI))
	case keyNewURI:
		return dec.String((*string)(&v.NewURI))
	case keyOptions:
		// Options is a pointer field; allocate it lazily before decoding into it.
		if v.Options == nil {
			v.Options = &RenameFileOptions{}
		}
		return dec.Object(v.Options)
	case keyAnnotationID:
		return dec.String((*string)(&v.AnnotationID))
	}
	return nil
}
// NKeys returns the number of keys to unmarshal.
func (v *RenameFile) NKeys() int { return 5 }
// compile time check whether the RenameFile implements a gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*RenameFile)(nil)
	_ gojay.UnmarshalerJSONObject = (*RenameFile)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
// Both flags are omitted from the output when false.
func (v *DeleteFileOptions) MarshalJSONObject(enc *gojay.Encoder) {
	enc.BoolKeyOmitEmpty(keyRecursive, v.Recursive)
	enc.BoolKeyOmitEmpty(keyIgnoreIfNotExists, v.IgnoreIfNotExists)
}
// IsNil returns whether the structure is the nil value.
func (v *DeleteFileOptions) IsNil() bool { return v == nil }
// UnmarshalJSONObject implements gojay's UnmarshalerJSONObject.
func (v *DeleteFileOptions) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	switch k {
	case keyRecursive:
		return dec.Bool(&v.Recursive)
	case keyIgnoreIfNotExists:
		return dec.Bool(&v.IgnoreIfNotExists)
	}
	return nil
}
// NKeys returns the number of keys to unmarshal.
func (v *DeleteFileOptions) NKeys() int { return 2 }
// compile time check whether the DeleteFileOptions implements a gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*DeleteFileOptions)(nil)
	_ gojay.UnmarshalerJSONObject = (*DeleteFileOptions)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *DeleteFile) MarshalJSONObject(enc *gojay.Encoder) {
	enc.StringKey(keyKind, string(v.Kind))
	enc.StringKey(keyURI, string(v.URI))
	enc.ObjectKeyOmitEmpty(keyOptions, v.Options)
	enc.StringKeyOmitEmpty(keyAnnotationID, string(v.AnnotationID))
}
// IsNil returns whether the structure is the nil value.
func (v *DeleteFile) IsNil() bool { return v == nil }
// UnmarshalJSONObject implements gojay's UnmarshalerJSONObject.
func (v *DeleteFile) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	switch k {
	case keyKind:
		return dec.String((*string)(&v.Kind))
	case keyURI:
		return dec.String((*string)(&v.URI))
	case keyOptions:
		// Options is a pointer field; allocate it lazily before decoding into it.
		if v.Options == nil {
			v.Options = &DeleteFileOptions{}
		}
		return dec.Object(v.Options)
	case keyAnnotationID:
		return dec.String((*string)(&v.AnnotationID))
	}
	return nil
}
// NKeys returns the number of keys to unmarshal.
func (v *DeleteFile) NKeys() int { return 4 }
// compile time check whether the DeleteFile implements a gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*DeleteFile)(nil)
	_ gojay.UnmarshalerJSONObject = (*DeleteFile)(nil)
)
// TextEditsMap represents a map of WorkspaceEdit.Changes.
type TextEditsMap map[DocumentURI][]TextEdit
// compile time check whether the TextEditsMap implements a gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*TextEditsMap)(nil)
	_ gojay.UnmarshalerJSONObject = (*TextEditsMap)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v TextEditsMap) MarshalJSONObject(enc *gojay.Encoder) {
	for key, value := range v {
		value := value // copy: the array codec takes the address of the loop variable
		enc.ArrayKeyOmitEmpty(string(key), (*TextEdits)(&value))
	}
}
// IsNil returns whether the map is nil.
func (v TextEditsMap) IsNil() bool {
	return v == nil
}
// UnmarshalJSONObject implements gojay's UnmarshalerJSONObject.
// Each JSON key is a document URI whose value is an array of edits.
func (v TextEditsMap) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	edits := []TextEdit{}
	err := dec.Array((*TextEdits)(&edits))
	if err != nil {
		return err
	}
	v[DocumentURI(k)] = TextEdits(edits)
	return nil
}
// NKeys returns the number of keys to unmarshal; 0 tells gojay to decode every key present.
func (v TextEditsMap) NKeys() int { return 0 }
// TextDocumentEdits represents a TextDocumentEdit slice.
type TextDocumentEdits []TextDocumentEdit
// compile time check whether the documentChanges implements a gojay.MarshalerJSONArray and gojay.UnmarshalerJSONArray interfaces.
var (
	_ gojay.MarshalerJSONArray   = (*TextDocumentEdits)(nil)
	_ gojay.UnmarshalerJSONArray = (*TextDocumentEdits)(nil)
)
// MarshalJSONArray implements gojay.MarshalerJSONArray.
func (v TextDocumentEdits) MarshalJSONArray(enc *gojay.Encoder) {
	for i := range v {
		enc.ObjectOmitEmpty(&v[i])
	}
}
// IsNil implements gojay.MarshalerJSONArray.
func (v TextDocumentEdits) IsNil() bool { return len(v) == 0 }
// UnmarshalJSONArray implements gojay.UnmarshalerJSONArray.
// gojay invokes this once per array element; each decoded edit is appended.
func (v *TextDocumentEdits) UnmarshalJSONArray(dec *gojay.Decoder) error {
	t := TextDocumentEdit{}
	if err := dec.Object(&t); err != nil {
		return err
	}
	*v = append(*v, t)
	return nil
}
// ChangeAnnotationsMap represents a map of WorkspaceEdit.ChangeAnnotations.
type ChangeAnnotationsMap map[ChangeAnnotationIdentifier]ChangeAnnotation
// compile time check whether the ChangeAnnotationsMap implements a gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*ChangeAnnotationsMap)(nil)
	_ gojay.UnmarshalerJSONObject = (*ChangeAnnotationsMap)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v ChangeAnnotationsMap) MarshalJSONObject(enc *gojay.Encoder) {
	for key, value := range v {
		value := value // copy: the object codec takes the address of the loop variable
		enc.ObjectKeyOmitEmpty(string(key), &value)
	}
}
// IsNil returns whether the map is nil.
func (v ChangeAnnotationsMap) IsNil() bool { return v == nil }
// UnmarshalJSONObject implements gojay's UnmarshalerJSONObject.
// Each JSON key is an annotation identifier mapped to one ChangeAnnotation.
func (v ChangeAnnotationsMap) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	edits := ChangeAnnotation{}
	if err := dec.Object(&edits); err != nil {
		return err
	}
	v[ChangeAnnotationIdentifier(k)] = edits
	return nil
}
// NKeys returns the number of keys to unmarshal; 0 tells gojay to decode every key present.
func (v ChangeAnnotationsMap) NKeys() int { return 0 }
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *WorkspaceEdit) MarshalJSONObject(enc *gojay.Encoder) {
	enc.ObjectKeyOmitEmpty(keyChanges, (*TextEditsMap)(&v.Changes))
	enc.ArrayKeyOmitEmpty(keyDocumentChanges, (*TextDocumentEdits)(&v.DocumentChanges))
	enc.ObjectKeyOmitEmpty(keyChangeAnnotations, (*ChangeAnnotationsMap)(&v.ChangeAnnotations))
}
// IsNil returns whether the structure is the nil value.
func (v *WorkspaceEdit) IsNil() bool { return v == nil }
// UnmarshalJSONObject implements gojay's UnmarshalerJSONObject.
// Map and slice fields are allocated lazily before decoding into them.
func (v *WorkspaceEdit) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	switch k {
	case keyChanges:
		if v.Changes == nil {
			v.Changes = make(map[DocumentURI][]TextEdit)
		}
		return dec.Object(TextEditsMap(v.Changes))
	case keyDocumentChanges:
		if v.DocumentChanges == nil {
			v.DocumentChanges = []TextDocumentEdit{}
		}
		return dec.Array((*TextDocumentEdits)(&v.DocumentChanges))
	case keyChangeAnnotations:
		if v.ChangeAnnotations == nil {
			v.ChangeAnnotations = make(map[ChangeAnnotationIdentifier]ChangeAnnotation)
		}
		return dec.Object(ChangeAnnotationsMap(v.ChangeAnnotations))
	}
	return nil
}
// NKeys returns the number of keys to unmarshal.
func (v *WorkspaceEdit) NKeys() int { return 3 }
// compile time check whether the WorkspaceEdit implements a gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*WorkspaceEdit)(nil)
	_ gojay.UnmarshalerJSONObject = (*WorkspaceEdit)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *TextDocumentIdentifier) MarshalJSONObject(enc *gojay.Encoder) {
	enc.StringKey(keyURI, string(v.URI))
}
// IsNil returns whether the structure is the nil value.
func (v *TextDocumentIdentifier) IsNil() bool { return v == nil }
// UnmarshalJSONObject implements gojay's UnmarshalerJSONObject.
func (v *TextDocumentIdentifier) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	if k == keyURI {
		return dec.String((*string)(&v.URI))
	}
	return nil
}
// NKeys returns the number of keys to unmarshal.
func (v *TextDocumentIdentifier) NKeys() int { return 1 }
// compile time check whether the TextDocumentIdentifier implements a gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*TextDocumentIdentifier)(nil)
	_ gojay.UnmarshalerJSONObject = (*TextDocumentIdentifier)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *TextDocumentItem) MarshalJSONObject(enc *gojay.Encoder) {
	enc.StringKey(keyURI, string(v.URI))
	enc.StringKey(keyLanguageID, string(v.LanguageID))
	enc.Int32Key(keyVersion, v.Version)
	enc.StringKey(keyText, v.Text)
}
// IsNil returns whether the structure is the nil value.
func (v *TextDocumentItem) IsNil() bool { return v == nil }
// UnmarshalJSONObject implements gojay's UnmarshalerJSONObject.
func (v *TextDocumentItem) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	switch k {
	case keyURI:
		return dec.String((*string)(&v.URI))
	case keyLanguageID:
		return dec.String((*string)(&v.LanguageID))
	case keyVersion:
		return dec.Int32(&v.Version)
	case keyText:
		return dec.String(&v.Text)
	}
	return nil
}
// NKeys returns the number of keys to unmarshal.
func (v *TextDocumentItem) NKeys() int { return 4 }
// compile time check whether the TextDocumentItem implements a gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*TextDocumentItem)(nil)
	_ gojay.UnmarshalerJSONObject = (*TextDocumentItem)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *VersionedTextDocumentIdentifier) MarshalJSONObject(enc *gojay.Encoder) {
	enc.StringKey(keyURI, string(v.URI))
	enc.Int32Key(keyVersion, v.Version)
}
// IsNil returns whether the structure is the nil value.
func (v *VersionedTextDocumentIdentifier) IsNil() bool { return v == nil }
// UnmarshalJSONObject implements gojay's UnmarshalerJSONObject.
func (v *VersionedTextDocumentIdentifier) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	switch k {
	case keyURI:
		return dec.String((*string)(&v.URI))
	case keyVersion:
		return dec.Int32(&v.Version)
	}
	return nil
}
// NKeys returns the number of keys to unmarshal.
func (v *VersionedTextDocumentIdentifier) NKeys() int { return 2 }
// compile time check whether the VersionedTextDocumentIdentifier implements a gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*VersionedTextDocumentIdentifier)(nil)
	_ gojay.UnmarshalerJSONObject = (*VersionedTextDocumentIdentifier)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
//
// Fix: the previous implementation assigned NewVersion(0) to v.Version
// when it was nil, i.e. marshaling mutated the receiver as a side
// effect. A local copy is used instead; the encoded output is identical
// (Int32KeyNullEmpty is called with the same int32 value, 0, for a nil
// Version).
func (v *OptionalVersionedTextDocumentIdentifier) MarshalJSONObject(enc *gojay.Encoder) {
	enc.StringKey(keyURI, string(v.URI))
	version := int32(0)
	if v.Version != nil {
		version = *v.Version
	}
	enc.Int32KeyNullEmpty(keyVersion, version)
}
// IsNil implements gojay.MarshalerJSONObject.
func (v *OptionalVersionedTextDocumentIdentifier) IsNil() bool { return v == nil }
// UnmarshalJSONObject implements gojay.UnmarshalerJSONObject.
func (v *OptionalVersionedTextDocumentIdentifier) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	switch k {
	case keyURI:
		return dec.String((*string)(&v.URI))
	case keyVersion:
		// Int32Null allocates v.Version only when the JSON value is non-null.
		return dec.Int32Null(&v.Version)
	}
	return nil
}
// NKeys implements gojay.UnmarshalerJSONObject.
func (v *OptionalVersionedTextDocumentIdentifier) NKeys() int { return 2 }
// compile time check whether the OptionalVersionedTextDocumentIdentifier implements a gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*OptionalVersionedTextDocumentIdentifier)(nil)
	_ gojay.UnmarshalerJSONObject = (*OptionalVersionedTextDocumentIdentifier)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *TextDocumentPositionParams) MarshalJSONObject(enc *gojay.Encoder) {
	enc.ObjectKey(keyTextDocument, &v.TextDocument)
	enc.ObjectKey(keyPosition, &v.Position)
}
// IsNil returns whether the structure is the nil value.
func (v *TextDocumentPositionParams) IsNil() bool { return v == nil }
// UnmarshalJSONObject implements gojay's UnmarshalerJSONObject.
func (v *TextDocumentPositionParams) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	switch k {
	case keyTextDocument:
		return dec.Object(&v.TextDocument)
	case keyPosition:
		return dec.Object(&v.Position)
	}
	return nil
}
// NKeys returns the number of keys to unmarshal.
func (v *TextDocumentPositionParams) NKeys() int { return 2 }
// compile time check whether the TextDocumentPositionParams implements a gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*TextDocumentPositionParams)(nil)
	_ gojay.UnmarshalerJSONObject = (*TextDocumentPositionParams)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
// All three fields are optional and omitted when empty.
func (v *DocumentFilter) MarshalJSONObject(enc *gojay.Encoder) {
	enc.StringKeyOmitEmpty(keyLanguage, v.Language)
	enc.StringKeyOmitEmpty(keyScheme, v.Scheme)
	enc.StringKeyOmitEmpty(keyPattern, v.Pattern)
}
// IsNil returns whether the structure is the nil value.
func (v *DocumentFilter) IsNil() bool { return v == nil }
// UnmarshalJSONObject implements gojay's UnmarshalerJSONObject.
func (v *DocumentFilter) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	switch k {
	case keyLanguage:
		return dec.String(&v.Language)
	case keyScheme:
		return dec.String(&v.Scheme)
	case keyPattern:
		return dec.String(&v.Pattern)
	}
	return nil
}
// NKeys returns the number of keys to unmarshal.
func (v *DocumentFilter) NKeys() int { return 3 }
// compile time check whether the DocumentFilter implements a gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*DocumentFilter)(nil)
	_ gojay.UnmarshalerJSONObject = (*DocumentFilter)(nil)
)
// MarshalJSONArray implements gojay.MarshalerJSONArray.
func (v DocumentSelector) MarshalJSONArray(enc *gojay.Encoder) {
	for i := range v {
		enc.Object(v[i])
	}
}
// IsNil implements gojay.MarshalerJSONArray.
func (v DocumentSelector) IsNil() bool { return len(v) == 0 }
// UnmarshalJSONArray implements gojay.UnmarshalerJSONArray.
// gojay invokes this once per array element; each decoded filter pointer is appended.
func (v *DocumentSelector) UnmarshalJSONArray(dec *gojay.Decoder) error {
	value := &DocumentFilter{}
	if err := dec.Object(value); err != nil {
		return err
	}
	*v = append(*v, value)
	return nil
}
// compile time check whether the DocumentSelector implements a gojay.MarshalerJSONArray and gojay.UnmarshalerJSONArray interfaces.
var (
	_ gojay.MarshalerJSONArray   = (*DocumentSelector)(nil)
	_ gojay.UnmarshalerJSONArray = (*DocumentSelector)(nil)
)
// MarshalJSONObject implements gojay.MarshalerJSONObject.
func (v *MarkupContent) MarshalJSONObject(enc *gojay.Encoder) {
	enc.StringKey(keyKind, string(v.Kind))
	enc.StringKey(keyValue, v.Value)
}
// IsNil returns whether the structure is the nil value.
func (v *MarkupContent) IsNil() bool { return v == nil }
// UnmarshalJSONObject implements gojay's UnmarshalerJSONObject.
func (v *MarkupContent) UnmarshalJSONObject(dec *gojay.Decoder, k string) error {
	switch k {
	case keyKind:
		return dec.String((*string)(&v.Kind))
	case keyValue:
		return dec.String(&v.Value)
	}
	return nil
}
// NKeys returns the number of keys to unmarshal.
func (v *MarkupContent) NKeys() int { return 2 }
// compile time check whether the MarkupContent implements a gojay.MarshalerJSONObject and gojay.UnmarshalerJSONObject interfaces.
var (
	_ gojay.MarshalerJSONObject   = (*MarkupContent)(nil)
	_ gojay.UnmarshalerJSONObject = (*MarkupContent)(nil)
)
|
package lantern_cache
import (
"bytes"
"testing"
"time"
)
// makeByte allocates and returns a zero-filled byte slice of length size.
func makeByte(size int) []byte {
	buf := make([]byte, size)
	return buf
}
// TestBucketPutGet verifies that a value stored with put round-trips
// unchanged through get when no expiry is set (expire argument 0).
func TestBucketPutGet(t *testing.T) {
	// bucketConfig fields are positional: capacity (128 KiB), a zero
	// second field, the chunk allocator and a stats sink.
	// NOTE(review): field names are declared elsewhere — confirm meanings.
	b := newBucket(&bucketConfig{
		64 * 1024 * 2,
		0,
		NewChunkAllocator("heap"),
		&Stats{},
	})
	h := newFowlerNollVoHasher()
	key1 := []byte("key1")
	val1 := []byte("val1")
	err := b.put(h.Hash(key1), key1, val1, 0)
	if err != nil {
		t.Fatal(err)
	}
	// nil first argument: presumably a reusable destination buffer — verify against get's signature.
	actual, err := b.get(nil, h.Hash(key1), key1)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(actual, val1) {
		t.Fatal("not equal")
	}
}
// TestBucketPutGetExpire verifies expiry semantics: a value stored with
// an absolute unix deadline one second in the future is readable
// immediately, and yields ErrorValueExpire after the deadline passes.
func TestBucketPutGetExpire(t *testing.T) {
	b := newBucket(&bucketConfig{
		64 * 1024 * 2,
		0,
		NewChunkAllocator("heap"),
		&Stats{},
	})
	h := newFowlerNollVoHasher()
	key1 := []byte("key1")
	val1 := []byte("val1")
	// The fourth argument is an absolute expiry timestamp (unix seconds).
	err := b.put(h.Hash(key1), key1, val1, time.Now().Unix()+1)
	if err != nil {
		t.Fatal(err)
	}
	actual, err := b.get(nil, h.Hash(key1), key1)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(actual, val1) {
		t.Fatal("not equal")
	}
	// Sleep past the one-second deadline so the entry is expired.
	time.Sleep(time.Second * 2)
	_, err = b.get(nil, h.Hash(key1), key1)
	if err != ErrorValueExpire {
		t.Fatal(err)
	}
}
// TestBucketPutGetSmall verifies a put/get round-trip still works when
// the bucket is created with the smallest possible capacity (1).
func TestBucketPutGetSmall(t *testing.T) {
	b := newBucket(&bucketConfig{
		1,
		0,
		NewChunkAllocator("heap"),
		&Stats{},
	})
	h := newFowlerNollVoHasher()
	key1 := []byte("key1")
	val1 := []byte("val1")
	err := b.put(h.Hash(key1), key1, val1, time.Now().Unix()+1)
	if err != nil {
		t.Fatal(err)
	}
	actual, err := b.get(nil, h.Hash(key1), key1)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(actual, val1) {
		t.Fatal("not equal")
	}
}
// TestCacheBigKeyValue verifies that an oversized value (64 KiB, half
// the bucket capacity) is rejected by put with ErrorInvalidEntry.
func TestCacheBigKeyValue(t *testing.T) {
	b := newBucket(&bucketConfig{
		64 * 1024 * 2,
		0,
		NewChunkAllocator("heap"),
		&Stats{},
	})
	h := newFowlerNollVoHasher()
	key1 := []byte("key1")
	val1 := makeByte(64 * 1024)
	err := b.put(h.Hash(key1), key1, val1, time.Now().Unix()+1)
	// put must fail with exactly ErrorInvalidEntry for oversized entries.
	if err != ErrorInvalidEntry {
		t.Fatal(err)
	}
}
// TestBucketDel verifies that a stored value is readable, and that
// after del a subsequent get fails with ErrorNotFound.
func TestBucketDel(t *testing.T) {
	b := newBucket(&bucketConfig{
		64 * 1024 * 2,
		0,
		NewChunkAllocator("heap"),
		&Stats{},
	})
	h := newFowlerNollVoHasher()
	key1 := []byte("key1")
	val1 := []byte("val1")
	err := b.put(h.Hash(key1), key1, val1, 0)
	if err != nil {
		t.Fatal(err)
	}
	actual, err := b.get(nil, h.Hash(key1), key1)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(actual, val1) {
		t.Fatal("not equal")
	}
	// del takes only the hash; the entry must be gone afterwards.
	b.del(h.Hash(key1))
	_, err = b.get(nil, h.Hash(key1), key1)
	if err != ErrorNotFound {
		t.Fatal(err)
	}
}
|
package db
import (
"log"
"time"
pg "github.com/go-pg/pg"
)
// saveProduct builds a sample "mango" ProductItem and persists it via
// Save. Fix: the error returned by Save was previously discarded; it is
// now checked and logged so a failed insert is no longer silent.
func saveProduct(dbRef *pg.DB) {
	newProduct := &ProductItem{
		Name:  "mango",
		Desc:  "sweet fruit",
		Image: " file path of image ",
		Price: 23,
		Features: struct {
			Name string
			Desc string
			Imp  int
		}{
			Name: "F1",
			Desc: "F1 description",
			Imp:  3,
		},
		CreatedAt: time.Now(),
		UpdatedAt: time.Now(),
		IsActive:  true,
	}
	if err := newProduct.Save(dbRef); err != nil {
		log.Println("failed to save productItem:", err)
	}
}
// Save inserts the ProductItem into the database.
//
// Fix: the function declared an error return but always returned nil,
// and logged a success message even when the insert failed (if
// Panicking does not panic). The insert error is now propagated and the
// success log only fires on success. Panicking is still invoked first
// to preserve the existing fail-fast behavior.
func (pi *ProductItem) Save(dBase *pg.DB) error {
	insertErr := dBase.Insert(pi)
	Panicking(insertErr)
	if insertErr != nil {
		return insertErr
	}
	log.Println("successfully inserted productItem : ", pi.Name)
	return nil
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"time"
)
// config models the JSON configuration file whose path is passed as the
// first command line argument.
type config struct {
	// Addr is the listen address handed to http.ListenAndServe
	// (host:port form, e.g. ":8080").
	Addr string `json:"addr"`
}
// main reads the JSON config named on the command line, registers a
// handler that reports the current time, and serves HTTP on the
// configured address. Any setup failure exits with status 1.
func main() {
	if len(os.Args) < 2 {
		fmt.Fprintf(os.Stderr, "No config file given\n")
		fmt.Fprintf(os.Stderr, "Usage: %s <path to config>\n", os.Args[0])
		os.Exit(1)
	}

	raw, err := ioutil.ReadFile(os.Args[1])
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to read config from %q: %v\n", os.Args[1], err)
		os.Exit(1)
	}

	// "cfg" avoids shadowing the package-level type name "config".
	var cfg config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		fmt.Fprintf(os.Stderr, "Failed to unmarshal config: %v\n", err)
		os.Exit(1)
	}

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		now := time.Now().Format("Mon Jan 2 15:04:05 -0700 MST 2006")
		fmt.Fprintf(w, "Hello from habitat demo service. The time is: %s\n", now)
	})

	if err := http.ListenAndServe(cfg.Addr, nil); err != http.ErrServerClosed {
		fmt.Fprintf(os.Stderr, "HTTP server error: %v\n", err)
		os.Exit(1)
	}
}
|
package provider
import (
"strings"
"github.com/denverdino/aliyungo/ecs"
)
const (
	// DevicePrefixLocal is the device path prefix seen inside the instance.
	DevicePrefixLocal = "/dev/vd"
	// DevicePrefixAPI is the device path prefix reported by the ECS API.
	DevicePrefixAPI = "/dev/xvd"
)
// Status of disks
type DiskStatus string
// Disk lifecycle states as reported by the ECS API.
const (
	DiskStatusInUse     = DiskStatus("In_use")
	DiskStatusAvailable = DiskStatus("Available")
	DiskStatusAttaching = DiskStatus("Attaching")
	DiskStatusDetaching = DiskStatus("Detaching")
	DiskStatusCreating  = DiskStatus("Creating")
	DiskStatusReIniting = DiskStatus("ReIniting")
)
// Disk wraps the ECS API disk item type to attach helper methods.
type Disk ecs.DiskItemType
// IsInUse reports whether the disk status is In_use.
func (d *Disk) IsInUse() bool {
	status := DiskStatus(d.Status)
	return status == DiskStatusInUse
}

// IsAvailable reports whether the disk status is Available.
func (d *Disk) IsAvailable() bool {
	status := DiskStatus(d.Status)
	return status == DiskStatusAvailable
}

// IsDetaching reports whether the disk status is Detaching.
func (d *Disk) IsDetaching() bool {
	status := DiskStatus(d.Status)
	return status == DiskStatusDetaching
}

// LocalDevice returns the device path with the API prefix (/dev/xvd)
// rewritten to the prefix used inside the instance (/dev/vd).
func (d *Disk) LocalDevice() string {
	localized := strings.Replace(d.Device, DevicePrefixAPI, DevicePrefixLocal, 1)
	return localized
}

// SetLocalDevice stores device after rewriting the local prefix
// (/dev/vd) back to the API prefix (/dev/xvd).
func (d *Disk) SetLocalDevice(device string) {
	apiForm := strings.Replace(device, DevicePrefixLocal, DevicePrefixAPI, 1)
	d.Device = apiForm
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package model
import "time"
// LabelDoc is database struct to store labels
type LabelDoc struct {
	ID     string            `json:"id,omitempty" bson:"id,omitempty" yaml:"id,omitempty" swag:"string"`
	Labels map[string]string `json:"labels,omitempty" yaml:"labels,omitempty"`
	// NOTE(review): Format carries only a bson tag, so json/yaml encoders
	// fall back to the default field name — confirm this is intended.
	Format  string `bson:"format,omitempty"`
	Domain  string `json:"domain,omitempty" yaml:"domain,omitempty"` //tenant info
	Project string `json:"project,omitempty" yaml:"project,omitempty"`
	Alias   string `json:"alias,omitempty" yaml:"alias,omitempty"`
}
// KVDoc is database struct to store kv
// NOTE(review): several bson tags end with a bare trailing comma
// (e.g. bson:"create_revision,") — confirm the driver treats that as
// intended rather than as an empty option.
type KVDoc struct {
	ID             string `json:"id,omitempty" bson:"id,omitempty" yaml:"id,omitempty" swag:"string"`
	LabelFormat    string `json:"label_format,omitempty" bson:"label_format,omitempty" yaml:"label_format,omitempty"`
	Key            string `json:"key" yaml:"key" validate:"min=1,max=2048,key"`
	Value          string `json:"value" yaml:"value" validate:"max=131072,value"`                                            //128K
	ValueType      string `json:"value_type,omitempty" bson:"value_type,omitempty" yaml:"value_type,omitempty" validate:"valueType"` //ini,json,text,yaml,properties,xml
	Checker        string `json:"check,omitempty" yaml:"check,omitempty" validate:"max=1048576,check"`                       //python script
	CreateRevision int64  `json:"create_revision,omitempty" bson:"create_revision," yaml:"create_revision,omitempty"`
	UpdateRevision int64  `json:"update_revision,omitempty" bson:"update_revision," yaml:"update_revision,omitempty"`
	Project        string `json:"project,omitempty" yaml:"project,omitempty" validate:"min=1,max=256,commonName"`
	Status         string `json:"status,omitempty" yaml:"status,omitempty" validate:"kvStatus"`
	CreateTime     int64  `json:"create_time,omitempty" bson:"create_time," yaml:"create_time,omitempty"`
	UpdateTime     int64  `json:"update_time,omitempty" bson:"update_time," yaml:"update_time,omitempty"`
	Labels         map[string]string `json:"labels,omitempty" yaml:"labels,omitempty" validate:"max=6,dive,keys,labelK,endkeys,labelV"` //redundant
	Domain         string            `json:"domain,omitempty" yaml:"domain,omitempty" validate:"min=1,max=256,commonName"`             //redundant
}
// ViewDoc is db struct, it saves user's custom view name and criteria
type ViewDoc struct {
	ID       string `json:"id,omitempty" bson:"id,omitempty" yaml:"id,omitempty" swag:"string"`
	Display  string `json:"display,omitempty" yaml:"display,omitempty"`
	Project  string `json:"project,omitempty" yaml:"project,omitempty"`
	Domain   string `json:"domain,omitempty" yaml:"domain,omitempty"`
	Criteria string `json:"criteria,omitempty" yaml:"criteria,omitempty"`
}
// PollingDetail is db struct, it record operation history
type PollingDetail struct {
	ID           string                 `json:"id,omitempty" yaml:"id,omitempty"`
	SessionID    string                 `json:"session_id,omitempty" bson:"session_id," yaml:"session_id,omitempty"`
	SessionGroup string                 `json:"session_group,omitempty" bson:"session_group," yaml:"session_group,omitempty"`
	Domain       string                 `json:"domain,omitempty" yaml:"domain,omitempty"`
	Project      string                 `json:"project,omitempty" yaml:"project,omitempty"`
	PollingData  map[string]interface{} `json:"polling_data,omitempty" yaml:"polling_data,omitempty"`
	Revision     string                 `json:"revision,omitempty" yaml:"revision,omitempty"`
	IP           string                 `json:"ip,omitempty" yaml:"ip,omitempty"`
	UserAgent    string                 `json:"user_agent,omitempty" bson:"user_agent," yaml:"user_agent,omitempty"`
	URLPath      string                 `json:"url_path,omitempty" bson:"url_path," yaml:"url_path,omitempty"`
	ResponseBody []*KVDoc               `json:"kv,omitempty" bson:"kv," yaml:"kv,omitempty"`
	ResponseCode int                    `json:"response_code,omitempty" bson:"response_code," yaml:"response_code,omitempty"`
	Timestamp    time.Time              `json:"timestamp,omitempty" yaml:"timestamp,omitempty"`
}
// UpdateKVRequest is db struct, it contains kv update request params
type UpdateKVRequest struct {
	ID      string `json:"id,omitempty" bson:"id,omitempty" yaml:"id,omitempty" swag:"string" validate:"min=1,max=64"`
	Value   string `json:"value,omitempty" yaml:"value,omitempty" validate:"max=131072,value"`
	Project string `json:"project,omitempty" yaml:"project,omitempty" validate:"min=1,max=256,commonName"`
	Domain  string `json:"domain,omitempty" yaml:"domain,omitempty" validate:"min=1,max=256,commonName"` //redundant
	Status  string `json:"status,omitempty" yaml:"status,omitempty" validate:"kvStatus"`
}
// GetKVRequest contains kv get request params
type GetKVRequest struct {
	Project string `json:"project,omitempty" yaml:"project,omitempty" validate:"min=1,max=256,commonName"`
	Domain  string `json:"domain,omitempty" yaml:"domain,omitempty" validate:"min=1,max=256,commonName"` //redundant
	ID      string `json:"id,omitempty" bson:"id,omitempty" yaml:"id,omitempty" swag:"string" validate:"min=1,max=64"`
}
// ListKVRequest contains kv list request params
type ListKVRequest struct {
	Project string            `json:"project,omitempty" yaml:"project,omitempty" validate:"min=1,max=256,commonName"`
	Domain  string            `json:"domain,omitempty" yaml:"domain,omitempty" validate:"min=1,max=256,commonName"` //redundant
	Key     string            `json:"key" yaml:"key" validate:"max=128,getKey"`
	Labels  map[string]string `json:"labels,omitempty" yaml:"labels,omitempty" validate:"max=8,dive,keys,labelK,endkeys,labelV"` //redundant
	Offset  int64             `validate:"min=0"`
	Limit   int64             `validate:"min=0,max=100"`
	Status  string            `json:"status,omitempty" yaml:"status,omitempty" validate:"kvStatus"`
	Match   string            `json:"match,omitempty" yaml:"match,omitempty"`
}
// UploadKVRequest contains kv list upload request params
// NOTE(review): KVs and Override carry no tags or validation — confirm
// they are only populated server-side and never bound from user input.
type UploadKVRequest struct {
	Domain   string `json:"domain,omitempty" yaml:"domain,omitempty" validate:"min=1,max=256,commonName"` //redundant
	Project  string `json:"project,omitempty" yaml:"project,omitempty" validate:"min=1,max=256,commonName"`
	KVs      []*KVDoc
	Override string
}
|
package main
import (
"flag"
"fmt"
)
// commandLineOptions holds the parsed command line flags.
type commandLineOptions struct {
	// configFile is the path given via the mandatory -config flag.
	configFile string
}

// parseCommandLine parses args (excluding the program name) into
// commandLineOptions. It returns an error when flag parsing fails or
// when the mandatory -config flag is missing or empty.
//
// Fix: the error message typo "config file no specified" is corrected
// to "config file not specified".
func parseCommandLine(args []string) (*commandLineOptions, error) {
	fanFlags := flag.NewFlagSet("fan options", flag.ContinueOnError)
	cmd := commandLineOptions{}
	fanFlags.StringVar(&cmd.configFile, "config", "", "config file")
	if err := fanFlags.Parse(args); err != nil {
		return nil, err
	}
	if cmd.configFile == "" {
		return nil, fmt.Errorf("config file not specified")
	}
	return &cmd, nil
}
|
package util
import (
"io"
"io/ioutil"
"os"
"path"
"strings"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// TestUtils hooks Ginkgo into the standard go test runner and executes
// the specs registered in this package under the "Utils Suite" label.
func TestUtils(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Utils Suite")
}
var _ = Describe("Utils", func() {
// GetTempDir()
Describe("Get Temp Directory", func() {
var (
cwd string
tmpDir string
err error
)
JustBeforeEach(func() {
tmpDir, err = GetTempDir()
Expect(err).ToNot(HaveOccurred())
os.RemoveAll(tmpDir)
})
Context("When configured to the default", func() {
It("Should return a directory under the system default", func() {
Expect(path.Dir(tmpDir)).To(Equal(os.TempDir()))
})
})
// This test assumes current directory is writable
Context("When overwritten with a custom path", func() {
BeforeEach(func() {
cwd, err = os.Getwd()
Expect(err).ToNot(HaveOccurred())
TempDir = cwd
})
It("Should return a temp directory under the custom path", func() {
Expect(path.Dir(tmpDir)).To(Equal(cwd))
})
})
})
// CalculateSHA256Sum
Describe("Calculating SHA256 Sums", func() {
var (
shaSum string
err error
body io.ReadCloser
)
const (
helloWorldSha = "b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9"
)
JustBeforeEach(func() {
shaSum, err = CalculateSHA256Sum(body)
})
Context("When passed the value 'hello world'", func() {
BeforeEach(func() {
body = ioutil.NopCloser(strings.NewReader("hello world"))
})
It("Should return the correct checksum", func() {
Expect(err).ToNot(HaveOccurred())
Expect(shaSum).To(Equal(helloWorldSha))
})
})
Context("When passed a closed io.Reader", func() {
BeforeEach(func() {
body, _, err = os.Pipe()
Expect(err).ToNot(HaveOccurred())
body.Close()
})
It("Should return an error", func() {
Expect(err).To(HaveOccurred())
})
})
})
// IsK8sObject
Describe("Detecting K8s Objects from Unmarshaled Data", func() {
var (
data map[string]interface{}
isK8sObject bool
)
JustBeforeEach(func() { isK8sObject = IsK8sObject(data) })
Context("When passed a valid kubernetes object", func() {
BeforeEach(func() {
data = map[string]interface{}{
"kind": "Pod",
"apiVersion": "v1",
"metadata": map[string]interface{}{
"name": "test-pod",
},
}
})
Specify("That it is a valid object", func() {
Expect(isK8sObject).To(BeTrue())
})
})
Context("When passed an invalid kubernetes object", func() {
BeforeEach(func() {
data = map[string]interface{}{
"hello": "world",
"apiVersion": "v1",
"metadata": map[string]interface{}{
"name": "invalid-pod",
},
}
})
Specify("That it is an invalid object", func() {
Expect(isK8sObject).To(BeFalse())
})
})
})
// GenerateToken
Describe("Generating Unique Tokens", func() {
var (
length int
token string
)
JustBeforeEach(func() { token = GenerateToken(length) })
Context("When told to generate a 128 character token", func() {
BeforeEach(func() { length = 128 })
It("should return a token with 128 characters", func() {
Expect(len(token)).To(Equal(128))
})
})
Context("When told to generate a 256 character token", func() {
BeforeEach(func() { length = 256 })
It("should return a token with 256 characters", func() {
Expect(len(token)).To(Equal(256))
})
})
})
})
|
// +build !sgx_enclave
package adapters
import (
"fmt"
"github.com/smartcontractkit/chainlink/store"
"github.com/smartcontractkit/chainlink/store/models"
)
// Wasm represents a wasm binary encoded as base64 or wasm encoded as text (a lisp like language).
type Wasm struct {
	// WasmT carries the wasm payload from the job spec's "wasmt" field.
	WasmT string `json:"wasmt"`
}
// Perform ships the wasm representation to the SGX enclave where it is evaluated.
// In this non-SGX build the adapter is unsupported, so it always returns the
// input annotated with an error.
func (wasm *Wasm) Perform(input models.RunResult, _ *store.Store) models.RunResult {
	err := fmt.Errorf("Wasm is not supported without SGX")
	return input.WithError(err)
}
|
/*
Copyright 2017-2020 Gravitational, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO(awly): combine Expression and Matcher. It should be possible to write:
// `{{regexp.match(email.local(external.trait_name))}}`
package parse
import (
"fmt"
"reflect"
"regexp"
"strings"
"unicode"
"github.com/gravitational/trace"
"github.com/vulcand/predicate"
)
// Expression is a string expression template
// that can interpolate to some variables.
type Expression struct {
	// prefix is a prefix of the expression
	prefix string
	// suffix is a suffix of the expression
	suffix string
	// expr is the expression AST
	expr Expr
}

// MatchExpression is a match expression.
type MatchExpression struct {
	// prefix is a prefix of the expression
	prefix string
	// suffix is a suffix of the expression
	suffix string
	// matcher is the matcher in the expression
	matcher Expr
}

// reVariable splits a template of the form "prefix{{expression}}suffix"
// into its three named capture groups.
var reVariable = regexp.MustCompile(
	// prefix is anything that is not { or }
	`^(?P<prefix>[^}{]*)` +
		// variable is anything in brackets {{}} that is not { or }
		`{{(?P<expression>\s*[^}{]*\s*)}}` +
		// suffix is anything that is not { or }
		`(?P<suffix>[^}{]*)$`,
)
// NewExpression parses expressions like {{external.foo}} or {{internal.bar}},
// or a literal value like "prod". Call Interpolate on the returned Expression
// to get the final value based on traits or other dynamic values.
func NewExpression(value string) (*Expression, error) {
	match := reVariable.FindStringSubmatch(value)
	if match == nil {
		// No {{...}} group found: either a plain literal, or malformed brackets.
		if strings.Contains(value, "{{") || strings.Contains(value, "}}") {
			return nil, trace.BadParameter(
				"%q is using template brackets '{{' or '}}', however expression does not parse, make sure the format is {{expression}}",
				value,
			)
		}
		return &Expression{
			expr: &VarExpr{namespace: LiteralNamespace, name: value},
		}, nil
	}
	prefix, inner, suffix := match[1], match[2], match[3]
	parsed, err := parse(inner)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// Only string-valued expressions can be interpolated into a string template.
	if parsed.Kind() != reflect.String {
		return nil, trace.BadParameter("%q does not evaluate to a string", inner)
	}
	return &Expression{
		prefix: strings.TrimLeftFunc(prefix, unicode.IsSpace),
		suffix: strings.TrimRightFunc(suffix, unicode.IsSpace),
		expr:   parsed,
	}, nil
}
// Interpolate interpolates the variable adding prefix and suffix if present.
// The returned error is trace.NotFound in case the expression contains a variable
// and this variable is not found on any trait, nil in case of success,
// and BadParameter otherwise.
func (e *Expression) Interpolate(varValidation func(namespace, name string) error, traits map[string][]string) ([]string, error) {
	ctx := EvaluateContext{
		// VarValue resolves a variable reference to its trait values, after
		// letting the caller veto the namespace/name pair.
		VarValue: func(v VarExpr) ([]string, error) {
			if err := varValidation(v.namespace, v.name); err != nil {
				return nil, trace.Wrap(err)
			}
			values, ok := traits[v.name]
			if !ok {
				return nil, trace.NotFound("variable not found: %s", v)
			}
			return values, nil
		},
	}
	result, err := e.expr.Evaluate(ctx)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// NewExpression guarantees a string-kind expression, which must evaluate
	// to []string; anything else is a parser bug.
	l, ok := result.([]string)
	if !ok {
		panic(fmt.Sprintf("unexpected string expression evaluation result type %T (this is a bug)", result))
	}
	var out []string
	for _, val := range l {
		// Empty values are dropped rather than wrapped with prefix/suffix.
		if len(val) > 0 {
			out = append(out, e.prefix+val+e.suffix)
		}
	}
	return out, nil
}
// Matcher matches strings against some internal criteria (e.g. a regexp)
type Matcher interface {
	Match(in string) bool
}

// MatcherFn converts function to a matcher interface
type MatcherFn func(in string) bool

// Match invokes the wrapped function on in and reports its result.
func (fn MatcherFn) Match(in string) bool {
	return fn(in)
}
// NewAnyMatcher returns a matcher function based
// on incoming values
func NewAnyMatcher(in []string) (Matcher, error) {
	// Build one matcher per input expression up front so parse errors
	// surface immediately rather than at match time.
	matchers := make([]Matcher, 0, len(in))
	for _, expr := range in {
		matcher, err := NewMatcher(expr)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		matchers = append(matchers, matcher)
	}
	// The combined matcher succeeds as soon as any sub-matcher does.
	anyMatch := func(in string) bool {
		for _, matcher := range matchers {
			if matcher.Match(in) {
				return true
			}
		}
		return false
	}
	return MatcherFn(anyMatch), nil
}
// NewMatcher parses a matcher expression. Currently supported expressions:
// - string literal: `foo`
// - wildcard expression: `*` or `foo*bar`
// - regexp expression: `^foo$`
// - regexp function calls:
//   - positive match: `{{regexp.match("foo.*")}}`
//   - negative match: `{{regexp.not_match("foo.*")}}`
//
// These expressions do not support variable interpolation (e.g.
// `{{internal.logins}}`), like Expression does.
func NewMatcher(value string) (*MatchExpression, error) {
	match := reVariable.FindStringSubmatch(value)
	if len(match) == 0 {
		// No {{...}} group: treat the whole value as a literal/wildcard/regexp.
		if strings.Contains(value, "{{") || strings.Contains(value, "}}") {
			return nil, trace.BadParameter(
				"%q is using template brackets '{{' or '}}', however expression does not parse, make sure the format is {{expression}}",
				value,
			)
		}
		matcher, err := buildRegexpMatchExprFromLit(value)
		if err != nil {
			return nil, trace.Wrap(err)
		}
		return &MatchExpression{matcher: matcher}, nil
	}
	prefix, value, suffix := match[1], match[2], match[3]
	matcher, err := parse(value)
	if err != nil {
		return nil, trace.Wrap(err)
	}
	// A matcher expression must be boolean-valued (match / not_match).
	if matcher.Kind() != reflect.Bool {
		return nil, trace.BadParameter("%q does not evaluate to a boolean", value)
	}
	// NOTE(review): unlike NewExpression, prefix/suffix whitespace is NOT
	// trimmed here — confirm the asymmetry is intentional.
	return &MatchExpression{
		prefix:  prefix,
		suffix:  suffix,
		matcher: matcher,
	}, nil
}
// Match reports whether in matches this expression: the literal prefix and
// suffix must surround the input, and the remaining middle part must satisfy
// the compiled matcher.
func (e *MatchExpression) Match(in string) bool {
	if !strings.HasPrefix(in, e.prefix) || !strings.HasSuffix(in, e.suffix) {
		return false
	}
	in = strings.TrimPrefix(in, e.prefix)
	in = strings.TrimSuffix(in, e.suffix)
	ctx := EvaluateContext{
		MatcherInput: in,
	}
	// Ignore err as there's no variable interpolation for now,
	// and thus `Evaluate` cannot error for matchers.
	result, _ := e.matcher.Evaluate(ctx)
	// NewMatcher guarantees a bool-kind matcher; anything else is a bug.
	b, ok := result.(bool)
	if !ok {
		panic(fmt.Sprintf("unexpected match expression evaluation result type %T (this is a bug)", result))
	}
	return b
}
const (
	// LiteralNamespace is a namespace for Expressions that always return
	// static literal values.
	LiteralNamespace = "literal"
	// EmailLocalFnName is a name for email.local function
	EmailLocalFnName = "email.local"
	// RegexpMatchFnName is a name for regexp.match function.
	RegexpMatchFnName = "regexp.match"
	// RegexpNotMatchFnName is a name for regexp.not_match function.
	RegexpNotMatchFnName = "regexp.not_match"
	// RegexpReplaceFnName is a name for regexp.replace function.
	RegexpReplaceFnName = "regexp.replace"
)

// parse uses predicate in order to parse the expression.
// It wires the supported function names and identifier builders into the
// predicate parser, parses exprStr, and validates the resulting AST.
func parse(exprStr string) (Expr, error) {
	parser, err := predicate.NewParser(predicate.Def{
		GetIdentifier: buildVarExpr,
		GetProperty:   buildVarExprFromProperty,
		Functions: map[string]interface{}{
			EmailLocalFnName:     buildEmailLocalExpr,
			RegexpReplaceFnName:  buildRegexpReplaceExpr,
			RegexpMatchFnName:    buildRegexpMatchExpr,
			RegexpNotMatchFnName: buildRegexpNotMatchExpr,
		},
	})
	if err != nil {
		return nil, trace.Wrap(err)
	}
	result, err := parser.Parse(exprStr)
	if err != nil {
		return nil, trace.BadParameter("failed to parse: %q, error: %s", exprStr, err)
	}
	// The parser returns interface{}; anything that is not our Expr AST
	// means an unsupported construct slipped through.
	expr, ok := result.(Expr)
	if !ok {
		return nil, trace.BadParameter("failed to parse: %q, unexpected parser result type %T", exprStr, result)
	}
	if err := validateExpr(expr); err != nil {
		return nil, trace.Wrap(err)
	}
	return expr, nil
}
|
package user
import (
"context"
"encoding/json"
"net/url"
"strings"
kratosClient "github.com/ory/kratos-client-go/client"
"github.com/ory/kratos-client-go/client/admin"
"github.com/ory/kratos-client-go/models"
"github.com/thoas/go-funk"
)
// KratosClientV5 wraps the ory kratos v5 admin API with helpers for linking
// users ("identities") to customers via the Tenants trait.
type KratosClientV5 interface {
	GetUserByEmail(email string) (KratosUser, error)
	GetUsers() ([]KratosUser, error)
	GetUser(id string) (KratosUser, error)
	UpdateUser(user KratosUser) error
	AddCustomerToUser(user KratosUser, customerID string) error
	AddCustomerToUserByUserID(userID string, customerID string) error
	AddCustomerToUserByEmail(email string, customerID string) error
	RemoveCustomerToUserByEmail(email string, customerID string) error
}

// kratosClientV5 is the concrete KratosClientV5 backed by the generated client.
type kratosClientV5 struct {
	client *kratosClient.OryKratos
}
// NewKratosClientV5 builds a KratosClientV5 that talks to the kratos admin API
// reachable at clientURL.
func NewKratosClientV5(clientURL *url.URL) KratosClientV5 {
	transportConfig := kratosClient.DefaultTransportConfig().
		WithSchemes([]string{clientURL.Scheme}).
		WithHost(clientURL.Host).
		WithBasePath(clientURL.Path)
	httpClient := kratosClient.NewHTTPClientWithConfig(nil, transportConfig)
	return kratosClientV5{client: httpClient}
}
// AddCustomerToUserByUserID looks the user up by ID and links the customer to it.
func (c kratosClientV5) AddCustomerToUserByUserID(userID string, customerID string) error {
	user, err := c.GetUser(userID)
	if err != nil {
		return err
	}
	return c.AddCustomerToUser(user, customerID)
}
// RemoveCustomerToUserByEmail looks the user up by email and unlinks the customer.
func (c kratosClientV5) RemoveCustomerToUserByEmail(email string, customerID string) error {
	user, err := c.GetUserByEmail(email)
	if err != nil {
		return err
	}
	return c.DeleteCustomerToUser(user, customerID)
}
// AddCustomerToUserByEmail looks the user up by email and links the customer to it.
func (c kratosClientV5) AddCustomerToUserByEmail(email string, customerID string) error {
	user, err := c.GetUserByEmail(email)
	if err != nil {
		return err
	}
	return c.AddCustomerToUser(user, customerID)
}
// AddCustomerToUser appends customerID to the user's tenant list and persists
// the change. It fails with ErrCustomerUserConnectionAlreadyExists when the
// link already exists.
func (c kratosClientV5) AddCustomerToUser(kratosUser KratosUser, customerID string) error {
	if UserCustomersContains(kratosUser, customerID) {
		return ErrCustomerUserConnectionAlreadyExists
	}
	kratosUser.Traits.Tenants = append(kratosUser.Traits.Tenants, customerID)
	return c.UpdateUser(kratosUser)
}
// DeleteCustomerToUser removes every occurrence of customerID from the user's
// tenant list and persists the change. ErrNotFound is returned when the link
// does not exist.
func (c kratosClientV5) DeleteCustomerToUser(kratosUser KratosUser, customerID string) error {
	if !UserCustomersContains(kratosUser, customerID) {
		return ErrNotFound
	}
	// Rebuild the tenant list without the customer being removed.
	remaining := make([]string, 0)
	for _, tenantID := range kratosUser.Traits.Tenants {
		if tenantID != customerID {
			remaining = append(remaining, tenantID)
		}
	}
	kratosUser.Traits.Tenants = remaining
	return c.UpdateUser(kratosUser)
}
// UpdateUser persists the user's traits through the kratos admin API.
// Only the traits are written; other identity fields are left untouched.
func (c kratosClientV5) UpdateUser(user KratosUser) error {
	_, err := c.client.Admin.UpdateIdentity(&admin.UpdateIdentityParams{
		ID: user.ID,
		Body: &models.UpdateIdentity{
			Traits: user.Traits,
		},
		Context: context.TODO(),
	})
	return err
}
// convertKratosUserModelPayloadToKratosUser maps a kratos Identity model into
// the local KratosUser type, decoding the free-form traits with a JSON
// round-trip.
func (c kratosClientV5) convertKratosUserModelPayloadToKratosUser(model *models.Identity) (KratosUser, error) {
	var traits KratosUserTraits
	raw, err := json.Marshal(model.Traits)
	if err != nil {
		return KratosUser{}, err
	}
	if err = json.Unmarshal(raw, &traits); err != nil {
		return KratosUser{}, err
	}
	user := KratosUser{
		ID:        string(model.ID),
		SchemaID:  *model.SchemaID,
		SchemaURL: *model.SchemaURL,
		Traits:    traits,
	}
	return user, nil
}
// GetUser fetches a single identity by ID and converts it to a KratosUser.
// A 404 from the API is mapped to ErrNotFound.
func (c kratosClientV5) GetUser(id string) (KratosUser, error) {
	i, err := c.client.Admin.GetIdentity(admin.NewGetIdentityParams().WithID(id))
	if err != nil {
		// This api is horrible :P, guess thats why it has been updated
		// String-matching the error is brittle, but the generated client
		// exposes no typed 404 here.
		if strings.Contains(err.Error(), "404") {
			return KratosUser{}, ErrNotFound
		}
		return KratosUser{}, err
	}
	return c.convertKratosUserModelPayloadToKratosUser(i.Payload)
}
// GetUsers lists all identities and converts them to KratosUsers.
// Identities whose traits fail to decode are skipped (best effort).
func (c kratosClientV5) GetUsers() ([]KratosUser, error) {
	kratosUsers := make([]KratosUser, 0)
	// Possible issue with pagination, not clear
	items, err := c.client.Admin.ListIdentities(nil)
	if err != nil {
		return kratosUsers, err
	}
	for _, item := range items.Payload {
		kratosUser, err := c.convertKratosUserModelPayloadToKratosUser(item)
		if err != nil {
			// Deliberate best-effort: one malformed identity must not hide the rest.
			continue
		}
		kratosUsers = append(kratosUsers, kratosUser)
	}
	return kratosUsers, nil
}
// GetUserByEmail fetches all identities and returns the one whose email trait
// matches email; ErrNotFound is returned when no identity matches.
func (c kratosClientV5) GetUserByEmail(email string) (KratosUser, error) {
	users, err := c.GetUsers()
	if err != nil {
		return KratosUser{}, err
	}
	return GetUserFromListByEmail(users, email)
}
// GetUserFromListByEmail returns the user from the given list whose email
// trait matches email, or ErrNotFound when no user matches.
// A plain typed loop replaces the reflection-based funk.Find.
func GetUserFromListByEmail(users []KratosUser, email string) (KratosUser, error) {
	for _, user := range users {
		if user.Traits.Email == email {
			return user, nil
		}
	}
	return KratosUser{}, ErrNotFound
}
// UserCustomersContains reports whether the given customer ID is present in
// the user's tenant list.
func UserCustomersContains(user KratosUser, customerID string) bool {
	return funk.Contains(user.Traits.Tenants, func(tenantID string) bool {
		return tenantID == customerID
	})
}
|
package db
import (
"github.com/kapitanov/natandb/pkg/model"
"github.com/kapitanov/natandb/pkg/storage"
"sort"
"strings"
)
// transaction couples an engine with a commit/rollback decision.
type transaction struct {
	Engine       *engine // owning engine; provides the WAL and in-memory model
	ShouldCommit bool    // set by Commit; decides commit vs rollback in Close
}

// newTransaction creates a transaction bound to the given engine.
// The transaction rolls back unless Commit is called before Close.
func newTransaction(engine *engine) *transaction {
	tx := &transaction{
		Engine:       engine,
		ShouldCommit: false,
	}
	return tx
}

// Commit marks transaction for committing
func (t *transaction) Commit() {
	t.ShouldCommit = true
}

// Close terminates a transaction:
// it commits the WAL when Commit was called, rolls it back otherwise, and
// releases the engine's transaction slot when the WAL operation succeeds.
func (t *transaction) Close() error {
	var err error
	if t.ShouldCommit {
		err = t.Engine.WAL.CommitTx()
	} else {
		err = t.Engine.WAL.RollbackTx()
	}
	if err != nil {
		return err
	}
	t.Engine.EndTx()
	return nil
}
// List returns paged list of DB keys (with values)
// Optionally list might be filtered by key prefix
// If data version is changed, a ErrDataOutOfDate error is returned
// ErrDataOutOfDate is not returned if version parameter contains zero
func (t *transaction) List(prefix Key, skip uint, limit uint, version uint64) (*PagedNodeList, error) {
	if version != 0 && version != t.Engine.Model.LastChangeID {
		return nil, ErrDataOutOfDate
	}
	// TODO dirty and inefficient implementation
	array := make([]*Node, 0)
	for k, n := range t.Engine.Model.NodesMap {
		if prefix == "" || strings.HasPrefix(k, string(prefix)) {
			array = append(array, mapNode(n))
		}
	}
	cmp := func(i, j int) bool {
		return strings.Compare(string(array[i].Key), string(array[j].Key)) < 0
	}
	sort.Slice(array, cmp)
	// Clamp the page window [lowIndex, highIndex) to the slice bounds.
	// The original code sliced array[lowIndex:count], which returned the
	// wrong window whenever skip > 0, and could compute lowIndex = -1
	// (panic) on an empty result set.
	lowIndex := int(skip)
	if lowIndex > len(array) {
		lowIndex = len(array)
	}
	highIndex := lowIndex + int(limit)
	if highIndex > len(array) {
		highIndex = len(array)
	}
	list := &PagedNodeList{
		Nodes:      array[lowIndex:highIndex],
		Version:    t.Engine.Model.LastChangeID,
		TotalCount: uint(len(array)),
	}
	return list, nil
}
// GetVersion returns current data version
func (t *transaction) GetVersion() uint64 {
	return t.Engine.Model.LastChangeID
}

// Get gets a node value by its key
// If specified node doesn't exist, a ErrNoSuchKey error is returned
func (t *transaction) Get(key Key) (*Node, error) {
	node := t.Engine.Model.GetNode(string(key))
	if node == nil {
		return nil, ErrNoSuchKey
	}
	return mapNode(node), nil
}
// Set sets a node value, rewriting its value if node already exists
// If specified node doesn't exist, it will be created.
// Passing an empty (or nil) value list drops the node entirely.
func (t *transaction) Set(key Key, values []Value) (*Node, error) {
	// An empty value list means "delete the node" (len(nil) == 0, so the
	// explicit nil check was redundant).
	if len(values) == 0 {
		node := t.Engine.Model.GetNode(string(key))
		if node == nil {
			// No need to drop node if it doesn't exist
			return &Node{
				Key:     key,
				Values:  make([]Value, 0),
				Version: t.Engine.Model.LastChangeID,
			}, nil
		}
		// Drop existing node
		err := t.write(storage.WALRemoveKey, node.Key, nil)
		if err != nil {
			return nil, err
		}
		return mapNode(node), nil
	}
	node := t.Engine.Model.GetOrCreateNode(string(key))
	changeCount := len(node.Values) + len(values)
	var err error
	if changeCount == 1 {
		// Optimistic path: a brand-new node receiving its single value.
		err = t.write(storage.WALAddValue, node.Key, values[0])
	} else {
		// First, drop all node's values
		for _, v := range node.Values {
			err = t.write(storage.WALRemoveValue, node.Key, v)
			if err != nil {
				return nil, err
			}
		}
		// Then add all new values
		for _, v := range values {
			err = t.write(storage.WALAddValue, node.Key, v)
			if err != nil {
				return nil, err
			}
		}
	}
	// BUG FIX: the optimistic path's write error was previously assigned but
	// never checked, silently dropping WAL failures.
	if err != nil {
		return nil, err
	}
	return mapNode(node), nil
}
// AddValue appends value to the node stored under key, creating the node when
// it does not exist yet. Duplicate values are allowed.
func (t *transaction) AddValue(key Key, value Value) (*Node, error) {
	node := t.Engine.Model.GetOrCreateNode(string(key))
	if err := t.write(storage.WALAddValue, node.Key, value); err != nil {
		return nil, err
	}
	return mapNode(node), nil
}
// AddUniqueValue appends value to the node stored under key, creating the node
// when it does not exist yet. ErrDuplicateValue is returned when the node
// already contains the value.
func (t *transaction) AddUniqueValue(key Key, value Value) (*Node, error) {
	node := t.Engine.Model.GetOrCreateNode(string(key))
	if node.Contains(value) {
		return nil, ErrDuplicateValue
	}
	if err := t.write(storage.WALAddValue, node.Key, value); err != nil {
		return nil, err
	}
	return mapNode(node), nil
}
// RemoveValue defines an "remove value" operation
// If specified node doesn't exist, a ErrNoSuchKey error is returned
// If specified value doesn't exist within a node, a ErrNoSuchValue error is returned
func (t *transaction) RemoveValue(key Key, value Value) (*Node, error) {
	node := t.Engine.Model.GetNode(string(key))
	if node == nil {
		return nil, ErrNoSuchKey
	}
	// Only the first matching value is removed.
	for _, v := range node.Values {
		if v.Equal(value) {
			// If node contained only one value - node should be removed
			var err error
			if len(node.Values) == 1 {
				err = t.write(storage.WALRemoveKey, node.Key, nil)
			} else {
				err = t.write(storage.WALRemoveValue, node.Key, value)
			}
			if err != nil {
				return nil, err
			}
			return mapNode(node), nil
		}
	}
	return nil, ErrNoSuchValue
}
// RemoveAllValues defines an "remove value" operation
// If specified node doesn't exist, a ErrNoSuchKey error is returned
// If node contains specified value multiple times, all values are removed
// If specified value doesn't exist within a node, a ErrNoSuchValue error is returned
func (t *transaction) RemoveAllValues(key Key, value Value) (*Node, error) {
	node := t.Engine.Model.GetNode(string(key))
	if node == nil {
		return nil, ErrNoSuchKey
	}
	// Generate change list
	count := 0
	for i := 0; i < len(node.Values); i++ {
		v := node.Values[i]
		if v.Equal(value) {
			err := t.write(storage.WALRemoveValue, node.Key, value)
			if err != nil {
				return nil, err
			}
			// The i-- relies on t.write (via Model.Apply) removing the value
			// from node.Values in place, shifting later elements down.
			// NOTE(review): confirm Apply mutates node.Values synchronously.
			i--
			count++
		}
	}
	// Return an error when trying to remove a non-existing value
	if count == 0 {
		return nil, ErrNoSuchValue
	}
	if len(node.Values) <= 0 {
		// If node is empty after RemoveAllValues operation - just drop entire node
		err := t.write(storage.WALRemoveKey, node.Key, nil)
		if err != nil {
			return nil, err
		}
	}
	return mapNode(node), nil
}
// RemoveKey removes a key completely.
// ErrNoSuchKey is returned when the key does not exist.
func (t *transaction) RemoveKey(key Key) error {
	node := t.Engine.Model.GetNode(string(key))
	if node == nil {
		return ErrNoSuchKey
	}
	return t.write(storage.WALRemoveKey, node.Key, nil)
}
// write appends a single change record to the WAL and immediately applies it
// to the in-memory model, so later reads in the transaction see the change.
func (t *transaction) write(recordType storage.WALRecordType, key string, value model.Value) error {
	record := &storage.WALRecord{
		Type:  recordType,
		Key:   key,
		Value: value,
	}
	if err := t.Engine.WAL.Write(record); err != nil {
		return err
	}
	return t.Engine.Model.Apply(record)
}
// mapNode converts an internal model node into the public Node type.
func mapNode(node *model.Node) *Node {
	mapped := Node{
		Key:     Key(node.Key),
		Version: node.LastChangeID,
		Values:  node.Values,
	}
	return &mapped
}
|
package main
// main is intentionally a no-op: the commented-out fragments below are scratch
// experiments (struct embedding, array value semantics, process inspection)
// kept around for reference.
func main() {
	//r1 := newRect(1, 2, 3, 4, "zhangsan")
	//println(r1.area())
	//println(r1.Base.name)
	//p := new(Human)
	//p.name = "123"
	//p.int = 12
	//fmt.Println(*p)
	//var a [3]int
	//for _, v := range a {
	//	fmt.Println(v)
	//}
	//
	//r := [...]int{2: 1}
	//
	//for _, v := range r {
	//	fmt.Println(v)
	//}
	//
	//fmt.Println(a == r)
	//println(os.Getpid());
	//println(os.Getppid());
	//println(exec.Cmd{exec.Command("ps","aux")})
}
// Base carries a name shared by embedding types.
type Base struct {
	name string
}

// Rect is a rectangle positioned at (x, y) with the given dimensions,
// embedding Base for its name.
type Rect struct {
	x, y          float64
	width, height float64
	Base
}

// area returns the rectangle's surface (width * height).
func (r *Rect) area() float64 {
	return r.width * r.height
}

// newRect constructs a named Rect with the given position and size.
func newRect(x, y, width, height float64, name string) *Rect {
	return &Rect{
		x:      x,
		y:      y,
		width:  width,
		height: height,
		Base:   Base{name: name},
	}
}
// Human demonstrates embedding an unnamed basic type (int) next to a named
// field; the embedded field is accessed as h.int.
type Human struct {
	name string
	int
}
|
package handler
import (
"encoding/json"
"errors"
"net"
"net/http"
"strings"
"code.cloudfoundry.org/lager"
"github.com/rosenhouse/reflex/peer"
)
// PeerList is an http.Handler that returns the current peer snapshot as JSON.
type PeerList struct {
	Logger lager.Logger
	Peers  peer.List
}

// ServeHTTP writes the current peer snapshot as a JSON body.
func (h *PeerList) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	logger := h.Logger.Session("handle-list")
	defer logger.Debug("done")
	snapshot := h.Peers.Snapshot(logger)
	w.Header().Set("Content-Type", "application/json")
	// Encode errors are ignored: at this point the client has the status
	// line and there is nothing useful left to do.
	json.NewEncoder(w).Encode(snapshot)
}
// PeerPost is an http.Handler that registers the caller as a peer, provided
// the caller's source IP falls inside AllowedCIDR.
type PeerPost struct {
	Logger      lager.Logger
	Peers       peer.List
	AllowedCIDR *net.IPNet
}
// encodeError writes a JSON error envelope {"error": msg} to w.
// The caller is expected to have written the status code already.
func encodeError(w http.ResponseWriter, msg string) {
	payload := struct {
		Error string `json:"error"`
	}{Error: msg}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(payload)
}
// ServeHTTP registers the calling host as a peer and echoes back the updated
// peer snapshot. Callers outside AllowedCIDR receive 403.
func (h *PeerPost) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	logger := h.Logger.Session("handle-post")
	defer logger.Debug("done")
	clientIP, err := parseHostIP(r.RemoteAddr) // http server sets r.RemoteAddr to "IP:port"
	if err != nil {
		logger.Error("parse-remote-addr", err, lager.Data{"remote-addr": r.RemoteAddr})
		w.WriteHeader(http.StatusInternalServerError)
		encodeError(w, "cannot parse remote address")
		return
	}
	// Only hosts inside the allowed CIDR may register themselves.
	if !h.AllowedCIDR.Contains(clientIP) {
		logger.Info("peer-not-allowed", lager.Data{"remote-addr": r.RemoteAddr})
		w.WriteHeader(http.StatusForbidden)
		encodeError(w, "source ip not allowed")
		return
	}
	h.Peers.Upsert(logger, clientIP.String())
	snapshot := h.Peers.Snapshot(logger)
	w.Header().Set("Content-Type", "application/json")
	// Encode errors are ignored: the client has the status line already.
	json.NewEncoder(w).Encode(snapshot)
}
// parseHostIP extracts the IP address from a "host:port" string as produced by
// net/http in Request.RemoteAddr. Both IPv4 ("1.2.3.4:80") and bracketed IPv6
// ("[::1]:80") forms are handled; the original code left the brackets in
// place, so net.ParseIP rejected every IPv6 caller.
func parseHostIP(addr string) (net.IP, error) {
	// addr might be an ip6 including :'s, so we need to find the _last_ :
	i := strings.LastIndex(addr, ":")
	if i < 0 {
		return nil, errors.New("unable to parse address")
	}
	// Strip the square brackets net/http puts around IPv6 literals.
	host := strings.TrimSuffix(strings.TrimPrefix(addr[:i], "["), "]")
	ip := net.ParseIP(host)
	if ip == nil {
		return nil, errors.New("cannot parse as ip")
	}
	return ip, nil
}
|
// Declare and initialize a variable of type int with the value of 20. Display
// the _address of_ and _value of_ the variable.
//
// Declare and initialize a pointer variable of type int that points to the last
// variable you just created. Display the _address of_ , _value of_ and the
// _value that the pointer points to_.
package main
import "fmt"
// Add imports.
// main declares an int, prints its address and value, then declares a pointer
// to it and prints the pointer plus the value it points to.
func main() {
	// Declare and initialize an int; show where it lives and what it holds.
	count := 20
	fmt.Println(&count, "*******", count)

	// Point at it; show the pointer's value and the pointed-to value.
	ptr := &count
	fmt.Println(ptr, "**********", *ptr)
}
|
package node
import (
"context"
"fmt"
"log"
"math"
"net"
"net/rpc"
"os"
"os/signal"
"strconv"
"syscall"
"time"
"github.com/labstack/echo/v4"
"github.com/xmliszt/e-safe/config"
"github.com/xmliszt/e-safe/pkg/message"
"github.com/xmliszt/e-safe/pkg/secret"
"github.com/xmliszt/e-safe/util"
)
// Node contains all the variables that are necessary to manage a node
type Node struct {
	IsCoordinator       bool           `validate:"required"`
	Pid                 int            `validate:"gte=0"`    // Node ID
	Ring                []int          `validate:"required"` // Ring structure of nodes
	RpcMap              map[int]string `validate:"required"` // Map node ID to their receiving address
	HeartBeatTable      map[int]bool   // Liveness flag per node ID
	VirtualNodeLocation []int          // Hash locations of virtual nodes on the ring
	VirtualNodeMap      map[int]string // Hash location -> "pid-i" virtual node name
	Router              *echo.Echo     // REST router (started via startRouter)
	KillSignal          chan os.Signal // For signalling shutdown of router server
}
// Start is the main function that starts the entire program:
// it registers with the Locksmith, creates this node's virtual nodes,
// optionally re-distributes data, then serves RPC requests (blocking).
func Start(nodeID int) {
	config, err := config.GetConfig()
	if err != nil {
		log.Fatal(err)
	}
	// The node's RPC port is the Locksmith base port offset by its ID.
	address, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("0.0.0.0:%d", config.ConfigLocksmith.Port+nodeID))
	if err != nil {
		log.Fatal(err)
	}
	inbound, err := net.ListenTCP("tcp", address)
	if err != nil {
		log.Fatal(err)
	}
	node := &Node{
		IsCoordinator:       false,
		Pid:                 nodeID,
		RpcMap:              make(map[int]string),
		VirtualNodeLocation: make([]int, 0),
		VirtualNodeMap:      make(map[int]string),
		HeartBeatTable:      make(map[int]bool),
		KillSignal:          make(chan os.Signal, 1),
	}
	signal.Notify(node.KillSignal, syscall.SIGTERM)
	err = node.signalNodeStart() // Send start signal to Locksmith
	if err != nil {
		log.Fatal(err)
	}
	err = node.createVirtualNodes() // Create virtual nodes
	if err != nil {
		log.Fatal(err)
	}
	// If the number of nodes present (excluding Locksmith) is greater than replication factor
	// Then start data re-distribution
	if len(node.RpcMap)-1 > config.ConfigNode.ReplicationFactor+1 {
		log.Printf("Node %d starts data re-distribution!\n", node.Pid)
		err := node.updateData() // Update data
		if err != nil {
			log.Fatal(err)
		}
	}
	// Start RPC server
	log.Printf("Node %d listening on: %v\n", node.Pid, address)
	err = rpc.Register(node)
	if err != nil {
		log.Fatal(err)
	}
	// Blocks, serving RPC requests until the listener closes.
	rpc.Accept(inbound)
}
// updateData grabs data from the next clockwise node;
// for the replicated data, it will grab from the previous nodes.
func (n *Node) updateData() error {
	config, err := config.GetConfig()
	if err != nil {
		return err
	}
	// Re-distribute data for every virtual node owned by this physical node.
	for i := 1; i <= config.VirtualNodesCount; i++ {
		virtualNode := strconv.Itoa(n.Pid) + "-" + strconv.Itoa(i)
		ulocation, e := util.GetHash(virtualNode)
		location := int(ulocation)
		if e != nil {
			return e
		}
		var nextPhysicalNodeID int
		var prevVirtualNodeLocation int
		ownLocationIdx := n.getVirtualLocationIndex(location)
		var nextVirtualNodeName string
		// Walk the ring clockwise to find the next virtual node that belongs
		// to a different physical node.
		idx := ownLocationIdx + 1
		for {
			if idx == len(n.VirtualNodeLocation) {
				idx = 0 // wrap around the ring
			}
			loc := n.VirtualNodeLocation[idx]
			physicalNodeID, err := getPhysicalNodeID(n.VirtualNodeMap[loc])
			if err != nil {
				return err
			}
			if physicalNodeID == n.Pid {
				idx++
			} else {
				nextPhysicalNodeID = physicalNodeID
				// The previous ring location (wrapping backwards) bounds the
				// key range this virtual node owns.
				if ownLocationIdx-1 < 0 {
					prevVirtualNodeLocation = n.VirtualNodeLocation[len(n.VirtualNodeLocation)-int(math.Abs(float64(ownLocationIdx-1)))]
				} else {
					prevVirtualNodeLocation = n.VirtualNodeLocation[ownLocationIdx-1]
				}
				nextVirtualNodeName = n.VirtualNodeMap[loc]
				break
			}
		}
		// grab original data from the next node
		originalSecretMigrationRequest := &message.Request{
			From: n.Pid,
			To:   nextPhysicalNodeID,
			Code: message.FETCH_ORIGINAL_SECRETS,
			Payload: map[string]interface{}{
				"range":  []int{prevVirtualNodeLocation, location},
				"delete": true, // If true, the target node will delete the data after sending
			},
		}
		var originalSecretMigrationReply message.Reply
		err = message.SendMessage(n.RpcMap[nextPhysicalNodeID], "Node.GetSecrets", originalSecretMigrationRequest, &originalSecretMigrationReply)
		if err != nil {
			return err
		}
		fetchedSecrets := originalSecretMigrationReply.Payload.(map[string]*secret.Secret)
		log.Printf("Virtual Node %s fetched original secrets from Virtual Node %s: %v\n", virtualNode, nextVirtualNodeName, fetchedSecrets)
		// put secret to itself: update in place, fall back to insert
		for k, v := range fetchedSecrets {
			err := secret.UpdateSecret(n.Pid, k, v)
			if err != nil {
				err := secret.PutSecret(n.Pid, k, v)
				if err != nil {
					return err
				}
			}
		}
		// Get replica from previous nodes using RPC
		replicationLocation, err := n.getReplicationLocations(location)
		if err != nil {
			return err
		}
		for _, slice := range replicationLocation {
			nodeID, from, to := slice[0], slice[1], slice[2]
			replicaSecretMigrationRequest := &message.Request{
				From: n.Pid,
				To:   nodeID,
				Code: message.FETCH_REPLICA_SECRETS,
				Payload: map[string]interface{}{
					"range":  []int{from, to},
					"delete": false, // if false, the target node will retain the data after sending
				},
			}
			var replicaSecretMigrationReply message.Reply
			err = message.SendMessage(n.RpcMap[nodeID], "Node.GetSecrets", replicaSecretMigrationRequest, &replicaSecretMigrationReply)
			if err != nil {
				return err
			}
			// BUG FIX: read the replica payload from replicaSecretMigrationReply;
			// the original read originalSecretMigrationReply again, discarding
			// every replica fetched over RPC.
			fetchedReplicas := replicaSecretMigrationReply.Payload.(map[string]*secret.Secret)
			log.Printf("Virtual Node %s fetched replica secrets from Virtual Node %s: %v\n", virtualNode, n.VirtualNodeMap[to], fetchedReplicas)
			// put secret to itself: update in place, fall back to insert
			for k, v := range fetchedReplicas {
				err := secret.UpdateSecret(n.Pid, k, v)
				if err != nil {
					err := secret.PutSecret(n.Pid, k, v)
					if err != nil {
						return err
					}
				}
			}
		}
	}
	return nil
}
// signalNodeStart sends a signal to Locksmith server that the node has started;
// it is for Locksmith server to respond with the current RPC map.
func (n *Node) signalNodeStart() error {
	config, err := config.GetConfig()
	if err != nil {
		return err
	}
	request := &message.Request{
		From:    n.Pid,
		To:      0, // the Locksmith always has ID 0
		Code:    message.SIGNAL_START,
		Payload: nil,
	}
	var reply message.Reply
	err = message.SendMessage(fmt.Sprintf("localhost:%d", config.ConfigLocksmith.Port), "LockSmith.SignalStart", request, &reply)
	if err != nil {
		return err
	}
	n.RpcMap = reply.Payload.(map[int]string)
	log.Printf("Node %d RPC map updated: %+v\n", n.Pid, n.RpcMap)
	// Relay updated RPC map to others
	for pid, address := range n.RpcMap {
		if pid == n.Pid || pid == 0 {
			continue // skip self and the Locksmith
		}
		request = &message.Request{
			From:    n.Pid,
			To:      pid,
			Code:    message.UPDATE_RPC_MAP,
			Payload: n.RpcMap,
		}
		err = message.SendMessage(address, "Node.UpdateRpcMap", request, &reply)
		if err != nil {
			// Best effort: an unreachable peer is only logged.
			log.Println(err)
		}
	}
	return nil
}
// createVirtualNodes hashes VirtualNodesCount names of the form "<pid>-<i>"
// onto the ring, registers them with the Locksmith, stores the resulting
// ring state (map + sorted locations) locally, and relays it together with
// the RPC map to all other nodes. Relay failures are logged, not fatal.
func (n *Node) createVirtualNodes() error {
	conf, err := config.GetConfig()
	if err != nil {
		return err
	}
	names := make(map[int]string)
	locations := make([]int, 0, conf.VirtualNodesCount)
	for i := 1; i <= conf.VirtualNodesCount; i++ {
		vName := strconv.Itoa(n.Pid) + "-" + strconv.Itoa(i)
		uloc, hashErr := util.GetHash(vName)
		if hashErr != nil {
			return hashErr
		}
		loc := int(uloc)
		names[loc] = vName
		locations = append(locations, loc)
	}
	createReq := &message.Request{
		From: n.Pid,
		To:   0,
		Code: message.CREATE_VIRTUAL_NODE,
		Payload: map[string]interface{}{
			"virtualNodeMap":      names,
			"virtualNodeLocation": locations,
		},
	}
	var reply message.Reply
	if err = message.SendMessage(n.RpcMap[0], "LockSmith.CreateVirtualNodes", createReq, &reply); err != nil {
		return err
	}
	payload := reply.Payload.(map[string]interface{})
	n.VirtualNodeMap = payload["virtualNodeMap"].(map[int]string)
	n.VirtualNodeLocation = payload["virtualNodeLocation"].([]int)
	log.Printf("Node %d has created virtual nodes: %+v | %+v\n", n.Pid, n.VirtualNodeLocation, n.VirtualNodeMap)
	// Relay updated virtual nodes to others
	for pid, address := range n.RpcMap {
		if pid == n.Pid || pid == 0 {
			continue
		}
		relayReq := &message.Request{
			From: n.Pid,
			To:   pid,
			Code: message.UPDATE_VIRTUAL_NODES,
			Payload: map[string]interface{}{
				"virtualNodeMap":      n.VirtualNodeMap,
				"virtualNodeLocation": n.VirtualNodeLocation,
				"rpcMap":              n.RpcMap,
			},
		}
		if relayErr := message.SendMessage(address, "Node.UpdateVirtualNodes", relayReq, &reply); relayErr != nil {
			log.Println(relayErr)
		}
	}
	return nil
}
// startRouter boots the REST server on the configured port in a background
// goroutine, blocks until a kill signal arrives, then shuts the server down
// gracefully with a 5-second deadline.
func (n *Node) startRouter() {
	n.Router = n.getRouter()
	conf, err := config.GetConfig()
	if err != nil {
		log.Fatal(err)
	}
	go func() {
		// Start blocks until the server stops; a non-nil error here just
		// means the server was closed.
		if startErr := n.Router.Start(fmt.Sprintf(":%d", conf.ConfigServer.Port)); startErr != nil {
			log.Printf("Node %d REST server closed!\n", n.Pid)
		}
	}()
	<-n.KillSignal // block until a kill signal is received
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err = n.Router.Shutdown(ctx); err != nil {
		log.Fatal(err)
	}
}
// sendEventualRepMsg relays an eventual-consistency replication request for
// key/secret to the next virtual node in the relay chain. rf counts the
// replications still to perform; the next hop lives at index
// ReplicationFactor-rf of relayNodes.
func (n *Node) sendEventualRepMsg(rf int, key string, secret secret.Secret, relayNodes []int) error {
	conf, err := config.GetConfig()
	if err != nil {
		log.Printf("Node %d is unable to relay secret deletion to next node: %s\n", n.Pid, err)
		return err
	}
	hopLoc := relayNodes[conf.ConfigNode.ReplicationFactor-rf]
	hopID, err := getPhysicalNodeID(n.VirtualNodeMap[hopLoc])
	if err != nil {
		log.Printf("Node %d is unable to relay secret deletion to next node: %s\n", n.Pid, err)
		return err
	}
	req := &message.Request{
		From: n.Pid,
		To:   hopID,
		Code: message.EVENTUAL_STORE,
		Payload: map[string]interface{}{
			"rf":     rf,
			"key":    key,
			"secret": secret,
			"nodes":  relayNodes,
		},
	}
	var reply message.Reply
	if err = message.SendMessage(n.RpcMap[hopID], "Node.PerformEventualReplication", req, &reply); err != nil {
		return err
	}
	return nil
}
// checkHeartbeat reports whether the node identified by pid is currently
// marked alive in the heartbeat table (a missing entry reads as false).
func (n *Node) checkHeartbeat(pid int) bool {
	alive := n.HeartBeatTable[pid]
	return alive
}
// sendStrictRepMsg relays a strict-consistency replication request for
// key/value to the next virtual node in the relay chain. rf counts the
// replications still to perform; the next hop lives at index
// ReplicationFactor-rf of relayNodes. Returns an error when config lookup,
// node-ID resolution, or the RPC itself fails.
func (n *Node) sendStrictRepMsg(rf int, key string, value secret.Secret, relayNodes []int) error {
	config, err := config.GetConfig()
	if err != nil {
		log.Printf("Node %d is unable to relay strict consistency to next node: %s\n", n.Pid, err)
		return err
	}
	nextNodeLoc := relayNodes[config.ConfigNode.ReplicationFactor-rf]
	nextPhysicalNodeID, err := getPhysicalNodeID(n.VirtualNodeMap[nextNodeLoc])
	if err != nil {
		log.Printf("Node %d is unable to relay strict consistency to next node: %s\n", n.Pid, err)
		return err
	}
	nextNodeAddr := n.RpcMap[nextPhysicalNodeID]
	request := &message.Request{
		From: n.Pid,
		To:   nextPhysicalNodeID,
		Code: message.STRICT_STORE,
		Payload: map[string]interface{}{
			"rf":     rf,
			"key":    key,
			"secret": value,
			"nodes":  relayNodes,
		},
	}
	var reply message.Reply
	err = message.SendMessage(nextNodeAddr, "Node.StrictReplication", request, &reply)
	log.Println("This is the reply from strict", reply)
	if err != nil {
		log.Printf("Node %d strict consistency error: %s\n", n.Pid, err)
		return err
	}
	// Bug fix: the original did an unchecked type assertion on reply.Payload
	// (panics on a nil or differently-typed payload) and then returned nil on
	// both the success and failure branches, making the check dead code.
	// Use comma-ok assertions and at least surface a reported failure in the
	// logs; the nil return on failure is kept so the caller-visible contract
	// is unchanged.
	if replyPayload, ok := reply.Payload.(map[string]interface{}); ok {
		if success, ok := replyPayload["success"].(bool); ok && !success {
			log.Printf("Node %d strict replication reported failure for key %q\n", n.Pid, key)
		}
	}
	return nil
}
|
// Copyright 2020 New Relic Corporation. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package collect
import (
"context"
"github.com/newrelic/nri-vsphere/internal/config"
"github.com/newrelic/nri-vsphere/internal/performance"
"github.com/vmware/govmomi/vim25/mo"
"github.com/vmware/govmomi/vim25/types"
)
// Hosts collects HostSystem data (and, when enabled, tags and realtime
// performance metrics) for every datacenter in the config.
// Reference: http://pubs.vmware.com/vsphere-60/topic/com.vmware.wssdk.apiref.doc/vim.HostSystem.html
func Hosts(config *config.Config) {
	ctx := context.Background()
	m := config.ViewManager
	propertiesToRetrieve := []string{"summary", "overallStatus", "config", "network", "vm", "runtime", "parent", "datastore"}
	for i, dc := range config.Datacenters {
		// Bug fix: the per-datacenter work runs inside a closure so the
		// container-view cleanup (defer) fires at the end of EACH iteration.
		// The original deferred directly inside the loop, which kept every
		// view alive until Hosts returned.
		func() {
			logger := config.Logrus.WithField("datacenter", dc.Datacenter.Name)
			cv, err := m.CreateContainerView(ctx, dc.Datacenter.Reference(), []string{HOST}, true)
			if err != nil {
				logger.WithError(err).Error("failed to create HostSystem container view")
				return
			}
			defer func() {
				if err := cv.Destroy(ctx); err != nil {
					logger.WithError(err).Error("error while cleaning up host container view")
				}
			}()
			var hosts []mo.HostSystem
			err = cv.Retrieve(ctx, []string{HOST}, propertiesToRetrieve, &hosts)
			if err != nil {
				logger.WithError(err).Error("failed to retrieve HostSystems")
				return
			}
			if config.TagCollectionEnabled() {
				_, err = config.TagCollector.FetchTagsForObjects(hosts)
				if err != nil {
					// Bug fix: the original passed err twice (once via
					// WithError and once as a stray Warn argument).
					logger.WithError(err).Warn("failed to retrieve tags for hosts")
				} else {
					logger.WithField("seconds", config.Uptime()).Debug("hosts tags collected")
				}
			}
			var hostsRefs []types.ManagedObjectReference
			for j, host := range hosts {
				config.Datacenters[i].Hosts[host.Self] = &hosts[j]
				// filtering here only affects performance metrics collection
				if config.TagFilteringEnabled() && !config.TagCollector.MatchObjectTags(host.Reference()) {
					continue
				}
				hostsRefs = append(hostsRefs, host.Self)
			}
			if config.PerfMetricsCollectionEnabled() {
				metricsToCollect := config.PerfCollector.MetricDefinition.Host
				collectedData := config.PerfCollector.Collect(hostsRefs, metricsToCollect, performance.RealTimeInterval)
				dc.AddPerfMetrics(collectedData)
				logger.WithField("seconds", config.Uptime()).Debug("hosts perf metrics collected")
			}
		}()
	}
}
|
package main
import (
context "context"
"flag"
"fmt"
"io"
"log"
"net"
"strconv"
"sync"
pb "github.com/qapquiz/cheirokmeta/examples/platformer/server/platformer"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
// platformerServer implements the Platformer gRPC service. It tracks every
// connected player and fans server events out to per-player stream channels.
type platformerServer struct {
	LatestPlayerID   int32                            // next ID handed out by Connect
	PlayersMapWithID map[int32]*pb.PlayerData         // players keyed by ID
	Players          []*pb.PlayerData                 // players in join order (indexed by ID in Stream)
	Broadcast        chan pb.StreamResponse           // events fanned out by systemBroadcast
	PlayerStreams    map[int32]chan pb.StreamResponse // per-player outgoing queues
	playerStreamsMtx sync.RWMutex                     // guards PlayerStreams
}
var (
	// port is the TCP port the gRPC server listens on (flag: -port).
	port = flag.Int("port", 5050, "The server port")
)
// Connect registers a new player under the next sequential ID, announces the
// join on the broadcast channel, and returns the created player together with
// the current player list.
//
// NOTE(review): LatestPlayerID, Players and PlayersMapWithID are mutated here
// without holding any lock, and gRPC handlers run concurrently — confirm the
// intended synchronization strategy (playerStreamsMtx only guards
// PlayerStreams).
func (s *platformerServer) Connect(ctx context.Context, connectRequest *pb.ConnectRequest) (*pb.ConnectResponse, error) {
	// New players always spawn at the origin.
	player := &pb.PlayerData{
		Id:   s.LatestPlayerID,
		Name: connectRequest.GetName(),
		Position: &pb.PlayerPosition{
			X: 0.0,
			Y: 0.0,
		},
	}
	s.Players = append(s.Players, player)
	s.PlayersMapWithID[s.LatestPlayerID] = player
	// Announce the join to all streams; a fresh PlayerData value is sent so
	// broadcast consumers do not alias the stored one.
	s.Broadcast <- pb.StreamResponse{
		Event: &pb.StreamResponse_Player{
			Player: &pb.PlayerData{
				Id:   s.LatestPlayerID,
				Name: connectRequest.GetName(),
				Position: &pb.PlayerPosition{
					X: 0.0,
					Y: 0.0,
				},
			},
		},
	}
	s.LatestPlayerID = s.LatestPlayerID + 1
	return &pb.ConnectResponse{
		Player:       player,
		IsSuccess:    true,
		OtherPlayers: s.Players,
	}, nil
}
// Stream handles a player's bidirectional stream: it reads position updates
// from the client, applies them to the shared player state, and rebroadcasts
// them; a background goroutine pumps broadcasts back to this player.
// The player ID is taken from the "player-id" metadata header.
func (s *platformerServer) Stream(streamServer pb.Platformer_StreamServer) error {
	const playerIDHeader = "player-id"
	md, ok := metadata.FromIncomingContext(streamServer.Context())
	if !ok || len(md[playerIDHeader]) == 0 {
		// Bug fix: the original called log.Fatal here, which terminated the
		// entire server process on a single malformed client request.
		// Reject just this stream instead.
		return status.Error(codes.InvalidArgument, "cant get playerID from metadata")
	}
	playerIDString := md[playerIDHeader][0]
	playerID, err := strconv.ParseInt(playerIDString, 10, 32)
	if err != nil {
		// Bug fix: likewise, a non-numeric header must not crash the server.
		return status.Errorf(codes.InvalidArgument, "cant convert PlayerID %q to int: %v", playerIDString, err)
	}
	go s.sendBroadcastsFromServer(streamServer, int32(playerID))
	for {
		req, err := streamServer.Recv()
		if err == io.EOF {
			break
		} else if err != nil {
			return err
		}
		id := req.GetId()
		position := req.GetPosition()
		s.Players[id].Position = position
		s.PlayersMapWithID[id].Position = position
		log.Printf("Receive from id: %v position: %v", req.GetId(), req.GetPosition())
		s.Broadcast <- pb.StreamResponse{
			Event: &pb.StreamResponse_PlayerPositionById{
				PlayerPositionById: &pb.PlayerPositionById{
					Id:       req.GetId(),
					Position: req.GetPosition(),
				},
			},
		}
	}
	// After EOF, wait for the stream context to end before returning its error.
	<-streamServer.Context().Done()
	return streamServer.Context().Err()
}
// systemBroadcast fans every message on the Broadcast channel out to all
// registered player streams. A stream whose buffer is full has the message
// dropped rather than blocking the broadcaster.
func (s *platformerServer) systemBroadcast() {
	for msg := range s.Broadcast {
		s.playerStreamsMtx.Lock()
		for _, playerStream := range s.PlayerStreams {
			select {
			case playerStream <- msg:
				// delivered
			default:
				log.Printf("Client stream full!, dropping message")
			}
		}
		s.playerStreamsMtx.Unlock()
	}
}
// sendBroadcastsFromServer pumps broadcast messages onto one player's gRPC
// stream until the stream's context ends. The per-player channel is opened on
// entry and deregistered/closed on exit.
func (s *platformerServer) sendBroadcastsFromServer(streamServer pb.Platformer_StreamServer, playerID int32) {
	stream := s.openStream(playerID)
	defer s.closeStream(playerID)
	done := streamServer.Context().Done()
	for {
		select {
		case <-done:
			return
		case msg := <-stream:
			// Renamed from the original's `s`, which shadowed the receiver.
			st, ok := status.FromError(streamServer.Send(&msg))
			if !ok {
				continue
			}
			switch st.Code() {
			case codes.OK:
				// delivered
			case codes.Unavailable, codes.Canceled, codes.DeadlineExceeded:
				log.Printf("Player id %d terminated connection", playerID)
			default:
				log.Printf("Failed to send to Player id: %d", playerID)
			}
		}
	}
}
// openStream allocates a buffered response channel for playerID and registers
// it under the stream mutex.
func (s *platformerServer) openStream(playerID int32) chan pb.StreamResponse {
	newStream := make(chan pb.StreamResponse, 100)
	s.playerStreamsMtx.Lock()
	s.PlayerStreams[playerID] = newStream
	s.playerStreamsMtx.Unlock()
	log.Printf("open stream for player id: %d", playerID)
	return newStream
}
// closeStream deregisters and closes playerID's response channel, if present.
func (s *platformerServer) closeStream(playerID int32) {
	s.playerStreamsMtx.Lock()
	stream, registered := s.PlayerStreams[playerID]
	if registered {
		delete(s.PlayerStreams, playerID)
		close(stream)
	}
	s.playerStreamsMtx.Unlock()
	log.Printf("close stream for player id: %d", playerID)
}
// main wires up the platformer gRPC server: it listens on the configured
// port, registers the service, starts the broadcast fan-out goroutine, and
// serves until the listener fails.
func main() {
	flag.Parse()
	listen, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", *port))
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	grpcServer := grpc.NewServer()
	server := &platformerServer{
		LatestPlayerID:   0,
		PlayersMapWithID: make(map[int32]*pb.PlayerData),
		Players:          []*pb.PlayerData{},
		Broadcast:        make(chan pb.StreamResponse, 1000),
		PlayerStreams:    make(map[int32]chan pb.StreamResponse),
	}
	pb.RegisterPlatformerServer(grpcServer, server)
	go server.systemBroadcast()
	// reflection.Register(grpcServer)
	if err := grpcServer.Serve(listen); err != nil {
		// Bug fix: log message typo — was "failed to server".
		log.Fatalf("failed to serve: %v", err)
	}
}
|
package ovs
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/kubeovn/kube-ovn/pkg/ovsdb/ovnnb"
)
// CreateNbGlobal inserts the given NB_Global record via a single OVSDB
// transaction.
func (c *ovnClient) CreateNbGlobal(nbGlobal *ovnnb.NBGlobal) error {
	ops, err := c.ovnNbClient.Create(nbGlobal)
	if err != nil {
		return fmt.Errorf("generate operations for creating nb global: %v", err)
	}
	return c.Transact("nb-global-create", ops)
}
// DeleteNbGlobal removes the singleton NB_Global record.
func (c *ovnClient) DeleteNbGlobal() error {
	nbGlobal, err := c.GetNbGlobal()
	if err != nil {
		return err
	}
	ops, err := c.Where(nbGlobal).Delete()
	if err != nil {
		return err
	}
	return c.Transact("nb-global-delete", ops)
}
// GetNbGlobal fetches the singleton NB_Global row from the cache. It returns
// an error when listing fails or when no row exists.
func (c *ovnClient) GetNbGlobal() (*ovnnb.NBGlobal, error) {
	ctx, cancel := context.WithTimeout(context.Background(), c.Timeout)
	defer cancel()
	// There is only one nb_global row in OVN_Northbound, so matching
	// everything is sufficient.
	matchAll := func(*ovnnb.NBGlobal) bool { return true }
	results := make([]ovnnb.NBGlobal, 0, 1)
	if err := c.WhereCache(matchAll).List(ctx, &results); err != nil {
		return nil, fmt.Errorf("list nbGlobal: %v", err)
	}
	if len(results) == 0 {
		return nil, fmt.Errorf("not found nb_global")
	}
	return &results[0], nil
}
// UpdateNbGlobal writes the given fields of nbGlobal back to the database in
// one transaction.
func (c *ovnClient) UpdateNbGlobal(nbGlobal *ovnnb.NBGlobal, fields ...interface{}) error {
	ops, err := c.Where(nbGlobal).Update(nbGlobal, fields...)
	if err != nil {
		return fmt.Errorf("generate operations for updating nb global: %v", err)
	}
	if err = c.Transact("nb-global-update", ops); err != nil {
		return fmt.Errorf("update nb global: %v", err)
	}
	return nil
}
// SetAzName renames the NB_Global record to azName (the availability-zone
// name), skipping the update when the name already matches.
func (c *ovnClient) SetAzName(azName string) error {
	nbGlobal, err := c.GetNbGlobal()
	if err != nil {
		return fmt.Errorf("get nb global: %v", err)
	}
	if nbGlobal.Name == azName {
		return nil // no need to update
	}
	nbGlobal.Name = azName
	if err = c.UpdateNbGlobal(nbGlobal, &nbGlobal.Name); err != nil {
		return fmt.Errorf("set nb_global az name %s: %v", azName, err)
	}
	return nil
}
// SetUseCtInvMatch sets the NB_Global option use_ct_inv_match to "false".
func (c *ovnClient) SetUseCtInvMatch() error {
	nbGlobal, err := c.GetNbGlobal()
	if err != nil {
		return fmt.Errorf("get nb global: %v", err)
	}
	// Bug fix: Options may be nil on a record that never had options set;
	// writing to a nil map panics.
	if nbGlobal.Options == nil {
		nbGlobal.Options = make(map[string]string, 1)
	}
	nbGlobal.Options["use_ct_inv_match"] = "false"
	if err := c.UpdateNbGlobal(nbGlobal, &nbGlobal.Options); err != nil {
		return fmt.Errorf("set use_ct_inv_match to false, %v", err)
	}
	return nil
}
// SetICAutoRoute toggles OVN interconnection route advertisement/learning.
// When enable is true the blacklist is installed as well; when false both
// options are switched off (replacing the whole Options map either way, as
// the original did).
func (c *ovnClient) SetICAutoRoute(enable bool, blackList []string) error {
	nbGlobal, err := c.GetNbGlobal()
	if err != nil {
		return fmt.Errorf("get nb global: %v", err)
	}
	flag := "false"
	if enable {
		flag = "true"
	}
	options := map[string]string{
		"ic-route-adv":   flag,
		"ic-route-learn": flag,
	}
	if enable {
		options["ic-route-blacklist"] = strings.Join(blackList, ",")
	}
	nbGlobal.Options = options
	if err = c.UpdateNbGlobal(nbGlobal, &nbGlobal.Options); err != nil {
		return fmt.Errorf("enable ovn-ic auto route, %v", err)
	}
	return nil
}
// SetLBCIDR records the service CIDR in the NB_Global option svc_ipv4_cidr,
// used by load balancing.
func (c *ovnClient) SetLBCIDR(serviceCIDR string) error {
	nbGlobal, err := c.GetNbGlobal()
	if err != nil {
		return fmt.Errorf("get nb global: %v", err)
	}
	// Bug fix: Options may be nil; writing to a nil map panics.
	if nbGlobal.Options == nil {
		nbGlobal.Options = make(map[string]string, 1)
	}
	nbGlobal.Options["svc_ipv4_cidr"] = serviceCIDR
	if err := c.UpdateNbGlobal(nbGlobal, &nbGlobal.Options); err != nil {
		return fmt.Errorf("set svc cidr %s for lb, %v", serviceCIDR, err)
	}
	return nil
}
// SetLsDnatModDlDst sets the NB_Global option ls_dnat_mod_dl_dst to the given
// boolean value.
func (c *ovnClient) SetLsDnatModDlDst(enabled bool) error {
	nbGlobal, err := c.GetNbGlobal()
	if err != nil {
		return fmt.Errorf("get nb global: %v", err)
	}
	// Bug fix: Options may be nil; writing to a nil map panics.
	if nbGlobal.Options == nil {
		nbGlobal.Options = make(map[string]string, 1)
	}
	nbGlobal.Options["ls_dnat_mod_dl_dst"] = strconv.FormatBool(enabled)
	if err := c.UpdateNbGlobal(nbGlobal, &nbGlobal.Options); err != nil {
		return fmt.Errorf("set NB_Global option ls_dnat_mod_dl_dst to %v: %v", enabled, err)
	}
	return nil
}
|
// package Handler is a collection of functions that handle requests (endpoints)
package main
import (
"encoding/json"
"fmt"
"net/http"
)
// HandleRoot is the root handler
func HandleRoot(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "Hello, %s", r.URL.Path[1:])
}
// HanldeHome is the home handler
func HandleHome(w http.ResponseWriter, r *http.Request) {
fmt.Fprintf(w, "This is home page")
}
// PostRequest decodes a MetaData JSON payload from the request body and
// echoes the decoded value back; decode failures are reported in the
// response body.
func PostRequest(w http.ResponseWriter, r *http.Request) {
	var metaData MetaData
	// Decode the request body straight into the struct.
	if err := json.NewDecoder(r.Body).Decode(&metaData); err != nil {
		fmt.Fprintf(w, "error: %v", err)
		return
	}
	fmt.Fprintf(w, "Received: %v\n", metaData)
}
// UserPostRequest decodes a User JSON payload, serializes it back with
// User.ToJson, and returns it as an application/json response. Decode errors
// go to the response body; serialization errors yield a bare 500.
func UserPostRequest(w http.ResponseWriter, r *http.Request) {
	var user User
	if err := json.NewDecoder(r.Body).Decode(&user); err != nil {
		fmt.Fprintf(w, "error: %v", err)
		return
	}
	payload, err := user.ToJson()
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.Header().Set("Content-Type", "application/json")
	w.Write(payload)
}
|
package main
import (
"bufio"
"fmt"
"log"
"os"
)
// pow3 returns 3**x for x >= 0 using binary (square-and-multiply)
// exponentiation.
func pow3(x int) int {
	result := 1
	for base := 3; x > 0; x >>= 1 {
		if x&1 == 1 {
			result *= base
		}
		base *= base
	}
	return result
}
// ugly evaluates one candidate expression over the digit slice n and reports
// whether its value is divisible by 2, 3, 5 or 7.
//
// j is a base-3 encoding of the choice made between each pair of adjacent
// digits: digit 0 means "no operator" (the next digit extends the current
// number), while 1 and 2 insert an operator and may flip the sign flag p.
// NOTE(review): the exact mapping of ops values 1/2 to +/- depends on the
// current sign state (when p is true, ops==1 flips to minus; when p is false,
// ops==2 flips back to plus) — confirm against the original problem
// statement before relying on it.
func ugly(j int, n []int) bool {
	var (
		k  int            // index of the digit most recently consumed
		s  int64          // running sum of completed terms
		c  = int64(n[0])  // current term being built from consecutive digits
		cj = j            // remaining operator choices, consumed base-3 digit by digit
		p  = true         // sign flag: true while the current term is to be added
	)
	for k < len(n)-1 {
		ops := cj % 3
		cj /= 3
		if ops == 0 {
			// No operator: the next digit extends the current number.
			c *= 10
		} else {
			// An operator terminates the current term: fold it into s with
			// the current sign, then possibly flip the sign for the next term.
			if p {
				s += c
				if ops == 1 {
					p = false
				}
			} else {
				s -= c
				if ops == 2 {
					p = true
				}
			}
			c = 0
		}
		k++
		c += int64(n[k])
	}
	// Fold in the final (possibly still-building) term.
	if p {
		s += c
	} else {
		s -= c
	}
	return s%2 == 0 || s%3 == 0 || s%5 == 0 || s%7 == 0
}
// uglyNumbers counts, over all 3^(len(q)-1) ways of choosing
// nothing/'+'/'-' between adjacent digits of q, how many of the resulting
// expressions evaluate to an "ugly" value (see ugly).
func uglyNumbers(q string) int {
	digits := make([]int, len(q))
	for i, ch := range q {
		digits[i] = int(ch - '0')
	}
	count := 0
	total := pow3(len(digits) - 1)
	for encoded := 0; encoded < total; encoded++ {
		if ugly(encoded, digits) {
			count++
		}
	}
	return count
}
// main reads one digit string per line from the file named by the first
// command-line argument and prints, for each line, its ugly-expression count.
func main() {
	data, err := os.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer data.Close()
	scanner := bufio.NewScanner(data)
	for scanner.Scan() {
		fmt.Println(uglyNumbers(scanner.Text()))
	}
	// Bug fix: the original never checked scanner.Err(), silently ignoring
	// read errors that terminate the loop early.
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
|
package common
import "sync"
type (
	// Subscriber is the channel on which a registered listener receives
	// broadcast values.
	Subscriber chan interface{}
	// subscriberFunc filters broadcasts for one subscriber: returning false
	// skips delivery of that value.
	subscriberFunc func(v interface{}) bool
)
// Broadcaster sends events to multiple subscribers.
type Broadcaster struct {
	subs map[Subscriber]subscriberFunc // registered subscribers and their filters
	m    sync.RWMutex                  // guards subs
}
// NewBroadcaster returns a Broadcaster with an empty subscriber registry.
func NewBroadcaster() *Broadcaster {
	b := &Broadcaster{}
	b.subs = make(map[Subscriber]subscriberFunc)
	return b
}
// Register creates a new Subscriber channel, associates it with the given
// filter function, and returns it.
func (b *Broadcaster) Register(sf subscriberFunc) Subscriber {
	sub := make(Subscriber)
	b.m.Lock()
	defer b.m.Unlock()
	b.subs[sub] = sf
	return sub
}
// Evict removes the given subscriber from the registry and closes its channel.
func (b *Broadcaster) Evict(sub Subscriber) {
	b.m.Lock()
	defer b.m.Unlock()
	delete(b.subs, sub)
	close(sub)
}
// Broadcast delivers v to every subscriber in parallel and waits until all
// deliveries (or filtered skips) have completed. The registry lock is held
// for the whole fan-out, so registration changes cannot race with delivery.
func (b *Broadcaster) Broadcast(v interface{}) {
	b.m.Lock()
	defer b.m.Unlock()
	var pending sync.WaitGroup
	for sub, filter := range b.subs {
		pending.Add(1)
		go b.publish(sub, filter, v, &pending)
	}
	pending.Wait()
}
// publish delivers v to sub unless the subscriber's filter rejects it, then
// marks the delivery done on wg.
func (b *Broadcaster) publish(sub Subscriber, sf subscriberFunc, v interface{}, wg *sync.WaitGroup) {
	defer wg.Done()
	if sf != nil && !sf(v) {
		return // the filter rejected this value
	}
	// Blocking send; the original's single-case select is equivalent.
	sub <- v
}
// Close evicts every subscriber and closes each of their channels.
func (b *Broadcaster) Close() {
	b.m.Lock()
	defer b.m.Unlock()
	for sub := range b.subs {
		close(sub)
		delete(b.subs, sub)
	}
}
|
package converters
import (
"k8s.io/apimachinery/pkg/runtime"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
"sigs.k8s.io/cluster-api-provider-nks/pkg/apis/nks/v1alpha1"
"sigs.k8s.io/yaml"
)
// ClusterConfigFromProviderSpec unmarshals a provider config into an NKS
// Cluster spec. A nil providerConfig.Value yields a zero-value spec rather
// than an error.
func ClusterConfigFromProviderSpec(providerConfig clusterv1.ProviderSpec) (*v1alpha1.NKSClusterProviderSpec, error) {
	config := &v1alpha1.NKSClusterProviderSpec{}
	if providerConfig.Value == nil {
		return config, nil
	}
	if err := yaml.Unmarshal(providerConfig.Value.Raw, config); err != nil {
		return nil, err
	}
	return config, nil
}
// ClusterStatusFromProviderStatus unmarshals a raw extension into an NKS
// Cluster status; a nil extension yields a zero-value status.
func ClusterStatusFromProviderStatus(extension *runtime.RawExtension) (*v1alpha1.NKSClusterProviderStatus, error) {
	status := &v1alpha1.NKSClusterProviderStatus{}
	if extension == nil {
		return status, nil
	}
	if err := yaml.Unmarshal(extension.Raw, status); err != nil {
		return nil, err
	}
	return status, nil
}
|
// Copyright © 2021 Cisco Systems, Inc. and its affiliates.
// All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rest
import (
"fmt"
"net/http"
"github.com/go-openapi/loads"
"github.com/go-openapi/runtime/middleware"
"github.com/go-openapi/spec"
log "github.com/sirupsen/logrus"
"github.com/apiclarity/apiclarity/api/server/models"
"github.com/apiclarity/apiclarity/api/server/restapi/operations"
"github.com/apiclarity/apiclarity/backend/pkg/database"
)
// defaultTagName groups spec operations that declare no tags of their own.
const defaultTagName = "default-tag"
// GetAPIInventoryAPIIDSpecs returns the provided and reconstructed OpenAPI
// spec info for the API identified in params. DB lookup or spec-parsing
// failures are logged and mapped to a 500 response.
func (s *Server) GetAPIInventoryAPIIDSpecs(params operations.GetAPIInventoryAPIIDSpecsParams) middleware.Responder {
	apiSpecFromDB, err := database.GetAPISpecs(params.APIID)
	if err != nil {
		// TODO: need to handle errors
		// https://github.com/go-gorm/gorm/blob/master/errors.go
		log.Errorf("Failed to get api specs from DB. %v", err)
		return operations.NewGetAPIInventoryAPIIDSpecsDefault(http.StatusInternalServerError)
	}
	log.Debugf("Got GetAPIInventoryAPIIDSpecsParams=%+v, Got apiSpecFromDB=%+v", params, apiSpecFromDB)
	provided, err := createSpecInfo(apiSpecFromDB.ProvidedSpec)
	if err != nil {
		log.Errorf("Failed to create spec info from provided spec. %v", err)
		return operations.NewGetAPIInventoryAPIIDSpecsDefault(http.StatusInternalServerError)
	}
	reconstructed, err := createSpecInfo(apiSpecFromDB.ReconstructedSpec)
	if err != nil {
		log.Errorf("Failed to create spec info from reconstructed spec. %v", err)
		return operations.NewGetAPIInventoryAPIIDSpecsDefault(http.StatusInternalServerError)
	}
	payload := &models.OpenAPISpecs{
		ProvidedSpec:      provided,
		ReconstructedSpec: reconstructed,
	}
	return operations.NewGetAPIInventoryAPIIDSpecsOK().WithPayload(payload)
}
// createSpecInfo builds a SpecInfo (tag list) from a raw spec document.
// An empty rawSpec yields (nil, nil).
func createSpecInfo(rawSpec string) (*models.SpecInfo, error) {
	if len(rawSpec) == 0 {
		return nil, nil
	}
	tags, err := createTagsListFromRawSpec(rawSpec)
	if err != nil {
		return nil, fmt.Errorf("failed to create tags list from raw spec: %v. %v", rawSpec, err)
	}
	info := &models.SpecInfo{Tags: tags}
	return info, nil
}
// createTagsListFromRawSpec analyzes a raw OpenAPI document and groups every
// operation (method + path) under its declared tags; untagged operations fall
// under defaultTagName.
func createTagsListFromRawSpec(rawSpec string) ([]*models.SpecTag, error) {
	analyzed, err := loads.Analyzed([]byte(rawSpec), "")
	if err != nil {
		return nil, fmt.Errorf("failed to analyze spec: %v. %v", rawSpec, err)
	}
	byTag := map[string][]*models.MethodAndPath{}
	for path, pathItem := range analyzed.Spec().Paths.Paths {
		// Fixed, deterministic method order, matching the original.
		calls := []struct {
			op     *spec.Operation
			method models.HTTPMethod
		}{
			{pathItem.Get, models.HTTPMethodGET},
			{pathItem.Put, models.HTTPMethodPUT},
			{pathItem.Post, models.HTTPMethodPOST},
			{pathItem.Patch, models.HTTPMethodPATCH},
			{pathItem.Options, models.HTTPMethodOPTIONS},
			{pathItem.Delete, models.HTTPMethodDELETE},
			{pathItem.Head, models.HTTPMethodHEAD},
		}
		for _, c := range calls {
			addOperationToTagList(c.op, c.method, path, byTag)
		}
	}
	var tagList []*models.SpecTag
	for tag, methodAndPaths := range byTag {
		tagList = append(tagList, &models.SpecTag{
			Description:       "", // TODO from review?
			MethodAndPathList: methodAndPaths,
			Name:              tag,
		})
	}
	return tagList, nil
}
// addOperationToTagList records method+path under each of the operation's
// tags (or under defaultTagName when it has none). Nil operations are
// ignored.
func addOperationToTagList(operation *spec.Operation, method models.HTTPMethod, path string, tagList map[string][]*models.MethodAndPath) {
	if operation == nil {
		return
	}
	tags := operation.Tags
	if len(tags) == 0 {
		tags = []string{defaultTagName}
	}
	for _, tag := range tags {
		// A fresh MethodAndPath per tag, as in the original (no aliasing).
		tagList[tag] = append(tagList[tag], &models.MethodAndPath{
			Method: method,
			Path:   path,
		})
	}
}
|
package lib
// Buckets is a placeholder bucket registry; it currently carries no state.
type Buckets struct {
}
// get looks up the bucket named name.
// NOTE(review): the body is empty — the lookup is not implemented yet.
func (slf Buckets) get(name string) {
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package firmware
import (
"context"
"fmt"
"regexp"
"strings"
"time"
"github.com/golang/protobuf/ptypes/empty"
"chromiumos/tast/common/servo"
"chromiumos/tast/ctxutil"
"chromiumos/tast/remote/firmware/fixture"
"chromiumos/tast/remote/firmware/reporters"
"chromiumos/tast/rpc"
"chromiumos/tast/services/cros/ui"
"chromiumos/tast/ssh"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// s3StabilityTestParams configures one S3SuspendResume test variant.
type s3StabilityTestParams struct {
	tabletMode bool // force the DUT into tablet mode (tablet_mode_angle 0)
	val        int  // suspend/resume cycle count passed to suspend_stress_test -c
}
// init registers every S3SuspendResume parameterization with the tast
// framework. Variants differ only in tablet mode, cycle count (val), and
// timeout.
func init() {
	testing.AddTest(&testing.Test{
		Func:         S3SuspendResume,
		Desc:         "Verifies DUT S3 entry and exit with suspend-resume",
		Contacts:     []string{"pathan.jilani@intel.com", "intel-chrome-system-automation-team@intel.com"},
		SoftwareDeps: []string{"chrome"},
		ServiceDeps:  []string{"tast.cros.ui.ScreenLockService"},
		Vars:         []string{"servo"},
		// TODO(b/199674322): Add back to firmware_unstable once this test actually works.
		Attr:         []string{},
		HardwareDeps: hwdep.D(hwdep.ChromeEC()),
		Params: []testing.Param{{
			// 10 cycles, clamshell.
			Name:    "stability_test_clamshell_mode",
			Fixture: fixture.NormalMode,
			Val: s3StabilityTestParams{
				tabletMode: false,
				val:        10,
			},
			Timeout: 10 * time.Minute,
		}, {
			// 10 cycles, tablet.
			Name:    "stability_test_tablet_mode",
			Fixture: fixture.NormalMode,
			Val: s3StabilityTestParams{
				tabletMode: true,
				val:        10,
			},
			Timeout: 10 * time.Minute,
		}, {
			// Single entry/exit, clamshell.
			Name:    "entry_exit_clamshell_mode",
			Fixture: fixture.NormalMode,
			Val: s3StabilityTestParams{
				tabletMode: false,
				val:        1,
			},
		}, {
			// Single entry/exit, tablet.
			Name:    "entry_exit_tablet_mode",
			Fixture: fixture.NormalMode,
			Val: s3StabilityTestParams{
				tabletMode: true,
				val:        1,
			},
		}, {
			// 100-cycle stress variant.
			Name:    "stress_test",
			Fixture: fixture.NormalMode,
			Val: s3StabilityTestParams{
				tabletMode: false,
				val:        100,
			},
			Timeout: 28 * time.Minute,
		}},
	})
}
// S3SuspendResume verifies DUT S3 entry/exit via suspend_stress_test.
//
// Flow: capture baseline EC tablet-mode settings and eventlog position, force
// tablet mode if requested, log into Chrome via the ScreenLockService,
// bind-mount a power_manager override that disables suspend-to-idle (forcing
// S3), run suspend_stress_test for the configured cycle count, then validate
// the stress-test counters, dmesg output, and firmware eventlog. A deferred
// cleanup restores powerd settings and tablet_mode_angle even on failure.
func S3SuspendResume(ctx context.Context, s *testing.State) {
	h := s.FixtValue().(*fixture.Value).Helper
	dut := s.DUT()
	testOpt := s.Param().(s3StabilityTestParams)
	// Keep the unshortened context for cleanup; reserve 3 minutes of the
	// test deadline for the deferred restore work below.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 3*time.Minute)
	defer cancel()
	var (
		// Kernel message proving the DUT resumed from S3.
		WakeUpFromS3 = regexp.MustCompile("Waking up from system sleep state S3")
		// Either set of firmware eventlog entries is accepted as proof of
		// S3 entry and exit (wording varies by platform).
		requiredEventSets = [][]string{[]string{`Sleep`, `^Wake`},
			[]string{`ACPI Enter \| S3`, `ACPI Wake \| S3`},
		}
		// suspend_stress_test must report zero occurrences of each failure kind.
		PrematureWake  = regexp.MustCompile("Premature wakes: 0")
		SuspndFailure  = regexp.MustCompile("Suspend failures: 0")
		FrmwreLogError = regexp.MustCompile("Firmware log errors: 0")
	)
	const (
		suspendToIdle   = "0" // written to suspend_to_idle: use S3 rather than S0ix
		ClrDemsgCmd     = "dmesg -C"
		S3DmesgCmd      = "dmesg | grep S3"
		PowerdConfigCmd = "check_powerd_config --suspend_to_idle; echo $?"
	)
	// Get the initial tablet_mode_angle settings to restore at the end of test.
	re := regexp.MustCompile(`tablet_mode_angle=(\d+) hys=(\d+)`)
	out, err := dut.Conn().CommandContext(ctx, "ectool", "motionsense", "tablet_mode_angle").Output()
	if err != nil {
		s.Fatal("Failed to retrieve tablet_mode_angle settings: ", err)
	}
	m := re.FindSubmatch(out)
	if len(m) != 3 {
		s.Fatalf("Failed to get initial tablet_mode_angle settings: got submatches %+v", m)
	}
	initLidAngle := m[1]
	initHys := m[2]
	// Set tabletModeAngle to 0 to force the DUT into tablet mode.
	if testOpt.tabletMode {
		testing.ContextLog(ctx, "Put DUT into tablet mode")
		if err := dut.Conn().CommandContext(ctx, "ectool", "motionsense", "tablet_mode_angle", "0", "0").Run(); err != nil {
			s.Fatal("Failed to set DUT into tablet mode: ", err)
		}
	}
	// Log into Chrome on the DUT over gRPC so suspend happens from a real session.
	cl, err := rpc.Dial(ctx, s.DUT(), s.RPCHint())
	if err != nil {
		s.Fatal("Failed to connect to the RPC service on the DUT: ", err)
	}
	defer cl.Close(cleanupCtx)
	screenLockService := ui.NewScreenLockServiceClient(cl.Conn)
	if _, err := screenLockService.NewChrome(ctx, &empty.Empty{}); err != nil {
		s.Fatal("Failed to login chrome: ", err)
	}
	defer screenLockService.CloseChrome(ctx, &empty.Empty{})
	// Remember the last pre-test eventlog entry so only new events are checked.
	r := h.Reporter
	var cutoffEvent reporters.Event
	oldEvents, err := r.EventlogList(ctx)
	if err != nil {
		s.Fatal("Failed finding last event: ", err)
	}
	if len(oldEvents) > 0 {
		cutoffEvent = oldEvents[len(oldEvents)-1]
	}
	// Bind-mount a power_manager override disabling suspend-to-idle so the
	// DUT suspends to S3, then restart powerd to pick it up.
	if err := h.DUT.Conn().CommandContext(ctx, "sh", "-c", fmt.Sprintf(
		"mkdir -p /tmp/power_manager && "+
			"echo %q > /tmp/power_manager/suspend_to_idle && "+
			"mount --bind /tmp/power_manager /var/lib/power_manager && "+
			"restart powerd", suspendToIdle),
	).Run(ssh.DumpLogOnError); err != nil {
		s.Fatal("Failed to set suspend to idle: ", err)
	}
	// Cleanup: wake the DUT if needed, unmount the powerd override, and
	// restore the original tablet_mode_angle settings.
	defer func(ctx context.Context) {
		s.Log("Performing cleanup")
		if !dut.Connected(ctx) {
			waitCtx, cancel := context.WithTimeout(ctx, 1*time.Minute)
			defer cancel()
			if err := h.Servo.KeypressWithDuration(ctx, servo.PowerKey, servo.DurPress); err != nil {
				s.Fatal("Failed to power normal press: ", err)
			}
			if err := dut.WaitConnect(waitCtx); err != nil {
				s.Fatal("Failed to wait connect DUT: ", err)
			}
		}
		if err := h.DUT.Conn().CommandContext(ctx, "sh", "-c",
			"umount /var/lib/power_manager && restart powerd",
		).Run(ssh.DumpLogOnError); err != nil {
			s.Log("Failed to restore powerd settings: ", err)
		}
		if err := dut.Conn().CommandContext(ctx, "ectool", "motionsense", "tablet_mode_angle", string(initLidAngle), string(initHys)).Run(); err != nil {
			s.Fatal("Failed to restore tablet_mode_angle to the original settings: ", err)
		}
	}(cleanupCtx)
	// Clear the kernel ring buffer so later S3 greps only see fresh messages.
	if err := h.DUT.Conn().CommandContext(ctx, "bash", "-c", ClrDemsgCmd).Run(); err != nil {
		s.Fatalf("Failed to execute %q command: %v", ClrDemsgCmd, err)
	}
	// check_powerd_config exits non-zero when suspend-to-idle is disabled;
	// "1" here confirms the override took effect and S3 will be used.
	configValue, err := h.DUT.Conn().CommandContext(ctx, "bash", "-c", PowerdConfigCmd).Output()
	if err != nil {
		s.Fatalf("Failed to execute %q command: %v", PowerdConfigCmd, err)
	}
	actualValue := strings.TrimSpace(string(configValue))
	expectedValue := "1"
	if actualValue != expectedValue {
		s.Fatalf("Failed to be in S3 state. PowerdConfig want %q; got %q", expectedValue, actualValue)
	}
	// expected time sleep 8 seconds to ensure DUT switch to S3.
	// otherwise premature wake, suspend failure errors are expected.
	if err := testing.Sleep(ctx, 8*time.Second); err != nil {
		s.Fatal("Failed to sleep: ", err)
	}
	val := fmt.Sprintf("%d", testOpt.val)
	testing.ContextLog(ctx, "Executing suspend_stress_test")
	stressOut, err := h.DUT.Conn().CommandContext(ctx, "suspend_stress_test", "-c", val).Output()
	if err != nil {
		s.Fatal("Failed to execute suspend_stress_test command: ", err)
	}
	// All failure counters reported by suspend_stress_test must be zero.
	var errorCodes []*regexp.Regexp
	errorCodes = []*regexp.Regexp{PrematureWake, SuspndFailure, FrmwreLogError}
	for _, errMsg := range errorCodes {
		if !(errMsg).MatchString(string(stressOut)) {
			s.Fatalf("Failed for failures; expected %q but got non-zero %s", errMsg, string(stressOut))
		}
	}
	// dmesg must show a resume from S3.
	dmesgOut, err := h.DUT.Conn().CommandContext(ctx, "bash", "-c", S3DmesgCmd).Output()
	if err != nil {
		s.Fatalf("Failed to execute %q command: %v", S3DmesgCmd, err)
	}
	if !WakeUpFromS3.MatchString(string(dmesgOut)) {
		s.Fatalf("Failed to find %q pattern in dmesg log", WakeUpFromS3)
	}
	// The firmware eventlog (entries newer than cutoffEvent) must contain at
	// least one complete sleep/wake event set.
	events, err := r.EventlogListAfter(ctx, cutoffEvent)
	if err != nil {
		s.Fatal("Failed gathering events: ", err)
	}
	requiredEventsFound := false
	for _, requiredEventSet := range requiredEventSets {
		foundAllRequiredEventsInSet := true
		for _, requiredEvent := range requiredEventSet {
			reRequiredEvent := regexp.MustCompile(requiredEvent)
			if !eventMessagesContainMatch(ctx, events, reRequiredEvent) {
				foundAllRequiredEventsInSet = false
				break
			}
		}
		if foundAllRequiredEventsInSet {
			requiredEventsFound = true
			break
		}
	}
	if !requiredEventsFound {
		s.Fatal("Failed as required event missing")
	}
}
// eventMessagesContainMatch reports whether any event's message matches re.
func eventMessagesContainMatch(ctx context.Context, events []reporters.Event, re *regexp.Regexp) bool {
	for i := range events {
		if re.MatchString(events[i].Message) {
			return true
		}
	}
	return false
}
|
package network
import (
"fmt"
xctx "github.com/xuperchain/xupercore/kernel/common/xcontext"
"testing"
"github.com/xuperchain/xupercore/kernel/mock"
nctx "github.com/xuperchain/xupercore/kernel/network/context"
"github.com/xuperchain/xupercore/kernel/network/p2p"
pb "github.com/xuperchain/xupercore/protos"
)
// MockP2PServ is a stub p2p.Server used by the tests; most methods either
// no-op or return a fixed "mock interface" error.
type MockP2PServ struct {
	ctx *nctx.NetCtx // stored by Init, returned by Context
}
// NewMockP2PServ builds an empty mock network server.
func NewMockP2PServ() p2p.Server {
	mock := &MockP2PServ{}
	return mock
}
// Init stores the network context; the mock never fails.
func (t *MockP2PServ) Init(ctx *nctx.NetCtx) error {
	t.ctx = ctx
	return nil
}
// Start is a no-op for the mock server.
func (t *MockP2PServ) Start() {
}
// Stop is a no-op for the mock server.
func (t *MockP2PServ) Stop() {
}
// NewSubscriber returns nil; the mock does not support subscriptions.
func (t *MockP2PServ) NewSubscriber(pb.XuperMessage_MessageType,
	interface{}, ...p2p.SubscriberOption) p2p.Subscriber {
	return nil
}
// Register always fails with the fixed mock error.
func (t *MockP2PServ) Register(p2p.Subscriber) error {
	return fmt.Errorf("mock interface")
}
// UnRegister always fails with the fixed mock error.
func (t *MockP2PServ) UnRegister(p2p.Subscriber) error {
	return fmt.Errorf("mock interface")
}
// SendMessage always fails with the fixed mock error.
func (t *MockP2PServ) SendMessage(xctx.XContext, *pb.XuperMessage, ...p2p.OptionFunc) error {
	return fmt.Errorf("mock interface")
}
// SendMessageWithResponse always fails with the fixed mock error.
func (t *MockP2PServ) SendMessageWithResponse(xctx.XContext,
	*pb.XuperMessage, ...p2p.OptionFunc) ([]*pb.XuperMessage, error) {
	return nil, fmt.Errorf("mock interface")
}
func (t *MockP2PServ) Context() *nctx.NetCtx {
return t.ctx
}
func (t *MockP2PServ) PeerInfo() pb.PeerInfo {
return pb.PeerInfo{}
}
// TestNewNetwork registers the mock p2p driver under "p2pv2" and checks that
// NewNetwork can be built from a test environment config.
// Fixes: errors from NewEnvConfForTest and NewNetCtx were silently discarded
// with `_`, which could turn a setup failure into a confusing later crash;
// fmt.Println is replaced by t.Log so output is attributed to the test.
func TestNewNetwork(t *testing.T) {
	mock.InitLogForTest()
	Register("p2pv2", NewMockP2PServ)
	ecfg, err := mock.NewEnvConfForTest()
	if err != nil {
		t.Fatal(err)
	}
	netCtx, err := nctx.NewNetCtx(ecfg)
	if err != nil {
		t.Fatal(err)
	}
	n, err := NewNetwork(netCtx)
	if err != nil {
		t.Fatal(err)
	}
	t.Log(n)
}
|
package informer
import (
"WarpCloud/walm/pkg/k8s/converter"
"WarpCloud/walm/pkg/k8s/utils"
errorModel "WarpCloud/walm/pkg/models/error"
"WarpCloud/walm/pkg/models/k8s"
"WarpCloud/walm/pkg/models/release"
"errors"
tosv1beta1 "github.com/migration/pkg/apis/tos/v1beta1"
migrationclientset "github.com/migration/pkg/client/clientset/versioned"
migrationexternalversions "github.com/migration/pkg/client/informers/externalversions"
migrationv1beta1 "github.com/migration/pkg/client/listers/tos/v1beta1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
appsv1 "k8s.io/client-go/listers/apps/v1"
"sort"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/listers/apps/v1beta1"
batchv1 "k8s.io/client-go/listers/batch/v1"
"k8s.io/client-go/listers/core/v1"
listv1beta1 "k8s.io/client-go/listers/extensions/v1beta1"
storagev1 "k8s.io/client-go/listers/storage/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
"sync"
"time"
instanceclientset "transwarp/application-instance/pkg/client/clientset/versioned"
instanceexternalversions "transwarp/application-instance/pkg/client/informers/externalversions"
instancev1beta1 "transwarp/application-instance/pkg/client/listers/transwarp/v1beta1"
releaseconfigclientset "transwarp/release-config/pkg/client/clientset/versioned"
releaseconfigexternalversions "transwarp/release-config/pkg/client/informers/externalversions"
releaseconfigv1beta1 "transwarp/release-config/pkg/client/listers/transwarp/v1beta1"
k8sutils "WarpCloud/walm/pkg/k8s/utils"
"fmt"
beta1 "transwarp/application-instance/pkg/apis/transwarp/v1beta1"
isomatesetclientset "transwarp/isomateset-client/pkg/client/clientset/versioned"
isomatesetexternalversions "transwarp/isomateset-client/pkg/client/informers/externalversions"
isomatesetv1beta1 "transwarp/isomateset-client/pkg/client/listers/apiextensions.transwarp.io/v1alpha1"
monitorclientset "transwarp/monitor-crd-informer/pkg/client/versioned"
monitorexternalversions "transwarp/monitor-crd-informer/pkg/client/informers/externalversions"
monitorv1 "transwarp/monitor-crd-informer/pkg/client/listers/monitoring/v1"
)
// Informer caches k8s resources via shared informer factories and exposes
// typed listers for the resource kinds walm consumes. The core factory covers
// built-in kinds; the optional factories (instance, migration, isomate set,
// monitor) are only populated when their clients were supplied to NewInformer
// and may be nil.
type Informer struct {
	client *kubernetes.Clientset
	factory informers.SharedInformerFactory
	deploymentLister listv1beta1.DeploymentLister
	configMapLister v1.ConfigMapLister
	daemonSetLister listv1beta1.DaemonSetLister
	ingressLister listv1beta1.IngressLister
	jobLister batchv1.JobLister
	podLister v1.PodLister
	secretLister v1.SecretLister
	serviceLister v1.ServiceLister
	statefulSetLister v1beta1.StatefulSetLister
	nodeLister v1.NodeLister
	namespaceLister v1.NamespaceLister
	resourceQuotaLister v1.ResourceQuotaLister
	persistentVolumeClaimLister v1.PersistentVolumeClaimLister
	storageClassLister storagev1.StorageClassLister
	endpointsLister v1.EndpointsLister
	limitRangeLister v1.LimitRangeLister
	replicaSetLister appsv1.ReplicaSetLister
	// NOTE(review): "Conifg" is a typo for "Config"; renaming would touch
	// every method using this field, so it is kept as-is here.
	releaseConifgFactory releaseconfigexternalversions.SharedInformerFactory
	releaseConfigLister releaseconfigv1beta1.ReleaseConfigLister
	instanceFactory instanceexternalversions.SharedInformerFactory
	instanceLister instancev1beta1.ApplicationInstanceLister
	migrationFactory migrationexternalversions.SharedInformerFactory
	migrationLister migrationv1beta1.MigLister
	isomateSetFactory isomatesetexternalversions.SharedInformerFactory
	isomateSetLister isomatesetv1beta1.IsomateSetLister
	monitorFactory monitorexternalversions.SharedInformerFactory
	monitorLister monitorv1.ServiceMonitorLister
}
// ListServices returns the services in namespace matching labelSelectorStr,
// each paired with its endpoints and converted to the walm model.
// A missing endpoints object is tolerated; any other lookup error aborts.
func (informer *Informer) ListServices(namespace string, labelSelectorStr string) ([]*k8s.Service, error) {
	selector, err := labels.Parse(labelSelectorStr)
	if err != nil {
		klog.Errorf("failed to parse label string %s : %s", labelSelectorStr, err.Error())
		return nil, err
	}
	rawServices, err := informer.serviceLister.Services(namespace).List(selector)
	if err != nil {
		klog.Errorf("failed to list services in namespace %s : %s", namespace, err.Error())
		return nil, err
	}
	services := []*k8s.Service{}
	for _, raw := range rawServices {
		endpoints, epErr := informer.getEndpoints(namespace, raw.Name)
		if epErr != nil && !errorModel.IsNotFoundError(epErr) {
			return nil, epErr
		}
		converted, convErr := converter.ConvertServiceFromK8s(raw, endpoints)
		if convErr != nil {
			klog.Errorf("failed to convert service %s/%s: %s", raw.Namespace, raw.Name, convErr.Error())
			return nil, convErr
		}
		services = append(services, converted)
	}
	return services, nil
}
// ListStorageClasses returns the storage classes matching labelSelectorStr,
// converted to the walm model. The namespace argument is only used in the
// error log (storage classes are listed cluster-wide).
func (informer *Informer) ListStorageClasses(namespace string, labelSelectorStr string) ([]*k8s.StorageClass, error) {
	selector, err := labels.Parse(labelSelectorStr)
	if err != nil {
		klog.Errorf("failed to parse label string %s : %s", labelSelectorStr, err.Error())
		return nil, err
	}
	rawClasses, err := informer.storageClassLister.List(selector)
	if err != nil {
		klog.Errorf("failed to list storage classes in namespace %s : %s", namespace, err.Error())
		return nil, err
	}
	storageClasses := []*k8s.StorageClass{}
	for _, raw := range rawClasses {
		converted, convErr := converter.ConvertStorageClassFromK8s(raw)
		if convErr != nil {
			klog.Errorf("failed to convert storageClass %s/%s: %s", raw.Namespace, raw.Name, convErr.Error())
			return nil, convErr
		}
		storageClasses = append(storageClasses, converted)
	}
	return storageClasses, nil
}
// GetPodLogs fetches the logs of one pod via a live API call (not from the
// informer cache). containerName restricts to one container when non-empty;
// tailLines limits output when non-zero (0 means the full log).
// NOTE(review): .Do() is called without a context, so this request cannot be
// cancelled — confirm whether the client-go version in use requires Do(ctx).
func (informer *Informer) GetPodLogs(namespace string, podName string, containerName string, tailLines int64) (string, error) {
	podLogOptions := &corev1.PodLogOptions{}
	if containerName != "" {
		podLogOptions.Container = containerName
	}
	if tailLines != 0 {
		podLogOptions.TailLines = &tailLines
	}
	logs, err := informer.client.CoreV1().Pods(namespace).GetLogs(podName, podLogOptions).Do().Raw()
	if err != nil {
		klog.Errorf("failed to get pod logs : %s", err.Error())
		return "", err
	}
	return string(logs), nil
}
// GetPodEventList returns the events recorded against the named pod,
// converted to the walm event model.
func (informer *Informer) GetPodEventList(namespace string, name string) (*k8s.EventList, error) {
	pod, err := informer.podLister.Pods(namespace).Get(name)
	if err != nil {
		klog.Errorf("failed to get pod : %s", err.Error())
		return nil, err
	}
	podEvents, err := informer.searchEvents(pod.Namespace, &corev1.ObjectReference{
		Kind:            pod.Kind,
		APIVersion:      pod.APIVersion,
		Namespace:       pod.Namespace,
		Name:            pod.Name,
		UID:             pod.UID,
		ResourceVersion: pod.ResourceVersion,
	})
	if err != nil {
		klog.Errorf("failed to get Events : %s", err.Error())
		return nil, err
	}
	events, err := converter.ConvertEventListFromK8s(podEvents.Items)
	if err != nil {
		klog.Errorf("failed to convert walm events : %s", err.Error())
		return nil, err
	}
	return &k8s.EventList{Events: events}, nil
}
// sortAndConvertWalmEvents sorts k8s events (in place) by the package's
// event ordering and converts each into the walm event model. Extracted to
// remove the duplicated loop previously copied between
// GetDeploymentEventList and GetStatefulSetEventList.
func (informer *Informer) sortAndConvertWalmEvents(items []corev1.Event) []k8s.Event {
	sort.Sort(utils.SortableEvents(items))
	walmEvents := []k8s.Event{}
	for _, event := range items {
		walmEvents = append(walmEvents, k8s.Event{
			Type:           event.Type,
			Reason:         event.Reason,
			Message:        event.Message,
			Count:          event.Count,
			FirstTimestamp: event.FirstTimestamp.String(),
			LastTimestamp:  event.LastTimestamp.String(),
			From:           utils.FormatEventSource(event.Source),
		})
	}
	return walmEvents
}

// GetDeploymentEventList returns the events recorded against the named
// deployment, sorted and converted to the walm event model.
func (informer *Informer) GetDeploymentEventList(namespace string, name string) (*k8s.EventList, error) {
	deployment, err := informer.deploymentLister.Deployments(namespace).Get(name)
	if err != nil {
		klog.Errorf("failed to get deployment : %s", err.Error())
		return nil, err
	}
	ref := &corev1.ObjectReference{
		Kind:            deployment.Kind,
		Namespace:       deployment.Namespace,
		Name:            deployment.Name,
		UID:             deployment.UID,
		APIVersion:      deployment.APIVersion,
		ResourceVersion: deployment.ResourceVersion,
	}
	deploymentEvents, err := informer.searchEvents(deployment.Namespace, ref)
	if err != nil {
		klog.Errorf("failed to get Events : %s", err.Error())
		return nil, err
	}
	return &k8s.EventList{Events: informer.sortAndConvertWalmEvents(deploymentEvents.Items)}, nil
}

// GetStatefulSetEventList returns the events recorded against the named
// stateful set, sorted and converted to the walm event model.
func (informer *Informer) GetStatefulSetEventList(namespace string, name string) (*k8s.EventList, error) {
	statefulSet, err := informer.statefulSetLister.StatefulSets(namespace).Get(name)
	if err != nil {
		klog.Errorf("failed to get statefulSet : %s", err.Error())
		return nil, err
	}
	ref := &corev1.ObjectReference{
		Kind:            statefulSet.Kind,
		Namespace:       statefulSet.Namespace,
		Name:            statefulSet.Name,
		UID:             statefulSet.UID,
		APIVersion:      statefulSet.APIVersion,
		ResourceVersion: statefulSet.ResourceVersion,
	}
	statefulSetEvents, err := informer.searchEvents(statefulSet.Namespace, ref)
	if err != nil {
		klog.Errorf("failed to get Events : %s", err.Error())
		return nil, err
	}
	return &k8s.EventList{Events: informer.sortAndConvertWalmEvents(statefulSetEvents.Items)}, nil
}
// GetReleaseEventList collects the events of every deployment, stateful set
// and (when an isomate-set lister is configured) isomate set referenced by
// the resource set, and converts them all to the walm event model.
// Returns (nil, nil) when resourceSet is nil.
func (informer *Informer) GetReleaseEventList(resourceSet *k8s.ResourceSet) ([]k8s.Event, error) {
	if resourceSet == nil {
		return nil, nil
	}
	events := []corev1.Event{}
	// Deployments: resolve each from the cache, then fetch its events.
	for _, deployment := range resourceSet.Deployments {
		resource, err := informer.deploymentLister.Deployments(deployment.Namespace).Get(deployment.Name)
		if err != nil {
			klog.Errorf("failed to get deployment : %s", err.Error())
			return nil, err
		}
		eventList, err := informer.getResourceEvents(resource.ObjectMeta, resource.TypeMeta)
		if err != nil {
			klog.Errorf("failed to get resource events : %s", err.Error())
			return nil, err
		}
		events = append(events, eventList.Items...)
	}
	// Stateful sets: same pattern.
	for _, statefulSet := range resourceSet.StatefulSets {
		resource, err := informer.statefulSetLister.StatefulSets(statefulSet.Namespace).Get(statefulSet.Name)
		if err != nil {
			klog.Errorf("failed to get stateful set : %s", err.Error())
			return nil, err
		}
		eventList, err := informer.getResourceEvents(resource.ObjectMeta, resource.TypeMeta)
		if err != nil {
			klog.Errorf("failed to get resource events : %s", err.Error())
			return nil, err
		}
		events = append(events, eventList.Items...)
	}
	// Isomate sets are optional: the lister is nil when no isomate-set
	// client was passed to NewInformer.
	if informer.isomateSetLister != nil {
		for _, isomateSet := range resourceSet.IsomateSets {
			resource, err := informer.isomateSetLister.IsomateSets(isomateSet.Namespace).Get(isomateSet.Name)
			if err != nil {
				klog.Errorf("failed to get isomate set : %s", err.Error())
				return nil, err
			}
			eventList, err := informer.getResourceEvents(resource.ObjectMeta, resource.TypeMeta)
			if err != nil {
				klog.Errorf("failed to get resource events : %s", err.Error())
				return nil, err
			}
			events = append(events, eventList.Items...)
		}
	}
	return converter.ConvertEventListFromK8s(events)
}
// getResourceEvents fetches the events referencing the object described by
// the given object and type metadata.
func (informer *Informer) getResourceEvents(meta metav1.ObjectMeta, typeMeta metav1.TypeMeta) (*corev1.EventList, error) {
	ref := corev1.ObjectReference{
		Kind:            typeMeta.Kind,
		APIVersion:      typeMeta.APIVersion,
		Namespace:       meta.Namespace,
		Name:            meta.Name,
		UID:             meta.UID,
		ResourceVersion: meta.ResourceVersion,
	}
	return informer.searchEvents(meta.Namespace, &ref)
}
// ListSecrets returns the secrets in namespace matching labelSelectorStr,
// wrapped in a SecretList with its item count.
func (informer *Informer) ListSecrets(namespace string, labelSelectorStr string) (*k8s.SecretList, error) {
	selector, err := labels.Parse(labelSelectorStr)
	if err != nil {
		klog.Errorf("failed to parse label string %s : %s", labelSelectorStr, err.Error())
		return nil, err
	}
	rawSecrets, err := informer.secretLister.Secrets(namespace).List(selector)
	if err != nil {
		klog.Errorf("failed to list secrets in namespace %s : %s", namespace, err.Error())
		return nil, err
	}
	items := []*k8s.Secret{}
	for _, raw := range rawSecrets {
		converted, convErr := converter.ConvertSecretFromK8s(raw)
		if convErr != nil {
			klog.Errorf("failed to convert secret %s/%s: %s", raw.Namespace, raw.Name, convErr.Error())
			return nil, convErr
		}
		items = append(items, converted)
	}
	return &k8s.SecretList{
		Num:   len(items),
		Items: items,
	}, nil
}
// ListStatefulSets returns the stateful sets in namespace matching
// labelSelectorStr; each is combined with the pods selected by its own
// spec selector before conversion to the walm model.
func (informer *Informer) ListStatefulSets(namespace string, labelSelectorStr string) ([]*k8s.StatefulSet, error) {
	selector, err := labels.Parse(labelSelectorStr)
	if err != nil {
		klog.Errorf("failed to parse label string %s : %s", labelSelectorStr, err.Error())
		return nil, err
	}
	rawSets, err := informer.statefulSetLister.StatefulSets(namespace).List(selector)
	if err != nil {
		klog.Errorf("failed to list stateful sets in namespace %s : %s", namespace, err.Error())
		return nil, err
	}
	statefulSets := []*k8s.StatefulSet{}
	for _, raw := range rawSets {
		pods, podErr := informer.listPods(namespace, raw.Spec.Selector, false)
		if podErr != nil {
			return nil, podErr
		}
		converted, convErr := converter.ConvertStatefulSetFromK8s(raw, pods)
		if convErr != nil {
			klog.Errorf("failed to convert stateful set %s/%s: %s", raw.Namespace, raw.Name, convErr.Error())
			return nil, convErr
		}
		statefulSets = append(statefulSets, converted)
	}
	return statefulSets, nil
}
// GetNodes returns all nodes matching labelSelectorStr, each enriched (in
// parallel) with its non-terminated pods and converted to the walm model.
// Fixes a data race: the outer `err` was written by multiple goroutines
// without holding the mutex and read unsynchronized after wg.Wait(); the
// failure is now recorded under the same mutex that guards the result slice
// (last failure wins, matching the old last-write-wins behavior).
func (informer *Informer) GetNodes(labelSelectorStr string) ([]*k8s.Node, error) {
	selector, err := labels.Parse(labelSelectorStr)
	if err != nil {
		klog.Errorf("failed to parse label string %s : %s", labelSelectorStr, err.Error())
		return nil, err
	}
	nodeList, err := informer.nodeLister.List(selector)
	if err != nil {
		return nil, err
	}
	walmNodes := []*k8s.Node{}
	if nodeList != nil {
		mux := &sync.Mutex{}
		var wg sync.WaitGroup
		var buildErr error // guarded by mux
		for _, node := range nodeList {
			wg.Add(1)
			go func(node *corev1.Node) {
				defer wg.Done()
				podsOnNode, err1 := informer.getNonTermiatedPodsOnNode(node.Name, nil)
				if err1 != nil {
					klog.Errorf("failed to get pods on node: %s", err1.Error())
					mux.Lock()
					buildErr = errors.New(err1.Error())
					mux.Unlock()
					return
				}
				walmNode, err1 := converter.ConvertNodeFromK8s(node, podsOnNode)
				if err1 != nil {
					klog.Errorf("failed to build walm node : %s", err1.Error())
					mux.Lock()
					buildErr = errors.New(err1.Error())
					mux.Unlock()
					return
				}
				mux.Lock()
				walmNodes = append(walmNodes, walmNode)
				mux.Unlock()
			}(node)
		}
		wg.Wait()
		if buildErr != nil {
			klog.Errorf("failed to build nodes : %s", buildErr.Error())
			return nil, buildErr
		}
	}
	return walmNodes, nil
}
// AddReleaseConfigHandler registers add/update/delete callbacks on the
// ReleaseConfig informer.
func (informer *Informer) AddReleaseConfigHandler(OnAdd func(obj interface{}), OnUpdate func(oldObj, newObj interface{}), OnDelete func(obj interface{})) {
	informer.releaseConifgFactory.Transwarp().V1beta1().ReleaseConfigs().Informer().AddEventHandler(&cache.ResourceEventHandlerFuncs{
		AddFunc:    OnAdd,
		UpdateFunc: OnUpdate,
		DeleteFunc: OnDelete,
	})
}

// AddServiceHandler registers add/update/delete callbacks on the Service
// informer.
func (informer *Informer) AddServiceHandler(OnAdd func(obj interface{}), OnUpdate func(oldObj, newObj interface{}), OnDelete func(obj interface{})) {
	informer.factory.Core().V1().Services().Informer().AddEventHandler(&cache.ResourceEventHandlerFuncs{
		AddFunc:    OnAdd,
		UpdateFunc: OnUpdate,
		DeleteFunc: OnDelete,
	})
}

// AddMigrationHandler is a no-op: no migration event handler is wired up.
func (informer *Informer) AddMigrationHandler(OnAdd func(obj interface{}), OnUpdate func(oldObj, newObj interface{}), OnDelete func(obj interface{})) {
}
// ListPersistentVolumeClaims returns the PVCs in namespace matching
// labelSelectorStr, converted to the walm model.
// Fixes a copy-paste bug: the conversion-failure log said
// "failed to convert release config" although it reports a PVC.
func (informer *Informer) ListPersistentVolumeClaims(namespace string, labelSelectorStr string) ([]*k8s.PersistentVolumeClaim, error) {
	selector, err := labels.Parse(labelSelectorStr)
	if err != nil {
		klog.Errorf("failed to parse label string %s : %s", labelSelectorStr, err.Error())
		return nil, err
	}
	resources, err := informer.persistentVolumeClaimLister.PersistentVolumeClaims(namespace).List(selector)
	if err != nil {
		klog.Errorf("failed to list pvcs in namespace %s : %s", namespace, err.Error())
		return nil, err
	}
	pvcs := []*k8s.PersistentVolumeClaim{}
	for _, resource := range resources {
		pvc, err := converter.ConvertPvcFromK8s(resource)
		if err != nil {
			klog.Errorf("failed to convert pvc %s/%s: %s", resource.Namespace, resource.Name, err.Error())
			return nil, err
		}
		pvcs = append(pvcs, pvc)
	}
	return pvcs, nil
}
// ListReleaseConfigs returns the release configs in namespace matching
// labelSelectorStr, converted to the walm model.
func (informer *Informer) ListReleaseConfigs(namespace, labelSelectorStr string) ([]*k8s.ReleaseConfig, error) {
	selector, err := labels.Parse(labelSelectorStr)
	if err != nil {
		klog.Errorf("failed to parse label string %s : %s", labelSelectorStr, err.Error())
		return nil, err
	}
	rawConfigs, err := informer.releaseConfigLister.ReleaseConfigs(namespace).List(selector)
	if err != nil {
		klog.Errorf("failed to list release configs in namespace %s : %s", namespace, err.Error())
		return nil, err
	}
	releaseConfigs := []*k8s.ReleaseConfig{}
	for _, raw := range rawConfigs {
		converted, convErr := converter.ConvertReleaseConfigFromK8s(raw)
		if convErr != nil {
			klog.Errorf("failed to convert release config %s/%s: %s", raw.Namespace, raw.Name, convErr.Error())
			return nil, convErr
		}
		releaseConfigs = append(releaseConfigs, converted)
	}
	return releaseConfigs, nil
}
// GetNodeMigration aggregates the pod migrations whose source is the given
// node (matched via the migType=node,srcNode=<node> labels) into a MigStatus
// with the count of finished migrations.
func (informer *Informer) GetNodeMigration(namespace, node string) (*k8s.MigStatus, error) {
	selector, err := utils.ConvertLabelSelectorToSelector(&metav1.LabelSelector{
		MatchLabels: map[string]string{"migType": "node", "srcNode": node},
	})
	if err != nil {
		klog.Errorf("failed to convert label selector to selector: %s", err.Error())
		return nil, err
	}
	k8sMigs, err := informer.migrationLister.Migs(namespace).List(selector)
	if err != nil {
		klog.Errorf("failed to list pod migs of node: %s", err.Error())
		return nil, err
	}
	var migs []*k8s.Mig
	finished := 0
	for _, item := range k8sMigs {
		if item.Status.Phase == tosv1beta1.MIG_FINISH {
			finished++
		}
		converted, convErr := converter.ConvertMigFromK8s(item)
		if convErr != nil {
			klog.Errorf("failed to convert mig from k8s mig: %s", convErr.Error())
			return nil, convErr
		}
		migs = append(migs, converted)
	}
	return &k8s.MigStatus{
		Succeed: finished,
		Total:   len(k8sMigs),
		Items:   migs,
	}, nil
}
// ListMigrations returns the Mig resources in namespace matching
// labelSelectorStr, converted to the walm model.
// Fixes a dropped error: the error returned by List() was previously
// assigned but never checked, so a lister failure produced a silent empty
// result.
func (informer *Informer) ListMigrations(namespace, labelSelectorStr string) ([]*k8s.Mig, error) {
	selector, err := labels.Parse(labelSelectorStr)
	if err != nil {
		klog.Errorf("failed to parse label string %s : %s", labelSelectorStr, err.Error())
		return nil, err
	}
	k8sMigs, err := informer.migrationLister.Migs(namespace).List(selector)
	if err != nil {
		klog.Errorf("failed to list migs in namespace %s : %s", namespace, err.Error())
		return nil, err
	}
	var migs []*k8s.Mig
	for _, k8sMig := range k8sMigs {
		mig, err := converter.ConvertMigFromK8s(k8sMig)
		if err != nil {
			klog.Errorf("failed to convert mig from k8s: %s", err.Error())
			return nil, err
		}
		migs = append(migs, mig)
	}
	return migs, nil
}
// GetResourceSet resolves each release resource meta into a concrete k8s
// resource and accumulates them into a fresh ResourceSet.
// NOTE: the err declared with := inside the loop shadows the named return,
// so the naked return at the end always yields a nil error.
func (informer *Informer) GetResourceSet(releaseResourceMetas []release.ReleaseResourceMeta) (resourceSet *k8s.ResourceSet, err error) {
	resourceSet = k8s.NewResourceSet()
	for _, resourceMeta := range releaseResourceMetas {
		resource, err := informer.GetResource(resourceMeta.Kind, resourceMeta.Namespace, resourceMeta.Name)
		// if resource is not found , do not return error, add it into resource set, so resource should not be nil
		// NOTE(review): this relies on GetResource returning a usable
		// resource alongside a not-found error — confirm; a nil resource
		// here would panic on AddToResourceSet.
		if err != nil && !errorModel.IsNotFoundError(err) {
			return nil, err
		}
		resource.AddToResourceSet(resourceSet)
	}
	return
}
// GetResource dispatches on the resource kind and fetches the named object
// from the corresponding cache-backed getter. Kinds without a case fall
// through to a DefaultResource in state "Unknown"/"NotSupportedKind" rather
// than an error, so callers always receive a resource for known inputs.
func (informer *Informer) GetResource(kind k8s.ResourceKind, namespace, name string) (k8s.Resource, error) {
	switch kind {
	case k8s.ReleaseConfigKind:
		return informer.getReleaseConfig(namespace, name)
	case k8s.ConfigMapKind:
		return informer.getConfigMap(namespace, name)
	case k8s.PersistentVolumeClaimKind:
		return informer.getPvc(namespace, name)
	case k8s.DaemonSetKind:
		return informer.getDaemonSet(namespace, name)
	case k8s.DeploymentKind:
		return informer.getDeployment(namespace, name)
	case k8s.ServiceKind:
		return informer.getService(namespace, name)
	case k8s.StatefulSetKind:
		return informer.getStatefulSet(namespace, name)
	case k8s.JobKind:
		return informer.getJob(namespace, name)
	case k8s.IngressKind:
		return informer.getIngress(namespace, name)
	case k8s.SecretKind:
		return informer.getSecret(namespace, name)
	case k8s.NodeKind:
		return informer.getNode(namespace, name)
	case k8s.StorageClassKind:
		return informer.getStorageClass(namespace, name)
	case k8s.InstanceKind:
		return informer.getInstance(namespace, name)
	case k8s.ReplicaSetKind:
		return informer.getReplicaSet(namespace, name)
	case k8s.MigKind:
		return informer.getMigration(namespace, name)
	case k8s.IsomateSetKind:
		return informer.getIsomateSet(namespace, name)
	default:
		return &k8s.DefaultResource{Meta: k8s.NewMeta(kind, namespace, name, k8s.NewState("Unknown", "NotSupportedKind", "Can not get this resource"))}, nil
	}
}
// start launches every constructed informer factory.
// Fixes: monitorFactory is constructed in NewInformer when a monitor client
// is supplied, but was never started here (nor synced below), so its lister
// cache could never be populated.
func (informer *Informer) start(stopCh <-chan struct{}) {
	informer.factory.Start(stopCh)
	informer.releaseConifgFactory.Start(stopCh)
	if informer.instanceFactory != nil {
		informer.instanceFactory.Start(stopCh)
	}
	if informer.migrationFactory != nil {
		informer.migrationFactory.Start(stopCh)
	}
	if informer.isomateSetFactory != nil {
		informer.isomateSetFactory.Start(stopCh)
	}
	if informer.monitorFactory != nil {
		informer.monitorFactory.Start(stopCh)
	}
}

// waitForCacheSync blocks until the caches of every started factory have
// performed their initial sync (or stopCh closes).
func (informer *Informer) waitForCacheSync(stopCh <-chan struct{}) {
	informer.factory.WaitForCacheSync(stopCh)
	informer.releaseConifgFactory.WaitForCacheSync(stopCh)
	if informer.instanceFactory != nil {
		informer.instanceFactory.WaitForCacheSync(stopCh)
	}
	if informer.migrationFactory != nil {
		informer.migrationFactory.WaitForCacheSync(stopCh)
	}
	if informer.isomateSetFactory != nil {
		informer.isomateSetFactory.WaitForCacheSync(stopCh)
	}
	if informer.monitorFactory != nil {
		informer.monitorFactory.WaitForCacheSync(stopCh)
	}
}
// searchEvents queries the API server (a live call, not the cache) for the
// events referencing objOrRef in the given namespace.
// NOTE(review): a fresh, empty runtime.NewScheme() is created on every call —
// confirm the Search implementation tolerates a scheme with no registered
// types for the reference objects passed here.
func (informer *Informer) searchEvents(namespace string, objOrRef runtime.Object) (*corev1.EventList, error) {
	return informer.client.CoreV1().Events(namespace).Search(runtime.NewScheme(), objOrRef)
}
// getDependencyMetaByInstance extracts the dependency meta stored in the
// "transwarp.meta" annotation of the instance's dummy service.
// Returns (nil, nil) when no dummy service or annotation exists.
func (informer *Informer) getDependencyMetaByInstance(instance *beta1.ApplicationInstance) (*k8s.DependencyMeta, error) {
	selectorStr := fmt.Sprintf("transwarp.meta=true,transwarp.install=%s", instance.Spec.InstanceId)
	dummyServices, err := informer.ListServices(instance.Namespace, selectorStr)
	if err != nil {
		klog.Errorf("failed to list dummy services : %s", err.Error())
		return nil, err
	}
	if len(dummyServices) == 0 {
		return nil, nil
	}
	metaString, ok := dummyServices[0].Annotations["transwarp.meta"]
	if !ok {
		return nil, nil
	}
	return k8sutils.GetDependencyMetaFromDummyServiceMetaStr(metaString)
}
// NewInformer builds an Informer: it wires the core shared informer factory
// and its listers, the release-config factory, and — only when the matching
// client is non-nil — the instance, migration, isomate-set and monitor
// factories. It then starts the factories and blocks until their caches have
// synced before returning.
// NOTE(review): start/waitForCacheSync do not handle monitorFactory, so the
// service-monitor lister cache may never sync — confirm.
func NewInformer(
	client *kubernetes.Clientset,
	releaseConfigClient *releaseconfigclientset.Clientset,
	instanceClient *instanceclientset.Clientset,
	migrationClient *migrationclientset.Clientset,
	isomateSetClient *isomatesetclientset.Clientset,
	monitorClient *monitorclientset.Clientset,
	resyncPeriod time.Duration, stopCh <-chan struct{},
) (*Informer) {
	informer := &Informer{}
	informer.client = client
	// Core factory and the listers for built-in kinds.
	informer.factory = informers.NewSharedInformerFactory(client, resyncPeriod)
	informer.deploymentLister = informer.factory.Extensions().V1beta1().Deployments().Lister()
	informer.configMapLister = informer.factory.Core().V1().ConfigMaps().Lister()
	informer.daemonSetLister = informer.factory.Extensions().V1beta1().DaemonSets().Lister()
	informer.ingressLister = informer.factory.Extensions().V1beta1().Ingresses().Lister()
	informer.jobLister = informer.factory.Batch().V1().Jobs().Lister()
	informer.podLister = informer.factory.Core().V1().Pods().Lister()
	informer.secretLister = informer.factory.Core().V1().Secrets().Lister()
	informer.serviceLister = informer.factory.Core().V1().Services().Lister()
	informer.statefulSetLister = informer.factory.Apps().V1beta1().StatefulSets().Lister()
	informer.nodeLister = informer.factory.Core().V1().Nodes().Lister()
	informer.namespaceLister = informer.factory.Core().V1().Namespaces().Lister()
	informer.resourceQuotaLister = informer.factory.Core().V1().ResourceQuotas().Lister()
	informer.persistentVolumeClaimLister = informer.factory.Core().V1().PersistentVolumeClaims().Lister()
	informer.storageClassLister = informer.factory.Storage().V1().StorageClasses().Lister()
	informer.endpointsLister = informer.factory.Core().V1().Endpoints().Lister()
	informer.limitRangeLister = informer.factory.Core().V1().LimitRanges().Lister()
	informer.replicaSetLister = informer.factory.Apps().V1().ReplicaSets().Lister()
	// Release configs are always wired; the remaining factories are optional.
	informer.releaseConifgFactory = releaseconfigexternalversions.NewSharedInformerFactory(releaseConfigClient, resyncPeriod)
	informer.releaseConfigLister = informer.releaseConifgFactory.Transwarp().V1beta1().ReleaseConfigs().Lister()
	if instanceClient != nil {
		informer.instanceFactory = instanceexternalversions.NewSharedInformerFactory(instanceClient, resyncPeriod)
		informer.instanceLister = informer.instanceFactory.Transwarp().V1beta1().ApplicationInstances().Lister()
	}
	if migrationClient != nil {
		informer.migrationFactory = migrationexternalversions.NewSharedInformerFactory(migrationClient, resyncPeriod)
		informer.migrationLister = informer.migrationFactory.Apiextensions().V1beta1().Migs().Lister()
	}
	if isomateSetClient != nil {
		informer.isomateSetFactory = isomatesetexternalversions.NewSharedInformerFactory(isomateSetClient, resyncPeriod)
		informer.isomateSetLister = informer.isomateSetFactory.Apiextensions().V1alpha1().IsomateSets().Lister()
	}
	if monitorClient != nil {
		informer.monitorFactory = monitorexternalversions.NewSharedInformerFactoryWithOptions(monitorClient, resyncPeriod)
		informer.monitorLister = informer.monitorFactory.Monitoring().V1().ServiceMonitors().Lister()
	}
	// Block until initial cache sync so listers are usable on return.
	informer.start(stopCh)
	informer.waitForCacheSync(stopCh)
	klog.Info("k8s cache sync finished")
	return informer
}
|
// Package main ...
package main
import (
"fmt"
"net/http"
"github.com/go-rod/rod"
"github.com/go-rod/rod/lib/utils"
)
// main demonstrates rod's EvalOnNewDocument: it starts the mock server,
// overrides window.alert in every new document so the page's alert becomes a
// no-op, navigates to the served page, and prints its script text.
func main() {
	go serve()

	browser := rod.New().MustConnect()
	defer browser.MustClose()

	// Creating a Page Object
	page := browser.MustPage()

	// Evaluates given script in every frame upon creation
	// Disable all alerts by making window.alert no-op.
	page.MustEvalOnNewDocument(`window.alert = () => {}`)

	// Navigate to the website you want to visit
	page.MustNavigate("http://localhost:8080")

	fmt.Println(page.MustElement("script").MustText())
}
// testPage is the document served by the mock server; its inline script
// fires an alert that the browser-side override suppresses.
const testPage = `<html><script>alert("message")</script></html>`

// serve runs a minimal HTTP server on :8080 answering every path with
// testPage.
func serve() {
	handler := http.NewServeMux()
	handler.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
		utils.E(fmt.Fprint(w, testPage))
	})
	utils.E(http.ListenAndServe(":8080", handler))
}
|
package styles
import (
"embed"
"io/fs"
"sort"
"github.com/alecthomas/chroma/v2"
)
//go:embed *.xml
var embedded embed.FS
// Registry of Styles, built at init time from every embedded *.xml file.
// Panics if an embedded style cannot be read or parsed.
var Registry = func() map[string]*chroma.Style {
	reg := make(map[string]*chroma.Style)
	entries, err := fs.ReadDir(embedded, ".")
	if err != nil {
		panic(err)
	}
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		f, err := embedded.Open(entry.Name())
		if err != nil {
			panic(err)
		}
		style, err := chroma.NewXMLStyle(f)
		if err != nil {
			panic(err)
		}
		reg[style.Name] = style
		_ = f.Close()
	}
	return reg
}()
// Fallback style. Reassign to change the default fallback style.
var Fallback = Registry["swapoff"]

// Register a chroma.Style in the Registry under its name, replacing any
// existing entry, and return it for chaining.
func Register(style *chroma.Style) *chroma.Style {
	Registry[style.Name] = style
	return style
}
// Names of all available styles, sorted alphabetically.
func Names() []string {
	names := make([]string, 0, len(Registry))
	for name := range Registry {
		names = append(names, name)
	}
	sort.Strings(names)
	return names
}
// Get named style, or Fallback when the name is not registered.
func Get(name string) *chroma.Style {
	style, ok := Registry[name]
	if !ok {
		return Fallback
	}
	return style
}
|
// Copyright (c) 2018 The MATRIX Authors
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php
package matrixstate
import (
"github.com/MatrixAINetwork/go-matrix/common"
"github.com/MatrixAINetwork/go-matrix/core/types"
"github.com/MatrixAINetwork/go-matrix/log"
"github.com/MatrixAINetwork/go-matrix/mc"
"github.com/MatrixAINetwork/go-matrix/rlp"
)
/////////////////////////////////////////////////////////////////////////////////////////
// operatorBasePowerStatsStatus manages the base-power stats status entry in
// matrix state, keyed by RlpHash(matrixStatePrefix + MSKeyBasePowerStatsStatus).
type operatorBasePowerStatsStatus struct {
	key common.Hash
}

func newBasePowerStatsStatusOpt() *operatorBasePowerStatsStatus {
	return &operatorBasePowerStatsStatus{
		key: types.RlpHash(matrixStatePrefix + mc.MSKeyBasePowerStatsStatus),
	}
}

// KeyHash returns the state-trie key this operator manages.
func (opt *operatorBasePowerStatsStatus) KeyHash() common.Hash {
	return opt.key
}

// GetValue decodes the stored status; absent data yields a zero status.
func (opt *operatorBasePowerStatsStatus) GetValue(st StateDB) (interface{}, error) {
	if err := checkStateDB(st); err != nil {
		return nil, err
	}
	raw := st.GetMatrixData(opt.key)
	if len(raw) == 0 {
		return &mc.BasePowerSlashStatsStatus{Number: 0}, nil
	}
	status := new(mc.BasePowerSlashStatsStatus)
	if err := rlp.DecodeBytes(raw, &status); err != nil {
		log.Error(logInfo, "basePowerStatsStatus rlp decode failed", err)
		return nil, err
	}
	return status, nil
}

// SetValue RLP-encodes value and stores it under the operator's key.
func (opt *operatorBasePowerStatsStatus) SetValue(st StateDB, value interface{}) error {
	if err := checkStateDB(st); err != nil {
		return err
	}
	encoded, err := rlp.EncodeToBytes(value)
	if err != nil {
		log.Error(logInfo, "basePowerStatsStatus rlp encode failed", err)
		return err
	}
	st.SetMatrixData(opt.key, encoded)
	return nil
}
/////////////////////////////////////////////////////////////////////////////////////////
// operatorBasePowerSlashCfg manages the base-power slash configuration entry
// in matrix state, keyed by RlpHash(matrixStatePrefix + MSKeyBasePowerSlashCfg).
type operatorBasePowerSlashCfg struct {
	key common.Hash
}

func newBasePowerSlashCfgOpt() *operatorBasePowerSlashCfg {
	return &operatorBasePowerSlashCfg{
		key: types.RlpHash(matrixStatePrefix + mc.MSKeyBasePowerSlashCfg),
	}
}

// KeyHash returns the state-trie key this operator manages.
func (opt *operatorBasePowerSlashCfg) KeyHash() common.Hash {
	return opt.key
}

// GetValue decodes the stored config; absent data yields the default config
// (switcher off, LowTHR 1, ProhibitCycleNum 2).
func (opt *operatorBasePowerSlashCfg) GetValue(st StateDB) (interface{}, error) {
	if err := checkStateDB(st); err != nil {
		return nil, err
	}
	raw := st.GetMatrixData(opt.key)
	if len(raw) == 0 {
		return &mc.BasePowerSlashCfg{Switcher: false, LowTHR: 1, ProhibitCycleNum: 2}, nil
	}
	cfg := new(mc.BasePowerSlashCfg)
	if err := rlp.DecodeBytes(raw, &cfg); err != nil {
		log.Error(logInfo, "basePowerSlashCfg rlp decode failed", err)
		return nil, err
	}
	return cfg, nil
}

// SetValue RLP-encodes value and stores it under the operator's key.
func (opt *operatorBasePowerSlashCfg) SetValue(st StateDB, value interface{}) error {
	if err := checkStateDB(st); err != nil {
		return err
	}
	encoded, err := rlp.EncodeToBytes(value)
	if err != nil {
		log.Error(logInfo, "basePowerSlashCfg rlp encode failed", err)
		return err
	}
	st.SetMatrixData(opt.key, encoded)
	return nil
}
/////////////////////////////////////////////////////////////////////////////////////////
// operatorBasePowerStats manages the base-power stats list entry in matrix
// state, keyed by RlpHash(matrixStatePrefix + MSKeyBasePowerStats).
type operatorBasePowerStats struct {
	key common.Hash
}

func newBasePowerStatsOpt() *operatorBasePowerStats {
	return &operatorBasePowerStats{
		key: types.RlpHash(matrixStatePrefix + mc.MSKeyBasePowerStats),
	}
}

// KeyHash returns the state-trie key this operator manages.
func (opt *operatorBasePowerStats) KeyHash() common.Hash {
	return opt.key
}

// GetValue decodes the stored stats; absent data yields an empty stats list.
func (opt *operatorBasePowerStats) GetValue(st StateDB) (interface{}, error) {
	if err := checkStateDB(st); err != nil {
		return nil, err
	}
	raw := st.GetMatrixData(opt.key)
	if len(raw) == 0 {
		return &mc.BasePowerStats{StatsList: make([]mc.BasePowerNum, 0)}, nil
	}
	stats := new(mc.BasePowerStats)
	if err := rlp.DecodeBytes(raw, &stats); err != nil {
		log.Error(logInfo, "basePowerStats rlp decode failed", err)
		return nil, err
	}
	return stats, nil
}

// SetValue RLP-encodes value and stores it under the operator's key.
func (opt *operatorBasePowerStats) SetValue(st StateDB, value interface{}) error {
	if err := checkStateDB(st); err != nil {
		return err
	}
	encoded, err := rlp.EncodeToBytes(value)
	if err != nil {
		log.Error(logInfo, "basePowerStats rlp encode failed", err)
		return err
	}
	st.SetMatrixData(opt.key, encoded)
	return nil
}
/////////////////////////////////////////////////////////////////////////////////////////
// operatorBasePowerBlackList manages the base-power slash blacklist entry in
// matrix state, keyed by RlpHash(matrixStatePrefix + MSKeyBasePowerBlackList).
type operatorBasePowerBlackList struct {
	key common.Hash
}

func newBasePowerBlackListOpt() *operatorBasePowerBlackList {
	return &operatorBasePowerBlackList{
		key: types.RlpHash(matrixStatePrefix + mc.MSKeyBasePowerBlackList),
	}
}

// KeyHash returns the state-trie key this operator manages.
func (opt *operatorBasePowerBlackList) KeyHash() common.Hash {
	return opt.key
}

// GetValue decodes the stored blacklist; absent data yields an empty list.
func (opt *operatorBasePowerBlackList) GetValue(st StateDB) (interface{}, error) {
	if err := checkStateDB(st); err != nil {
		return nil, err
	}
	raw := st.GetMatrixData(opt.key)
	if len(raw) == 0 {
		return &mc.BasePowerSlashBlackList{BlackList: make([]mc.BasePowerSlash, 0)}, nil
	}
	blackList := new(mc.BasePowerSlashBlackList)
	if err := rlp.DecodeBytes(raw, &blackList); err != nil {
		log.Error(logInfo, "basePowerBlackList rlp decode failed", err)
		return nil, err
	}
	return blackList, nil
}

// SetValue RLP-encodes value and stores it under the operator's key.
func (opt *operatorBasePowerBlackList) SetValue(st StateDB, value interface{}) error {
	if err := checkStateDB(st); err != nil {
		return err
	}
	encoded, err := rlp.EncodeToBytes(value)
	if err != nil {
		log.Error(logInfo, "basePowerBlackList rlp encode failed", err)
		return err
	}
	st.SetMatrixData(opt.key, encoded)
	return nil
}
|
package ffmpeg
//#include <libavutil/frame.h>
import "C"
import (
"errors"
"fmt"
"unsafe"
)
type AudioFrame C.struct_Frame
// NewAudioFrame allocates an AVFrame configured for the given sample count,
// sample format and channel layout, and allocates its data buffers.
// The caller owns the frame and must call Release when done.
func NewAudioFrame(samples int, sampleFmt SampleFormat, channelLayout ChannelLayout) (*AudioFrame, error) {
	f := C.av_frame_alloc()
	if f == nil {
		// av_frame_alloc returns NULL on out-of-memory; dereferencing it
		// below would crash the process, so fail explicitly instead.
		return nil, errors.New("failed to allocate AVFrame")
	}
	f.nb_samples = C.int(samples)
	f.format = C.int(sampleFmt.ctype())
	f.channel_layout = channelLayout.ctype()
	frame := (*AudioFrame)(unsafe.Pointer(f))
	if ret := C.av_frame_get_buffer(frame.ctype(), C.int(1) /*alignment*/); ret < 0 {
		frame.Release()
		return nil, fmt.Errorf("Error allocating avframe buffer. Err: %v", ret)
	}
	return frame, nil
}
// Release frees the frame and its buffers via av_frame_free.
// NOTE(review): av_frame_free nils out the pointer it is given, which here is
// the address of the receiver's local copy — the caller's pointer is left
// dangling, so the frame must not be used after Release.
func (frame *AudioFrame) Release() {
	C.av_frame_free((**C.struct_AVFrame)(unsafe.Pointer(&frame)))
}
// MakeWritable ensures the frame's buffers are safely writable, returning an
// error when av_frame_make_writable reports failure.
func (frame *AudioFrame) MakeWritable() error {
	if ret := C.av_frame_make_writable(frame.ctype()); int(ret) != 0 {
		return errors.New("make writable error")
	}
	return nil
}
// Write copies data into the frame's first data plane and returns the number
// of bytes written. It fails if data exceeds the plane size (linesize[0]).
func (frame *AudioFrame) Write(data []byte) (int, error) {
	if int(frame.ctype().linesize[0]) < len(data) {
		return 0, errors.New("frame buffer less than writable data")
	}
	if len(data) == 0 {
		// &data[0] below panics on an empty slice; nothing to copy anyway.
		return 0, nil
	}
	// size_t matches memcpy's prototype on every platform; C.ulong is only
	// 32 bits on LLP64 targets such as Windows.
	C.memcpy(unsafe.Pointer(frame.ctype().data[0]), unsafe.Pointer(&(data[0])), C.size_t(len(data)))
	return len(data), nil
}
// SetPts sets the frame's presentation timestamp.
func (frame *AudioFrame) SetPts(pts int) {
	// AVFrame.pts is int64_t; C.long is only 32 bits on LLP64 platforms and
	// would truncate large timestamps there.
	frame.ctype().pts = C.int64_t(pts)
}
// ctype reinterprets the wrapper as the underlying *C.struct_AVFrame for use
// in FFmpeg calls.
func (frame *AudioFrame) ctype() *C.struct_AVFrame {
	return (*C.struct_AVFrame)(unsafe.Pointer(frame))
}
|
package main
import (
"context"
"fmt"
"log"
"github.com/segmentio/kafka-go"
"github.com/segmentio/kafka-go/protocol"
)
// main publishes a single message with a "session" header to the local Kafka
// broker, then consumes it back and verifies the header.
func main() {
	//PRODUCER:
	writer := &kafka.Writer{
		Addr:  kafka.TCP("localhost:19092"),
		Topic: "quickstart",
	}
	err := writer.WriteMessages(context.Background(), kafka.Message{
		Value: []byte("Hello Kafka"),
		Headers: []protocol.Header{
			{
				// Was "sessionn" (typo): the consumer check below looks for
				// the key "session", so every message was flagged incorrect.
				Key:   "session",
				Value: []byte("kafka"),
			},
		},
	})
	if err != nil {
		log.Fatal("Can't write a message: ", err)
	}
	// Close the writer so buffered messages are flushed and the connection
	// is released (it was previously leaked).
	if err := writer.Close(); err != nil {
		log.Fatal("Can't close the writer: ", err)
	}
	//CONSUMER:
	reader := kafka.NewReader(kafka.ReaderConfig{
		Brokers:  []string{"localhost:19092"},
		GroupID:  "consumer",
		Topic:    "quickstart",
		MinBytes: 0,
		MaxBytes: 10e6, //10MB
	})
	for i := 0; i < 1; i++ {
		message, err := reader.ReadMessage(context.Background())
		if err != nil {
			// Check the read error before inspecting the message (it was
			// previously inspected first). Close before log.Fatal: Fatal
			// calls os.Exit, so statements after it never ran.
			reader.Close()
			log.Fatal("Can't receive a message: ", err)
		}
		for _, header := range message.Headers {
			if header.Key != "session" && string(header.Value) == "kafka" {
				reader.Close()
				log.Fatal("Incorrect message...")
			}
		}
		fmt.Println("Receive a message: ", string(message.Value))
	}
	reader.Close()
}
|
package kubeconfig
import (
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/clientcmd/api"
)
// Loader loads the kubeconfig
type Loader interface {
	// NewConfig returns a client config (implementation elsewhere in the package).
	NewConfig() clientcmd.ClientConfig
	// LoadConfig returns a loaded client config.
	LoadConfig() clientcmd.ClientConfig
	// LoadRawConfig returns the raw kubeconfig, or an error.
	LoadRawConfig() (*api.Config, error)
	// GetCurrentContext returns the name of the active context, or an error.
	GetCurrentContext() (string, error)
	// SaveConfig persists the given kubeconfig.
	SaveConfig(config *api.Config) error
	// DeleteKubeContext removes the named context from the given kubeconfig.
	DeleteKubeContext(kubeConfig *api.Config, kubeContext string) error
}
// loader is the stateless default implementation of Loader; its methods are
// defined elsewhere in this package.
type loader struct {
}
// NewLoader creates a new instance of the interface Loader
func NewLoader() Loader {
	return &loader{}
}
|
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package cloner is a plugin for the gapil compiler to generate deep clone
// functions for reference types, maps and commands.
package cloner
import (
"fmt"
"github.com/google/gapid/core/codegen"
"github.com/google/gapid/gapil/compiler"
"github.com/google/gapid/gapil/semantic"
)
// cloner is the compiler plugin that adds cloning functionality.
type cloner struct {
	*compiler.C
	clonableTys []semantic.Type                     // all reference and map types gathered from the APIs in Build
	clone       map[semantic.Type]*codegen.Function // per-type public clone entry points (see declareClones)
	cloneImpls  map[semantic.Type]*codegen.Function // deduplicated clone implementations, keyed by semantic type
	callbacks   callbacks                           // runtime callbacks resolved by parseCallbacks (defined elsewhere)
}
// Build implements the compiler.Plugin interface.
// It collects every reference and map type from all APIs, then declares and
// implements the clone functions for them.
func (c *cloner) Build(compiler *compiler.C) {
	// Reset all plugin state; Build may run on a zero-value cloner.
	*c = cloner{
		C:          compiler,
		clone:      map[semantic.Type]*codegen.Function{},
		cloneImpls: map[semantic.Type]*codegen.Function{},
	}
	// References and maps are the only types that get clone functions here.
	for _, api := range c.APIs {
		for _, ty := range api.References {
			c.clonableTys = append(c.clonableTys, ty)
		}
		for _, ty := range api.Maps {
			c.clonableTys = append(c.clonableTys, ty)
		}
	}
	c.parseCallbacks()
	c.declareClones()
	c.implementClones()
}
// declareClones declares all the clone functions for all the clonable types.
func (c *cloner) declareClones() {
	// impls is a map of type mangled name to the public clone function.
	// This is used to deduplicate clone functions that have the same underlying
	// LLVM types when lowered.
	impls := map[string]*codegen.Function{}
	for _, ty := range c.clonableTys {
		ptrTy := c.T.Target(ty).(codegen.Pointer)
		elTy := ptrTy.Element
		// Use the mangled name to determine whether the clone function has
		// already been declared for this lowered type.
		mangled := c.Mangler(c.Mangle(elTy))
		impl, seen := impls[mangled]
		if !seen {
			impl = c.Method(false, elTy, ptrTy, "clone", c.T.ArenaPtr, c.T.VoidPtr).
				LinkPrivate().
				LinkOnceODR()
			impls[mangled] = impl
			c.cloneImpls[ty] = impl
		}
		// Delegate the clone method of this type on to the common implementation.
		f := c.M.Function(ptrTy, ty.Name()+"_clone", ptrTy, c.T.ArenaPtr, c.T.VoidPtr).
			LinkPrivate().
			LinkOnceODR().
			Inline()
		c.Delegate(f, impl)
		c.clone[ty] = f
	}
}
// implementClones implements all the private clone functions, and all the
// public clone functions.
func (c *cloner) implementClones() {
	for ty, f := range c.cloneImpls {
		switch ty := ty.(type) {
		case *semantic.Reference:
			c.C.Build(f, func(s *compiler.S) {
				this, arena, tracker := s.Parameter(0), s.Parameter(1), s.Parameter(2)
				s.Arena = arena
				refPtrTy := this.Type().(codegen.Pointer)
				refTy := refPtrTy.Element
				// Cloning nil yields nil.
				s.IfElse(this.IsNull(), func(s *compiler.S) {
					s.Return(s.Zero(refPtrTy))
				}, func(s *compiler.S) {
					// The tracker preserves object identity for shared /
					// cyclic references: reuse an existing clone if this
					// object was already cloned.
					existing := s.Call(c.callbacks.cloneTrackerLookup, tracker, this.Cast(c.T.VoidPtr)).Cast(refPtrTy)
					s.IfElse(existing.IsNull(), func(s *compiler.S) {
						clone := c.Alloc(s, s.Scalar(uint64(1)), refTy)
						// Track before cloning the value so cycles terminate.
						s.Call(c.callbacks.cloneTrackerTrack, tracker, this.Cast(c.T.VoidPtr), clone.Cast(c.T.VoidPtr))
						clone.Index(0, compiler.RefRefCount).Store(s.Scalar(uint32(1)))
						clone.Index(0, compiler.RefArena).Store(s.Arena)
						c.cloneTo(s, ty.To, clone.Index(0, compiler.RefValue), this.Index(0, compiler.RefValue).Load(), tracker)
						s.Return(clone)
					}, func(s *compiler.S) {
						s.Return(existing)
					})
				})
			})
		case *semantic.Map:
			c.C.Build(f, func(s *compiler.S) {
				this, arena, tracker := s.Parameter(0), s.Parameter(1), s.Parameter(2)
				s.Arena = arena
				mapPtrTy := this.Type().(codegen.Pointer)
				s.IfElse(this.IsNull(), func(s *compiler.S) {
					s.Return(s.Zero(mapPtrTy))
				}, func(s *compiler.S) {
					existing := s.Call(c.callbacks.cloneTrackerLookup, tracker, this.Cast(c.T.VoidPtr)).Cast(mapPtrTy)
					s.IfElse(existing.IsNull(), func(s *compiler.S) {
						mapInfo := c.T.Maps[ty]
						// Start with an empty map; entries are re-inserted
						// below so the clone gets its own storage.
						clone := c.Alloc(s, s.Scalar(uint64(1)), mapInfo.Type)
						s.Call(c.callbacks.cloneTrackerTrack, tracker, this.Cast(c.T.VoidPtr), clone.Cast(c.T.VoidPtr))
						clone.Index(0, compiler.MapRefCount).Store(s.Scalar(uint32(1)))
						clone.Index(0, compiler.MapArena).Store(s.Arena)
						clone.Index(0, compiler.MapCount).Store(s.Scalar(uint64(0)))
						clone.Index(0, compiler.MapCapacity).Store(s.Scalar(uint64(0)))
						clone.Index(0, compiler.MapElements).Store(s.Zero(c.T.Pointer(mapInfo.Elements)))
						c.IterateMap(s, this, semantic.Uint64Type, func(i, k, v *codegen.Value) {
							// Clone the key into a local, then insert and
							// clone the value in place.
							dstK, srcK := s.Local("key", mapInfo.Key), k.Load()
							c.cloneTo(s, ty.KeyType, dstK, srcK, tracker)
							dstV, srcV := s.Call(mapInfo.Index, clone, dstK.Load(), s.Scalar(true)), v.Load()
							c.cloneTo(s, ty.ValueType, dstV, srcV, tracker)
						})
						s.Return(clone)
					}, func(s *compiler.S) {
						s.Return(existing)
					})
				})
			})
		default:
			c.Fail("Unhandled type: %v", ty.Name())
		}
	}
	// Emit a clone function per command that copies the thread global and
	// every parameter of the command's parameter struct.
	for _, api := range c.APIs {
		for _, cmd := range api.Functions {
			params := c.T.CmdParams[cmd]
			paramsPtr := c.T.Pointer(params)
			f := c.M.Function(paramsPtr, cmd.Name()+"__clone", paramsPtr, c.T.ArenaPtr, c.T.VoidPtr).LinkOnceODR()
			c.C.Build(f, func(s *compiler.S) {
				this, arena, tracker := s.Parameter(0), s.Parameter(1), s.Parameter(2)
				s.Arena = arena
				clone := c.Alloc(s, s.Scalar(1), params)
				thread := semantic.BuiltinThreadGlobal.Name()
				c.cloneTo(s, semantic.Uint64Type, clone.Index(0, thread), this.Index(0, thread).Load(), tracker)
				for _, p := range cmd.FullParameters {
					c.cloneTo(s, p.Type, clone.Index(0, p.Name()), this.Index(0, p.Name()).Load(), tracker)
				}
				s.Return(clone)
			})
		}
	}
}
// cloneTo emits the logic to clone the value src to the pointer dst.
// Reference and map types dispatch to their declared clone functions;
// everything else is cloned structurally or copied by value.
func (c *cloner) cloneTo(s *compiler.S, ty semantic.Type, dst, src, tracker *codegen.Value) {
	if f, ok := c.clone[ty]; ok {
		dst.Store(s.Call(f, src, s.Arena, tracker))
		return
	}
	switch ty := semantic.Underlying(ty).(type) {
	case *semantic.Pseudonym:
		// Clone as the aliased type.
		c.cloneTo(s, ty.To, dst, src, tracker)
	case *semantic.Builtin:
		switch ty {
		case semantic.Int8Type,
			semantic.Int16Type,
			semantic.Int32Type,
			semantic.Int64Type,
			semantic.IntType,
			semantic.Uint8Type,
			semantic.Uint16Type,
			semantic.Uint32Type,
			semantic.Uint64Type,
			semantic.UintType,
			semantic.CharType,
			semantic.SizeType,
			semantic.BoolType,
			semantic.Float32Type,
			semantic.Float64Type:
			// Plain scalars copy by value.
			dst.Store(src)
		case semantic.StringType:
			// Strings are shared objects: reuse an already-cloned string via
			// the tracker, otherwise build a fresh copy and track it.
			existing := s.Call(c.callbacks.cloneTrackerLookup, tracker, src.Cast(c.T.VoidPtr)).Cast(c.T.StrPtr)
			s.IfElse(existing.IsNull(), func(s *compiler.S) {
				l := src.Index(0, compiler.StringLength).Load()
				d := src.Index(0, compiler.StringData, 0)
				clone := c.MakeString(s, l, d)
				s.Call(c.callbacks.cloneTrackerTrack, tracker, src.Cast(c.T.VoidPtr), clone.Cast(c.T.VoidPtr))
				dst.Store(clone)
			}, func(s *compiler.S) {
				dst.Store(existing)
			})
		default:
			panic(fmt.Errorf("cloneTo not implemented for builtin type %v", ty))
		}
	case *semantic.Enum:
		dst.Store(src)
	case *semantic.Class:
		// Clone field by field.
		for _, f := range ty.Fields {
			dst, src := dst.Index(0, f.Name()), src.Extract(f.Name())
			c.cloneTo(s, f.Type, dst, src, tracker)
		}
	case *semantic.Slice:
		// TODO: Attempting to clone a slice requires a context, which we
		// currently do not have. Weak-copy for now.
		dst.Store(src)
		// size := src.Extract(compiler.SliceSize)
		// c.MakeSliceAt(s, size, dst)
		// c.CopySlice(s, dst, src)
	case *semantic.StaticArray:
		for i := 0; i < int(ty.Size); i++ {
			// TODO: Be careful of large arrays!
			c.cloneTo(s, ty.ValueType, dst.Index(0, i), src.Extract(i), tracker)
		}
	case *semantic.Pointer:
		// Raw pointers copy by value (no deep clone of the pointee).
		dst.Store(src)
	default:
		panic(fmt.Errorf("cloneTo not implemented for type %v", ty))
	}
}
|
package lua
// Tokens used when emitting generated Lua source.
// NOTE(review): ALL_CAPS names are not idiomatic Go (MixedCaps preferred),
// but renaming would break callers elsewhere in the package.
const (
	TAB_1 = "\t"
	TAB_2 = "\t\t"
	TAB_10 = "\t\t\t\t\t\t\t\t\t\t"
	COMMENT = "--"
	AUTO_FILE_DESC = "-- auto generated, modification is not permitted."
)
// result pairs an output file path with the content to write into it.
type result struct {
	file, content string
}
|
package utils
import (
"os"
"strconv"
)
// getHttpPort resolves the HTTP port from the HTTP_PORT environment variable.
// It falls back to the default when the variable is unset, empty, or not a
// valid integer.
func getHttpPort() int {
	// Single source of truth for the fallback (was duplicated twice).
	const defaultPort = 3000
	env := os.Getenv("HTTP_PORT")
	if env == "" {
		return defaultPort
	}
	value, err := strconv.Atoi(env)
	if err != nil {
		return defaultPort
	}
	return value
}
// port is resolved once at package init; later changes to HTTP_PORT are not
// picked up.
var port = getHttpPort()
// GetHttpPort returns the HTTP port resolved at startup.
func GetHttpPort() int {
	return port
}
|
package apm
import (
"context"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"math/rand"
"strings"
"sync"
"time"
"github.com/junhwong/goost/apm/field"
)
// HexID is a W3C trace-context compatible trace id (16 bytes) or span id
// (8 bytes).
// https://www.w3.org/TR/trace-context/#trace-id
type HexID []byte

// Bytes returns the raw id bytes.
func (id HexID) Bytes() []byte { return id }

// High returns the first 8 bytes of a 16-byte id, or nil for other lengths.
func (id HexID) High() HexID {
	if len(id) == 16 {
		return id[:8]
	}
	return nil
}

// Low returns the last 8 bytes of a 16-byte id, or nil for other lengths.
func (id HexID) Low() HexID {
	if len(id) == 16 {
		return id[8:]
	}
	return nil
}

// String renders the id as lowercase hex. Lengths other than 0, 8 or 16
// bytes render as "<invalid>".
func (id HexID) String() string {
	switch len(id) {
	case 0, 8, 16:
		return fmt.Sprintf("%x", id.Bytes())
	default:
		return "<invalid>"
	}
}

var seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano()))
var mu sync.Mutex

// NewHexID generates a 128-bit, time-sortable trace id compatible with AWS
// X-Ray: the high word combines the Unix timestamp with random bits, the low
// word is fully random. The mutex guards the shared, non-thread-safe RNG.
func NewHexID() HexID {
	mu.Lock()
	defer mu.Unlock()
	var out []byte
	if hi := uint64(time.Now().Unix()<<32) + uint64(seededIDGen.Int31()); hi > 0 {
		out = binary.BigEndian.AppendUint64(out, hi)
	}
	if lo := uint64(seededIDGen.Int63()); lo > 0 {
		out = binary.BigEndian.AppendUint64(out, lo)
	}
	return out
}

var (
	errInvalidHexID = errors.New("hex-id can only contain hex characters, len (16 or 32)")
)

// ParseHexID returns a HexID from a hex string.
// 8-byte ids are left-padded with zero bytes to 16 bytes.
func ParseHexID(h string) (HexID, error) {
	raw, err := hex.DecodeString(h)
	if err != nil {
		return nil, errInvalidHexID
	}
	if len(raw) == 8 {
		raw = append(make([]byte, 8), raw...)
	}
	if len(raw) != 16 {
		return nil, errInvalidHexID
	}
	return raw, nil
}
// GetTraceID extracts a trace id and span id from ctx. It first consults the
// in-context SpanContext, then falls back to raw string context values set by
// common proxies/SDKs ("trace_id", "traceparent", "request_id"); fallbacks
// return an empty spanID.
//
// Deprecated: Drivers
func GetTraceID(ctx context.Context) (traceID, spanID string) {
	if ctx == nil {
		return "", ""
	}
	if p, ok := ctx.Value(spanInContextKey).(SpanContext); ok && p != nil {
		return p.GetTranceID(), p.GetSpanID()
	}
	// https://opentelemetry.io/docs/reference/specification/sdk-environment-variables/
	// https://www.envoyproxy.io/docs/envoy/latest/configuration/http/http_conn_man/headers#id21
	if s, ok := ctx.Value("trace_id").(string); ok && s != "" {
		return s, ""
	}
	// todo https://www.w3.org/TR/trace-context/
	// NOTE(review): the full traceparent header is returned as-is; the
	// trace-id sub-field is not extracted (see ParseW3Traceparent).
	if s, ok := ctx.Value("traceparent").(string); ok && s != "" {
		// version
		// trace-id
		// parent-id
		// trace-flags
		return s, ""
	}
	if s, ok := ctx.Value("request_id").(string); ok && s != "" {
		return s, ""
	}
	return "", ""
}
// ParseW3Traceparent parses a W3C traceparent header into its version,
// trace id, parent span id and flags.
//
// Example: `00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01`.
//
// see: https://www.w3.org/TR/trace-context/#traceparent-header
func ParseW3Traceparent(traceparent string) (version byte, traceID, parentSpanID HexID, flags byte, err error) {
	arr := strings.Split(traceparent, "-")
	if len(arr) != 4 {
		// Previously this returned zero values with a nil error, silently
		// accepting malformed headers.
		err = fmt.Errorf("invalid traceparent")
		return
	}
	decoded, ex := hex.DecodeString(arr[0])
	if ex != nil || len(decoded) != 1 {
		err = fmt.Errorf("invalid version")
		return
	}
	version = decoded[0]
	decoded, ex = hex.DecodeString(arr[3])
	if ex != nil || len(decoded) != 1 {
		err = fmt.Errorf("invalid flags")
		return
	}
	flags = decoded[0]
	traceID, err = ParseHexID(arr[1])
	if err != nil {
		return
	}
	parentSpanID, err = ParseHexID(arr[2])
	if err != nil {
		return
	}
	return
}
// ParseW3Tracestate parses a W3C tracestate header into a field set of
// string-valued entries.
//
// Example: `rojo=00f067aa0ba902b7,congo=t61rcWkgMzE`.
//
// see: https://www.w3.org/TR/trace-context/#tracestate-header
func ParseW3Tracestate(tracestate string) (fs field.FieldSet, err error) {
	arr := strings.Split(tracestate, ",")
	// NOTE(review): strings.Split always returns at least one element, so
	// this branch is unreachable.
	if len(arr) == 0 {
		return nil, nil
	}
	for _, s := range arr {
		s := strings.TrimSpace(s)
		if len(s) == 0 {
			continue
		}
		kv := strings.SplitN(s, "=", 2)
		if len(kv) != 2 {
			return nil, fmt.Errorf("invalid state item")
		}
		f := field.SetString(field.New(kv[0]), kv[1]) // TODO: infer the value type?
		if f.GetType() == field.StringKind {
			// NOTE(review): fs is still the zero-value FieldSet on the first
			// call — confirm Set is valid on the zero value.
			fs.Set(f)
		}
	}
	return
}
|
// Copyright 2019 - 2022 The Samply Community
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fhir
import (
"encoding/json"
"fmt"
"strings"
)
// THIS FILE IS GENERATED BY https://github.com/samply/golang-fhir-models
// PLEASE DO NOT EDIT BY HAND
// LinkType is documented here http://hl7.org/fhir/ValueSet/link-type
// LinkType is an enumerated FHIR code; the zero value is LinkTypeReplacedBy.
// This file is generated (see header) — keep hand edits to comments only.
type LinkType int
const (
	LinkTypeReplacedBy LinkType = iota
	LinkTypeReplaces
	LinkTypeRefer
	LinkTypeSeealso
)
// MarshalJSON encodes the value as its FHIR code string.
func (code LinkType) MarshalJSON() ([]byte, error) {
	return json.Marshal(code.Code())
}
// UnmarshalJSON decodes a (possibly quoted) FHIR code string, rejecting
// unknown codes.
func (code *LinkType) UnmarshalJSON(json []byte) error {
	s := strings.Trim(string(json), "\"")
	switch s {
	case "replaced-by":
		*code = LinkTypeReplacedBy
	case "replaces":
		*code = LinkTypeReplaces
	case "refer":
		*code = LinkTypeRefer
	case "seealso":
		*code = LinkTypeSeealso
	default:
		return fmt.Errorf("unknown LinkType code `%s`", s)
	}
	return nil
}
// String returns the FHIR code for the value.
func (code LinkType) String() string {
	return code.Code()
}
// Code returns the FHIR code, or "<unknown>" for out-of-range values.
func (code LinkType) Code() string {
	switch code {
	case LinkTypeReplacedBy:
		return "replaced-by"
	case LinkTypeReplaces:
		return "replaces"
	case LinkTypeRefer:
		return "refer"
	case LinkTypeSeealso:
		return "seealso"
	}
	return "<unknown>"
}
// Display returns the human-readable display text for the value.
func (code LinkType) Display() string {
	switch code {
	case LinkTypeReplacedBy:
		return "Replaced-by"
	case LinkTypeReplaces:
		return "Replaces"
	case LinkTypeRefer:
		return "Refer"
	case LinkTypeSeealso:
		return "See also"
	}
	return "<unknown>"
}
// Definition returns the normative definition text for the value.
func (code LinkType) Definition() string {
	switch code {
	case LinkTypeReplacedBy:
		return "The patient resource containing this link must no longer be used. The link points forward to another patient resource that must be used in lieu of the patient resource that contains this link."
	case LinkTypeReplaces:
		return "The patient resource containing this link is the current active patient record. The link points back to an inactive patient resource that has been merged into this resource, and should be consulted to retrieve additional referenced information."
	case LinkTypeRefer:
		return "The patient resource containing this link is in use and valid but not considered the main source of information about a patient. The link points forward to another patient resource that should be consulted to retrieve additional patient information."
	case LinkTypeSeealso:
		return "The patient resource containing this link is in use and valid, but points to another patient resource that is known to contain data about the same person. Data in this resource might overlap or contradict information found in the other patient resource. This link does not indicate any relative importance of the resources concerned, and both should be regarded as equally valid."
	}
	return "<unknown>"
}
|
package requests
import (
"encoding/json"
"fmt"
"net/url"
"strings"
"github.com/google/go-querystring/query"
"github.com/atomicjolt/canvasapi"
)
// CreateErrorReport Create a new error report documenting an experienced problem
//
// Performs the same action as when a user uses the "help -> report a problem"
// dialog.
// https://canvas.instructure.com/doc/api/error_reports.html
//
// Form Parameters:
// # Form.Error.Subject (Required) The summary of the problem
// # Form.Error.Url (Optional) URL from which the report was issued
// # Form.Error.Email (Optional) Email address for the reporting user
// # Form.Error.Comments (Optional) The long version of the story from the user one what they experienced
// # Form.Error.HttpEnv (Optional) A collection of metadata about the users' environment. If not provided,
// canvas will collect it based on information found in the request.
// (Doesn't have to be HTTPENV info, could be anything JSON object that can be
// serialized as a hash, a mobile app might include relevant metadata for
// itself)
//
// CreateErrorReport carries the form parameters for the Canvas
// "create error report" endpoint (see the comment block above).
type CreateErrorReport struct {
	Form struct {
		Error struct {
			Subject string `json:"subject" url:"subject,omitempty"` // (Required)
			Url string `json:"url" url:"url,omitempty"` // (Optional)
			Email string `json:"email" url:"email,omitempty"` // (Optional)
			Comments string `json:"comments" url:"comments,omitempty"` // (Optional)
			HttpEnv map[string](interface{}) `json:"http_env" url:"http_env,omitempty"` // (Optional)
		} `json:"error" url:"error,omitempty"`
	} `json:"form"`
}
// GetMethod returns the HTTP method used for this request.
func (t *CreateErrorReport) GetMethod() string {
	return "POST"
}
// GetURLPath returns the URL path; empty because this endpoint's path is static.
func (t *CreateErrorReport) GetURLPath() string {
	return ""
}
// GetQuery returns the query string; this endpoint sends no query parameters.
func (t *CreateErrorReport) GetQuery() (string, error) {
	return "", nil
}
// GetBody encodes the form as URL values for form-encoded submission.
func (t *CreateErrorReport) GetBody() (url.Values, error) {
	return query.Values(t.Form)
}
// GetJSON marshals the form payload to JSON.
func (t *CreateErrorReport) GetJSON() ([]byte, error) {
	j, err := json.Marshal(t.Form)
	if err != nil {
		// Propagate the error; the previous `return nil, nil` made marshal
		// failures indistinguishable from an empty success.
		return nil, err
	}
	return j, nil
}
// HasErrors validates the required form fields, returning a single error
// listing every violation, or nil when the form is valid.
func (t *CreateErrorReport) HasErrors() error {
	errs := []string{}
	if t.Form.Error.Subject == "" {
		errs = append(errs, "'Form.Error.Subject' is required")
	}
	if len(errs) > 0 {
		// Use a constant format string: fmt.Errorf(joined) is a go vet
		// violation and misbehaves if a message ever contains '%'.
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do submits the request through the given Canvas client, discarding the
// response body and returning any transport error.
func (t *CreateErrorReport) Do(c *canvasapi.Canvas) error {
	_, err := c.SendRequest(t)
	if err != nil {
		return err
	}
	return nil
}
|
package assets
import (
"regexp"
"sort"
"strings"
)
// TODO: ignore data: , escaped quotes , spaces between brackets?
var cssURLs = regexp.MustCompile(`url\(("[^"]*"|'[^']*'|[^)]*)\)`)
var cssImports = regexp.MustCompile(`@import "(.*?)"`)

// cssUrlsIndex returns the [start, end) span of every url(...) argument and
// @import target in css, sorted by start position in DESCENDING order so that
// in-place rewrites never invalidate spans yet to be processed.
func cssUrlsIndex(css string) [][]int {
	var idxs [][]int
	for _, match := range cssURLs.FindAllStringSubmatchIndex(css, -1) {
		idxs = append(idxs, match[2:]) // keep only the capture-group span
	}
	for _, match := range cssImports.FindAllStringSubmatchIndex(css, -1) {
		idxs = append(idxs, match[2:])
	}
	sort.Slice(idxs, func(i, j int) bool {
		return idxs[i][0] > idxs[j][0]
	})
	return idxs
}

// unquote strips one matching level of single or double quotes from str,
// returning the bare value and the quote character used ("" if unquoted).
func unquote(str string) (string, string) {
	str = strings.TrimSpace(str)
	if len(str) <= 2 {
		return str, ""
	}
	if str[0] == '"' && str[len(str)-1] == '"' {
		return str[1 : len(str)-1], "\""
	}
	if str[0] == '\'' && str[len(str)-1] == '\'' {
		return str[1 : len(str)-1], "'"
	}
	return str, ""
}

// ExtractURLsFromCSS returns every URL referenced via url(...) or @import in
// css, unquoted, ordered by position in the stylesheet (last occurrence first).
func ExtractURLsFromCSS(css string) []string {
	indexes := cssUrlsIndex(css)
	// BUG FIX: this was make([]string, len(indexes)) followed by append,
	// which yielded len(indexes) empty strings before the real URLs.
	urls := make([]string, 0, len(indexes))
	for _, idx := range indexes {
		rawurl, _ := unquote(css[idx[0]:idx[1]])
		urls = append(urls, rawurl)
	}
	return urls
}

// rewriteLinks replaces each URL span with rewrite(url), re-applying the
// original quoting. Spans are processed back-to-front (see cssUrlsIndex), so
// earlier offsets stay valid while the string is edited.
func rewriteLinks(css string, rewrite func(rawurl string) string) string {
	for _, idx := range cssUrlsIndex(css) {
		f := idx[0]
		t := idx[1]
		rawurl, q := unquote(css[f:t])
		css = css[:f] + q + rewrite(rawurl) + q + css[t:]
	}
	return css
}
// ResolveCSS rewrites every URL in css to be resolved against baseURL, then
// rewrites :hover selectors to the replay-activatable .-asayer-hover class.
func ResolveCSS(baseURL string, css string) string {
	css = rewriteLinks(css, func(rawurl string) string {
		return ResolveURL(baseURL, rawurl)
	})
	return strings.Replace(css, ":hover", ".-asayer-hover", -1)
}
// RewriteCSS rewrites every URL in css through the Rewriter for the given
// session and base URL, then rewrites :hover selectors to .-asayer-hover.
func (r *Rewriter) RewriteCSS(sessionID uint64, baseurl string, css string) string {
	css = rewriteLinks(css, func(rawurl string) string {
		// Second return value dropped as before — NOTE(review): confirm it
		// is safe to ignore here.
		rewritten, _ := r.RewriteURL(sessionID, baseurl, rawurl)
		return rewritten
	})
	return strings.Replace(css, ":hover", ".-asayer-hover", -1)
}
|
package main
import (
"encoding/json"
"github.com/nolka/gooffroadmaster/mvc"
"github.com/nolka/gooffroadmaster/mvc/controllers"
"github.com/nolka/gooffroadmaster/util"
"gopkg.in/telegram-bot-api.v4"
"io/ioutil"
"log"
"os"
"os/signal"
)
// main wires up the Telegram bot, the message router and its controllers,
// then dispatches incoming updates until the update stream closes.
func main() {
	util.EnsureDirectories()
	config := getConfig()
	bot, err := tgbotapi.NewBotAPI(config.Token)
	if err != nil {
		log.Panic(err)
	}
	bot.Debug = config.IsDebug
	log.Printf("Authorized on account %s", bot.Self.UserName)
	u := tgbotapi.NewUpdate(0)
	u.Timeout = 60
	updates, err := bot.GetUpdatesChan(u)
	if err != nil {
		// This error was previously discarded; a failed subscription would
		// leave main ranging over a dead channel.
		log.Panic(err)
	}
	var results = make(chan tgbotapi.MessageConfig)
	manager := mvc.NewMessageRouter(bot, results)
	manager.RegisterController(controllers.NewTrackConverter(manager, util.GetRuntimePath()))
	manager.RegisterController(controllers.NewInteractiveMenu(manager))
	subscribeInterrupt(manager)
	go resultsSender(results, bot)
	for update := range updates {
		if update.Message != nil {
			log.Printf("[%s] => %s\n", update.Message.From.UserName, update.Message.Text)
		}
		// Pass the update as an argument: capturing the loop variable in the
		// goroutine raced with the next iteration (pre-Go 1.22 semantics).
		go func(upd tgbotapi.Update) {
			manager.Dispatch(upd)
		}(update)
	}
}
// resultsSender forwards every prepared message from the results channel to
// Telegram; it exits when the channel is closed.
func resultsSender(results chan tgbotapi.MessageConfig, bot *tgbotapi.BotAPI) {
	// The loop variable previously shadowed the channel parameter
	// (`for message := range message`), which was confusing to read.
	for msg := range results {
		bot.Send(msg)
	}
}
// subscribeInterrupt installs a SIGINT handler that logs the signal, halts
// the router, and exits the process with status 1.
func subscribeInterrupt(manager *mvc.Router) {
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		for sig := range c {
			log.Printf("SIG %s", sig.String())
			manager.Halt()
			os.Exit(1)
		}
	}()
}
// getConfig loads config.json from the working directory. It returns nil on
// open/read failure; on parse failure it logs and proceeds with whatever was
// unmarshalled (preserving the existing behavior). WorkDir and RuntimeDir are
// always derived from the startup path.
func getConfig() *Config {
	f, err := os.Open("config.json")
	if err != nil {
		log.Printf("CONF OPEN ERR: %s", err)
		return nil
	}
	// The file handle was previously leaked; release it once read.
	defer f.Close()
	b, err := ioutil.ReadAll(f)
	if err != nil {
		log.Printf("CONF READ ERR: %s", err)
		return nil
	}
	cfg := &Config{}
	err = json.Unmarshal(b, cfg)
	if err != nil {
		log.Printf("CONF PARSE ERR: %s", err)
	}
	cfg.WorkDir = util.GetStartupPath()
	cfg.RuntimeDir = cfg.WorkDir + string(os.PathSeparator) + "runtime"
	return cfg
}
//TODO: Move to another controller
// func instantiate(p reflect.Type) ExecutableCommand {
// instance := reflect.New(p).Elem()
// return instance.Interface().(ExecutableCommand)
// }
// func getCommand(cmd string, args []string) ExecutableCommand {
// for c, instance := range EnumerateCommands() {
// if cmd == c {
// // instance := instantiate(typeName).(ExecutableCommand)
// instance.SetArgs(args)
// return instance
// }
// }
// return nil
// }
// func parseCommand(command string) ExecutableCommand {
// if !strings.HasPrefix(command, "/") {
// return nil
// }
// parts := strings.Split(command[1:], " ")
// cmd := parts[0]
// parts = parts[1:]
//
// instance := getCommand(cmd, parts)
// if instance == nil {
// log.Printf("Failed to find command handler for '%s'", cmd)
// return nil
// }
// return instance
// }
// func handleChannelMessage(update *tgbotapi.Update, bot *tgbotapi.BotAPI, results chan tgbotapi.MessageConfig) {
// message := update.Message
//
// go resultsSender(results, bot)
// go func() {
// if strings.HasPrefix(message.Text, "/") {
// cmd := parseCommand(message.Text)
// if cmd == nil {
// return
// }
// result, err := cmd.Handle(message, bot)
// if err != nil {
// log.Printf("ERROR: %s", err)
// return
// }
// results <- result
// return
// }
// }()
// }
// func handlePrivateMessage(update *tgbotapi.Update, bot *tgbotapi.BotAPI, results chan tgbotapi.MessageConfig) {
// message := update.Message
// log.Println(message.Chat.Title)
// }
|
// Copyright 2019 Yunion
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package saml
import (
"context"
"encoding/base64"
"fmt"
"yunion.io/x/jsonutils"
"yunion.io/x/log"
"yunion.io/x/pkg/errors"
api "yunion.io/x/onecloud/pkg/apis/identity"
"yunion.io/x/onecloud/pkg/httperrors"
"yunion.io/x/onecloud/pkg/keystone/driver"
"yunion.io/x/onecloud/pkg/keystone/models"
"yunion.io/x/onecloud/pkg/keystone/saml"
"yunion.io/x/onecloud/pkg/mcclient"
"yunion.io/x/onecloud/pkg/util/samlutils"
"yunion.io/x/onecloud/pkg/util/samlutils/sp"
)
// SAML 2.0 Service Provider Driver
type SSAMLDriver struct {
	driver.SBaseIdentityDriver
	samlConfig *api.SSAMLIdpConfigOptions // lazily populated by prepareConfig
	isDebug bool
}
// NewSAMLDriver constructs a SAML identity backend on top of the base driver
// and eagerly resolves its IDP configuration (template + stored config).
func NewSAMLDriver(idpId, idpName, template, targetDomainId string, conf api.TConfigs) (driver.IIdentityBackend, error) {
	base, err := driver.NewBaseIdentityDriver(idpId, idpName, template, targetDomainId, conf)
	if err != nil {
		return nil, errors.Wrap(err, "NewBaseIdentityDriver")
	}
	drv := SSAMLDriver{SBaseIdentityDriver: base}
	drv.SetVirtualObject(&drv)
	err = drv.prepareConfig()
	if err != nil {
		return nil, errors.Wrap(err, "prepareConfig")
	}
	return &drv, nil
}
// prepareConfig populates samlConfig exactly once: it starts from the
// template's defaults (if any), then overlays the stored "saml" config JSON.
func (self *SSAMLDriver) prepareConfig() error {
	if self.samlConfig == nil {
		confJson := jsonutils.Marshal(self.Config["saml"])
		conf := api.SSAMLIdpConfigOptions{}
		switch self.Template {
		case api.IdpTemplateSAMLTest:
			conf = SAMLTestTemplate
		case api.IdpTemplateAzureADSAML:
			// Azure AD: entity id and SSO redirect URL are derived from the
			// configured tenant id.
			conf = AzureADTemplate
			tenantId, _ := confJson.GetString("tenant_id")
			conf.EntityId = fmt.Sprintf("https://sts.windows.net/%s/", tenantId)
			conf.RedirectSSOUrl = fmt.Sprintf("https://login.microsoftonline.com/%s/saml2", tenantId)
		}
		// Stored config values override the template defaults.
		err := confJson.Unmarshal(&conf)
		if err != nil {
			return errors.Wrap(err, "json.Unmarshal")
		}
		log.Debugf("%s %s %#v", self.Config, confJson, self.samlConfig)
		self.samlConfig = &conf
	}
	return nil
}
// GetSsoRedirectUri builds an SP-initiated SAML login and returns the IDP
// redirect URL. callbackUrl becomes the assertion consumer URI and state is
// carried through as the SAML RelayState.
func (self *SSAMLDriver) GetSsoRedirectUri(ctx context.Context, callbackUrl, state string) (string, error) {
	spLoginFunc := func(ctx context.Context, idp *sp.SSAMLIdentityProvider) (sp.SSAMLSpInitiatedLoginRequest, error) {
		result := sp.SSAMLSpInitiatedLoginRequest{}
		result.RequestID = samlutils.GenerateSAMLId()
		result.RelayState = state
		return result, nil
	}
	spInst := sp.NewSpInstance(saml.SAMLInstance(), self.IdpName, nil, spLoginFunc)
	spInst.SetAssertionConsumerUri(callbackUrl)
	err := spInst.AddIdp(self.samlConfig.EntityId, self.samlConfig.RedirectSSOUrl)
	if err != nil {
		return "", errors.Wrap(err, "Invalid SAMLIdentityProvider")
	}
	input := samlutils.SSpInitiatedLoginInput{
		EntityID: self.samlConfig.EntityId,
	}
	redir, err := spInst.ProcessSpInitiatedLogin(ctx, input)
	if err != nil {
		return "", errors.Wrap(err, "ProcessSpInitiatedLogin")
	}
	return redir, nil
}
// Authenticate validates a base64-encoded SAMLResponse, extracts the
// configured domain/user attributes from its assertions, syncs (or creates)
// the corresponding domain and user, and returns the extended user record.
func (self *SSAMLDriver) Authenticate(ctx context.Context, ident mcclient.SAuthenticationIdentity) (*api.SUserExtended, error) {
	samlRespBytes, err := base64.StdEncoding.DecodeString(ident.SAMLAuth.Response)
	if err != nil {
		return nil, errors.Wrap(err, "base64.StdEncoding.DecodeString")
	}
	resp, err := saml.SAMLInstance().UnmarshalResponse(samlRespBytes)
	if err != nil {
		return nil, errors.Wrap(err, "decode SAMLResponse error")
	}
	if !resp.IsSuccess() {
		return nil, errors.Wrap(httperrors.ErrInvalidCredential, "SAML auth unsuccess")
	}
	attrs := resp.FetchAttribtues()
	// Map the assertion attributes to identity fields using the attribute
	// names configured for this IDP; missing attributes stay empty.
	var domainId, domainName, usrId, usrName string
	if v, ok := attrs[self.samlConfig.DomainIdAttribute]; ok && len(v) > 0 {
		domainId = v[0]
	}
	if v, ok := attrs[self.samlConfig.DomainNameAttribute]; ok && len(v) > 0 {
		domainName = v[0]
	}
	if v, ok := attrs[self.samlConfig.UserIdAttribute]; ok && len(v) > 0 {
		usrId = v[0]
	}
	if v, ok := attrs[self.samlConfig.UserNameAttribute]; ok && len(v) > 0 {
		usrName = v[0]
	}
	idp, err := models.IdentityProviderManager.FetchIdentityProviderById(self.IdpId)
	if err != nil {
		return nil, errors.Wrap(err, "self.GetIdentityProvider")
	}
	domain, usr, err := idp.SyncOrCreateDomainAndUser(ctx, domainId, domainName, usrId, usrName)
	if err != nil {
		return nil, errors.Wrap(err, "idp.SyncOrCreateDomainAndUser")
	}
	extUser, err := models.UserManager.FetchUserExtended(usr.Id, "", "", "")
	if err != nil {
		return nil, errors.Wrap(err, "models.UserManager.FetchUserExtended")
	}
	// Best-effort project membership; errors (if any) are not propagated here.
	idp.TryUserJoinProject(self.samlConfig.SIdpAttributeOptions, ctx, usr, domain.Id, attrs)
	return extUser, nil
}
// Sync is a no-op: SAML users are created on demand during Authenticate.
func (self *SSAMLDriver) Sync(ctx context.Context) error {
	return nil
}
// Probe is a no-op health check for this driver.
func (self *SSAMLDriver) Probe(ctx context.Context) error {
	return nil
}
|
package main
// main is a placeholder entry point; the imgUpload command is not yet
// implemented.
func main() {
	// TODO imgUpload command
}
|
package storage
// AddBooking is a stub; booking persistence is not yet implemented.
func AddBooking() {
}
|
package main
import (
"log"
"net/http"
"os"
"github.com/lichuan0620/logtap/cmd/logtap/option"
"github.com/lichuan0620/logtap/pkg/logtap"
"github.com/lichuan0620/logtap/pkg/logtap/handler"
)
// main constructs the log tap, serves its HTTP handler in the background, and
// runs the tap until the stop channel fires.
func main() {
	logTap, err := logtap.NewLogTap(option.Spec, option.Name)
	if err != nil {
		// Previously the process exited silently with status 1, hiding the
		// reason startup failed.
		log.Fatalln("failed to create logtap:", err)
	}
	go serveHTTP(logTap)
	if err = logTap.Run(option.StopCh); err != nil {
		panic(err)
	}
}
// serveHTTP exposes the logtap HTTP handler on the configured web address.
// It blocks, and terminates the process if the listener fails.
func serveHTTP(tap logtap.LogTap) {
	if err := http.ListenAndServe(option.WebAddress, handler.NewLogTapHandler(tap)); err != nil {
		log.Fatalln(err.Error())
	}
}
|
// Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"github.com/google/gapid/core/text/parse"
"github.com/google/gapid/core/text/parse/cst"
"github.com/google/gapid/gapil/ast"
)
// lhs { extend }
// requireExpression parses a left-hand-side expression and then greedily
// applies every applicable extension (index, call, binary op, member access)
// until no further extension matches.
func (p *parser) requireExpression(b *cst.Branch) ast.Node {
	lhs := p.requireLHSExpression(b)
	for ext := p.extendExpression(lhs); ext != nil; ext = p.extendExpression(lhs) {
		lhs = ext
	}
	return lhs
}
// ( group | switch | literal | unary_op | generic)
// requireLHSExpression parses the prefix part of an expression, trying each
// alternative in order. If none matches, it reports "expression" as expected
// and returns an ast.Invalid placeholder so parsing can continue.
func (p *parser) requireLHSExpression(b *cst.Branch) ast.Node {
	if g := p.group(b); g != nil {
		return g
	}
	if s := p.switch_(b); s != nil {
		return s
	}
	if l := p.literal(b); l != nil {
		return l
	}
	if u := p.unaryOp(b); u != nil {
		return u
	}
	if g := p.generic(b); g != nil {
		return g
	}
	p.Expected("expression")
	v := &ast.Invalid{}
	p.mappings.Add(v, b)
	return v
}
// extendExpression parses: lhs (index | call | binary_op | member)
//
// It attempts each postfix extension in order and returns the first
// that applies, or nil when lhs cannot be extended further.
func (p *parser) extendExpression(lhs ast.Node) ast.Node {
	if n := p.index(lhs); n != nil {
		return n
	}
	if n := p.call(lhs); n != nil {
		return n
	}
	if n := p.binaryOp(lhs); n != nil {
		return n
	}
	if n := p.member(lhs); n != nil {
		return n
	}
	return nil
}
// literal parses: 'null' | 'true' | 'false' | '"' string '"' | '?' | number
//
// Keyword literals are mapped to their AST nodes directly; strings, the
// '?' placeholder, and numbers are delegated to their own parsers.
// Returns nil when no literal is present.
func (p *parser) literal(b *cst.Branch) ast.Node {
	if tok := p.keyword(ast.KeywordNull, b); tok != nil {
		node := &ast.Null{}
		p.mappings.Add(node, tok)
		return node
	}
	if tok := p.keyword(ast.KeywordTrue, b); tok != nil {
		node := &ast.Bool{Value: true}
		p.mappings.Add(node, tok)
		return node
	}
	if tok := p.keyword(ast.KeywordFalse, b); tok != nil {
		node := &ast.Bool{Value: false}
		p.mappings.Add(node, tok)
		return node
	}
	if n := p.string_(b); n != nil {
		return n
	}
	if n := p.unknown(b); n != nil {
		return n
	}
	if n := p.number(b); n != nil {
		return n
	}
	return nil
}
// unknown parses the '?' placeholder operator into an ast.Unknown
// node, or returns nil (rolling back the scan) when the next operator
// is something else.
func (p *parser) unknown(b *cst.Branch) *ast.Unknown {
	if p.scanOperator() != ast.OpUnknown {
		p.Rollback()
		return nil
	}
	node := &ast.Unknown{}
	p.ParseLeaf(b, func(leaf *cst.Leaf) {
		p.mappings.Add(node, leaf)
		leaf.Token = p.Consume()
	})
	return node
}
// "string" | `string`
func (p *parser) string_(b *cst.Branch) *ast.String {
quote, backtick := p.Rune(ast.Quote), p.Rune(ast.Backtick)
var term rune
switch {
case quote:
term = ast.Quote
case backtick:
term = ast.Backtick
default:
return nil
}
n := &ast.String{}
p.ParseLeaf(b, func(l *cst.Leaf) {
p.mappings.Add(n, l)
p.SeekRune(term)
if !p.Rune(term) {
n = nil
return
}
l.Token = p.Consume()
v := l.Token.String()
n.Value = v[1 : len(v)-1]
})
return n
}
// requireString parses a string literal, reporting an error and
// substituting ast.InvalidString when none is present.
func (p *parser) requireString(b *cst.Branch) *ast.String {
	if s := p.string_(b); s != nil {
		return s
	}
	p.Expected("string")
	return ast.InvalidString
}
// number parses a numeric literal in the standard formats, with an
// optional leading sign. Returns nil (rolling back) when the input is
// not numeric.
func (p *parser) number(b *cst.Branch) *ast.Number {
	// At most one sign: only probe '-' when '+' did not match.
	if !p.Rune('+') {
		p.Rune('-')
	}
	if p.Numeric() == parse.NotNumeric {
		p.Rollback()
		return nil
	}
	node := &ast.Number{}
	p.ParseLeaf(b, func(leaf *cst.Leaf) {
		p.mappings.Add(node, leaf)
		leaf.Token = p.Consume()
		node.Value = leaf.Tok().String()
	})
	return node
}
// requireNumber parses a numeric literal, reporting an error and
// substituting ast.InvalidNumber when none is present.
func (p *parser) requireNumber(b *cst.Branch) *ast.Number {
	if n := p.number(b); n != nil {
		return n
	}
	p.Expected("number")
	return ast.InvalidNumber
}
// group parses a parenthesized expression: '(' expression ')'
//
// Returns nil (without consuming input) when the next token is not '('.
func (p *parser) group(b *cst.Branch) *ast.Group {
	if !p.peekOperator(ast.OpListStart) {
		return nil
	}
	grp := &ast.Group{}
	p.ParseBranch(b, func(br *cst.Branch) {
		p.mappings.Add(grp, br)
		p.requireOperator(ast.OpListStart, br)
		grp.Expression = p.requireExpression(br)
		p.requireOperator(ast.OpListEnd, br)
	})
	return grp
}
// switch_ parses: switch expression '{' { 'case' { expression } ':' block } [ 'default' ':' block ] '}'
//
// Returns nil (without consuming input) when the next token is not the
// 'switch' keyword. Annotations preceding each 'case' are attached to
// that case.
func (p *parser) switch_(b *cst.Branch) *ast.Switch {
	if !p.peekKeyword(ast.KeywordSwitch) {
		return nil
	}
	e := &ast.Switch{}
	p.ParseBranch(b, func(b *cst.Branch) {
		p.mappings.Add(e, b)
		p.requireKeyword(ast.KeywordSwitch, b)
		e.Value = p.requireExpression(b)
		p.requireOperator(ast.OpBlockStart, b)
		// Annotations are parsed ahead of the upcoming case entry.
		annotations := &ast.Annotations{}
		p.parseAnnotations(annotations, b)
		for p.peekKeyword(ast.KeywordCase) {
			p.ParseBranch(b, func(b *cst.Branch) {
				entry := &ast.Case{Annotations: annotationsOrNil(*annotations)}
				p.mappings.Add(entry, b)
				p.requireKeyword(ast.KeywordCase, b)
				// Conditions are comma-separated, terminated by ':'.
				for !p.operator(ast.OpInitialise, b) {
					if len(entry.Conditions) > 0 {
						p.requireOperator(ast.OpListSeparator, b)
					}
					entry.Conditions = append(entry.Conditions, p.requireExpression(b))
				}
				entry.Block = p.requireBlock(b)
				e.Cases = append(e.Cases, entry)
			})
			// Fresh annotation set for the next case entry.
			annotations = &ast.Annotations{}
			p.parseAnnotations(annotations, b)
		}
		if p.peekKeyword(ast.KeywordDefault) {
			p.ParseBranch(b, func(b *cst.Branch) {
				entry := &ast.Default{}
				p.mappings.Add(entry, b)
				p.requireKeyword(ast.KeywordDefault, b)
				p.requireOperator(ast.OpInitialise, b)
				entry.Block = p.requireBlock(b)
				e.Default = entry
			})
		}
		p.requireOperator(ast.OpBlockEnd, b)
	})
	return e
}
// index parses an index or slice expression: lhs '[' expression [ ':' [ expression ] ] ']'
//
// Returns nil (without consuming input) when the next token is not '['.
// When the optional ':' is present, the index is rewrapped in a
// BinaryOp with operator OpSlice; its RHS is left nil when the upper
// bound is omitted (e.g. a[1:]).
func (p *parser) index(lhs ast.Node) *ast.Index {
	if !p.peekOperator(ast.OpIndexStart) {
		return nil
	}
	e := &ast.Index{Object: lhs}
	// Extend the CST node of lhs rather than opening a new branch.
	p.Extend(p.mappings.CST(lhs), func(b *cst.Branch) {
		p.mappings.Add(e, b)
		p.requireOperator(ast.OpIndexStart, b)
		e.Index = p.requireExpression(b)
		if p.operator(ast.OpSlice, b) {
			n := &ast.BinaryOp{LHS: e.Index, Operator: ast.OpSlice}
			if !p.peekOperator(ast.OpIndexEnd) {
				p.ParseBranch(b, func(b *cst.Branch) {
					p.mappings.Add(n, b)
					n.RHS = p.requireExpression(b)
				})
			}
			e.Index = n
		}
		p.requireOperator(ast.OpIndexEnd, b)
	})
	return e
}
// call parses a call expression: lhs '(' [ expression { ',' expression } ] ')'
//
// Returns nil (without consuming input) when the next token is not '('.
// An argument of the form `name = expression` (a Generic followed by
// '=') is rewritten into an ast.NamedArg. The loop structure also
// accepts a trailing separator before ')'.
func (p *parser) call(lhs ast.Node) *ast.Call {
	if !p.peekOperator(ast.OpListStart) {
		return nil
	}
	e := &ast.Call{Target: lhs}
	// Extend the CST node of lhs rather than opening a new branch.
	p.Extend(p.mappings.CST(lhs), func(b *cst.Branch) {
		p.mappings.Add(e, b)
		p.requireOperator(ast.OpListStart, b)
		for !p.operator(ast.OpListEnd, b) {
			arg := p.requireExpression(b)
			if i, ok := arg.(*ast.Generic); ok && p.operator(ast.OpInitialise, b) {
				// `name = value`: convert the generic into a named argument.
				n := &ast.NamedArg{Name: i.Name}
				p.ParseBranch(b, func(b *cst.Branch) {
					p.mappings.Add(n, b)
					n.Value = p.requireExpression(b)
				})
				arg = n
			}
			e.Arguments = append(e.Arguments, arg)
			if p.operator(ast.OpListEnd, b) {
				break
			}
			p.requireOperator(ast.OpListSeparator, b)
		}
	})
	return e
}
// member parses a member access: lhs '.' identifier
//
// Returns nil (without consuming input) when the next token is not '.'.
func (p *parser) member(lhs ast.Node) *ast.Member {
	if !p.peekOperator(ast.OpMember) {
		return nil
	}
	m := &ast.Member{Object: lhs}
	p.Extend(p.mappings.CST(lhs), func(br *cst.Branch) {
		p.mappings.Add(m, br)
		p.requireOperator(ast.OpMember, br)
		m.Name = p.requireIdentifier(br)
	})
	return m
}
|
package main
import "fmt"
// main demonstrates using an int slice as a stack: pushing, peeking at
// the top element, and both deletion strategies defined below.
func main() {
	stack := []int{}
	stack = append(stack, 1, 2)
	fmt.Printf("stack: %v\n", stack)
	fmt.Printf("Top is: %d\n", stack[len(stack)-1])
	stack = remove(stack, 1)
	fmt.Printf("stack: %v\n", stack)
	stack = append(stack, 3, 4, 5, 6)
	fmt.Printf("stack: %v\n", stack)
	stack = removeNotKeepSort(stack, 2)
	fmt.Printf("stack: %v\n", stack)
}
// remove deletes the element at index i from slice while preserving
// the order of the remaining elements. The backing array is reused
// (the input slice is mutated).
func remove(slice []int, i int) []int {
	return append(slice[:i], slice[i+1:]...)
}
// removeNotKeepSort deletes the element at index i without preserving
// order: the last element is moved into slot i and the slice shrinks
// by one. The backing array is reused (the input slice is mutated).
func removeNotKeepSort(slice []int, i int) []int {
	last := len(slice) - 1
	slice[i] = slice[last]
	return slice[:last]
}
|
package chconn
type ChErrorType int32
const (
ChErrorOk ChErrorType = 0 // OK
ChErrorUnsupportedMethod ChErrorType = 1 // UNSUPPORTED_METHOD
ChErrorUnsupportedParameter ChErrorType = 2 // UNSUPPORTED_PARAMETER
ChErrorUnexpectedEndOfFile ChErrorType = 3 // UNEXPECTED_END_OF_FILE
ChErrorExpectedEndOfFile ChErrorType = 4 // EXPECTED_END_OF_FILE
ChErrorCannotParseText ChErrorType = 6 // CANNOT_PARSE_TEXT
ChErrorIncorrectNumberOfColumns ChErrorType = 7 // INCORRECT_NUMBER_OF_COLUMNS
ChErrorThereIsNoColumn ChErrorType = 8 // THERE_IS_NO_COLUMN
ChErrorSizesOfColumnsDoesntMatch ChErrorType = 9 // SIZES_OF_COLUMNS_DOESNT_MATCH
ChErrorNotFoundColumnInBlock ChErrorType = 10 // NOT_FOUND_COLUMN_IN_BLOCK
ChErrorPositionOutOfBound ChErrorType = 11 // POSITION_OUT_OF_BOUND
ChErrorParameterOutOfBound ChErrorType = 12 // PARAMETER_OUT_OF_BOUND
ChErrorSizesOfColumnsInTupleDoesntMatch ChErrorType = 13 // SIZES_OF_COLUMNS_IN_TUPLE_DOESNT_MATCH
ChErrorDuplicateColumn ChErrorType = 15 // DUPLICATE_COLUMN
ChErrorNoSuchColumnInTable ChErrorType = 16 // NO_SUCH_COLUMN_IN_TABLE
ChErrorDelimiterInStringLiteralDoesntMatch ChErrorType = 17 // DELIMITER_IN_STRING_LITERAL_DOESNT_MATCH
ChErrorCannotInsertElementIntoConstantColumn ChErrorType = 18 // CANNOT_INSERT_ELEMENT_INTO_CONSTANT_COLUMN
ChErrorSizeOfFixedStringDoesntMatch ChErrorType = 19 // SIZE_OF_FIXED_STRING_DOESNT_MATCH
ChErrorNumberOfColumnsDoesntMatch ChErrorType = 20 // NUMBER_OF_COLUMNS_DOESNT_MATCH
ChErrorCannotReadAllDataFromTabSeparatedInput ChErrorType = 21 // CANNOT_READ_ALL_DATA_FROM_TAB_SEPARATED_INPUT
ChErrorCannotParseAllValueFromTabSeparatedInput ChErrorType = 22 // CANNOT_PARSE_ALL_VALUE_FROM_TAB_SEPARATED_INPUT
ChErrorCannotReadFromIstream ChErrorType = 23 // CANNOT_READ_FROM_ISTREAM
ChErrorCannotWriteToOstream ChErrorType = 24 // CANNOT_WRITE_TO_OSTREAM
ChErrorCannotParseEscapeSequence ChErrorType = 25 // CANNOT_PARSE_ESCAPE_SEQUENCE
ChErrorCannotParseQuotedString ChErrorType = 26 // CANNOT_PARSE_QUOTED_STRING
ChErrorCannotParseInputAssertionFailed ChErrorType = 27 // CANNOT_PARSE_INPUT_ASSERTION_FAILED
ChErrorCannotPrintFloatOrDoubleNumber ChErrorType = 28 // CANNOT_PRINT_FLOAT_OR_DOUBLE_NUMBER
ChErrorCannotPrintInteger ChErrorType = 29 // CANNOT_PRINT_INTEGER
ChErrorCannotReadSizeOfCompressedChunk ChErrorType = 30 // CANNOT_READ_SIZE_OF_COMPRESSED_CHUNK
ChErrorCannotReadCompressedChunk ChErrorType = 31 // CANNOT_READ_COMPRESSED_CHUNK
ChErrorAttemptToReadAfterEOF ChErrorType = 32 // ATTEMPT_TO_READ_AFTER_EOF
ChErrorCannotReadAllData ChErrorType = 33 // CANNOT_READ_ALL_DATA
ChErrorTooManyArgumentsForFunction ChErrorType = 34 // TOO_MANY_ARGUMENTS_FOR_FUNCTION
ChErrorTooFewArgumentsForFunction ChErrorType = 35 // TOO_FEW_ARGUMENTS_FOR_FUNCTION
ChErrorBadArguments ChErrorType = 36 // BAD_ARGUMENTS
ChErrorUnknownElementInAst ChErrorType = 37 // UNKNOWN_ELEMENT_IN_AST
ChErrorCannotParseDate ChErrorType = 38 // CANNOT_PARSE_DATE
ChErrorTooLargeSizeCompressed ChErrorType = 39 // TOO_LARGE_SIZE_COMPRESSED
ChErrorChecksumDoesntMatch ChErrorType = 40 // CHECKSUM_DOESNT_MATCH
ChErrorCannotParseDatetime ChErrorType = 41 // CANNOT_PARSE_DATETIME
ChErrorNumberOfArgumentsDoesntMatch ChErrorType = 42 // NUMBER_OF_ARGUMENTS_DOESNT_MATCH
ChErrorIllegalTypeOfArgument ChErrorType = 43 // ILLEGAL_TYPE_OF_ARGUMENT
ChErrorIllegalColumn ChErrorType = 44 // ILLEGAL_COLUMN
ChErrorIllegalNumberOfResultColumns ChErrorType = 45 // ILLEGAL_NUMBER_OF_RESULT_COLUMNS
ChErrorUnknownFunction ChErrorType = 46 // UNKNOWN_FUNCTION
ChErrorUnknownIdentifier ChErrorType = 47 // UNKNOWN_IDENTIFIER
ChErrorNotImplemented ChErrorType = 48 // NOT_IMPLEMENTED
ChErrorLogicalError ChErrorType = 49 // LOGICAL_ERROR
ChErrorUnknownType ChErrorType = 50 // UNKNOWN_TYPE
ChErrorEmptyListOfColumnsQueried ChErrorType = 51 // EMPTY_LIST_OF_COLUMNS_QUERIED
ChErrorColumnQueriedMoreThanOnce ChErrorType = 52 // COLUMN_QUERIED_MORE_THAN_ONCE
ChErrorTypeMismatch ChErrorType = 53 // TYPE_MISMATCH
ChErrorStorageDoesntAllowParameters ChErrorType = 54 // STORAGE_DOESNT_ALLOW_PARAMETERS
ChErrorStorageRequiresParameter ChErrorType = 55 // STORAGE_REQUIRES_PARAMETER
ChErrorUnknownStorage ChErrorType = 56 // UNKNOWN_STORAGE
ChErrorTableAlreadyExists ChErrorType = 57 // TABLE_ALREADY_EXISTS
ChErrorTableMetadataAlreadyExists ChErrorType = 58 // TABLE_METADATA_ALREADY_EXISTS
ChErrorIllegalTypeOfColumnForFilter ChErrorType = 59 // ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER
ChErrorUnknownTable ChErrorType = 60 // UNKNOWN_TABLE
ChErrorOnlyFilterColumnInBlock ChErrorType = 61 // ONLY_FILTER_COLUMN_IN_BLOCK
ChErrorSyntaxError ChErrorType = 62 // SYNTAX_ERROR
ChErrorUnknownAggregateFunction ChErrorType = 63 // UNKNOWN_AGGREGATE_FUNCTION
ChErrorCannotReadAggregateFunctionFromText ChErrorType = 64 // CANNOT_READ_AGGREGATE_FUNCTION_FROM_TEXT
ChErrorCannotWriteAggregateFunctionAsText ChErrorType = 65 // CANNOT_WRITE_AGGREGATE_FUNCTION_AS_TEXT
ChErrorNotAColumn ChErrorType = 66 // NOT_A_COLUMN
ChErrorIllegalKeyOfAggregation ChErrorType = 67 // ILLEGAL_KEY_OF_AGGREGATION
ChErrorCannotGetSizeOfField ChErrorType = 68 // CANNOT_GET_SIZE_OF_FIELD
ChErrorArgumentOutOfBound ChErrorType = 69 // ARGUMENT_OUT_OF_BOUND
ChErrorCannotConvertType ChErrorType = 70 // CANNOT_CONVERT_TYPE
ChErrorCannotWriteAfterEndOfBuffer ChErrorType = 71 // CANNOT_WRITE_AFTER_END_OF_BUFFER
ChErrorCannotParseNumber ChErrorType = 72 // CANNOT_PARSE_NUMBER
ChErrorUnknownFormat ChErrorType = 73 // UNKNOWN_FORMAT
ChErrorCannotReadFromFileDescriptor ChErrorType = 74 // CANNOT_READ_FROM_FILE_DESCRIPTOR
ChErrorCannotWriteToFileDescriptor ChErrorType = 75 // CANNOT_WRITE_TO_FILE_DESCRIPTOR
ChErrorCannotOpenFile ChErrorType = 76 // CANNOT_OPEN_FILE
ChErrorCannotCloseFile ChErrorType = 77 // CANNOT_CLOSE_FILE
ChErrorUnknownTypeOfQuery ChErrorType = 78 // UNKNOWN_TYPE_OF_QUERY
ChErrorIncorrectFileName ChErrorType = 79 // INCORRECT_FILE_NAME
ChErrorIncorrectQuery ChErrorType = 80 // INCORRECT_QUERY
ChErrorUnknownDatabase ChErrorType = 81 // UNKNOWN_DATABASE
ChErrorDatabaseAlreadyExists ChErrorType = 82 // DATABASE_ALREADY_EXISTS
ChErrorDirectoryDoesntExist ChErrorType = 83 // DIRECTORY_DOESNT_EXIST
ChErrorDirectoryAlreadyExists ChErrorType = 84 // DIRECTORY_ALREADY_EXISTS
ChErrorFormatIsNotSuitableForInput ChErrorType = 85 // FORMAT_IS_NOT_SUITABLE_FOR_INPUT
ChErrorReceivedErrorFromRemoteIoServer ChErrorType = 86 // RECEIVED_ERROR_FROM_REMOTE_IO_SERVER
ChErrorCannotSeekThroughFile ChErrorType = 87 // CANNOT_SEEK_THROUGH_FILE
ChErrorCannotTruncateFile ChErrorType = 88 // CANNOT_TRUNCATE_FILE
ChErrorUnknownCompressionMethod ChErrorType = 89 // UNKNOWN_COMPRESSION_METHOD
ChErrorEmptyListOfColumnsPassed ChErrorType = 90 // EMPTY_LIST_OF_COLUMNS_PASSED
ChErrorSizesOfMarksFilesAreInconsistent ChErrorType = 91 // SIZES_OF_MARKS_FILES_ARE_INCONSISTENT
ChErrorEmptyDataPassed ChErrorType = 92 // EMPTY_DATA_PASSED
ChErrorUnknownAggregatedDataVariant ChErrorType = 93 // UNKNOWN_AGGREGATED_DATA_VARIANT
ChErrorCannotMergeDifferentAggregatedDataVariants ChErrorType = 94 // CANNOT_MERGE_DIFFERENT_AGGREGATED_DATA_VARIANTS
ChErrorCannotReadFromSocket ChErrorType = 95 // CANNOT_READ_FROM_SOCKET
ChErrorCannotWriteToSocket ChErrorType = 96 // CANNOT_WRITE_TO_SOCKET
ChErrorCannotReadAllDataFromChunkedInput ChErrorType = 97 // CANNOT_READ_ALL_DATA_FROM_CHUNKED_INPUT
ChErrorCannotWriteToEmptyBlockOutputStream ChErrorType = 98 // CANNOT_WRITE_TO_EMPTY_BLOCK_OUTPUT_STREAM
ChErrorUnknownPacketFromClient ChErrorType = 99 // UNKNOWN_PACKET_FROM_CLIENT
ChErrorUnknownPacketFromServer ChErrorType = 100 // UNKNOWN_PACKET_FROM_SERVER
ChErrorUnexpectedPacketFromClient ChErrorType = 101 // UNEXPECTED_PACKET_FROM_CLIENT
ChErrorUnexpectedPacketFromServer ChErrorType = 102 // UNEXPECTED_PACKET_FROM_SERVER
ChErrorReceivedDataForWrongQueryID ChErrorType = 103 // RECEIVED_DATA_FOR_WRONG_QUERY_ID
ChErrorTooSmallBufferSize ChErrorType = 104 // TOO_SMALL_BUFFER_SIZE
ChErrorCannotReadHistory ChErrorType = 105 // CANNOT_READ_HISTORY
ChErrorCannotAppendHistory ChErrorType = 106 // CANNOT_APPEND_HISTORY
ChErrorFileDoesntExist ChErrorType = 107 // FILE_DOESNT_EXIST
ChErrorNoDataToInsert ChErrorType = 108 // NO_DATA_TO_INSERT
ChErrorCannotBlockSignal ChErrorType = 109 // CANNOT_BLOCK_SIGNAL
ChErrorCannotUnblockSignal ChErrorType = 110 // CANNOT_UNBLOCK_SIGNAL
ChErrorCannotManipulateSigset ChErrorType = 111 // CANNOT_MANIPULATE_SIGSET
ChErrorCannotWaitForSignal ChErrorType = 112 // CANNOT_WAIT_FOR_SIGNAL
ChErrorThereIsNoSession ChErrorType = 113 // THERE_IS_NO_SESSION
ChErrorCannotClockGettime ChErrorType = 114 // CANNOT_CLOCK_GETTIME
ChErrorUnknownSetting ChErrorType = 115 // UNKNOWN_SETTING
ChErrorThereIsNoDefaultValue ChErrorType = 116 // THERE_IS_NO_DEFAULT_VALUE
ChErrorIncorrectData ChErrorType = 117 // INCORRECT_DATA
ChErrorEngineRequired ChErrorType = 119 // ENGINE_REQUIRED
ChErrorCannotInsertValueOfDifferentSizeIntoTuple ChErrorType = 120 // CANNOT_INSERT_VALUE_OF_DIFFERENT_SIZE_INTO_TUPLE
ChErrorUnsupportedJoinKeys ChErrorType = 121 // UNSUPPORTED_JOIN_KEYS
ChErrorIncompatibleColumns ChErrorType = 122 // INCOMPATIBLE_COLUMNS
ChErrorUnknownTypeOfAstNode ChErrorType = 123 // UNKNOWN_TYPE_OF_AST_NODE
ChErrorIncorrectElementOfSet ChErrorType = 124 // INCORRECT_ELEMENT_OF_SET
ChErrorIncorrectResultOfScalarSubquery ChErrorType = 125 // INCORRECT_RESULT_OF_SCALAR_SUBQUERY
ChErrorCannotGetReturnType ChErrorType = 126 // CANNOT_GET_RETURN_TYPE
ChErrorIllegalIndex ChErrorType = 127 // ILLEGAL_INDEX
ChErrorTooLargeArraySize ChErrorType = 128 // TOO_LARGE_ARRAY_SIZE
ChErrorFunctionIsSpecial ChErrorType = 129 // FUNCTION_IS_SPECIAL
ChErrorCannotReadArrayFromText ChErrorType = 130 // CANNOT_READ_ARRAY_FROM_TEXT
ChErrorTooLargeStringSize ChErrorType = 131 // TOO_LARGE_STRING_SIZE
ChErrorAggregateFunctionDoesntAllowParameters ChErrorType = 133 // AGGREGATE_FUNCTION_DOESNT_ALLOW_PARAMETERS
ChErrorParametersToAggregateFunctionsMustBeLiterals ChErrorType = 134 // PARAMETERS_TO_AGGREGATE_FUNCTIONS_MUST_BE_LITERALS
ChErrorZeroArrayOrTupleIndex ChErrorType = 135 // ZERO_ARRAY_OR_TUPLE_INDEX
ChErrorUnknownElementInConfig ChErrorType = 137 // UNKNOWN_ELEMENT_IN_CONFIG
ChErrorExcessiveElementInConfig ChErrorType = 138 // EXCESSIVE_ELEMENT_IN_CONFIG
ChErrorNoElementsInConfig ChErrorType = 139 // NO_ELEMENTS_IN_CONFIG
ChErrorAllRequestedColumnsAreMissing ChErrorType = 140 // ALL_REQUESTED_COLUMNS_ARE_MISSING
ChErrorSamplingNotSupported ChErrorType = 141 // SAMPLING_NOT_SUPPORTED
ChErrorNotFoundNode ChErrorType = 142 // NOT_FOUND_NODE
ChErrorFoundMoreThanOneNode ChErrorType = 143 // FOUND_MORE_THAN_ONE_NODE
ChErrorFirstDateIsBiggerThanLastDate ChErrorType = 144 // FIRST_DATE_IS_BIGGER_THAN_LAST_DATE
ChErrorUnknownOverflowMode ChErrorType = 145 // UNKNOWN_OVERFLOW_MODE
ChErrorQuerySectionDoesntMakeSense ChErrorType = 146 // QUERY_SECTION_DOESNT_MAKE_SENSE
ChErrorNotFoundFunctionElementForAggregate ChErrorType = 147 // NOT_FOUND_FUNCTION_ELEMENT_FOR_AGGREGATE
ChErrorNotFoundRelationElementForCondition ChErrorType = 148 // NOT_FOUND_RELATION_ELEMENT_FOR_CONDITION
ChErrorNotFoundRhsElementForCondition ChErrorType = 149 // NOT_FOUND_RHS_ELEMENT_FOR_CONDITION
ChErrorEmptyListOfAttributesPassed ChErrorType = 150 // EMPTY_LIST_OF_ATTRIBUTES_PASSED
ChErrorIndexOfColumnInSortClauseIsOutOfRange ChErrorType = 151 // INDEX_OF_COLUMN_IN_SORT_CLAUSE_IS_OUT_OF_RANGE
ChErrorUnknownDirectionOfSorting ChErrorType = 152 // UNKNOWN_DIRECTION_OF_SORTING
ChErrorIllegalDivision ChErrorType = 153 // ILLEGAL_DIVISION
ChErrorAggregateFunctionNotApplicable ChErrorType = 154 // AGGREGATE_FUNCTION_NOT_APPLICABLE
ChErrorUnknownRelation ChErrorType = 155 // UNKNOWN_RELATION
ChErrorDictionariesWasNotLoaded ChErrorType = 156 // DICTIONARIES_WAS_NOT_LOADED
ChErrorIllegalOverflowMode ChErrorType = 157 // ILLEGAL_OVERFLOW_MODE
ChErrorTooManyRows ChErrorType = 158 // TOO_MANY_ROWS
ChErrorTimeoutExceeded ChErrorType = 159 // TIMEOUT_EXCEEDED
ChErrorTooSlow ChErrorType = 160 // TOO_SLOW
ChErrorTooManyColumns ChErrorType = 161 // TOO_MANY_COLUMNS
ChErrorTooDeepSubqueries ChErrorType = 162 // TOO_DEEP_SUBQUERIES
ChErrorTooDeepPipeline ChErrorType = 163 // TOO_DEEP_PIPELINE
ChErrorReadonly ChErrorType = 164 // READONLY
ChErrorTooManyTemporaryColumns ChErrorType = 165 // TOO_MANY_TEMPORARY_COLUMNS
ChErrorTooManyTemporaryNonConstColumns ChErrorType = 166 // TOO_MANY_TEMPORARY_NON_CONST_COLUMNS
ChErrorTooDeepAst ChErrorType = 167 // TOO_DEEP_AST
ChErrorTooBigAst ChErrorType = 168 // TOO_BIG_AST
ChErrorBadTypeOfField ChErrorType = 169 // BAD_TYPE_OF_FIELD
ChErrorBadGet ChErrorType = 170 // BAD_GET
ChErrorCannotCreateDirectory ChErrorType = 172 // CANNOT_CREATE_DIRECTORY
ChErrorCannotAllocateMemory ChErrorType = 173 // CANNOT_ALLOCATE_MEMORY
ChErrorCyclicAliases ChErrorType = 174 // CYCLIC_ALIASES
ChErrorChunkNotFound ChErrorType = 176 // CHUNK_NOT_FOUND
ChErrorDuplicateChunkName ChErrorType = 177 // DUPLICATE_CHUNK_NAME
ChErrorMultipleAliasesForExpression ChErrorType = 178 // MULTIPLE_ALIASES_FOR_EXPRESSION
ChErrorMultipleExpressionsForAlias ChErrorType = 179 // MULTIPLE_EXPRESSIONS_FOR_ALIAS
ChErrorThereIsNoProfile ChErrorType = 180 // THERE_IS_NO_PROFILE
ChErrorIllegalFinal ChErrorType = 181 // ILLEGAL_FINAL
ChErrorIllegalPrewhere ChErrorType = 182 // ILLEGAL_PREWHERE
ChErrorUnexpectedExpression ChErrorType = 183 // UNEXPECTED_EXPRESSION
ChErrorIllegalAggregation ChErrorType = 184 // ILLEGAL_AGGREGATION
ChErrorUnsupportedMyisamBlockType ChErrorType = 185 // UNSUPPORTED_MYISAM_BLOCK_TYPE
ChErrorUnsupportedCollationLocale ChErrorType = 186 // UNSUPPORTED_COLLATION_LOCALE
ChErrorCollationComparisonFailed ChErrorType = 187 // COLLATION_COMPARISON_FAILED
ChErrorUnknownAction ChErrorType = 188 // UNKNOWN_ACTION
ChErrorTableMustNotBeCreatedManually ChErrorType = 189 // TABLE_MUST_NOT_BE_CREATED_MANUALLY
ChErrorSizesOfArraysDoesntMatch ChErrorType = 190 // SIZES_OF_ARRAYS_DOESNT_MATCH
ChErrorSetSizeLimitExceeded ChErrorType = 191 // SET_SIZE_LIMIT_EXCEEDED
ChErrorUnknownUser ChErrorType = 192 // UNKNOWN_USER
ChErrorWrongPassword ChErrorType = 193 // WRONG_PASSWORD
ChErrorRequiredPassword ChErrorType = 194 // REQUIRED_PASSWORD
ChErrorIPAddressNotAllowed ChErrorType = 195 // IP_ADDRESS_NOT_ALLOWED
ChErrorUnknownAddressPatternType ChErrorType = 196 // UNKNOWN_ADDRESS_PATTERN_TYPE
ChErrorServerRevisionIsTooOld ChErrorType = 197 // SERVER_REVISION_IS_TOO_OLD
ChErrorDNSError ChErrorType = 198 // DNS_ERROR
ChErrorUnknownQuota ChErrorType = 199 // UNKNOWN_QUOTA
ChErrorQuotaDoesntAllowKeys ChErrorType = 200 // QUOTA_DOESNT_ALLOW_KEYS
ChErrorQuotaExpired ChErrorType = 201 // QUOTA_EXPIRED
ChErrorTooManySimultaneousQueries ChErrorType = 202 // TOO_MANY_SIMULTANEOUS_QUERIES
ChErrorNoFreeConnection ChErrorType = 203 // NO_FREE_CONNECTION
ChErrorCannotFsync ChErrorType = 204 // CANNOT_FSYNC
ChErrorNestedTypeTooDeep ChErrorType = 205 // NESTED_TYPE_TOO_DEEP
ChErrorAliasRequired ChErrorType = 206 // ALIAS_REQUIRED
ChErrorAmbiguousIdentifier ChErrorType = 207 // AMBIGUOUS_IDENTIFIER
ChErrorEmptyNestedTable ChErrorType = 208 // EMPTY_NESTED_TABLE
ChErrorSocketTimeout ChErrorType = 209 // SOCKET_TIMEOUT
ChErrorNetworkError ChErrorType = 210 // NETWORK_ERROR
ChErrorEmptyQuery ChErrorType = 211 // EMPTY_QUERY
ChErrorUnknownLoadBalancing ChErrorType = 212 // UNKNOWN_LOAD_BALANCING
ChErrorUnknownTotalsMode ChErrorType = 213 // UNKNOWN_TOTALS_MODE
ChErrorCannotStatvfs ChErrorType = 214 // CANNOT_STATVFS
ChErrorNotAnAggregate ChErrorType = 215 // NOT_AN_AGGREGATE
ChErrorQueryWithSameIDIsAlreadyRunning ChErrorType = 216 // QUERY_WITH_SAME_ID_IS_ALREADY_RUNNING
ChErrorClientHasConnectedToWrongPort ChErrorType = 217 // CLIENT_HAS_CONNECTED_TO_WRONG_PORT
ChErrorTableIsDropped ChErrorType = 218 // TABLE_IS_DROPPED
ChErrorDatabaseNotEmpty ChErrorType = 219 // DATABASE_NOT_EMPTY
ChErrorDuplicateInterserverIoEndpoint ChErrorType = 220 // DUPLICATE_INTERSERVER_IO_ENDPOINT
ChErrorNoSuchInterserverIoEndpoint ChErrorType = 221 // NO_SUCH_INTERSERVER_IO_ENDPOINT
ChErrorAddingReplicaToNonEmptyTable ChErrorType = 222 // ADDING_REPLICA_TO_NON_EMPTY_TABLE
ChErrorUnexpectedAstStructure ChErrorType = 223 // UNEXPECTED_AST_STRUCTURE
ChErrorReplicaIsAlreadyActive ChErrorType = 224 // REPLICA_IS_ALREADY_ACTIVE
ChErrorNoZookeeper ChErrorType = 225 // NO_ZOOKEEPER
ChErrorNoFileInDataPart ChErrorType = 226 // NO_FILE_IN_DATA_PART
ChErrorUnexpectedFileInDataPart ChErrorType = 227 // UNEXPECTED_FILE_IN_DATA_PART
ChErrorBadSizeOfFileInDataPart ChErrorType = 228 // BAD_SIZE_OF_FILE_IN_DATA_PART
ChErrorQueryIsTooLarge ChErrorType = 229 // QUERY_IS_TOO_LARGE
ChErrorNotFoundExpectedDataPart ChErrorType = 230 // NOT_FOUND_EXPECTED_DATA_PART
ChErrorTooManyUnexpectedDataParts ChErrorType = 231 // TOO_MANY_UNEXPECTED_DATA_PARTS
ChErrorNoSuchDataPart ChErrorType = 232 // NO_SUCH_DATA_PART
ChErrorBadDataPartName ChErrorType = 233 // BAD_DATA_PART_NAME
ChErrorNoReplicaHasPart ChErrorType = 234 // NO_REPLICA_HAS_PART
ChErrorDuplicateDataPart ChErrorType = 235 // DUPLICATE_DATA_PART
ChErrorAborted ChErrorType = 236 // ABORTED
ChErrorNoReplicaNameGiven ChErrorType = 237 // NO_REPLICA_NAME_GIVEN
ChErrorFormatVersionTooOld ChErrorType = 238 // FORMAT_VERSION_TOO_OLD
ChErrorCannotMunmap ChErrorType = 239 // CANNOT_MUNMAP
ChErrorCannotMremap ChErrorType = 240 // CANNOT_MREMAP
ChErrorMemoryLimitExceeded ChErrorType = 241 // MEMORY_LIMIT_EXCEEDED
ChErrorTableIsReadOnly ChErrorType = 242 // TABLE_IS_READ_ONLY
ChErrorNotEnoughSpace ChErrorType = 243 // NOT_ENOUGH_SPACE
ChErrorUnexpectedZookeeperError ChErrorType = 244 // UNEXPECTED_ZOOKEEPER_ERROR
ChErrorCorruptedData ChErrorType = 246 // CORRUPTED_DATA
ChErrorIncorrectMark ChErrorType = 247 // INCORRECT_MARK
ChErrorInvalidPartitionValue ChErrorType = 248 // INVALID_PARTITION_VALUE
ChErrorNotEnoughBlockNumbers ChErrorType = 250 // NOT_ENOUGH_BLOCK_NUMBERS
ChErrorNoSuchReplica ChErrorType = 251 // NO_SUCH_REPLICA
ChErrorTooManyParts ChErrorType = 252 // TOO_MANY_PARTS
ChErrorReplicaIsAlreadyExist ChErrorType = 253 // REPLICA_IS_ALREADY_EXIST
ChErrorNoActiveReplicas ChErrorType = 254 // NO_ACTIVE_REPLICAS
ChErrorTooManyRetriesToFetchParts ChErrorType = 255 // TOO_MANY_RETRIES_TO_FETCH_PARTS
ChErrorPartitionAlreadyExists ChErrorType = 256 // PARTITION_ALREADY_EXISTS
ChErrorPartitionDoesntExist ChErrorType = 257 // PARTITION_DOESNT_EXIST
ChErrorUnionAllResultStructuresMismatch ChErrorType = 258 // UNION_ALL_RESULT_STRUCTURES_MISMATCH
ChErrorClientOutputFormatSpecified ChErrorType = 260 // CLIENT_OUTPUT_FORMAT_SPECIFIED
ChErrorUnknownBlockInfoField ChErrorType = 261 // UNKNOWN_BLOCK_INFO_FIELD
ChErrorBadCollation ChErrorType = 262 // BAD_COLLATION
ChErrorCannotCompileCode ChErrorType = 263 // CANNOT_COMPILE_CODE
ChErrorIncompatibleTypeOfJoin ChErrorType = 264 // INCOMPATIBLE_TYPE_OF_JOIN
ChErrorNoAvailableReplica ChErrorType = 265 // NO_AVAILABLE_REPLICA
ChErrorMismatchReplicasDataSources ChErrorType = 266 // MISMATCH_REPLICAS_DATA_SOURCES
ChErrorStorageDoesntSupportParallelReplicas ChErrorType = 267 // STORAGE_DOESNT_SUPPORT_PARALLEL_REPLICAS
ChErrorCpuidError ChErrorType = 268 // CPUID_ERROR
ChErrorInfiniteLoop ChErrorType = 269 // INFINITE_LOOP
ChErrorCannotCompress ChErrorType = 270 // CANNOT_COMPRESS
ChErrorCannotDecompress ChErrorType = 271 // CANNOT_DECOMPRESS
ChErrorCannotIoSubmit ChErrorType = 272 // CANNOT_IO_SUBMIT
ChErrorCannotIoGetevents ChErrorType = 273 // CANNOT_IO_GETEVENTS
ChErrorAioReadError ChErrorType = 274 // AIO_READ_ERROR
ChErrorAioWriteError ChErrorType = 275 // AIO_WRITE_ERROR
ChErrorIndexNotUsed ChErrorType = 277 // INDEX_NOT_USED
ChErrorAllConnectionTriesFailed ChErrorType = 279 // ALL_CONNECTION_TRIES_FAILED
ChErrorNoAvailableData ChErrorType = 280 // NO_AVAILABLE_DATA
ChErrorDictionaryIsEmpty ChErrorType = 281 // DICTIONARY_IS_EMPTY
ChErrorIncorrectIndex ChErrorType = 282 // INCORRECT_INDEX
ChErrorUnknownDistributedProductMode ChErrorType = 283 // UNKNOWN_DISTRIBUTED_PRODUCT_MODE
ChErrorWrongGlobalSubquery ChErrorType = 284 // WRONG_GLOBAL_SUBQUERY
ChErrorTooFewLiveReplicas ChErrorType = 285 // TOO_FEW_LIVE_REPLICAS
ChErrorUnsatisfiedQuorumForPreviousWrite ChErrorType = 286 // UNSATISFIED_QUORUM_FOR_PREVIOUS_WRITE
ChErrorUnknownFormatVersion ChErrorType = 287 // UNKNOWN_FORMAT_VERSION
ChErrorDistributedInJoinSubqueryDenied ChErrorType = 288 // DISTRIBUTED_IN_JOIN_SUBQUERY_DENIED
ChErrorReplicaIsNotInQuorum ChErrorType = 289 // REPLICA_IS_NOT_IN_QUORUM
ChErrorLimitExceeded ChErrorType = 290 // LIMIT_EXCEEDED
ChErrorDatabaseAccessDenied ChErrorType = 291 // DATABASE_ACCESS_DENIED
ChErrorMongodbCannotAuthenticate ChErrorType = 293 // MONGODB_CANNOT_AUTHENTICATE
ChErrorInvalidBlockExtraInfo ChErrorType = 294 // INVALID_BLOCK_EXTRA_INFO
ChErrorReceivedEmptyData ChErrorType = 295 // RECEIVED_EMPTY_DATA
ChErrorNoRemoteShardFound ChErrorType = 296 // NO_REMOTE_SHARD_FOUND
ChErrorShardHasNoConnections ChErrorType = 297 // SHARD_HAS_NO_CONNECTIONS
ChErrorCannotPipe ChErrorType = 298 // CANNOT_PIPE
ChErrorCannotFork ChErrorType = 299 // CANNOT_FORK
ChErrorCannotDlsym ChErrorType = 300 // CANNOT_DLSYM
ChErrorCannotCreateChildProcess ChErrorType = 301 // CANNOT_CREATE_CHILD_PROCESS
ChErrorChildWasNotExitedNormally ChErrorType = 302 // CHILD_WAS_NOT_EXITED_NORMALLY
ChErrorCannotSelect ChErrorType = 303 // CANNOT_SELECT
ChErrorCannotWaitpid ChErrorType = 304 // CANNOT_WAITPID
ChErrorTableWasNotDropped ChErrorType = 305 // TABLE_WAS_NOT_DROPPED
ChErrorTooDeepRecursion ChErrorType = 306 // TOO_DEEP_RECURSION
ChErrorTooManyBytes ChErrorType = 307 // TOO_MANY_BYTES
ChErrorUnexpectedNodeInZookeeper ChErrorType = 308 // UNEXPECTED_NODE_IN_ZOOKEEPER
ChErrorFunctionCannotHaveParameters ChErrorType = 309 // FUNCTION_CANNOT_HAVE_PARAMETERS
ChErrorInvalidShardWeight ChErrorType = 317 // INVALID_SHARD_WEIGHT
ChErrorInvalidConfigParameter ChErrorType = 318 // INVALID_CONFIG_PARAMETER
ChErrorUnknownStatusOfInsert ChErrorType = 319 // UNKNOWN_STATUS_OF_INSERT
ChErrorValueIsOutOfRangeOfDataType ChErrorType = 321 // VALUE_IS_OUT_OF_RANGE_OF_DATA_TYPE
ChErrorBarrierTimeout ChErrorType = 335 // BARRIER_TIMEOUT
ChErrorUnknownDatabaseEngine ChErrorType = 336 // UNKNOWN_DATABASE_ENGINE
ChErrorDdlGuardIsActive ChErrorType = 337 // DDL_GUARD_IS_ACTIVE
ChErrorUnfinished ChErrorType = 341 // UNFINISHED
ChErrorMetadataMismatch ChErrorType = 342 // METADATA_MISMATCH
ChErrorSupportIsDisabled ChErrorType = 344 // SUPPORT_IS_DISABLED
ChErrorTableDiffersTooMuch ChErrorType = 345 // TABLE_DIFFERS_TOO_MUCH
ChErrorCannotConvertCharset ChErrorType = 346 // CANNOT_CONVERT_CHARSET
ChErrorCannotLoadConfig ChErrorType = 347 // CANNOT_LOAD_CONFIG
ChErrorCannotInsertNullInOrdinaryColumn ChErrorType = 349 // CANNOT_INSERT_NULL_IN_ORDINARY_COLUMN
ChErrorIncompatibleSourceTables ChErrorType = 350 // INCOMPATIBLE_SOURCE_TABLES
ChErrorAmbiguousTableName ChErrorType = 351 // AMBIGUOUS_TABLE_NAME
ChErrorAmbiguousColumnName ChErrorType = 352 // AMBIGUOUS_COLUMN_NAME
ChErrorIndexOfPositionalArgumentIsOutOfRange ChErrorType = 353 // INDEX_OF_POSITIONAL_ARGUMENT_IS_OUT_OF_RANGE
ChErrorZlibInflateFailed ChErrorType = 354 // ZLIB_INFLATE_FAILED
ChErrorZlibDeflateFailed ChErrorType = 355 // ZLIB_DEFLATE_FAILED
ChErrorBadLambda ChErrorType = 356 // BAD_LAMBDA
ChErrorReservedIdentifierName ChErrorType = 357 // RESERVED_IDENTIFIER_NAME
ChErrorIntoOutfileNotAllowed ChErrorType = 358 // INTO_OUTFILE_NOT_ALLOWED
ChErrorTableSizeExceedsMaxDropSizeLimit ChErrorType = 359 // TABLE_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT
ChErrorCannotCreateCharsetConverter ChErrorType = 360 // CANNOT_CREATE_CHARSET_CONVERTER
ChErrorSeekPositionOutOfBound ChErrorType = 361 // SEEK_POSITION_OUT_OF_BOUND
ChErrorCurrentWriteBufferIsExhausted ChErrorType = 362 // CURRENT_WRITE_BUFFER_IS_EXHAUSTED
ChErrorCannotCreateIoBuffer ChErrorType = 363 // CANNOT_CREATE_IO_BUFFER
ChErrorReceivedErrorTooManyRequests ChErrorType = 364 // RECEIVED_ERROR_TOO_MANY_REQUESTS
ChErrorSizesOfNestedColumnsAreInconsistent ChErrorType = 366 // SIZES_OF_NESTED_COLUMNS_ARE_INCONSISTENT
ChErrorTooManyFetches ChErrorType = 367 // TOO_MANY_FETCHES
ChErrorAllReplicasAreStale ChErrorType = 369 // ALL_REPLICAS_ARE_STALE
ChErrorDataTypeCannotBeUsedInTables ChErrorType = 370 // DATA_TYPE_CANNOT_BE_USED_IN_TABLES
ChErrorInconsistentClusterDefinition ChErrorType = 371 // INCONSISTENT_CLUSTER_DEFINITION
ChErrorSessionNotFound ChErrorType = 372 // SESSION_NOT_FOUND
ChErrorSessionIsLocked ChErrorType = 373 // SESSION_IS_LOCKED
ChErrorInvalidSessionTimeout ChErrorType = 374 // INVALID_SESSION_TIMEOUT
ChErrorCannotDlopen ChErrorType = 375 // CANNOT_DLOPEN
ChErrorCannotParseUUID ChErrorType = 376 // CANNOT_PARSE_UUID
ChErrorIllegalSyntaxForDataType ChErrorType = 377 // ILLEGAL_SYNTAX_FOR_DATA_TYPE
ChErrorDataTypeCannotHaveArguments ChErrorType = 378 // DATA_TYPE_CANNOT_HAVE_ARGUMENTS
ChErrorUnknownStatusOfDistributedDdlTask ChErrorType = 379 // UNKNOWN_STATUS_OF_DISTRIBUTED_DDL_TASK
ChErrorCannotKill ChErrorType = 380 // CANNOT_KILL
ChErrorHTTPLengthRequired ChErrorType = 381 // HTTP_LENGTH_REQUIRED
ChErrorCannotLoadCatboostModel ChErrorType = 382 // CANNOT_LOAD_CATBOOST_MODEL
ChErrorCannotApplyCatboostModel ChErrorType = 383 // CANNOT_APPLY_CATBOOST_MODEL
ChErrorPartIsTemporarilyLocked ChErrorType = 384 // PART_IS_TEMPORARILY_LOCKED
ChErrorMultipleStreamsRequired ChErrorType = 385 // MULTIPLE_STREAMS_REQUIRED
ChErrorNoCommonType ChErrorType = 386 // NO_COMMON_TYPE
ChErrorDictionaryAlreadyExists ChErrorType = 387 // DICTIONARY_ALREADY_EXISTS
ChErrorCannotAssignOptimize ChErrorType = 388 // CANNOT_ASSIGN_OPTIMIZE
ChErrorInsertWasDeduplicated ChErrorType = 389 // INSERT_WAS_DEDUPLICATED
ChErrorCannotGetCreateTableQuery ChErrorType = 390 // CANNOT_GET_CREATE_TABLE_QUERY
ChErrorExternalLibraryError ChErrorType = 391 // EXTERNAL_LIBRARY_ERROR
ChErrorQueryIsProhibited ChErrorType = 392 // QUERY_IS_PROHIBITED
ChErrorThereIsNoQuery ChErrorType = 393 // THERE_IS_NO_QUERY
ChErrorQueryWasCancelled ChErrorType = 394 // QUERY_WAS_CANCELED
ChErrorFunctionThrowIfValueIsNonZero ChErrorType = 395 // FUNCTION_THROW_IF_VALUE_IS_NON_ZERO
ChErrorTooManyRowsOrBytes ChErrorType = 396 // TOO_MANY_ROWS_OR_BYTES
ChErrorQueryIsNotSupportedInMaterializedView ChErrorType = 397 // QUERY_IS_NOT_SUPPORTED_IN_MATERIALIZED_VIEW
ChErrorUnknownMutationCommand ChErrorType = 398 // UNKNOWN_MUTATION_COMMAND
ChErrorFormatIsNotSuitableForOutput ChErrorType = 399 // FORMAT_IS_NOT_SUITABLE_FOR_OUTPUT
ChErrorCannotStat ChErrorType = 400 // CANNOT_STAT
ChErrorFeatureIsNotEnabledAtBuildTime ChErrorType = 401 // FEATURE_IS_NOT_ENABLED_AT_BUILD_TIME
ChErrorCannotIosetup ChErrorType = 402 // CANNOT_IOSETUP
ChErrorInvalidJoinOnExpression ChErrorType = 403 // INVALID_JOIN_ON_EXPRESSION
ChErrorBadOdbcConnectionString ChErrorType = 404 // BAD_ODBC_CONNECTION_STRING
ChErrorPartitionSizeExceedsMaxDropSizeLimit ChErrorType = 405 // PARTITION_SIZE_EXCEEDS_MAX_DROP_SIZE_LIMIT
ChErrorTopAndLimitTogether ChErrorType = 406 // TOP_AND_LIMIT_TOGETHER
ChErrorDecimalOverflow ChErrorType = 407 // DECIMAL_OVERFLOW
ChErrorBadRequestParameter ChErrorType = 408 // BAD_REQUEST_PARAMETER
ChErrorExternalExecutableNotFound ChErrorType = 409 // EXTERNAL_EXECUTABLE_NOT_FOUND
ChErrorExternalServerIsNotResponding ChErrorType = 410 // EXTERNAL_SERVER_IS_NOT_RESPONDING
ChErrorPthreadError ChErrorType = 411 // PTHREAD_ERROR
ChErrorNetlinkError ChErrorType = 412 // NETLINK_ERROR
ChErrorCannotSetSignalHandler ChErrorType = 413 // CANNOT_SET_SIGNAL_HANDLER
ChErrorAllReplicasLost ChErrorType = 415 // ALL_REPLICAS_LOST
ChErrorReplicaStatusChanged ChErrorType = 416 // REPLICA_STATUS_CHANGED
ChErrorExpectedAllOrAny ChErrorType = 417 // EXPECTED_ALL_OR_ANY
ChErrorUnknownJoin ChErrorType = 418 // UNKNOWN_JOIN
ChErrorMultipleAssignmentsToColumn ChErrorType = 419 // MULTIPLE_ASSIGNMENTS_TO_COLUMN
ChErrorCannotUpdateColumn ChErrorType = 420 // CANNOT_UPDATE_COLUMN
ChErrorCannotAddDifferentAggregateStates ChErrorType = 421 // CANNOT_ADD_DIFFERENT_AGGREGATE_STATES
ChErrorUnsupportedURIScheme ChErrorType = 422 // UNSUPPORTED_URI_SCHEME
ChErrorCannotGettimeofday ChErrorType = 423 // CANNOT_GETTIMEOFDAY
ChErrorCannotLink ChErrorType = 424 // CANNOT_LINK
ChErrorSystemError ChErrorType = 425 // SYSTEM_ERROR
ChErrorCannotCompileRegexp ChErrorType = 427 // CANNOT_COMPILE_REGEXP
ChErrorUnknownLogLevel ChErrorType = 428 // UNKNOWN_LOG_LEVEL
ChErrorFailedToGetpwuid ChErrorType = 429 // FAILED_TO_GETPWUID
ChErrorMismatchingUsersForProcessAndData ChErrorType = 430 // MISMATCHING_USERS_FOR_PROCESS_AND_DATA
ChErrorIllegalSyntaxForCodecType ChErrorType = 431 // ILLEGAL_SYNTAX_FOR_CODEC_TYPE
ChErrorUnknownCodec ChErrorType = 432 // UNKNOWN_CODEC
ChErrorIllegalCodecParameter ChErrorType = 433 // ILLEGAL_CODEC_PARAMETER
ChErrorCannotParseProtobufSchema ChErrorType = 434 // CANNOT_PARSE_PROTOBUF_SCHEMA
ChErrorNoColumnSerializedToRequiredProtobufField ChErrorType = 435 // NO_COLUMN_SERIALIZED_TO_REQUIRED_PROTOBUF_FIELD
ChErrorProtobufBadCast ChErrorType = 436 // PROTOBUF_BAD_CAST
ChErrorProtobufFieldNotRepeated ChErrorType = 437 // PROTOBUF_FIELD_NOT_REPEATED
ChErrorDataTypeCannotBePromoted ChErrorType = 438 // DATA_TYPE_CANNOT_BE_PROMOTED
ChErrorCannotScheduleTask ChErrorType = 439 // CANNOT_SCHEDULE_TASK
ChErrorInvalidLimitExpression ChErrorType = 440 // INVALID_LIMIT_EXPRESSION
ChErrorCannotParseDomainValueFromString ChErrorType = 441 // CANNOT_PARSE_DOMAIN_VALUE_FROM_STRING
ChErrorBadDatabaseForTemporaryTable ChErrorType = 442 // BAD_DATABASE_FOR_TEMPORARY_TABLE
ChErrorNoColumnsSerializedToProtobufFields ChErrorType = 443 // NO_COLUMNS_SERIALIZED_TO_PROTOBUF_FIELDS
ChErrorUnknownProtobufFormat ChErrorType = 444 // UNKNOWN_PROTOBUF_FORMAT
ChErrorCannotMprotect ChErrorType = 445 // CANNOT_MPROTECT
ChErrorFunctionNotAllowed ChErrorType = 446 // FUNCTION_NOT_ALLOWED
ChErrorHyperscanCannotScanText ChErrorType = 447 // HYPERSCAN_CANNOT_SCAN_TEXT
ChErrorBrotliReadFailed ChErrorType = 448 // BROTLI_READ_FAILED
ChErrorBrotliWriteFailed ChErrorType = 449 // BROTLI_WRITE_FAILED
ChErrorBadTTLExpression ChErrorType = 450 // BAD_TTL_EXPRESSION
ChErrorBadTTLFile ChErrorType = 451 // BAD_TTL_FILE
ChErrorSettingConstraintViolation ChErrorType = 452 // SETTING_CONSTRAINT_VIOLATION
ChErrorMysqlClientInsufficientCapabilities ChErrorType = 453 // MYSQL_CLIENT_INSUFFICIENT_CAPABILITIES
ChErrorOpensslError ChErrorType = 454 // OPENSSL_ERROR
ChErrorSuspiciousTypeForLowCardinality ChErrorType = 455 // SUSPICIOUS_TYPE_FOR_LOW_CARDINALITY
ChErrorUnknownQueryParameter ChErrorType = 456 // UNKNOWN_QUERY_PARAMETER
ChErrorBadQueryParameter ChErrorType = 457 // BAD_QUERY_PARAMETER
ChErrorCannotUnlink ChErrorType = 458 // CANNOT_UNLINK
ChErrorCannotSetThreadPriority ChErrorType = 459 // CANNOT_SET_THREAD_PRIORITY
ChErrorCannotCreateTimer ChErrorType = 460 // CANNOT_CREATE_TIMER
ChErrorCannotSetTimerPeriod ChErrorType = 461 // CANNOT_SET_TIMER_PERIOD
ChErrorCannotDeleteTimer ChErrorType = 462 // CANNOT_DELETE_TIMER
ChErrorCannotFcntl ChErrorType = 463 // CANNOT_FCNTL
ChErrorCannotParseElf ChErrorType = 464 // CANNOT_PARSE_ELF
ChErrorCannotParseDwarf ChErrorType = 465 // CANNOT_PARSE_DWARF
ChErrorInsecurePath ChErrorType = 466 // INSECURE_PATH
ChErrorCannotParseBool ChErrorType = 467 // CANNOT_PARSE_BOOL
ChErrorCannotPthreadAttr ChErrorType = 468 // CANNOT_PTHREAD_ATTR
ChErrorViolatedConstraint ChErrorType = 469 // VIOLATED_CONSTRAINT
ChErrorQueryIsNotSupportedInLiveView ChErrorType = 470 // QUERY_IS_NOT_SUPPORTED_IN_LIVE_VIEW
ChErrorInvalidSettingValue ChErrorType = 471 // INVALID_SETTING_VALUE
ChErrorReadonlySetting ChErrorType = 472 // READONLY_SETTING
ChErrorDeadlockAvoided ChErrorType = 473 // DEADLOCK_AVOIDED
ChErrorInvalidTemplateFormat ChErrorType = 474 // INVALID_TEMPLATE_FORMAT
ChErrorInvalidWithFillExpression ChErrorType = 475 // INVALID_WITH_FILL_EXPRESSION
ChErrorWithTiesWithoutOrderBy ChErrorType = 476 // WITH_TIES_WITHOUT_ORDER_BY
ChErrorInvalidUsageOfInput ChErrorType = 477 // INVALID_USAGE_OF_INPUT
ChErrorUnknownPolicy ChErrorType = 478 // UNKNOWN_POLICY
ChErrorUnknownDisk ChErrorType = 479 // UNKNOWN_DISK
ChErrorUnknownProtocol ChErrorType = 480 // UNKNOWN_PROTOCOL
ChErrorPathAccessDenied ChErrorType = 481 // PATH_ACCESS_DENIED
ChErrorDictionaryAccessDenied ChErrorType = 482 // DICTIONARY_ACCESS_DENIED
ChErrorTooManyRedirects ChErrorType = 483 // TOO_MANY_REDIRECTS
ChErrorInternalRedisError ChErrorType = 484 // INTERNAL_REDIS_ERROR
ChErrorScalarAlreadyExists ChErrorType = 485 // SCALAR_ALREADY_EXISTS
ChErrorCannotGetCreateDictionaryQuery ChErrorType = 487 // CANNOT_GET_CREATE_DICTIONARY_QUERY
ChErrorUnknownDictionary ChErrorType = 488 // UNKNOWN_DICTIONARY
ChErrorIncorrectDictionaryDefinition ChErrorType = 489 // INCORRECT_DICTIONARY_DEFINITION
ChErrorCannotFormatDatetime ChErrorType = 490 // CANNOT_FORMAT_DATETIME
ChErrorUnacceptableURL ChErrorType = 491 // UNACCEPTABLE_URL
ChErrorAccessEntityNotFound ChErrorType = 492 // ACCESS_ENTITY_NOT_FOUND
ChErrorAccessEntityAlreadyExists ChErrorType = 493 // ACCESS_ENTITY_ALREADY_EXISTS
ChErrorAccessEntityFoundDuplicates ChErrorType = 494 // ACCESS_ENTITY_FOUND_DUPLICATES
ChErrorAccessStorageReadonly ChErrorType = 495 // ACCESS_STORAGE_READONLY
ChErrorQuotaRequiresClientKey ChErrorType = 496 // QUOTA_REQUIRES_CLIENT_KEY
ChErrorAccessDenied ChErrorType = 497 // ACCESS_DENIED
ChErrorLimitByWithTiesIsNotSupported ChErrorType = 498 // LIMIT_BY_WITH_TIES_IS_NOT_SUPPORTED
ChErrorS3Error ChErrorType = 499 // S3_ERROR
ChErrorAzureBlobStorageError ChErrorType = 500 // AZURE_BLOB_STORAGE_ERROR
ChErrorCannotCreateDatabase ChErrorType = 501 // CANNOT_CREATE_DATABASE
ChErrorCannotSigqueue ChErrorType = 502 // CANNOT_SIGQUEUE
ChErrorAggregateFunctionThrow ChErrorType = 503 // AGGREGATE_FUNCTION_THROW
ChErrorFileAlreadyExists ChErrorType = 504 // FILE_ALREADY_EXISTS
ChErrorCannotDeleteDirectory ChErrorType = 505 // CANNOT_DELETE_DIRECTORY
ChErrorUnexpectedErrorCode ChErrorType = 506 // UNEXPECTED_ERROR_CODE
ChErrorUnableToSkipUnusedShards ChErrorType = 507 // UNABLE_TO_SKIP_UNUSED_SHARDS
ChErrorUnknownAccessType ChErrorType = 508 // UNKNOWN_ACCESS_TYPE
ChErrorInvalidGrant ChErrorType = 509 // INVALID_GRANT
ChErrorCacheDictionaryUpdateFail ChErrorType = 510 // CACHE_DICTIONARY_UPDATE_FAIL
ChErrorUnknownRole ChErrorType = 511 // UNKNOWN_ROLE
ChErrorSetNonGrantedRole ChErrorType = 512 // SET_NON_GRANTED_ROLE
ChErrorUnknownPartType ChErrorType = 513 // UNKNOWN_PART_TYPE
ChErrorAccessStorageForInsertionNotFound ChErrorType = 514 // ACCESS_STORAGE_FOR_INSERTION_NOT_FOUND
ChErrorIncorrectAccessEntityDefinition ChErrorType = 515 // INCORRECT_ACCESS_ENTITY_DEFINITION
ChErrorAuthenticationFailed ChErrorType = 516 // AUTHENTICATION_FAILED
ChErrorCannotAssignAlter ChErrorType = 517 // CANNOT_ASSIGN_ALTER
ChErrorCannotCommitOffset ChErrorType = 518 // CANNOT_COMMIT_OFFSET
ChErrorNoRemoteShardAvailable ChErrorType = 519 // NO_REMOTE_SHARD_AVAILABLE
ChErrorCannotDetachDictionaryAsTable ChErrorType = 520 // CANNOT_DETACH_DICTIONARY_AS_TABLE
ChErrorAtomicRenameFail ChErrorType = 521 // ATOMIC_RENAME_FAIL
ChErrorUnknownRowPolicy ChErrorType = 523 // UNKNOWN_ROW_POLICY
ChErrorAlterOfColumnIsForbidden ChErrorType = 524 // ALTER_OF_COLUMN_IS_FORBIDDEN
ChErrorIncorrectDiskIndex ChErrorType = 525 // INCORRECT_DISK_INDEX
ChErrorNoSuitableFunctionImplementation ChErrorType = 527 // NO_SUITABLE_FUNCTION_IMPLEMENTATION
ChErrorCassandraInternalError ChErrorType = 528 // CASSANDRA_INTERNAL_ERROR
ChErrorNotALeader ChErrorType = 529 // NOT_A_LEADER
ChErrorCannotConnectRabbitmq ChErrorType = 530 // CANNOT_CONNECT_RABBITMQ
ChErrorCannotFstat ChErrorType = 531 // CANNOT_FSTAT
ChErrorLdapError ChErrorType = 532 // LDAP_ERROR
ChErrorInconsistentReservations ChErrorType = 533 // INCONSISTENT_RESERVATIONS
ChErrorNoReservationsProvided ChErrorType = 534 // NO_RESERVATIONS_PROVIDED
ChErrorUnknownRaidType ChErrorType = 535 // UNKNOWN_RAID_TYPE
ChErrorCannotRestoreFromFieldDump ChErrorType = 536 // CANNOT_RESTORE_FROM_FIELD_DUMP
ChErrorIllegalMysqlVariable ChErrorType = 537 // ILLEGAL_MYSQL_VARIABLE
ChErrorMysqlSyntaxError ChErrorType = 538 // MYSQL_SYNTAX_ERROR
ChErrorCannotBindRabbitmqExchange ChErrorType = 539 // CANNOT_BIND_RABBITMQ_EXCHANGE
ChErrorCannotDeclareRabbitmqExchange ChErrorType = 540 // CANNOT_DECLARE_RABBITMQ_EXCHANGE
ChErrorCannotCreateRabbitmqQueueBinding ChErrorType = 541 // CANNOT_CREATE_RABBITMQ_QUEUE_BINDING
ChErrorCannotRemoveRabbitmqExchange ChErrorType = 542 // CANNOT_REMOVE_RABBITMQ_EXCHANGE
ChErrorUnknownMysqlDatatypesSupportLevel ChErrorType = 543 // UNKNOWN_MYSQL_DATATYPES_SUPPORT_LEVEL
ChErrorRowAndRowsTogether ChErrorType = 544 // ROW_AND_ROWS_TOGETHER
ChErrorFirstAndNextTogether ChErrorType = 545 // FIRST_AND_NEXT_TOGETHER
ChErrorNoRowDelimiter ChErrorType = 546 // NO_ROW_DELIMITER
ChErrorInvalidRaidType ChErrorType = 547 // INVALID_RAID_TYPE
ChErrorUnknownVolume ChErrorType = 548 // UNKNOWN_VOLUME
ChErrorDataTypeCannotBeUsedInKey ChErrorType = 549 // DATA_TYPE_CANNOT_BE_USED_IN_KEY
ChErrorConditionalTreeParentNotFound ChErrorType = 550 // CONDITIONAL_TREE_PARENT_NOT_FOUND
ChErrorIllegalProjectionManipulator ChErrorType = 551 // ILLEGAL_PROJECTION_MANIPULATOR
ChErrorUnrecognizedArguments ChErrorType = 552 // UNRECOGNIZED_ARGUMENTS
ChErrorLzmaStreamEncoderFailed ChErrorType = 553 // LZMA_STREAM_ENCODER_FAILED
ChErrorLzmaStreamDecoderFailed ChErrorType = 554 // LZMA_STREAM_DECODER_FAILED
ChErrorRocksdbError ChErrorType = 555 // ROCKSDB_ERROR
ChErrorSyncMysqlUserAccessErro ChErrorType = 556 // SYNC_MYSQL_USER_ACCESS_ERRO
ChErrorUnknownUnion ChErrorType = 557 // UNKNOWN_UNION
ChErrorExpectedAllOrDistinct ChErrorType = 558 // EXPECTED_ALL_OR_DISTINCT
ChErrorInvalidGrpcQueryInfo ChErrorType = 559 // INVALID_GRPC_QUERY_INFO
ChErrorZstdEncoderFailed ChErrorType = 560 // ZSTD_ENCODER_FAILED
ChErrorZstdDecoderFailed ChErrorType = 561 // ZSTD_DECODER_FAILED
ChErrorTldListNotFound ChErrorType = 562 // TLD_LIST_NOT_FOUND
ChErrorCannotReadMapFromText ChErrorType = 563 // CANNOT_READ_MAP_FROM_TEXT
ChErrorInterserverSchemeDoesntMatch ChErrorType = 564 // INTERSERVER_SCHEME_DOESNT_MATCH
ChErrorTooManyPartitions ChErrorType = 565 // TOO_MANY_PARTITIONS
ChErrorCannotRmdir ChErrorType = 566 // CANNOT_RMDIR
ChErrorDuplicatedPartUuids ChErrorType = 567 // DUPLICATED_PART_UUIDS
ChErrorRaftError ChErrorType = 568 // RAFT_ERROR
ChErrorMultipleColumnsSerializedToSameProtobufField ChErrorType = 569 // MULTIPLE_COLUMNS_SERIALIZED_TO_SAME_PROTOBUF_FIELD
ChErrorDataTypeIncompatibleWithProtobufField ChErrorType = 570 // DATA_TYPE_INCOMPATIBLE_WITH_PROTOBUF_FIELD
ChErrorDatabaseReplicationFailed ChErrorType = 571 // DATABASE_REPLICATION_FAILED
ChErrorTooManyQueryPlanOptimizations ChErrorType = 572 // TOO_MANY_QUERY_PLAN_OPTIMIZATIONS
ChErrorEpollError ChErrorType = 573 // EPOLL_ERROR
ChErrorDistributedTooManyPendingBytes ChErrorType = 574 // DISTRIBUTED_TOO_MANY_PENDING_BYTES
ChErrorUnknownSnapshot ChErrorType = 575 // UNKNOWN_SNAPSHOT
ChErrorKerberosError ChErrorType = 576 // KERBEROS_ERROR
ChErrorInvalidShardID ChErrorType = 577 // INVALID_SHARD_ID
ChErrorInvalidFormatInsertQueryWithData ChErrorType = 578 // INVALID_FORMAT_INSERT_QUERY_WITH_DATA
ChErrorIncorrectPartType ChErrorType = 579 // INCORRECT_PART_TYPE
ChErrorCannotSetRoundingMode ChErrorType = 580 // CANNOT_SET_ROUNDING_MODE
ChErrorTooLargeDistributedDepth ChErrorType = 581 // TOO_LARGE_DISTRIBUTED_DEPTH
ChErrorNoSuchProjectionInTable ChErrorType = 582 // NO_SUCH_PROJECTION_IN_TABLE
ChErrorIllegalProjection ChErrorType = 583 // ILLEGAL_PROJECTION
ChErrorProjectionNotUsed ChErrorType = 584 // PROJECTION_NOT_USED
ChErrorCannotParseYaml ChErrorType = 585 // CANNOT_PARSE_YAML
ChErrorCannotCreateFile ChErrorType = 586 // CANNOT_CREATE_FILE
ChErrorConcurrentAccessNotSupported ChErrorType = 587 // CONCURRENT_ACCESS_NOT_SUPPORTED
ChErrorDistributedBrokenBatchInfo ChErrorType = 588 // DISTRIBUTED_BROKEN_BATCH_INFO
ChErrorDistributedBrokenBatchFiles ChErrorType = 589 // DISTRIBUTED_BROKEN_BATCH_FILES
ChErrorCannotSysconf ChErrorType = 590 // CANNOT_SYSCONF
ChErrorSqliteEngineError ChErrorType = 591 // SQLITE_ENGINE_ERROR
ChErrorDataEncryptionError ChErrorType = 592 // DATA_ENCRYPTION_ERROR
ChErrorZeroCopyReplicationError ChErrorType = 593 // ZERO_COPY_REPLICATION_ERROR
ChErrorBzip2StreamDecoderFailed ChErrorType = 594 // BZIP2_STREAM_DECODER_FAILED
ChErrorBzip2StreamEncoderFailed ChErrorType = 595 // BZIP2_STREAM_ENCODER_FAILED
ChErrorIntersectOrExceptResultStructuresMismatch ChErrorType = 596 // INTERSECT_OR_EXCEPT_RESULT_STRUCTURES_MISMATCH
ChErrorNoSuchErrorCode ChErrorType = 597 // NO_SUCH_ERROR_CODE
ChErrorBackupAlreadyExists ChErrorType = 598 // BACKUP_ALREADY_EXISTS
ChErrorBackupNotFound ChErrorType = 599 // BACKUP_NOT_FOUND
ChErrorBackupVersionNotSupported ChErrorType = 600 // BACKUP_VERSION_NOT_SUPPORTED
ChErrorBackupDamaged ChErrorType = 601 // BACKUP_DAMAGED
ChErrorNoBaseBackup ChErrorType = 602 // NO_BASE_BACKUP
ChErrorWrongBaseBackup ChErrorType = 603 // WRONG_BASE_BACKUP
ChErrorBackupEntryAlreadyExists ChErrorType = 604 // BACKUP_ENTRY_ALREADY_EXISTS
ChErrorBackupEntryNotFound ChErrorType = 605 // BACKUP_ENTRY_NOT_FOUND
ChErrorBackupIsEmpty ChErrorType = 606 // BACKUP_IS_EMPTY
ChErrorBackupElementDuplicate ChErrorType = 607 // BACKUP_ELEMENT_DUPLICATE
ChErrorCannotRestoreTable ChErrorType = 608 // CANNOT_RESTORE_TABLE
ChErrorFunctionAlreadyExists ChErrorType = 609 // FUNCTION_ALREADY_EXISTS
ChErrorCannotDropFunction ChErrorType = 610 // CANNOT_DROP_FUNCTION
ChErrorCannotCreateRecursiveFunction ChErrorType = 611 // CANNOT_CREATE_RECURSIVE_FUNCTION
ChErrorObjectAlreadyStoredOnDisk ChErrorType = 612 // OBJECT_ALREADY_STORED_ON_DISK
ChErrorObjectWasNotStoredOnDisk ChErrorType = 613 // OBJECT_WAS_NOT_STORED_ON_DISK
ChErrorPostgresqlConnectionFailure ChErrorType = 614 // POSTGRESQL_CONNECTION_FAILURE
ChErrorCannotAdvise ChErrorType = 615 // CANNOT_ADVISE
ChErrorUnknownReadMethod ChErrorType = 616 // UNKNOWN_READ_METHOD
ChErrorLz4EncoderFailed ChErrorType = 617 // LZ4_ENCODER_FAILED
ChErrorLz4DecoderFailed ChErrorType = 618 // LZ4_DECODER_FAILED
ChErrorPostgresqlReplicationInternalError ChErrorType = 619 // POSTGRESQL_REPLICATION_INTERNAL_ERROR
ChErrorQueryNotAllowed ChErrorType = 620 // QUERY_NOT_ALLOWED
ChErrorCannotNormalizeString ChErrorType = 621 // CANNOT_NORMALIZE_STRING
ChErrorCannotParseCapnProtoSchema ChErrorType = 622 // CANNOT_PARSE_CAPN_PROTO_SCHEMA
ChErrorCapnProtoBadCast ChErrorType = 623 // CAPN_PROTO_BAD_CAST
ChErrorBadFileType ChErrorType = 624 // BAD_FILE_TYPE
ChErrorIoSetupError ChErrorType = 625 // IO_SETUP_ERROR
ChErrorCannotSkipUnknownField ChErrorType = 626 // CANNOT_SKIP_UNKNOWN_FIELD
ChErrorBackupEngineNotFound ChErrorType = 627 // BACKUP_ENGINE_NOT_FOUND
ChErrorOffsetFetchWithoutOrderBy ChErrorType = 628 // OFFSET_FETCH_WITHOUT_ORDER_BY
ChErrorHTTPRangeNotSatisfiable ChErrorType = 629 // HTTP_RANGE_NOT_SATISFIABLE
ChErrorHaveDependentObjects ChErrorType = 630 // HAVE_DEPENDENT_OBJECTS
ChErrorUnknownFileSize ChErrorType = 631 // UNKNOWN_FILE_SIZE
ChErrorUnexpectedDataAfterParsedValue ChErrorType = 632 // UNEXPECTED_DATA_AFTER_PARSED_VALUE
ChErrorQueryIsNotSupportedInWindowView ChErrorType = 633 // QUERY_IS_NOT_SUPPORTED_IN_WINDOW_VIEW
ChErrorMongodbError ChErrorType = 634 // MONGODB_ERROR
ChErrorCannotPoll ChErrorType = 635 // CANNOT_POLL
ChErrorCannotExtractTableStructure ChErrorType = 636 // CANNOT_EXTRACT_TABLE_STRUCTURE
ChErrorInvalidTableOverride ChErrorType = 637 // INVALID_TABLE_OVERRIDE
ChErrorSnappyUncompressFailed ChErrorType = 638 // SNAPPY_UNCOMPRESS_FAILED
ChErrorSnappyCompressFailed ChErrorType = 639 // SNAPPY_COMPRESS_FAILED
ChErrorNoHivemetastore ChErrorType = 640 // NO_HIVEMETASTORE
ChErrorCannotAppendToFile ChErrorType = 641 // CANNOT_APPEND_TO_FILE
ChErrorCannotPackArchive ChErrorType = 642 // CANNOT_PACK_ARCHIVE
ChErrorCannotUnpackArchive ChErrorType = 643 // CANNOT_UNPACK_ARCHIVE
ChErrorKeeperException ChErrorType = 999 // KEEPER_EXCEPTION
ChErrorPocoException ChErrorType = 1000 // POCO_EXCEPTION
ChErrorStdException ChErrorType = 1001 // STD_EXCEPTION
ChErrorUnknownException ChErrorType = 1002 // UNKNOWN_EXCEPTION
)
|
package popcount
import "testing"
// tests is the shared table of popcount cases: each input value paired with
// its expected number of set bits. Used by both test functions below.
var tests = []struct {
num uint64 // input value
expected int // expected number of 1-bits in num
}{
{28867, 7},
{55573, 8},
{10012002, 10},
}
// TestPopCount checks PopCount against every entry of the shared test table.
func TestPopCount(t *testing.T) {
	for _, tc := range tests {
		got := PopCount(tc.num)
		if got != tc.expected {
			t.Errorf("expected %d but got %d", tc.expected, got)
		}
	}
}
// TestPopCountLoop checks the loop-based PopCountLoop against the same table.
func TestPopCountLoop(t *testing.T) {
	for _, tc := range tests {
		got := PopCountLoop(tc.num)
		if got != tc.expected {
			t.Errorf("expected %d but got %d", tc.expected, got)
		}
	}
}
// BenchmarkPopCount measures PopCount on a fixed input.
// Recorded result: BenchmarkPopCount-8 2000000000 0.30 ns/op
func BenchmarkPopCount(b *testing.B) {
	for n := 0; n < b.N; n++ {
		PopCount(10012002)
	}
}
// BenchmarkPopCountLoop measures the loop-based variant on the same input.
// Recorded result: BenchmarkPopCountLoop-8 100000000 20.1 ns/op
func BenchmarkPopCountLoop(b *testing.B) {
	for n := 0; n < b.N; n++ {
		PopCountLoop(10012002)
	}
}
|
package models
import (
"errors"
"github.com/jinzhu/gorm"
"time"
)
// Timeframe is a persisted time interval that belongs to a task.
type Timeframe struct {
ID uint64 `gorm:"primary_key;auto_increment" json:"id"` // surrogate primary key
TaskID uint32 `gorm:"not null" json:"task_id"` // owning task; must be >= 1 (see Validate)
FromTime time.Time `gorm:"default:CURRENT_TIMESTAMP" json:"from"` // interval start
ToTime time.Time `gorm:"default:CURRENT_TIMESTAMP" json:"to"` // interval end
}
// Prepare resets the ID (so the DB assigns a fresh one on insert) and stamps
// both interval bounds with the current time. Note the two separate
// time.Now() calls: FromTime and ToTime may differ by a few nanoseconds.
func (p *Timeframe) Prepare() {
p.ID = 0
p.FromTime = time.Now()
p.ToTime = time.Now()
}
// Validate reports an error when the timeframe is not linked to a task
// (TaskID must be at least 1).
func (p *Timeframe) Validate() error {
	if p.TaskID >= 1 {
		return nil
	}
	return errors.New("Required Timeframe")
}
// SaveTimeframe inserts the receiver as a new row and returns it.
// On a create error, a zero-value Timeframe is returned together with the
// error so callers never receive a nil pointer.
func (p *Timeframe) SaveTimeframe(db *gorm.DB) (*Timeframe, error) {
var err error
err = db.Debug().Model(&Timeframe{}).Create(&p).Error
if err != nil {
return &Timeframe{}, err
}
if p.ID != 0 {
// NOTE(review): this chain has no finalizer (no Take/Find), so nothing is
// queried; .Error only surfaces errors already on the chain. It also
// filters column "id" by p.TaskID — looks like a copy/paste slip.
// Confirm intent before removing or fixing.
err = db.Debug().Model(&Timeframe{}).Where("id = ?", p.TaskID).Error
if err != nil {
return &Timeframe{}, err
}
}
return p, nil
}
// FindAllTimeframes returns up to 100 timeframes from the database.
//
// On any error a pointer to an empty (non-nil) slice is returned together
// with the error, so callers always receive a usable slice.
func (p *Timeframe) FindAllTimeframes(db *gorm.DB) (*[]Timeframe, error) {
	tasks := []Timeframe{}
	err := db.Debug().Model(&Timeframe{}).Limit(100).Find(&tasks).Error
	if err != nil {
		return &[]Timeframe{}, err
	}
	if len(tasks) > 0 {
		// NOTE(review): this chain has no finalizer (no Take/Find), so it does
		// not load anything; .Error only reports errors already on the chain.
		// Kept for behavioral parity — confirm intent before removing.
		for i := range tasks {
			err := db.Debug().Model(&Timeframe{}).Where("id = ?", tasks[i].TaskID).Error
			if err != nil {
				return &[]Timeframe{}, err
			}
		}
	}
	return &tasks, nil
}
// FindTimeframeByID loads the timeframe with the given id into the receiver
// and returns it; on any error a zero-value Timeframe is returned instead.
func (p *Timeframe) FindTimeframeByID(db *gorm.DB, pid uint64) (*Timeframe, error) {
	if err := db.Debug().Model(&Timeframe{}).Where("id = ?", pid).Take(&p).Error; err != nil {
		return &Timeframe{}, err
	}
	if p.ID != 0 {
		// Mirrors the original follow-up chain (no finalizer attached).
		if err := db.Debug().Model(&Timeframe{}).Where("id = ?", p.TaskID).Error; err != nil {
			return &Timeframe{}, err
		}
	}
	return p, nil
}
// DeleteATimeframe removes the timeframe with the given id and reports how
// many rows were affected. A missing row yields a descriptive error.
func (p *Timeframe) DeleteATimeframe(db *gorm.DB, pid uint64) (int64, error) {
	result := db.Debug().Model(&Timeframe{}).Where("id = ?", pid).Take(&Timeframe{}).Delete(&Timeframe{})
	if err := result.Error; err != nil {
		if gorm.IsRecordNotFoundError(err) {
			return 0, errors.New("Timeframe not found")
		}
		return 0, err
	}
	return result.RowsAffected, nil
}
// FindTimerameByTaskID returns every timeframe that belongs to the given
// task. It panics on database errors, matching the original contract.
// (The historical typo in the name is kept: callers reference it.)
func FindTimerameByTaskID(db *gorm.DB, TaskID uint32) []Timeframe {
	var timeframes []Timeframe
	rowsTimeframe, err := db.Raw("SELECT id, task_id, from_time, to_time from timeframes WHERE task_id = ?", TaskID).Rows()
	if err != nil {
		panic(err)
	}
	defer rowsTimeframe.Close()
	for rowsTimeframe.Next() {
		var timeframe Timeframe
		// The Scan error was previously discarded, which could silently append
		// zero-value rows; surface it the same way as the query error above.
		if err := rowsTimeframe.Scan(&timeframe.ID, &timeframe.TaskID, &timeframe.FromTime, &timeframe.ToTime); err != nil {
			panic(err)
		}
		timeframes = append(timeframes, timeframe)
	}
	// Report errors encountered during iteration (e.g. a dropped connection).
	if err := rowsTimeframe.Err(); err != nil {
		panic(err)
	}
	return timeframes
}
|
package server
import (
"bytes"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"testing"
)
// TestCORSHandler exercises the CORS middleware with three scenarios: a
// request without an Origin header, an allowed origin with no prefix
// restriction, and an origin that does not match the configured prefix
// (in which case no CORS headers may be emitted at all).
func TestCORSHandler(t *testing.T) {
tests := []struct {
given string // incoming Origin request header
givenPrefix string // origin prefix CORSHandler is configured with
wantOrigin string // expected Access-Control-Allow-Origin
wantCreds string // expected Access-Control-Allow-Credentials
wantHeaders string // expected Access-Control-Allow-Headers
wantMethods string // expected Access-Control-Allow-Methods
}{
// no Origin header: no CORS headers expected
{
"",
"",
"",
"",
"",
"",
},
// origin present, no prefix restriction: all CORS headers set
{
".nytimes.com.",
"",
".nytimes.com.",
"true",
"Content-Type, x-requested-by, *",
"GET, PUT, POST, DELETE, OPTIONS",
},
// origin does not match the configured prefix: headers stay empty
{
".nytimes.com.",
"blah.com",
"",
"",
"",
"",
},
}
for _, test := range tests {
r, _ := http.NewRequest("GET", "", nil)
r.Header.Add("Origin", test.given)
w := httptest.NewRecorder()
// wrap a trivial 200 handler and push the request through the middleware
CORSHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}), test.givenPrefix).ServeHTTP(w, r)
if got := w.Header().Get("Access-Control-Allow-Origin"); got != test.wantOrigin {
t.Errorf("expected CORS origin header to be '%#v', got '%#v'", test.wantOrigin, got)
}
if got := w.Header().Get("Access-Control-Allow-Credentials"); got != test.wantCreds {
t.Errorf("expected CORS creds header to be '%#v', got '%#v'", test.wantCreds, got)
}
if got := w.Header().Get("Access-Control-Allow-Headers"); got != test.wantHeaders {
t.Errorf("expected CORS 'headers' header to be '%#v', got '%#v'", test.wantHeaders, got)
}
if got := w.Header().Get("Access-Control-Allow-Methods"); got != test.wantMethods {
t.Errorf("expected CORS 'methods' header to be '%#v', got '%#v'", test.wantMethods, got)
}
}
}
// TestJSONToHTTP verifies the JSONEndpoint adapter: the success case must
// hand the request body to the endpoint and JSON-encode its payload, and the
// error case must serialize the returned error as the response body. Both
// cases must carry the canonical JSON Content-Type header.
func TestJSONToHTTP(t *testing.T) {
tests := []struct {
given JSONEndpoint // endpoint under test
givenBody io.Reader // request body handed to the endpoint
wantCode int // expected HTTP status code
wantBody string // expected response body (JSON plus trailing newline)
}{
{
// success case: endpoint sees the request body and returns a struct
JSONEndpoint(func(r *http.Request) (int, interface{}, error) {
bod, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Error("unable to read given request body: ", err)
}
if string(bod) != "yup" {
t.Errorf("expected 'yup', got %+v", string(bod))
}
return http.StatusOK, struct{ Howdy string }{"Hi"}, nil
}),
bytes.NewBufferString("yup"),
http.StatusOK,
"{\"Howdy\":\"Hi\"}\n",
},
{
// error case: the returned error becomes the JSON response body
JSONEndpoint(func(r *http.Request) (int, interface{}, error) {
return http.StatusServiceUnavailable, nil, &testJSONError{"nope"}
}),
nil,
http.StatusServiceUnavailable,
"{\"error\":\"nope\"}\n",
},
}
for _, test := range tests {
r, _ := http.NewRequest("GET", "", test.givenBody)
w := httptest.NewRecorder()
JSONToHTTP(test.given).ServeHTTP(w, r)
if w.Code != test.wantCode {
t.Errorf("expected status code %d, got %d", test.wantCode, w.Code)
}
// every JSON response must declare the canonical content type
if gotHdr := w.Header().Get("Content-Type"); gotHdr != jsonContentType {
t.Errorf("expected Content-Type header of '%#v', got '%#v'", jsonContentType, gotHdr)
}
if got := w.Body.String(); got != test.wantBody {
t.Errorf("expected body of '%#v', got '%#v'", test.wantBody, got)
}
}
}
// testJSONError is a minimal error implementation whose JSON encoding is
// {"error": "..."} — used to exercise JSONToHTTP's error path.
type testJSONError struct {
	Err string `json:"error"`
}

// Error returns the wrapped message, satisfying the error interface.
func (e *testJSONError) Error() string {
	return e.Err
}
// TestJSONPHandler checks that JSONPHandler wraps the payload in the
// callback when a "callback" form value is supplied, and passes the body
// through untouched when it is absent.
func TestJSONPHandler(t *testing.T) {
	payload := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("{\"jsonp\":\"sucks\"}"))
	})

	// with a callback parameter: body is wrapped in /**/callback(...);
	req, _ := http.NewRequest("GET", "", nil)
	req.Form = url.Values{"callback": {"harumph"}}
	rec := httptest.NewRecorder()
	JSONPHandler(payload).ServeHTTP(rec, req)
	want := `/**/harumph({"jsonp":"sucks"});`
	if got := rec.Body.String(); got != want {
		t.Errorf("expected JSONP response of '%#v', got '%#v'", want, got)
	}

	// once again, without a callback: body must pass through unchanged
	req, _ = http.NewRequest("GET", "", nil)
	rec = httptest.NewRecorder()
	JSONPHandler(payload).ServeHTTP(rec, req)
	want = `{"jsonp":"sucks"}`
	if got := rec.Body.String(); got != want {
		t.Errorf("expected JSONP response of '%#v', got '%#v'", want, got)
	}
}
// TestNoCacheHandler verifies that NoCacheHandler stamps the standard trio
// of cache-busting headers (Cache-Control, Pragma, Expires) on the response.
func TestNoCacheHandler(t *testing.T) {
	req, _ := http.NewRequest("GET", "", nil)
	rec := httptest.NewRecorder()
	NoCacheHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})).ServeHTTP(rec, req)

	if want, got := "no-cache, no-store, must-revalidate", rec.Header().Get("Cache-Control"); got != want {
		t.Errorf("expected no-cache control header to be '%#v', got '%#v'", want, got)
	}
	if want, got := "no-cache", rec.Header().Get("Pragma"); got != want {
		t.Errorf("expected no-cache pragma header to be '%#v', got '%#v'", want, got)
	}
	if want, got := "0", rec.Header().Get("Expires"); got != want {
		t.Errorf("expected no-cache Expires header to be '%#v', got '%#v'", want, got)
	}
}
|
package main
import (
"fmt"
"github.com/azak-azkaran/cascade/utils"
"github.com/stretchr/testify/assert"
"os"
"strings"
"testing"
"time"
)
// TestChangeMode drives ChangeMode through four switch scenarios and checks
// the resulting (Config.CascadeMode, DirectOverrideChan) pair after each.
// The test mutates package-global state in a fixed order; each scenario
// first sets the starting mode, then calls ChangeMode.
func TestChangeMode(t *testing.T) {
fmt.Println("Running: TestChangeMode")
utils.Init(os.Stdout, os.Stdout, os.Stderr)
assert.False(t, Config.OnlineCheck)
Config.verbose = true
Config.ProxyURL = "something"
// scenario 1: cascade -> direct (online check off)
Config.CascadeMode = true
DirectOverrideChan = false
fmt.Println("Test switch from\nCascadeMode: ", Config.CascadeMode, " DirectOverrideChan: ", DirectOverrideChan, " to DirectMode")
ChangeMode(true, Config.OnlineCheck)
assert.False(t, Config.CascadeMode)
assert.True(t, DirectOverrideChan)
fmt.Println("Result CascadeMode: ", Config.CascadeMode, " DirectOverrideChan: ", DirectOverrideChan)
// scenario 2: direct -> cascade (online check off)
Config.CascadeMode = false
DirectOverrideChan = true
fmt.Println("Test switch from\nCascadeMode: ", Config.CascadeMode, " DirectOverrideChan: ", DirectOverrideChan, " to CascadeMode")
ChangeMode(false, Config.OnlineCheck)
assert.True(t, Config.CascadeMode)
assert.False(t, DirectOverrideChan)
fmt.Println("Result CascadeMode: ", Config.CascadeMode, " DirectOverrideChan: ", DirectOverrideChan)
// scenario 3: cascade -> direct with the online check enabled
Config.CascadeMode = true
DirectOverrideChan = false
fmt.Println("Test switch from\nCascadeMode: ", Config.CascadeMode, " DirectOverrideChan: ", DirectOverrideChan, " to DirectMode")
ChangeMode(false, true)
assert.False(t, Config.CascadeMode)
assert.True(t, DirectOverrideChan)
fmt.Println("Result CascadeMode: ", Config.CascadeMode, " DirectOverrideChan: ", DirectOverrideChan)
// scenario 4: direct -> cascade with the online check enabled
Config.CascadeMode = false
DirectOverrideChan = true
fmt.Println("Test switch from\nCascadeMode: ", Config.CascadeMode, " DirectOverrideChan: ", DirectOverrideChan, " to CascadeMode")
ChangeMode(true, true)
assert.True(t, Config.CascadeMode)
assert.False(t, DirectOverrideChan)
fmt.Println("Result CascadeMode: ", Config.CascadeMode, " DirectOverrideChan: ", DirectOverrideChan)
// restore the global so later tests start clean
Config.ProxyURL = ""
}
// TestModeSelection calls ModeSelection with one host where the override is
// expected to stay off and one where it is expected to turn on.
// NOTE(review): the sleeps suggest ModeSelection works asynchronously and
// the asserts depend on real network reachability — confirm in CI.
func TestModeSelection(t *testing.T) {
fmt.Println("Running: TestModeSelection")
utils.Init(os.Stdout, os.Stdout, os.Stderr)
Config.verbose = true
Config.CascadeMode = true
Config.ProxyURL = "something"
Config.proxyRedirectList = strings.Split("golang.org,youtube.com", ",")
ModeSelection("https://www.asda12313.de")
// give the (apparently asynchronous) selection a moment to settle
time.Sleep(1 * time.Millisecond)
assert.False(t, DirectOverrideChan)
ModeSelection("https://www.google.de")
time.Sleep(1 * time.Millisecond)
assert.True(t, DirectOverrideChan)
// reset the package-global config so later tests start clean
Config = Yaml{}
}
// TestCreateConfig builds a server from a minimal Yaml config and checks
// that CurrentServer is set up and that the comma-separated HostList is
// split into the proxy redirect list.
func TestCreateConfig(t *testing.T) {
fmt.Println("Running: TestCreateConfig")
utils.Init(os.Stdout, os.Stdout, os.Stderr)
Config = Yaml{LocalPort: "8888", CheckAddress: "https://www.google.de", HealthTime: 5, HostList: "google,eclipse", Log: "info"}
CreateConfig()
assert.NotNil(t, CurrentServer)
// "google,eclipse" must yield exactly two redirect entries
assert.Equal(t, len(Config.proxyRedirectList), 2)
// reset the package-global config so later tests start clean
Config = Yaml{}
}
// TestHandleCustomProxies feeds a mixed redirect list — a plain host
// ("eclipse"), a host with a proxy target ("google->test:8888") and a host
// with an empty target ("azure->") — into HandleCustomProxies and inspects
// the resulting HostList entries. Entries without a proxy target are
// presumably keyed by the empty proxy address; both such lookups use "".
func TestHandleCustomProxies(t *testing.T) {
	fmt.Println("Running: TestHandleCustomProxies")
	utils.Init(os.Stdout, os.Stdout, os.Stderr)

	HandleCustomProxies(strings.Split("eclipse,google->test:8888,azure->", ","))

	// "eclipse": no proxy target, so no proxy address is stored
	entry, ok := HostList.Get("")
	assert.True(t, ok)
	cfg := entry.(hostConfig)
	assert.True(t, cfg.reg.MatchString("eclipse2017.nasa.gov"))
	assert.True(t, ok)
	assert.False(t, cfg.proxyAddr != "")

	// "google->test:8888": proxy address gets the http:// scheme prepended
	entry, ok = HostList.Get("test:8888")
	assert.True(t, ok)
	cfg = entry.(hostConfig)
	assert.True(t, cfg.reg.MatchString("www.google.de"))
	assert.Equal(t, strings.Compare(cfg.proxyAddr, "http://test:8888"), 0)

	// "azure->": empty target behaves like no target at all
	entry, ok = HostList.Get("")
	assert.True(t, ok)
	cfg = entry.(hostConfig)
	assert.True(t, cfg.reg.MatchString("https://azure.microsoft.com/en-us/"))
	assert.False(t, cfg.proxyAddr != "")
}
// TestDisableAutoChangeMode verifies that ModeSelection leaves the direct
// override untouched while Config.DisableAutoChangeMode is set.
func TestDisableAutoChangeMode(t *testing.T) {
fmt.Println("Running: TestDisableAutoChangeMode")
utils.Init(os.Stdout, os.Stdout, os.Stderr)
Config.verbose = true
Config.CascadeMode = true
Config.ProxyURL = "something"
DirectOverrideChan = false
Config.proxyRedirectList = strings.Split("golang.org,youtube.com", ",")
Config.DisableAutoChangeMode = true
ModeSelection("https://www.asda12313.de")
// give the (apparently asynchronous) selection a moment to settle
time.Sleep(1 * time.Millisecond)
assert.False(t, DirectOverrideChan)
// reset the package-global config so later tests start clean
Config = Yaml{}
}
|
package opsgenie
import (
"testing"
"time"
)
// testargs is a fully populated OpsArgs fixture shared by the tests below.
var testargs = OpsArgs{"testKey", "testName", "testDescription", 99, "month", time.Second * 10, true}
// TestCreateUrl checks that createURL joins the OpsGenie base URL, the path
// and the query parameters into the expected absolute URL.
func TestCreateUrl(t *testing.T) {
	requestParams := map[string]string{"apiKey": "test"}
	url, err := createURL("/v1/test", requestParams)
	if err != nil {
		// t.Fatal avoids the go-vet warning for t.Errorf(err.Error()) (a
		// non-constant format string) and stops before the URL comparison,
		// which would otherwise report a misleading second failure.
		t.Fatal(err)
	}
	testURL := "https://api.opsgenie.com/v1/test?apiKey=test"
	if url != testURL {
		t.Errorf("Url not correct is [%s] but should be [%s]", url, testURL)
	}
}
// TestAllContentParams checks that every OpsArgs field is mirrored in the
// map produced by allContentParams under the expected parameter name.
func TestAllContentParams(t *testing.T) {
var all = allContentParams(testargs)
if all["apiKey"] != testargs.ApiKey || all["name"] != testargs.Name || all["description"] != testargs.Description || all["interval"] != testargs.Interval || all["intervalUnit"] != testargs.IntervalUnit {
t.Errorf("OpsArgs [%+v] are not the same as all content params [%s]", testargs, all)
}
}
// TestMandatoryRequestParams verifies that the mandatory request parameters
// mirror the corresponding OpsArgs fields.
func TestMandatoryRequestParams(t *testing.T) {
	params := mandatoryRequestParams(testargs)
	ok := params["apiKey"] == testargs.ApiKey && params["name"] == testargs.Name
	if !ok {
		t.Errorf("Requested params [%s] are not the same as from OpsArgs [%+v]", params, testargs)
	}
}
// TestCreateErrorResponse checks that an OpsGenie error payload is decoded
// into its numeric code and message.
func TestCreateErrorResponse(t *testing.T) {
	json := `{"code":10, "error": "test error"}`
	errorResp, err := createErrorResponse([]byte(json))
	if err != nil {
		// t.Fatal fixes the go-vet warning (err.Error() used as a format
		// string) and stops before touching errorResp, which may be unusable
		// after a decode failure.
		t.Fatal(err)
	}
	if errorResp.Code != 10 || errorResp.Message != "test error" {
		t.Errorf("Error [%+v] does not correspond to json [%s]", errorResp, json)
	}
}
|
// Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package telemetry
import (
"os"
"path"
"sync"
"syscall"
"time"
"github.com/go-logr/logr"
"github.com/pkg/errors"
"k8s.io/client-go/tools/clientcmd"
"github.com/gardener/test-infra/pkg/shoot-telemetry/analyse"
"github.com/gardener/test-infra/pkg/shoot-telemetry/common"
"github.com/gardener/test-infra/pkg/shoot-telemetry/config"
"github.com/gardener/test-infra/pkg/shoot-telemetry/controller"
)
// Telemetry wraps the shoot-telemetry controller and tracks which shoots
// are being measured.
type Telemetry struct {
log logr.Logger
mut sync.Mutex // guards shootsFilter (the accessor methods lock it; err/started are read/written without it)
shootsFilter map[string]bool // shoot keys to watch; value true = currently measured
interval time.Duration // check interval handed to the controller config
err error // first error returned by the controller goroutine (see StartWithConfig)
started bool // set by StartWithConfig, cleared by Stop
stopCh chan struct{} // closed when the controller goroutine returns
signalCh chan os.Signal // carries SIGUSR1 (flush) and SIGTERM (stop) to the controller
}
// New constructs a Telemetry with the given logger and check interval.
// The error result is always nil and only exists for signature stability.
func New(log logr.Logger, interval time.Duration) (*Telemetry, error) {
	t := &Telemetry{
		log:          log,
		interval:     interval,
		shootsFilter: map[string]bool{},
	}
	return t, nil
}
// Start starts the telemetry measurement with a specific kubeconfig to watch all shoots
// and writes results to resultDir (created if missing). It returns after the
// controller goroutine has been launched; see StartWithConfig.
func (c *Telemetry) Start(kubeconfigPath, resultDir string) error {
// NOTE(review): only the not-exist case is reported here; other Stat errors
// (e.g. permission) fall through and would surface later — confirm intended.
if _, err := os.Stat(kubeconfigPath); os.IsNotExist(err) {
return err
}
// Create the result directory when absent; propagate any other Stat error.
if _, err := os.Stat(resultDir); err != nil {
if !os.IsNotExist(err) {
return err
}
if err := os.MkdirAll(resultDir, os.ModePerm); err != nil {
return err
}
}
// No ShootsFilter is set: the controller watches all shoots (see doc comment).
cfg := &config.Config{
KubeConfigPath: kubeconfigPath,
CheckInterval: c.interval,
OutputDir: resultDir,
DisableAnalyse: true,
}
c.StartWithConfig(cfg)
return nil
}
// StartWithKubeconfig starts the telemetry measurement with a specific kubeconfig configuration to watch all shoots
// in the current shootsFilter, writing results to resultDir (created if missing).
func (c *Telemetry) StartWithKubeconfig(kubeconfig clientcmd.ClientConfig, resultDir string) error {
// Create the result directory when absent; propagate any other Stat error.
if _, err := os.Stat(resultDir); err != nil {
if !os.IsNotExist(err) {
return err
}
if err := os.MkdirAll(resultDir, os.ModePerm); err != nil {
return err
}
}
cfg := &config.Config{
KubeConfig: kubeconfig,
CheckInterval: c.interval,
OutputDir: resultDir,
DisableAnalyse: true,
ShootsFilter: c.shootsFilter,
}
c.StartWithConfig(cfg)
return nil
}
// StartForShoot starts the telemetry measurement with a kubeconfig for a specific shoot
// identified by name and namespace. The previous shootsFilter is replaced.
func (c *Telemetry) StartForShoot(shootName, shootNamespace, kubeconfigPath, resultDir string) error {
if _, err := os.Stat(kubeconfigPath); os.IsNotExist(err) {
return err
}
// NOTE(review): unlike Start/StartWithKubeconfig, resultDir is not created
// here, and shootsFilter is replaced without taking c.mut — confirm callers.
c.shootsFilter = map[string]bool{
common.GetShootKey(shootName, shootNamespace): true,
}
cfg := &config.Config{
KubeConfigPath: kubeconfigPath,
CheckInterval: c.interval,
OutputDir: resultDir,
DisableAnalyse: true,
ShootsFilter: c.shootsFilter,
}
c.StartWithConfig(cfg)
return nil
}
// StartForShoots starts the telemetry measurement with a kubeconfig for specific shoots
// given by their keys. The previous shootsFilter is replaced.
func (c *Telemetry) StartForShoots(kubeconfigPath, resultDir string, shootKeys []string) error {
if _, err := os.Stat(kubeconfigPath); os.IsNotExist(err) {
return err
}
// NOTE(review): shootsFilter is rebuilt without taking c.mut, and resultDir
// is not created here (unlike Start) — confirm intended.
c.shootsFilter = make(map[string]bool, len(shootKeys))
for _, key := range shootKeys {
c.shootsFilter[key] = true
}
cfg := &config.Config{
KubeConfigPath: kubeconfigPath,
CheckInterval: c.interval,
OutputDir: resultDir,
DisableAnalyse: true,
ShootsFilter: c.shootsFilter,
}
c.StartWithConfig(cfg)
return nil
}
// StartWithConfig launches the telemetry controller in a background goroutine
// using the given config. It returns immediately; a controller error is
// recorded in c.err and surfaced by Stop.
func (c *Telemetry) StartWithConfig(cfg *config.Config) {
c.stopCh = make(chan struct{})
// Unbuffered: WriteOutput/Stop block until the controller receives the signal.
c.signalCh = make(chan os.Signal)
c.started = true
go func() {
// Closing stopCh releases Stop's wait once the controller has returned.
defer close(c.stopCh)
// NOTE(review): c.err is written here and read in Stop without
// synchronization — potential data race; confirm or guard with c.mut.
if err := controller.StartController(cfg, c.signalCh); err != nil {
c.err = err
return
}
}()
}
// IsStarted indicates if the controller is already running
// (set by StartWithConfig, cleared by Stop; read without locking).
func (c *Telemetry) IsStarted() bool {
return c.started
}
// AddShoot adds another shoot to watch, identified by its shoot key.
func (c *Telemetry) AddShoot(shootKey string) {
c.mut.Lock()
defer c.mut.Unlock()
c.shootsFilter[shootKey] = true
}
// RemoveShoot removes a shoot from the telemetry watch. Removing an unknown
// key is a no-op.
func (c *Telemetry) RemoveShoot(shootKey string) {
c.mut.Lock()
defer c.mut.Unlock()
delete(c.shootsFilter, shootKey)
}
// HasShoot reports whether the shoot identified by shootKey is currently
// being measured.
func (c *Telemetry) HasShoot(shootKey string) bool {
	c.mut.Lock()
	defer c.mut.Unlock()
	// A missing key yields the zero value false, so a plain lookup covers
	// both the "unknown shoot" and the "known but false" cases.
	return c.shootsFilter[shootKey]
}
// ShootsLen returns the number of monitored shoots.
func (c *Telemetry) ShootsLen() int {
c.mut.Lock()
defer c.mut.Unlock()
return len(c.shootsFilter)
}
// StopAndAnalyze stops the telemetry measurement and generates a result
// summary; it returns the summary path and the per-shoot figures.
func (c *Telemetry) StopAndAnalyze(resultDir, format string) (string, map[string]*analyse.Figures, error) {
	stopErr := c.Stop()
	if stopErr != nil {
		return "", nil, stopErr
	}
	return c.Analyze(resultDir, format)
}
// WriteOutput forces the telemetry controller to write in memory data to file
// by sending SIGUSR1. The send blocks until the controller receives it.
// NOTE(review): calling this after Stop sends on a closed channel and panics.
func (c *Telemetry) WriteOutput() {
c.signalCh <- syscall.SIGUSR1
}
// Stop stops the measurement of the telemetry controller by sending SIGTERM
// and waiting for the controller goroutine to exit. Calling Stop twice
// panics (double close of signalCh).
func (c *Telemetry) Stop() error {
// Closed even on the early error return below, so the controller side still
// observes a closed signal channel.
defer close(c.signalCh)
c.log.V(3).Info("stop telemetry controller")
if c.err != nil {
// NOTE(review): on this path no SIGTERM is sent and stopCh is not awaited;
// presumably the controller already exited when err was set — confirm.
return errors.Wrapf(c.err, "error during telemetry controller execution")
}
c.signalCh <- syscall.SIGTERM
// wait for controller to finish
<-c.stopCh
c.started = false
return nil
}
// Analyze analyzes the previously measured values and returns the path to the summary
// file (empty when resultDir is empty), the per-shoot figures, and any error.
func (c *Telemetry) Analyze(resultDir, format string) (string, map[string]*analyse.Figures, error) {
c.log.V(3).Info("analyze telemetry metrics")
summaryOutput := ""
if resultDir != "" {
summaryOutput = path.Join(resultDir, "summary.json")
}
figures, err := analyse.AnalyseDir(resultDir, summaryOutput, format)
if err != nil {
return "", nil, errors.Wrap(err, "unable to analyze measurement")
}
return summaryOutput, figures, nil
}
|
package main
import (
"fmt"
"time"
)
// workItem is the unit of work that main hands to the worker goroutines.
type workItem struct {
quantity int // value entered by the user; only printed by the worker
}
// main starts three competing workers on one unbuffered channel, then reads
// quantities from stdin and dispatches one work item per entry until the
// user types -1 or input ends.
func main() {
	workChannel := make(chan workItem) // Make a non buffered channel: each send blocks until a worker receives

	go worker(1, workChannel)
	go worker(2, workChannel)
	go worker(3, workChannel)

	var quantity int
	for {
		fmt.Println("Enter quantity, -1 to exit ")
		// Fix: the Scanf result was ignored, so EOF or non-numeric input left
		// `quantity` stale and the loop spun forever re-printing the prompt.
		if n, err := fmt.Scanf("%d", &quantity); n != 1 || err != nil {
			break
		}
		if quantity == -1 {
			break // Break out of loop so process can complete
		}
		item := workItem{quantity} // Using a composite literal to create instance, using sequence to set member value
		workChannel <- item        // Put quantity into channel
	}
	fmt.Println("Exiting")
}
// worker endlessly receives work items from the (receive-only) channel and
// logs each one together with its own id and a timestamp. The receive blocks
// whenever the unbuffered channel is empty.
func worker(id int, workChannel <-chan workItem) {
	for {
		next := <-workChannel
		fmt.Printf("\t%v Worker [%d] - Working on work item with quantity %d\n", time.Now(), id, next.quantity)
	}
}
|
package tree
import (
"fmt"
)
// Item is the value type stored in tree nodes; any value is accepted.
type Item interface {}
// Node is a binary-tree node with child and parent links.
type Node struct {
Data Item
Left *Node
Right *Node
Parent *Node
Reserve1 Item // apparently reserved for future use — unused in this file
Reserve2 Item
Reserve3 Item
}
// preorderTraverseImpl prints the subtree rooted at node in
// root-left-right order, one "%v "-formatted value per node, to stdout.
func preorderTraverseImpl(node *Node) {
	if node == nil {
		return
	}
	fmt.Printf("%v ", node.Data)
	preorderTraverseImpl(node.Left)
	preorderTraverseImpl(node.Right)
}
// inorderTraverseImpl prints the subtree rooted at node in
// left-root-right order, one "%v "-formatted value per node, to stdout.
func inorderTraverseImpl(node *Node) {
	if node == nil {
		return
	}
	inorderTraverseImpl(node.Left)
	fmt.Printf("%v ", node.Data)
	inorderTraverseImpl(node.Right)
}
// postorderTraverseImpl prints the subtree rooted at node in
// left-right-root order, one "%v "-formatted value per node, to stdout.
func postorderTraverseImpl(node *Node) {
	if node == nil {
		return
	}
	postorderTraverseImpl(node.Left)
	postorderTraverseImpl(node.Right)
	fmt.Printf("%v ", node.Data)
}
|
package samplepackage
import (
"testing"
"github.com/stretchr/testify/assert"
)
// Test_bubbleSort runs bubbleSort over random, already-sorted and
// reverse-sorted inputs and asserts the returned slice is ascending.
func Test_bubbleSort(t *testing.T) {
tests := []struct {
name string
array []int64
expectedArray []int64
}{
{
name: "Elements are in random order",
array: []int64{1, 8, 3, 4, 6, 5, 7, 2, 9},
expectedArray: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9},
},
{
name: "Elements are in sorted order",
array: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9},
expectedArray: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9},
},
{
name: "Elements are in reverse sorted order",
array: []int64{9, 8, 7, 6, 5, 4, 3, 2, 1},
expectedArray: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// bubbleSort returns the sorted slice; compare it to the expectation.
actualValue := bubbleSort(tt.array)
assert.Equal(t, tt.expectedArray, actualValue)
})
}
}
// Test_recursiveBubbleSort runs recursiveBubbleSort over random, sorted and
// reverse-sorted inputs; the sort works in place, so the table's array field
// is asserted after the call.
func Test_recursiveBubbleSort(t *testing.T) {
tests := []struct {
name string
array []int64
expectedArray []int64
}{
{
name: "Elements are in random order",
array: []int64{1, 8, 3, 4, 6, 5, 7, 2, 9},
expectedArray: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9},
},
{
name: "Elements are in sorted order",
array: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9},
expectedArray: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9},
},
{
name: "Elements are in reverse sorted order",
array: []int64{9, 8, 7, 6, 5, 4, 3, 2, 1},
expectedArray: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
recursiveBubbleSort(tt.array, len(tt.array))
assert.Equal(t, tt.expectedArray, tt.array)
})
}
}
|
// Copyright 2020 Copyright (c) 2020 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package precompute
import (
"encoding/json"
"fmt"
"io"
"strings"
"github.com/gardener/test-infra/pkg/testmachinery/metadata"
)
// BulkResponse is the top-level Elasticsearch _bulk API response.
type BulkResponse struct {
ErrorsOccurred bool `json:"errors"`
Items []BulkItem `json:"items,omitempty"`
}
// BulkItem is one per-document entry of a bulk response.
type BulkItem struct {
Index BulkItemIndex `json:"index"`
}
// BulkItemIndex carries the outcome of a single bulk index action.
type BulkItemIndex struct {
Index string `json:"_index"`
Type string `json:"_type"`
ID string `json:"_id"`
HTTPStatus int `json:"status"`
Error BulkItemIndexError `json:"error"`
}
// BulkItemIndexError describes why a single bulk index action failed.
type BulkItemIndexError struct {
Type string `json:"type"`
Reason string `json:"reason,omitempty"`
}
// QueryResponse is the envelope of a (scrolling) search response.
type QueryResponse struct {
ScrollID string `json:"_scroll_id,omitempty"`
Hits Hits `json:"hits,omitempty"`
}
// Hits groups the result documents with the total hit count.
type Hits struct {
Total Total `json:"total,omitempty"`
Results []Result `json:"hits,omitempty"`
}
// Total wraps the total number of hits.
type Total struct {
Value int `json:"value"`
}
// Result is one matched document: its index, id and decoded step summary.
type Result struct {
Index string `json:"_index"`
DocID string `json:"_id"`
StepSummary metadata.StepSummary `json:"_source,omitempty"`
}
// BuildBulkUpdateQuery renders the given results as an Elasticsearch _bulk
// request: for every item, one action line addressing index/id followed by
// the JSON-encoded step summary. It returns the request path and body.
func BuildBulkUpdateQuery(items []Result) (path string, payload io.Reader, err error) {
	path = "/_bulk"
	var b strings.Builder
	for _, item := range items {
		// Action line, then the document itself on the following line.
		fmt.Fprintf(&b, "{\"index\":{\"_index\":\"%s\",\"_id\":\"%s\"}}\n", item.Index, item.DocID)
		doc, marshalErr := json.Marshal(item.StepSummary)
		if marshalErr != nil {
			return "", nil, marshalErr
		}
		b.Write(doc)
		b.WriteString("\n")
	}
	payload = strings.NewReader(b.String())
	return
}
// BuildScrollQueryInitial builds the request that opens a one-minute scroll
// over all "teststep" documents in the given index, returning pageSize
// documents per page (100 when pageSize is 0).
func BuildScrollQueryInitial(index string, pageSize int) (path string, payload io.Reader) {
path = fmt.Sprintf("/%s/_search?scroll=1m", index)
// default paging size
if pageSize == 0 {
pageSize = 100
}
// add a filter (or add as json object to the must array) if you want to restrict the dataset for experimenting/debugging
// "range": {
// "tm.tr.startTime": {
// "gte": "2020-09-30T16:45:24.249Z",
// "lte": "2020-10-01T16:45:24.249Z",
// "format": "strict_date_optional_time"
// }
// }
query := `
{
"size": %d,
"query": {
"bool": {
"must": [],
"filter": [
{
"match_all": {}
},
{
"match_phrase": {
"type.keyword": "teststep"
}
}
],
"should": [],
"must_not": []
}
}
}`
// Only %d (the page size) is substituted into the raw query template.
query = fmt.Sprintf(query, pageSize)
payload = strings.NewReader(query)
return
}
// BuildScrollQueryNextPage builds the request that fetches the next page of
// an open scrolling search, keeping the scroll context alive for one minute.
func BuildScrollQueryNextPage(scrollID string) (path string, payload io.Reader) {
	body := fmt.Sprintf(`
{
"scroll": "1m",
"scroll_id": "%s"
}`, scrollID)
	path = "/_search/scroll"
	payload = strings.NewReader(body)
	return
}
|
// Copyright 2020 The VectorSQL Authors.
//
// Code is licensed under Apache License, Version 2.0.
package dataformats
import (
"datablocks"
)
// IDataBlockInputFormat reads a stream of DataBlocks: an optional prefix,
// repeated Read calls, then an optional suffix.
type IDataBlockInputFormat interface {
ReadPrefix() error
Read() (*datablocks.DataBlock, error)
ReadSuffix() error
}
// IDataBlockOutputFormat writes DataBlocks framed by a prefix and a suffix.
type IDataBlockOutputFormat interface {
WritePrefix() error
Write(*datablocks.DataBlock) error
WriteSuffix() error
}
|
/*
At the first Go / No Go poll, every Elf is Go until the Fuel Counter-Upper. They haven't determined the amount of fuel required yet.
Fuel required to launch a given module is based on its mass. Specifically, to find the fuel required for a module, take its mass, divide by three, round down, and subtract 2.
For example:
For a mass of 12, divide by 3 and round down to get 4, then subtract 2 to get 2.
For a mass of 14, dividing by 3 and rounding down still yields 4, so the fuel required is also 2.
For a mass of 1969, the fuel required is 654.
For a mass of 100756, the fuel required is 33583.
The Fuel Counter-Upper needs to know the total fuel requirement. To find it, individually calculate the fuel needed for the mass of each module (your puzzle input), then add together all the fuel values.
What is the sum of the fuel requirements for all of the modules on your spacecraft?
*/
package main
import (
"bufio"
"log"
"os"
"strconv"
"github.com/spitzfaust/adventofcode2019/util"
)
// calcFuelForMass returns the fuel needed to launch a module of the given
// mass: the mass divided by three (integer division) minus two.
func calcFuelForMass(mass int) int {
	third := mass / 3
	return third - 2
}
// calcTotalFuelForMass returns the fuel for a module of the given mass plus
// the fuel needed to carry that fuel itself, adding increments until one
// would be zero or negative (e.g. 1969 -> 654+216+70+21+5 = 966).
func calcTotalFuelForMass(mass int) int {
	total := calcFuelForMass(mass)
	for extra := calcFuelForMass(total); extra > 0; extra = calcFuelForMass(extra) {
		total += extra
	}
	return total
}
// main reads one module mass per line from ./input.txt and prints the summed
// total fuel (including fuel-for-fuel) for all modules.
func main() {
file, err := os.Open("./input.txt")
util.Check(err)
defer file.Close()
fuelNeeded := 0
scanner := bufio.NewScanner(file)
for scanner.Scan() {
mass, err := strconv.Atoi(scanner.Text())
util.Check(err)
fuelNeeded += calcTotalFuelForMass(mass)
}
// Scanner errors (anything other than clean EOF) surface after the loop.
if err := scanner.Err(); err != nil {
log.Fatal(err)
}
log.Printf("Total fuel needed: %d\n", fuelNeeded)
}
|
package eventsourcing
import (
"context"
"testing"
"github.com/caos/zitadel/internal/api/authz"
"github.com/caos/zitadel/internal/crypto"
caos_errs "github.com/caos/zitadel/internal/errors"
"github.com/caos/zitadel/internal/eventstore/models"
proj_model "github.com/caos/zitadel/internal/project/model"
"github.com/caos/zitadel/internal/project/repository/eventsourcing/model"
)
// TestProjectByIDQuery checks that ProjectByIDQuery produces a query with the
// expected number of filters and rejects a missing aggregate ID with a
// precondition-failed error.
func TestProjectByIDQuery(t *testing.T) {
type args struct {
id string
sequence uint64
}
type res struct {
filterLen int
wantErr bool
errFunc func(err error) bool
}
tests := []struct {
name string
args args
res res
}{
{
name: "project by id query ok",
args: args{
id: "AggregateID",
sequence: 1,
},
res: res{
filterLen: 3,
},
},
{
name: "project by id query, no id",
args: args{
sequence: 1,
},
res: res{
filterLen: 3,
wantErr: true,
errFunc: caos_errs.IsPreconditionFailed,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
query, err := ProjectByIDQuery(tt.args.id, tt.args.sequence)
if !tt.res.wantErr && query == nil {
t.Errorf("query should not be nil")
}
if !tt.res.wantErr && len(query.Filters) != tt.res.filterLen {
t.Errorf("got wrong filter len: expected: %v, actual: %v ", tt.res.filterLen, len(query.Filters))
}
if tt.res.wantErr && !tt.res.errFunc(err) {
t.Errorf("got wrong err: %v ", err)
}
})
}
}
// TestProjectQuery checks that ProjectQuery builds a non-nil query with the
// expected number of filters.
func TestProjectQuery(t *testing.T) {
type args struct {
sequence uint64
}
type res struct {
filterLen int
}
tests := []struct {
name string
args args
res res
}{
{
name: "project query ok",
args: args{
sequence: 1,
},
res: res{
filterLen: 2,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
query := ProjectQuery(tt.args.sequence)
if query == nil {
t.Errorf("query should not be nil")
}
if len(query.Filters) != tt.res.filterLen {
t.Errorf("got wrong filter len: expected: %v, actual: %v ", tt.res.filterLen, len(query.Filters))
}
})
}
}
// TestProjectAggregate checks that ProjectAggregate returns an empty
// aggregate for a valid project and a precondition-failed error when the
// project is nil.
func TestProjectAggregate(t *testing.T) {
type args struct {
ctx context.Context
aggCreator *models.AggregateCreator
project *model.Project
}
type res struct {
eventLen int
aggType models.AggregateType
wantErr bool
errFunc func(err error) bool
}
tests := []struct {
name string
args args
res res
}{
{
name: "create aggregate",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
aggCreator: models.NewAggregateCreator("Test"),
project: &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID", Sequence: 1}},
},
res: res{
eventLen: 0,
aggType: model.ProjectAggregate,
},
},
{
name: "project nil",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 0,
aggType: model.ProjectAggregate,
wantErr: true,
errFunc: caos_errs.IsPreconditionFailed,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
agg, err := ProjectAggregate(tt.args.ctx, tt.args.aggCreator, tt.args.project)
if !tt.res.wantErr && agg == nil {
t.Errorf("agg should not be nil")
}
if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
}
if tt.res.wantErr && !tt.res.errFunc(err) {
t.Errorf("got wrong err: %v ", err)
}
})
}
}
// TestProjectCreateAggregate checks that creating a project yields a
// ProjectAdded plus ProjectMemberAdded event and that a nil project or nil
// member is rejected with a precondition-failed error.
func TestProjectCreateAggregate(t *testing.T) {
type args struct {
ctx context.Context
new *model.Project
member *model.ProjectMember
aggCreator *models.AggregateCreator
}
type res struct {
eventLen int
eventType []models.EventType
wantErr bool
errFunc func(err error) bool
}
tests := []struct {
name string
args args
res res
}{
{
name: "project update aggregate ok",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
new: &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
member: &model.ProjectMember{UserID: "UserID"},
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 2,
eventType: []models.EventType{model.ProjectAdded, model.ProjectMemberAdded},
},
},
{
name: "new project nil",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
new: nil,
member: &model.ProjectMember{UserID: "UserID"},
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
wantErr: true,
errFunc: caos_errs.IsPreconditionFailed,
},
},
{
name: "new member nil",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
new: &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
member: nil,
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
wantErr: true,
errFunc: caos_errs.IsPreconditionFailed,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// ProjectCreateAggregate returns a func(ctx) that is invoked directly.
agg, err := ProjectCreateAggregate(tt.args.aggCreator, tt.args.new, tt.args.member)(tt.args.ctx)
if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
}
if !tt.res.wantErr {
for i := range agg.Events {
if !tt.res.wantErr && agg.Events[i].Type != tt.res.eventType[i] {
t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventType, agg.Events[i].Type.String())
}
if !tt.res.wantErr && agg.Events[i].Data == nil {
t.Errorf("should have data in event")
}
}
}
if tt.res.wantErr && !tt.res.errFunc(err) {
t.Errorf("got wrong err: %v ", err)
}
})
}
}
// TestProjectUpdateAggregate checks that updating a project yields one
// ProjectChanged event with data, and that a nil existing or nil new project
// is rejected with a precondition-failed error.
func TestProjectUpdateAggregate(t *testing.T) {
type args struct {
ctx context.Context
existing *model.Project
new *model.Project
aggCreator *models.AggregateCreator
}
type res struct {
eventLen int
eventType models.EventType
wantErr bool
errFunc func(err error) bool
}
tests := []struct {
name string
args args
res res
}{
{
name: "project update aggregate ok",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
existing: &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
new: &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName_Changed", State: int32(proj_model.ProjectStateActive)},
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 1,
eventType: model.ProjectChanged,
},
},
{
name: "existing project nil",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
existing: nil,
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 1,
eventType: model.ProjectChanged,
wantErr: true,
errFunc: caos_errs.IsPreconditionFailed,
},
},
{
name: "new project nil",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
existing: &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
new: nil,
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 1,
eventType: model.ProjectChanged,
wantErr: true,
errFunc: caos_errs.IsPreconditionFailed,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
agg, err := ProjectUpdateAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
}
if !tt.res.wantErr && agg.Events[0].Type != tt.res.eventType {
t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventType, agg.Events[0].Type.String())
}
if !tt.res.wantErr && agg.Events[0].Data == nil {
t.Errorf("should have data in event")
}
if tt.res.wantErr && !tt.res.errFunc(err) {
t.Errorf("got wrong err: %v ", err)
}
})
}
}
// TestProjectDeactivateAggregate checks that deactivating a project yields
// one ProjectDeactivated event and that a nil existing project is rejected
// with a precondition-failed error.
func TestProjectDeactivateAggregate(t *testing.T) {
type args struct {
ctx context.Context
existing *model.Project
aggCreator *models.AggregateCreator
}
type res struct {
eventLen int
eventType models.EventType
wantErr bool
errFunc func(err error) bool
}
tests := []struct {
name string
args args
res res
}{
{
name: "project deactivate aggregate ok",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
existing: &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 1,
eventType: model.ProjectDeactivated,
},
},
{
name: "existing project nil",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
existing: nil,
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 1,
eventType: model.ProjectDeactivated,
wantErr: true,
errFunc: caos_errs.IsPreconditionFailed,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
agg, err := ProjectDeactivateAggregate(tt.args.aggCreator, tt.args.existing)(tt.args.ctx)
if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
}
if !tt.res.wantErr && agg.Events[0].Type != tt.res.eventType {
t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventType, agg.Events[0].Type.String())
}
if tt.res.wantErr && !tt.res.errFunc(err) {
t.Errorf("got wrong err: %v ", err)
}
})
}
}
// TestProjectReactivateAggregate checks that reactivating an inactive project
// yields one ProjectReactivated event and that a nil existing project is
// rejected with a precondition-failed error.
func TestProjectReactivateAggregate(t *testing.T) {
type args struct {
ctx context.Context
existing *model.Project
aggCreator *models.AggregateCreator
}
type res struct {
eventLen int
eventType models.EventType
wantErr bool
errFunc func(err error) bool
}
tests := []struct {
name string
args args
res res
}{
{
name: "project reactivate aggregate ok",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
existing: &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateInactive)},
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 1,
eventType: model.ProjectReactivated,
},
},
{
name: "existing project nil",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
existing: nil,
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 1,
eventType: model.ProjectReactivated,
wantErr: true,
errFunc: caos_errs.IsPreconditionFailed,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
agg, err := ProjectReactivateAggregate(tt.args.aggCreator, tt.args.existing)(tt.args.ctx)
if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
}
if !tt.res.wantErr && agg.Events[0].Type != tt.res.eventType {
t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventType, agg.Events[0].Type.String())
}
if tt.res.wantErr && !tt.res.errFunc(err) {
t.Errorf("got wrong err: %v ", err)
}
})
}
}
// TestProjectMemberAddedAggregate checks that adding a member yields one
// ProjectMemberAdded event with data and that a nil project or nil member is
// rejected with a precondition-failed error.
func TestProjectMemberAddedAggregate(t *testing.T) {
type args struct {
ctx context.Context
existing *model.Project
new *model.ProjectMember
aggCreator *models.AggregateCreator
}
type res struct {
eventLen int
eventType models.EventType
wantErr bool
errFunc func(err error) bool
}
tests := []struct {
name string
args args
res res
}{
{
name: "projectmember added ok",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
existing: &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
new: &model.ProjectMember{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, UserID: "UserID", Roles: []string{"Roles"}},
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 1,
eventType: model.ProjectMemberAdded,
},
},
{
name: "existing project nil",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
existing: nil,
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 1,
eventType: model.ProjectMemberAdded,
wantErr: true,
errFunc: caos_errs.IsPreconditionFailed,
},
},
{
name: "member nil",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
existing: &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
new: nil,
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 1,
eventType: model.ProjectMemberAdded,
wantErr: true,
errFunc: caos_errs.IsPreconditionFailed,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
agg, err := ProjectMemberAddedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
}
if !tt.res.wantErr && agg.Events[0].Type != tt.res.eventType {
t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventType, agg.Events[0].Type.String())
}
if !tt.res.wantErr && agg.Events[0].Data == nil {
t.Errorf("should have data in event")
}
if tt.res.wantErr && !tt.res.errFunc(err) {
t.Errorf("got wrong err: %v ", err)
}
})
}
}
// TestProjectMemberChangedAggregate checks that changing a member yields one
// ProjectMemberChanged event with data and that a nil project or nil member
// is rejected with a precondition-failed error.
func TestProjectMemberChangedAggregate(t *testing.T) {
type args struct {
ctx context.Context
existing *model.Project
new *model.ProjectMember
aggCreator *models.AggregateCreator
}
type res struct {
eventLen int
eventType models.EventType
wantErr bool
errFunc func(err error) bool
}
tests := []struct {
name string
args args
res res
}{
{
name: "projectmember changed ok",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
existing: &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
new: &model.ProjectMember{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, UserID: "UserID", Roles: []string{"Roles"}},
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 1,
eventType: model.ProjectMemberChanged,
},
},
{
name: "existing project nil",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
existing: nil,
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 1,
eventType: model.ProjectMemberChanged,
wantErr: true,
errFunc: caos_errs.IsPreconditionFailed,
},
},
{
name: "member nil",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
existing: &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
new: nil,
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 1,
eventType: model.ProjectMemberChanged,
wantErr: true,
errFunc: caos_errs.IsPreconditionFailed,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
agg, err := ProjectMemberChangedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
}
if !tt.res.wantErr && agg.Events[0].Type != tt.res.eventType {
t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventType, agg.Events[0].Type.String())
}
if !tt.res.wantErr && agg.Events[0].Data == nil {
t.Errorf("should have data in event")
}
if tt.res.wantErr && !tt.res.errFunc(err) {
t.Errorf("got wrong err: %v ", err)
}
})
}
}
// TestProjectMemberRemovedAggregate checks that removing a member yields one
// ProjectMemberRemoved event with data and that a nil project or nil member
// is rejected with a precondition-failed error.
func TestProjectMemberRemovedAggregate(t *testing.T) {
type args struct {
ctx context.Context
existing *model.Project
new *model.ProjectMember
aggCreator *models.AggregateCreator
}
type res struct {
eventLen int
eventType models.EventType
wantErr bool
errFunc func(err error) bool
}
tests := []struct {
name string
args args
res res
}{
{
name: "projectmember removed ok",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
existing: &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
new: &model.ProjectMember{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, UserID: "UserID", Roles: []string{"Roles"}},
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 1,
eventType: model.ProjectMemberRemoved,
},
},
{
name: "existing project nil",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
existing: nil,
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 1,
eventType: model.ProjectMemberRemoved,
wantErr: true,
errFunc: caos_errs.IsPreconditionFailed,
},
},
{
name: "member nil",
args: args{
ctx: authz.NewMockContext("orgID", "userID"),
existing: &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
new: nil,
aggCreator: models.NewAggregateCreator("Test"),
},
res: res{
eventLen: 1,
eventType: model.ProjectMemberRemoved,
wantErr: true,
errFunc: caos_errs.IsPreconditionFailed,
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
agg, err := ProjectMemberRemovedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
}
if !tt.res.wantErr && agg.Events[0].Type != tt.res.eventType {
t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventType, agg.Events[0].Type.String())
}
if !tt.res.wantErr && agg.Events[0].Data == nil {
t.Errorf("should have data in event")
}
if tt.res.wantErr && !tt.res.errFunc(err) {
t.Errorf("got wrong err: %v ", err)
}
})
}
}
// TestProjectRoleAddedAggregate verifies that ProjectRoleAddedAggregate emits
// one ProjectRoleAdded event per added role, and returns a precondition-failed
// error when the existing project or the new roles are nil.
func TestProjectRoleAddedAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        []*model.ProjectRole
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen  int
		eventType models.EventType
		wantErr   bool
		errFunc   func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			name: "projectrole added ok",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        []*model.ProjectRole{{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Key: "Key"}},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:  1,
				eventType: model.ProjectRoleAdded,
			},
		},
		{
			name: "projectrole multiple added ok",
			args: args{
				ctx:      authz.NewMockContext("orgID", "userID"),
				existing: &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new: []*model.ProjectRole{
					{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Key: "Key"},
					{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Key: "Key2"},
				},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:  2,
				eventType: model.ProjectRoleAdded,
			},
		},
		{
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:  1,
				eventType: model.ProjectRoleAdded,
				wantErr:   true,
				errFunc:   caos_errs.IsPreconditionFailed,
			},
		},
		{
			// Renamed from the copy-pasted "member nil": this case passes nil roles.
			name: "role nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:  1,
				eventType: model.ProjectRoleAdded,
				wantErr:   true,
				errFunc:   caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			agg, err := ProjectRoleAddedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new...)(tt.args.ctx)
			if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
				t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
			}
			if !tt.res.wantErr && agg.Events[0].Type != tt.res.eventType {
				t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventType, agg.Events[0].Type.String())
			}
			if !tt.res.wantErr && agg.Events[0].Data == nil {
				t.Errorf("should have data in event")
			}
			if tt.res.wantErr && !tt.res.errFunc(err) {
				t.Errorf("got wrong err: %v ", err)
			}
		})
	}
}
// TestProjectRoleChangedAggregate verifies that ProjectRoleChangedAggregate
// emits a single ProjectRoleChanged event for a valid role change, and returns
// a precondition-failed error when the project or the role is nil.
func TestProjectRoleChangedAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.ProjectRole
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen  int
		eventType models.EventType
		wantErr   bool
		errFunc   func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			// Renamed from "projectmember changed ok": this test changes a role,
			// not a member.
			name: "projectrole changed ok",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        &model.ProjectRole{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Key: "Key"},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:  1,
				eventType: model.ProjectRoleChanged,
			},
		},
		{
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:  1,
				eventType: model.ProjectRoleChanged,
				wantErr:   true,
				errFunc:   caos_errs.IsPreconditionFailed,
			},
		},
		{
			// Renamed from the copy-pasted "member nil": this case passes a nil role.
			name: "role nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:  1,
				eventType: model.ProjectRoleChanged,
				wantErr:   true,
				errFunc:   caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			agg, err := ProjectRoleChangedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
			if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
				t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
			}
			if !tt.res.wantErr && agg.Events[0].Type != tt.res.eventType {
				t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventType, agg.Events[0].Type.String())
			}
			if !tt.res.wantErr && agg.Events[0].Data == nil {
				t.Errorf("should have data in event")
			}
			if tt.res.wantErr && !tt.res.errFunc(err) {
				t.Errorf("got wrong err: %v ", err)
			}
		})
	}
}
// TestProjectRoleRemovedAggregate verifies that ProjectRoleRemovedAggregate
// emits a ProjectRoleRemoved event, cascades a ProjectGrantCascadeChanged
// event when grants referencing the removed role are passed, and returns a
// precondition-failed error when the project or the role is nil.
func TestProjectRoleRemovedAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.ProjectRole
		grants     []*model.ProjectGrant
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen   int
		eventTypes []models.EventType
		wantErr    bool
		errFunc    func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			// Renamed from the copy-pasted "projectrole changed ok": this test
			// removes a role.
			name: "projectrole removed ok",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        &model.ProjectRole{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Key: "Key"},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:   1,
				eventTypes: []models.EventType{model.ProjectRoleRemoved},
			},
		},
		{
			// Renamed from "projectrole changed with grant": removal with a grant
			// cascade.
			name: "projectrole removed with grant",
			args: args{
				ctx: authz.NewMockContext("orgID", "userID"),
				existing: &model.Project{
					ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"},
					Name:       "ProjectName",
					State:      int32(proj_model.ProjectStateActive),
					Grants:     []*model.ProjectGrant{{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, GrantID: "GrantID", GrantedOrgID: "OrgID", RoleKeys: []string{"ROLE"}}},
				},
				new:        &model.ProjectRole{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Key: "Key"},
				grants:     []*model.ProjectGrant{{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, GrantID: "GrantID", GrantedOrgID: "OrgID", RoleKeys: []string{}}},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:   2,
				eventTypes: []models.EventType{model.ProjectRoleRemoved, model.ProjectGrantCascadeChanged},
			},
		},
		{
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
		{
			// Renamed from the copy-pasted "member nil": this case passes a nil role.
			name: "role nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			agg, err := ProjectRoleRemovedAggregate(tt.args.ctx, tt.args.aggCreator, tt.args.existing, tt.args.new, tt.args.grants)
			if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
				t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
			}
			// agg may be nil in the error cases; guard before inspecting events.
			if agg != nil {
				for i := range agg.Events {
					if !tt.res.wantErr && agg.Events[i].Type != tt.res.eventTypes[i] {
						t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventTypes[i], agg.Events[i].Type.String())
					}
					if !tt.res.wantErr && agg.Events[i].Data == nil {
						t.Errorf("should have data in event")
					}
				}
			}
			if tt.res.wantErr && !tt.res.errFunc(err) {
				t.Errorf("got wrong err: %v ", err)
			}
		})
	}
}
// TestProjectAppAddedAggregate verifies that ApplicationAddedAggregate emits
// an ApplicationAdded event followed by an OIDCConfigAdded event when the new
// application carries an OIDC config, and returns a precondition-failed error
// when the project or the application is nil.
func TestProjectAppAddedAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.Application
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen   int
		eventTypes []models.EventType
		wantErr    bool
		errFunc    func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			name: "add oidc application",
			args: args{
				ctx:      authz.NewMockContext("orgID", "userID"),
				existing: &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new: &model.Application{
					ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"},
					AppID:      "AppId",
					Name:       "Name",
					OIDCConfig: &model.OIDCConfig{AppID: "AppID", ClientID: "ClientID"},
				},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:   2,
				eventTypes: []models.EventType{model.ApplicationAdded, model.OIDCConfigAdded},
			},
		},
		{
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
		{
			name: "app nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			agg, err := ApplicationAddedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
			if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
				t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
			}
			for i := 0; i < tt.res.eventLen; i++ {
				if !tt.res.wantErr && agg.Events[i].Type != tt.res.eventTypes[i] {
					t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventTypes[i], agg.Events[i].Type.String())
				}
				// Was duplicated twice in the original; one check suffices.
				if !tt.res.wantErr && agg.Events[i].Data == nil {
					t.Errorf("should have data in event")
				}
			}
			if tt.res.wantErr && !tt.res.errFunc(err) {
				t.Errorf("got wrong err: %v ", err)
			}
		})
	}
}
// TestProjectAppChangedAggregate verifies that ApplicationChangedAggregate
// emits an ApplicationChanged event for a valid change and returns a
// precondition-failed error when the project or the application is nil.
func TestProjectAppChangedAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.Application
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen   int
		eventTypes []models.EventType
		wantErr    bool
		errFunc    func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			name: "change app",
			args: args{
				ctx: authz.NewMockContext("orgID", "userID"),
				existing: &model.Project{
					ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"},
					Name:       "ProjectName",
					State:      int32(proj_model.ProjectStateActive),
					Applications: []*model.Application{
						{AppID: "AppID", Name: "Name"},
					}},
				new: &model.Application{
					ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"},
					AppID:      "AppId",
					Name:       "NameChanged",
				},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:   1,
				eventTypes: []models.EventType{model.ApplicationChanged},
			},
		},
		{
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
		{
			name: "app nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			aggregate, err := ApplicationChangedAggregate(tc.args.aggCreator, tc.args.existing, tc.args.new)(tc.args.ctx)
			// Error cases: only the error matters, nothing else is inspected.
			if tc.res.wantErr {
				if !tc.res.errFunc(err) {
					t.Errorf("got wrong err: %v ", err)
				}
				return
			}
			if len(aggregate.Events) != tc.res.eventLen {
				t.Errorf("got wrong event len: expected: %v, actual: %v ", tc.res.eventLen, len(aggregate.Events))
			}
			for i := 0; i < tc.res.eventLen; i++ {
				if aggregate.Events[i].Type != tc.res.eventTypes[i] {
					t.Errorf("got wrong event type: expected: %v, actual: %v ", tc.res.eventTypes[i], aggregate.Events[i].Type.String())
				}
				if aggregate.Events[i].Data == nil {
					t.Errorf("should have data in event")
				}
			}
		})
	}
}
// TestProjectAppRemovedAggregate verifies that ApplicationRemovedAggregate
// emits an ApplicationRemoved event for an existing application and returns a
// precondition-failed error when the project or the application is nil.
func TestProjectAppRemovedAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.Application
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen   int
		eventTypes []models.EventType
		wantErr    bool
		errFunc    func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			name: "remove app",
			args: args{
				ctx: authz.NewMockContext("orgID", "userID"),
				existing: &model.Project{
					ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"},
					Name:       "ProjectName",
					State:      int32(proj_model.ProjectStateActive),
					Applications: []*model.Application{
						{AppID: "AppID", Name: "Name"},
					}},
				new: &model.Application{
					ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"},
					AppID:      "AppId",
					Name:       "Name",
				},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:   1,
				eventTypes: []models.EventType{model.ApplicationRemoved},
			},
		},
		{
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
		{
			name: "app nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			agg, err := ApplicationRemovedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
			if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
				t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
			}
			// Error cases set eventLen to 0, so this loop only runs on success.
			for i := 0; i < tt.res.eventLen; i++ {
				if !tt.res.wantErr && agg.Events[i].Type != tt.res.eventTypes[i] {
					t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventTypes[i], agg.Events[i].Type.String())
				}
				if !tt.res.wantErr && agg.Events[i].Data == nil {
					t.Errorf("should have data in event")
				}
			}
			if tt.res.wantErr && !tt.res.errFunc(err) {
				t.Errorf("got wrong err: %v ", err)
			}
		})
	}
}
// TestProjectAppDeactivatedAggregate verifies that
// ApplicationDeactivatedAggregate emits an ApplicationDeactivated event for an
// existing application and returns a precondition-failed error when the
// project or the application is nil.
func TestProjectAppDeactivatedAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.Application
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen   int
		eventTypes []models.EventType
		wantErr    bool
		errFunc    func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			name: "deactivate app",
			args: args{
				ctx: authz.NewMockContext("orgID", "userID"),
				existing: &model.Project{
					ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"},
					Name:       "ProjectName",
					State:      int32(proj_model.ProjectStateActive),
					Applications: []*model.Application{
						{AppID: "AppID", Name: "Name"},
					}},
				new: &model.Application{
					ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"},
					AppID:      "AppId",
					Name:       "Name",
				},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:   1,
				eventTypes: []models.EventType{model.ApplicationDeactivated},
			},
		},
		{
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
		{
			name: "app nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			agg, err := ApplicationDeactivatedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
			if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
				t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
			}
			// Error cases set eventLen to 0, so this loop only runs on success.
			for i := 0; i < tt.res.eventLen; i++ {
				if !tt.res.wantErr && agg.Events[i].Type != tt.res.eventTypes[i] {
					t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventTypes[i], agg.Events[i].Type.String())
				}
				if !tt.res.wantErr && agg.Events[i].Data == nil {
					t.Errorf("should have data in event")
				}
			}
			if tt.res.wantErr && !tt.res.errFunc(err) {
				t.Errorf("got wrong err: %v ", err)
			}
		})
	}
}
// TestProjectAppReactivatedAggregate verifies that
// ApplicationReactivatedAggregate emits an ApplicationReactivated event for an
// existing application and returns a precondition-failed error when the
// project or the application is nil.
func TestProjectAppReactivatedAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.Application
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen   int
		eventTypes []models.EventType
		wantErr    bool
		errFunc    func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			// Renamed from the copy-pasted "deactivate app": this test reactivates.
			name: "reactivate app",
			args: args{
				ctx: authz.NewMockContext("orgID", "userID"),
				existing: &model.Project{
					ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"},
					Name:       "ProjectName",
					State:      int32(proj_model.ProjectStateActive),
					Applications: []*model.Application{
						{AppID: "AppID", Name: "Name"},
					}},
				new: &model.Application{
					ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"},
					AppID:      "AppId",
					Name:       "Name",
				},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:   1,
				eventTypes: []models.EventType{model.ApplicationReactivated},
			},
		},
		{
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
		{
			name: "app nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			agg, err := ApplicationReactivatedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
			if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
				t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
			}
			for i := 0; i < tt.res.eventLen; i++ {
				if !tt.res.wantErr && agg.Events[i].Type != tt.res.eventTypes[i] {
					t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventTypes[i], agg.Events[i].Type.String())
				}
				if !tt.res.wantErr && agg.Events[i].Data == nil {
					t.Errorf("should have data in event")
				}
			}
			if tt.res.wantErr && !tt.res.errFunc(err) {
				t.Errorf("got wrong err: %v ", err)
			}
		})
	}
}
// TestOIDCConfigchangAggregate verifies that OIDCConfigChangedAggregate emits
// an OIDCConfigChanged event when an application's OIDC config changes, and
// returns a precondition-failed error when the project or the config is nil.
// (The function name keeps its historical spelling to avoid breaking test
// selection by name; renaming would change the exported identifier.)
func TestOIDCConfigchangAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.OIDCConfig
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen   int
		eventTypes []models.EventType
		wantErr    bool
		errFunc    func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			// Renamed from the copy-pasted "deactivate app": this test changes
			// an OIDC config.
			name: "change oidc config",
			args: args{
				ctx: authz.NewMockContext("orgID", "userID"),
				existing: &model.Project{
					ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"},
					Name:       "ProjectName",
					State:      int32(proj_model.ProjectStateActive),
					Applications: []*model.Application{
						{AppID: "AppID", Name: "Name", OIDCConfig: &model.OIDCConfig{AppID: "AppID", AuthMethodType: 1}},
					}},
				new: &model.OIDCConfig{
					ObjectRoot:     models.ObjectRoot{AggregateID: "AggregateID"},
					AppID:          "AppID",
					AuthMethodType: 2,
				},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:   1,
				eventTypes: []models.EventType{model.OIDCConfigChanged},
			},
		},
		{
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
		{
			name: "app nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			agg, err := OIDCConfigChangedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
			if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
				t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
			}
			for i := 0; i < tt.res.eventLen; i++ {
				if !tt.res.wantErr && agg.Events[i].Type != tt.res.eventTypes[i] {
					t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventTypes[i], agg.Events[i].Type.String())
				}
				if !tt.res.wantErr && agg.Events[i].Data == nil {
					t.Errorf("should have data in event")
				}
			}
			if tt.res.wantErr && !tt.res.errFunc(err) {
				t.Errorf("got wrong err: %v ", err)
			}
		})
	}
}
// TestOIDCConfigSecretChangeAggregate verifies that
// OIDCConfigSecretChangedAggregate emits an OIDCConfigSecretChanged event for
// an existing application's OIDC config and returns a precondition-failed
// error when the project is nil.
func TestOIDCConfigSecretChangeAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.OIDCConfig
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen   int
		eventTypes []models.EventType
		wantErr    bool
		errFunc    func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			name: "change client secret",
			args: args{
				ctx: authz.NewMockContext("orgID", "userID"),
				existing: &model.Project{
					ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"},
					Name:       "ProjectName",
					State:      int32(proj_model.ProjectStateActive),
					Applications: []*model.Application{
						{AppID: "AppID", Name: "Name", OIDCConfig: &model.OIDCConfig{AppID: "AppID", AuthMethodType: 1}},
					}},
				new: &model.OIDCConfig{
					ObjectRoot:   models.ObjectRoot{AggregateID: "AggregateID"},
					AppID:        "AppID",
					ClientSecret: &crypto.CryptoValue{},
				},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:   1,
				eventTypes: []models.EventType{model.OIDCConfigSecretChanged},
			},
		},
		{
			name: "existing project nil",
			args: args{
				ctx:      authz.NewMockContext("orgID", "userID"),
				existing: nil,
				new: &model.OIDCConfig{
					ObjectRoot: models.ObjectRoot{AggregateID: "AggregateID"},
					AppID:      "AppID",
				},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			aggregate, err := OIDCConfigSecretChangedAggregate(tc.args.aggCreator, tc.args.existing, tc.args.new.AppID, tc.args.new.ClientSecret)(tc.args.ctx)
			// Error cases: only the error matters, nothing else is inspected.
			if tc.res.wantErr {
				if !tc.res.errFunc(err) {
					t.Errorf("got wrong err: %v ", err)
				}
				return
			}
			if len(aggregate.Events) != tc.res.eventLen {
				t.Errorf("got wrong event len: expected: %v, actual: %v ", tc.res.eventLen, len(aggregate.Events))
			}
			for i := 0; i < tc.res.eventLen; i++ {
				if aggregate.Events[i].Type != tc.res.eventTypes[i] {
					t.Errorf("got wrong event type: expected: %v, actual: %v ", tc.res.eventTypes[i], aggregate.Events[i].Type.String())
				}
				if aggregate.Events[i].Data == nil {
					t.Errorf("should have data in event")
				}
			}
		})
	}
}
// TestProjectGrantAddedAggregate verifies that ProjectGrantAddedAggregate
// emits a ProjectGrantAdded event for a valid grant and returns a
// precondition-failed error when the project or the grant is nil.
func TestProjectGrantAddedAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.ProjectGrant
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen  int
		eventType models.EventType
		wantErr   bool
		errFunc   func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			name: "projectgrant added ok",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        &model.ProjectGrant{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, GrantID: "GrantID", GrantedOrgID: "OrgID"},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:  1,
				eventType: model.ProjectGrantAdded,
			},
		},
		{
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:  1,
				eventType: model.ProjectGrantAdded,
				wantErr:   true,
				errFunc:   caos_errs.IsPreconditionFailed,
			},
		},
		{
			name: "grant nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:  1,
				eventType: model.ProjectGrantAdded,
				wantErr:   true,
				errFunc:   caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			aggregate, err := ProjectGrantAddedAggregate(tc.args.aggCreator, tc.args.existing, tc.args.new)(tc.args.ctx)
			// Error cases: only the error matters, nothing else is inspected.
			if tc.res.wantErr {
				if !tc.res.errFunc(err) {
					t.Errorf("got wrong err: %v ", err)
				}
				return
			}
			if len(aggregate.Events) != tc.res.eventLen {
				t.Errorf("got wrong event len: expected: %v, actual: %v ", tc.res.eventLen, len(aggregate.Events))
			}
			if aggregate.Events[0].Type != tc.res.eventType {
				t.Errorf("got wrong event type: expected: %v, actual: %v ", tc.res.eventType, aggregate.Events[0].Type.String())
			}
			if aggregate.Events[0].Data == nil {
				t.Errorf("should have data in event")
			}
		})
	}
}
// TestProjectGrantChangedAggregate verifies that ProjectGrantChangedAggregate
// emits a ProjectGrantChanged event when an existing grant's role keys change
// and returns a precondition-failed error when the project or the grant is nil.
func TestProjectGrantChangedAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.ProjectGrant
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen   int
		eventTypes []models.EventType
		wantErr    bool
		errFunc    func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			name: "change project grant",
			args: args{
				ctx: authz.NewMockContext("orgID", "userID"),
				existing: &model.Project{
					ObjectRoot: models.ObjectRoot{AggregateID: "ID"},
					Name:       "ProjectName",
					State:      int32(proj_model.ProjectStateActive),
					Grants: []*model.ProjectGrant{
						{GrantID: "GrantID", GrantedOrgID: "GrantedOrgID", RoleKeys: []string{"Key"}},
					}},
				new: &model.ProjectGrant{
					ObjectRoot:   models.ObjectRoot{AggregateID: "ID"},
					GrantID:      "GrantID",
					GrantedOrgID: "GrantedOrgID",
					RoleKeys:     []string{"KeyChanged"},
				},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:   1,
				eventTypes: []models.EventType{model.ProjectGrantChanged},
			},
		},
		{
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
		{
			name: "projectgrant nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			agg, err := ProjectGrantChangedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
			if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
				t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
			}
			// Error cases set eventLen to 0, so this loop only runs on success.
			for i := 0; i < tt.res.eventLen; i++ {
				if !tt.res.wantErr && agg.Events[i].Type != tt.res.eventTypes[i] {
					t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventTypes[i], agg.Events[i].Type.String())
				}
				if !tt.res.wantErr && agg.Events[i].Data == nil {
					t.Errorf("should have data in event")
				}
			}
			if tt.res.wantErr && !tt.res.errFunc(err) {
				t.Errorf("got wrong err: %v ", err)
			}
		})
	}
}
// TestProjectGrantRemovedAggregate verifies that ProjectGrantRemovedAggregate
// emits a ProjectGrantRemoved event for an existing grant and returns a
// precondition-failed error when the project or the grant is nil.
func TestProjectGrantRemovedAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.ProjectGrant
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen   int
		eventTypes []models.EventType
		wantErr    bool
		errFunc    func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			// Renamed from the copy-pasted "remove app": this test removes a
			// project grant.
			name: "remove project grant",
			args: args{
				ctx: authz.NewMockContext("orgID", "userID"),
				existing: &model.Project{
					ObjectRoot: models.ObjectRoot{AggregateID: "ID"},
					Name:       "ProjectName",
					State:      int32(proj_model.ProjectStateActive),
					Grants: []*model.ProjectGrant{
						{GrantID: "GrantID", GrantedOrgID: "GrantedOrgID"},
					}},
				new: &model.ProjectGrant{
					ObjectRoot:   models.ObjectRoot{AggregateID: "ID"},
					GrantID:      "GrantID",
					GrantedOrgID: "GrantedOrgID",
					RoleKeys:     []string{"KeyChanged"},
				},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:   1,
				eventTypes: []models.EventType{model.ProjectGrantRemoved},
			},
		},
		{
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
		{
			name: "projectgrant nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			agg, err := ProjectGrantRemovedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
			if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
				t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
			}
			for i := 0; i < tt.res.eventLen; i++ {
				if !tt.res.wantErr && agg.Events[i].Type != tt.res.eventTypes[i] {
					t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventTypes[i], agg.Events[i].Type.String())
				}
				if !tt.res.wantErr && agg.Events[i].Data == nil {
					t.Errorf("should have data in event")
				}
			}
			if tt.res.wantErr && !tt.res.errFunc(err) {
				t.Errorf("got wrong err: %v ", err)
			}
		})
	}
}
// TestProjectGrantDeactivatedAggregate verifies that
// ProjectGrantDeactivatedAggregate emits a ProjectGrantDeactivated event for
// an existing grant and returns a precondition-failed error when the project
// or the grant is nil.
func TestProjectGrantDeactivatedAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.ProjectGrant
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen   int
		eventTypes []models.EventType
		wantErr    bool
		errFunc    func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			name: "deactivate project grant",
			args: args{
				ctx: authz.NewMockContext("orgID", "userID"),
				existing: &model.Project{
					ObjectRoot: models.ObjectRoot{AggregateID: "ID"},
					Name:       "ProjectName",
					State:      int32(proj_model.ProjectStateActive),
					Grants: []*model.ProjectGrant{
						{GrantID: "GrantID", GrantedOrgID: "GrantedOrgID"},
					}},
				new: &model.ProjectGrant{
					ObjectRoot:   models.ObjectRoot{AggregateID: "ID"},
					GrantID:      "GrantID",
					GrantedOrgID: "GrantedOrgID",
					RoleKeys:     []string{"KeyChanged"},
				},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:   1,
				eventTypes: []models.EventType{model.ProjectGrantDeactivated},
			},
		},
		{
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
		{
			name: "grant nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			agg, err := ProjectGrantDeactivatedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
			if !tt.res.wantErr && len(agg.Events) != tt.res.eventLen {
				t.Errorf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
			}
			// Error cases set eventLen to 0, so this loop only runs on success.
			for i := 0; i < tt.res.eventLen; i++ {
				if !tt.res.wantErr && agg.Events[i].Type != tt.res.eventTypes[i] {
					t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventTypes[i], agg.Events[i].Type.String())
				}
				if !tt.res.wantErr && agg.Events[i].Data == nil {
					t.Errorf("should have data in event")
				}
			}
			if tt.res.wantErr && !tt.res.errFunc(err) {
				t.Errorf("got wrong err: %v ", err)
			}
		})
	}
}
// TestProjectGrantReactivatedAggregate verifies that reactivating a project
// grant emits exactly one ProjectGrantReactivated event with payload, and
// that a nil project or nil grant is rejected with a precondition-failed error.
func TestProjectGrantReactivatedAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.ProjectGrant
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen   int
		eventTypes []models.EventType
		wantErr    bool
		errFunc    func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			name: "reactivate project grant",
			args: args{
				ctx: authz.NewMockContext("orgID", "userID"),
				existing: &model.Project{
					ObjectRoot: models.ObjectRoot{AggregateID: "ID"},
					Name:       "ProjectName",
					State:      int32(proj_model.ProjectStateInactive),
					Grants: []*model.ProjectGrant{
						{GrantID: "GrantID", GrantedOrgID: "GrantedOrgID"},
					}},
				new: &model.ProjectGrant{
					ObjectRoot:   models.ObjectRoot{AggregateID: "ID"},
					GrantID:      "GrantID",
					GrantedOrgID: "GrantedOrgID",
					RoleKeys:     []string{"KeyChanged"},
				},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:   1,
				eventTypes: []models.EventType{model.ProjectGrantReactivated},
			},
		},
		{
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
		{
			name: "grant nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateInactive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			agg, err := ProjectGrantReactivatedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
			if tt.res.wantErr {
				// Error cases: only the error kind matters; agg may be nil here.
				if !tt.res.errFunc(err) {
					t.Errorf("got wrong err: %v ", err)
				}
				return
			}
			// Guard against an unexpected error; without this check a nil agg
			// would panic on the dereferences below instead of failing the test.
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			// Fatalf (not Errorf) so a short Events slice cannot cause an
			// out-of-range panic in the loop below.
			if len(agg.Events) != tt.res.eventLen {
				t.Fatalf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
			}
			for i := 0; i < tt.res.eventLen; i++ {
				if agg.Events[i].Type != tt.res.eventTypes[i] {
					t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventTypes[i], agg.Events[i].Type.String())
				}
				if agg.Events[i].Data == nil {
					t.Errorf("should have data in event")
				}
			}
		})
	}
}
// TestProjectGrantMemberAddedAggregate verifies that adding a project grant
// member emits exactly one ProjectGrantMemberAdded event with payload, and
// that a nil project or nil member is rejected with a precondition-failed error.
func TestProjectGrantMemberAddedAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.ProjectGrantMember
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen  int
		eventType models.EventType
		wantErr   bool
		errFunc   func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			name: "project grant member added ok",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        &model.ProjectGrantMember{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, GrantID: "GrantID", UserID: "UserID", Roles: []string{"Roles"}},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:  1,
				eventType: model.ProjectGrantMemberAdded,
			},
		},
		{
			// Error cases only need errFunc; event expectations are not
			// evaluated when wantErr is set.
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
		{
			name: "member nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			agg, err := ProjectGrantMemberAddedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
			if tt.res.wantErr {
				// Error cases: only the error kind matters; agg may be nil here.
				if !tt.res.errFunc(err) {
					t.Errorf("got wrong err: %v ", err)
				}
				return
			}
			// Guard against an unexpected error; without this check a nil agg
			// would panic on the dereferences below instead of failing the test.
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			// Fatalf (not Errorf) so an empty Events slice cannot cause an
			// out-of-range panic on agg.Events[0] below.
			if len(agg.Events) != tt.res.eventLen {
				t.Fatalf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
			}
			if agg.Events[0].Type != tt.res.eventType {
				t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventType, agg.Events[0].Type.String())
			}
			if agg.Events[0].Data == nil {
				t.Errorf("should have data in event")
			}
		})
	}
}
// TestProjectGrantMemberChangedAggregate verifies that changing a project
// grant member emits exactly one ProjectGrantMemberChanged event with payload,
// and that a nil project or nil member is rejected with a precondition-failed error.
func TestProjectGrantMemberChangedAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.ProjectGrantMember
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen  int
		eventType models.EventType
		wantErr   bool
		errFunc   func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			name: "project grant member changed ok",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        &model.ProjectGrantMember{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, UserID: "UserID", Roles: []string{"RolesChanged"}},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:  1,
				eventType: model.ProjectGrantMemberChanged,
			},
		},
		{
			// Error cases only need errFunc; event expectations are not
			// evaluated when wantErr is set.
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
		{
			name: "member nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			agg, err := ProjectGrantMemberChangedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
			if tt.res.wantErr {
				// Error cases: only the error kind matters; agg may be nil here.
				if !tt.res.errFunc(err) {
					t.Errorf("got wrong err: %v ", err)
				}
				return
			}
			// Guard against an unexpected error; without this check a nil agg
			// would panic on the dereferences below instead of failing the test.
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			// Fatalf (not Errorf) so an empty Events slice cannot cause an
			// out-of-range panic on agg.Events[0] below.
			if len(agg.Events) != tt.res.eventLen {
				t.Fatalf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
			}
			if agg.Events[0].Type != tt.res.eventType {
				t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventType, agg.Events[0].Type.String())
			}
			if agg.Events[0].Data == nil {
				t.Errorf("should have data in event")
			}
		})
	}
}
// TestProjectGrantMemberRemovedAggregate verifies that removing a project
// grant member emits exactly one ProjectGrantMemberRemoved event with payload,
// and that a nil project or nil member is rejected with a precondition-failed error.
func TestProjectGrantMemberRemovedAggregate(t *testing.T) {
	type args struct {
		ctx        context.Context
		existing   *model.Project
		new        *model.ProjectGrantMember
		aggCreator *models.AggregateCreator
	}
	type res struct {
		eventLen  int
		eventType models.EventType
		wantErr   bool
		errFunc   func(err error) bool
	}
	tests := []struct {
		name string
		args args
		res  res
	}{
		{
			name: "project grant member removed ok",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        &model.ProjectGrantMember{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, UserID: "UserID", Roles: []string{"Roles"}},
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				eventLen:  1,
				eventType: model.ProjectGrantMemberRemoved,
			},
		},
		{
			// Error cases only need errFunc; event expectations are not
			// evaluated when wantErr is set.
			name: "existing project nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
		{
			name: "member nil",
			args: args{
				ctx:        authz.NewMockContext("orgID", "userID"),
				existing:   &model.Project{ObjectRoot: models.ObjectRoot{AggregateID: "ID"}, Name: "ProjectName", State: int32(proj_model.ProjectStateActive)},
				new:        nil,
				aggCreator: models.NewAggregateCreator("Test"),
			},
			res: res{
				wantErr: true,
				errFunc: caos_errs.IsPreconditionFailed,
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			agg, err := ProjectGrantMemberRemovedAggregate(tt.args.aggCreator, tt.args.existing, tt.args.new)(tt.args.ctx)
			if tt.res.wantErr {
				// Error cases: only the error kind matters; agg may be nil here.
				if !tt.res.errFunc(err) {
					t.Errorf("got wrong err: %v ", err)
				}
				return
			}
			// Guard against an unexpected error; without this check a nil agg
			// would panic on the dereferences below instead of failing the test.
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			// Fatalf (not Errorf) so an empty Events slice cannot cause an
			// out-of-range panic on agg.Events[0] below.
			if len(agg.Events) != tt.res.eventLen {
				t.Fatalf("got wrong event len: expected: %v, actual: %v ", tt.res.eventLen, len(agg.Events))
			}
			if agg.Events[0].Type != tt.res.eventType {
				t.Errorf("got wrong event type: expected: %v, actual: %v ", tt.res.eventType, agg.Events[0].Type.String())
			}
			if agg.Events[0].Data == nil {
				t.Errorf("should have data in event")
			}
		})
	}
}
|
package k8sutil_test
import (
"context"
"fmt"
"strconv"
"testing"
"github.com/golang/mock/gomock"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/kinvolk/flatcar-linux-update-operator/pkg/k8sutil"
mock_v1 "github.com/kinvolk/flatcar-linux-update-operator/pkg/k8sutil/mocks"
)
// atomicCounterIncrement returns an update function that reads the node's
// "counter" annotation as an integer and writes it back incremented by one.
// A missing annotation counts as zero; a non-numeric value fails the test.
func atomicCounterIncrement(t *testing.T) func(n *corev1.Node) {
	t.Helper()

	return func(node *corev1.Node) {
		const counterAnno = "counter"

		current := 0
		if raw := node.Annotations[counterAnno]; raw != "" {
			parsed, err := strconv.Atoi(raw)
			if err != nil {
				t.Fatalf("parsing %q to integer: %v", raw, err)
			}
			current = parsed
		}

		node.Annotations[counterAnno] = strconv.Itoa(current + 1)
	}
}
// TestUpdateNodeRetryHandlesConflict verifies that UpdateNodeRetry re-fetches
// the node and re-applies the update function after the API server reports a
// conflict, so a concurrent modification does not lose the increment.
func TestUpdateNodeRetryHandlesConflict(t *testing.T) {
	t.Parallel()
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()
	mockNi := mock_v1.NewMockNodeInterface(ctrl)
	mockNode := &corev1.Node{}
	mockNode.SetName("mock_node")
	mockNode.SetNamespace("default")
	mockNode.SetAnnotations(map[string]string{"counter": "20"})
	mockNode.SetResourceVersion("20")
	mockNi.EXPECT().Get(context.TODO(), "mock_node", metav1.GetOptions{}).Return(mockNode, nil).AnyTimes()
	// Conflict once; mock that a third party incremented the counter from '20'
	// to '21' right after the node is returned
	gomock.InOrder(
		mockNi.EXPECT().Update(context.TODO(), mockNode, metav1.UpdateOptions{}).Do(func(
			ctx context.Context, n *corev1.Node, uo metav1.UpdateOptions) {
			// Fake conflict; the counter was incremented elsewhere; resourceVersion is now 21
			mockNode.SetAnnotations(map[string]string{"counter": "21"})
			mockNode.SetResourceVersion("21")
		},
		).Return(mockNode, errors.NewConflict(schema.GroupResource{}, "mock_node", fmt.Errorf("err"))),
		// And then the successful retry
		mockNi.EXPECT().Update(context.TODO(), mockNode, metav1.UpdateOptions{}).Return(mockNode, nil),
	)
	err := k8sutil.UpdateNodeRetry(context.TODO(), mockNi, "mock_node", atomicCounterIncrement(t))
	if err != nil {
		// Include the actual error so a failure is debuggable; the original
		// message dropped it entirely.
		t.Errorf("unexpected error: %v", err)
	}
	// One lookup instead of two for the final counter value.
	if got := mockNode.Annotations["counter"]; got != "22" {
		t.Errorf("expected the counter to hit 22; was %v", got)
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.