text stringlengths 11 4.05M |
|---|
package services
import (
"KServer/library/kiface/isocket"
"KServer/manage"
"KServer/proto"
"KServer/server/utils/msg"
"fmt"
)
// SocketDiscovery forwards socket requests to backing services through the
// manage facade and reacts to service-discovery notifications.
type SocketDiscovery struct {
	IManage manage.IManage
}

// NewSocketDiscovery returns a SocketDiscovery bound to the given manage facade.
func NewSocketDiscovery(m manage.IManage) *SocketDiscovery {
	d := new(SocketDiscovery)
	d.IManage = m
	return d
}
// PreHandle forwards an incoming socket request to the service responsible
// for its message id. If no service is registered for the id, it replies to
// the client and stops; otherwise it repacks the message (adding client and
// server ids) and publishes it synchronously to the service's Kafka topic.
func (c *SocketDiscovery) PreHandle(request isocket.IRequest) {
if !c.IManage.Discover().CheckService(request.GetMessage().GetId()) {
// No service registered for this message id: notify the client ("no service") and stop.
request.GetConnection().SendBuffMsg([]byte("无服务"))
return
}
// Repack the payload with: message id, msg id, the client id resolved from
// the connection, this server's id, and the original data.
data := c.IManage.Message().DataPack().Pack(request.GetMessage().GetId(), request.GetMessage().GetMsgId(),
c.IManage.Socket().Client().GetIdByConnId(request.GetConnection().GetConnID()),
c.IManage.Server().GetId(), request.GetMessage().GetData())
// Synchronous Kafka send to the topic registered for this message id.
_, _, err := c.IManage.Message().Kafka().Send().Sync(
c.IManage.Discover().GetTopic(request.GetMessage().GetId()),
c.IManage.Server().GetId(), data)
if err != nil {
// Forwarding failed; logged only — the client gets no error reply here.
fmt.Println(request.GetMessage().GetId(), request.GetMessage().GetMsgId(), "转发失败")
}
//fmt.Println("CustomHandle")
}
// PostHandle is invoked after request processing; intentionally a no-op.
func (c *SocketDiscovery) PostHandle(request isocket.IRequest) {
//fmt.Println("CustomHandle2")
}
// DiscoverHandle dispatches a service-center notification to the matching
// handler: register adds the service, close removes it. Unknown msg ids
// are logged and ignored.
func (c *SocketDiscovery) DiscoverHandle(data proto.IDataPack) {
	fmt.Println("收到服务变化", data.GetMsgId())
	msgID := data.GetMsgId()
	switch msgID {
	case msg.ServiceDiscoveryRegister:
		c.ResponseAddService(data)
	case msg.ServiceDiscoveryCloseService:
		c.ResponseDelService(data)
	}
}
// ResponseAddService decodes a Discovery payload from the service center and
// registers the announced service under its id.
func (c *SocketDiscovery) ResponseAddService(data proto.IDataPack) {
	discovery := &proto.Discovery{}
	if err := data.GetData().ProtoBuf(discovery); err != nil {
		// Payload did not decode as a Discovery message.
		fmt.Println("服务发现解析失败")
		return
	}
	c.IManage.Discover().AddService(discovery.Id, discovery)
	fmt.Println(discovery.Id, discovery.Topic, "服务发现添加服务完成")
}
// ResponseDelService decodes a Discovery payload and, if the service is
// currently registered, removes it from the discovery table.
func (c *SocketDiscovery) ResponseDelService(data proto.IDataPack) {
	discovery := &proto.Discovery{}
	if err := data.GetData().ProtoBuf(discovery); err != nil {
		// Payload did not decode as a Discovery message.
		fmt.Println("服务发现解析失败")
		return
	}
	if !c.IManage.Discover().CheckService(discovery.Id) {
		return
	}
	c.IManage.Discover().DelService(discovery.Id)
	fmt.Println(discovery.Id, discovery.Topic, "服务发现删除服务完成")
}
|
//
// Package bitfinex implements the Connector, Ticker and Trader interfaces
// for the Bitfinex websocket API v2
//
package bitfinex
import (
"time"
"sync"
"strings"
"strconv"
"encoding/json"
"github.com/aglyzov/ws-machine"
"github.com/aglyzov/exchange-api"
)
// GetName reports the human-readable name of this connector.
func (*API) GetName() string {
	return "Bitfinex websocket API v2"
}
// Connect is meant to start a connection to the exchange. The state machine
// below is a stub — every branch is TODO and no transition happens yet; the
// method only returns the pre-existing status channel under the API lock.
func (a *API) Connect(key string) (<-chan api.Status, error) {
a.Lock()
defer a.Unlock()
switch a.status.State {
case api.DISCONNECTED: // TODO
case api.CONNECTING: // TODO
case api.CONNECTED: // TODO
case api.WAITING: // TODO
}
return a.statusChan, nil
}
// Disconnect is meant to tear the connection down. The state handling is
// commented out, so this is currently a locked no-op that always succeeds.
func (a *API) Disconnect() error {
a.Lock()
defer a.Unlock()
/*
switch a.status.State {
case api.DISCONNECTED: // TODO
case api.CONNECTING: // TODO
case api.CONNECTED: // TODO
case api.WAITING: // TODO
}
*/
return nil
}
|
package shardmaster
import (
"log"
"sort"
"sync"
"sync/atomic"
"time"
"../labgob"
"../labrpc"
"../raft"
)
// Debug gates this package's diagnostic logging; 0 disables all output.
const Debug = 0

// DPrintf logs a formatted message when debugging is enabled.
// It always returns (0, nil); the named results exist only to mirror
// the log API shape.
func DPrintf(format string, a ...interface{}) (n int, err error) {
	if Debug <= 0 {
		return
	}
	log.Printf(format, a...)
	return
}

// DPrint logs its arguments when debugging is enabled.
func DPrint(v ...interface{}) (n int, err error) {
	if Debug <= 0 {
		return
	}
	log.Print(v...)
	return
}
const (
// TimeoutServerInterval bounds how long startOp waits for Raft to commit
// an operation before giving up and reporting wrongLeader.
TimeoutServerInterval = time.Duration(1 * time.Second)
)
// ShardMaster replicates shard-to-group configurations through Raft.
type ShardMaster struct {
mu sync.Mutex
me int
rf *raft.Raft
applyCh chan raft.ApplyMsg
// Your data here.
killCh chan bool
// applyChanMap routes a committed log index back to the waiting RPC handler.
applyChanMap map[int]ApplyChanMapItem
dead int32 // set by Kill()
// seen records the last operation number applied per client, for dedup.
seen map[int64]int
configs []Config // indexed by config num
}
// Op is the unit committed to the Raft log. Data holds the type-specific
// arguments (JoinArgs, LeaveArgs, MoveArgs, or an int config num for queries).
type Op struct {
// Your data here.
Data interface{}
OperationType string
ClientId int64
ClientOperationNumber int
}
// SMMapItem is the reply sent from the apply loop to a waiting RPC handler.
type SMMapItem struct {
err Err
wrongLeader bool
val []Config
}
// ApplyChanMapItem pairs a reply channel with the client identity expected
// at that log index; a mismatch means leadership changed.
type ApplyChanMapItem struct {
ch chan SMMapItem
expectedClientId int64
expectedClientOperationNumber int
}
// copyConfig duplicates Num and (deeply) Groups of a Config.
// NOTE(review): Shards is deliberately left zeroed here — callers either
// recompute it via rebalance or copy it explicitly via copyShards.
func copyConfig(original Config) Config {
configCopy := Config{}
configCopy.Num = original.Num
configCopy.Groups = copyMap(original.Groups)
return configCopy
}
// copyMap deep-copies a gid -> server-list map so that mutating the result
// (or its slices) never aliases the original.
func copyMap(original map[int][]string) map[int][]string {
	clone := make(map[int][]string, len(original))
	for gid, servers := range original {
		dup := make([]string, len(servers))
		copy(dup, servers)
		clone[gid] = dup
	}
	return clone
}
// Join handles the Join RPC: it proposes adding the new replica groups and
// fills the reply from the committed result. No-op once the server is killed.
func (sm *ShardMaster) Join(args *JoinArgs, reply *JoinReply) {
	if sm.killed() {
		return
	}
	_, wrongLeader, err := sm.startOp(*args, JOIN, args.ClientInfo)
	reply.WrongLeader = wrongLeader
	reply.Err = err
}

// Leave handles the Leave RPC: it proposes removing the listed groups.
func (sm *ShardMaster) Leave(args *LeaveArgs, reply *LeaveReply) {
	if sm.killed() {
		return
	}
	_, wrongLeader, err := sm.startOp(*args, LEAVE, args.ClientInfo)
	reply.WrongLeader = wrongLeader
	reply.Err = err
}

// Move handles the Move RPC: it proposes pinning one shard to one group.
func (sm *ShardMaster) Move(args *MoveArgs, reply *MoveReply) {
	if sm.killed() {
		return
	}
	_, wrongLeader, err := sm.startOp(*args, MOVE, args.ClientInfo)
	reply.WrongLeader = wrongLeader
	reply.Err = err
}
// Query handles the Query RPC, returning the configuration numbered args.Num
// (or the latest one, per processApplyChMessage's rules).
func (sm *ShardMaster) Query(args *QueryArgs, reply *QueryReply) {
	if sm.killed() {
		return
	}
	val, wrongLeader, err := sm.startOp(args.Num, QUERY, args.ClientInfo)
	if len(val) > 0 {
		reply.Config = val[0]
	}
	reply.WrongLeader = wrongLeader
	reply.Err = err
}

// QueryHigher handles the QueryHigher RPC, returning every configuration
// with a number strictly greater than args.Num.
func (sm *ShardMaster) QueryHigher(args *QueryArgs, reply *QueryHigherReply) {
	if sm.killed() {
		return
	}
	val, wrongLeader, err := sm.startOp(args.Num, QUERY_HIGHER, args.ClientInfo)
	reply.Configs = val
	reply.WrongLeader = wrongLeader
	reply.Err = err
}
// startOp submits an operation to Raft and waits for the apply loop to echo
// the committed result back on a per-index channel. Returns (configs,
// wrongLeader, err); wrongLeader is true when this peer is not the leader or
// when the commit did not arrive within TimeoutServerInterval.
func (sm *ShardMaster) startOp(data interface{}, operationType string, clientInfo ClientInformation) ([]Config, bool, Err) {
op := Op{}
op.Data = data
op.OperationType = operationType
op.ClientId = clientInfo.ClientId
op.ClientOperationNumber = clientInfo.ClientOperationNumber
expectedIndex, _, isLeader := sm.rf.Start(op)
if isLeader {
sm.mu.Lock()
DPrintf("%d: listening expectedIndex %d and operation %v", sm.me, expectedIndex, op)
// Register an unbuffered channel under the expected log index; the apply
// loop will use a non-blocking send, so the map entry must exist before
// the commit can arrive.
msgCh := make(chan SMMapItem)
sm.applyChanMap[expectedIndex] = ApplyChanMapItem{ch: msgCh, expectedClientOperationNumber: op.ClientOperationNumber, expectedClientId: op.ClientId}
sm.mu.Unlock()
select {
case <-time.After(TimeoutServerInterval):
DPrintf("%d: timed out waiting for message expectedIndex %d and operation %v", sm.me, expectedIndex, op)
sm.mu.Lock()
defer sm.mu.Unlock()
// Deregister so the apply loop does not send to an abandoned channel.
delete(sm.applyChanMap, expectedIndex)
return []Config{}, true, ""
case msg := <-msgCh:
DPrintf("%d: reply: %v, original op %v", sm.me, msg, op)
return msg.val, msg.wrongLeader, msg.err
}
} else {
return []Config{}, true, ""
}
}
// rebalance reassigns all shards round-robin over the sorted group ids of
// newConfig.Groups. With no groups, every shard maps to the invalid gid 0.
func (sm *ShardMaster) rebalance(newConfig *Config) {
	gids := make([]int, 0, len(newConfig.Groups))
	for gid := range newConfig.Groups {
		gids = append(gids, gid)
	}
	sort.Ints(gids)

	var shards [NShards]int // zero value: every shard owned by gid 0
	if len(gids) > 0 {
		for i := range shards {
			shards[i] = gids[i%len(gids)]
		}
	}
	newConfig.Shards = shards
}
// copyShards copies the shard assignment from oldConfig into newConfig.
// Shards is a fixed-size array, so plain assignment copies every element.
func (sm *ShardMaster) copyShards(newConfig *Config, oldConfig *Config) {
	newConfig.Shards = oldConfig.Shards
}
// joinConfig appends a new configuration that adds the joining groups and
// rebalances all shards. Caller must hold sm.mu.
func (sm *ShardMaster) joinConfig(data interface{}) {
	last := sm.configs[len(sm.configs)-1]
	next := copyConfig(last)
	next.Num++
	args := data.(JoinArgs)
	for gid, servers := range args.Servers {
		next.Groups[gid] = servers
	}
	sm.rebalance(&next)
	sm.configs = append(sm.configs, next)
}

// leaveConfig appends a new configuration without the departing groups and
// rebalances all shards. Caller must hold sm.mu.
func (sm *ShardMaster) leaveConfig(data interface{}) {
	last := sm.configs[len(sm.configs)-1]
	next := copyConfig(last)
	next.Num++
	args := data.(LeaveArgs)
	for _, gid := range args.GIDs {
		delete(next.Groups, gid)
	}
	sm.rebalance(&next)
	sm.configs = append(sm.configs, next)
}

// moveConfig appends a new configuration with one shard reassigned to the
// requested group; all other assignments are carried over unchanged.
func (sm *ShardMaster) moveConfig(data interface{}) {
	last := sm.configs[len(sm.configs)-1]
	next := copyConfig(last)
	next.Num++
	args := data.(MoveArgs)
	sm.copyShards(&next, &last) // carry over the existing shard placement
	next.Shards[args.Shard] = args.GID
	sm.configs = append(sm.configs, next)
}
// modifyConfig applies a state-changing operation exactly once per client
// operation number: duplicates (re-committed retries) are skipped using the
// seen map. Caller must hold sm.mu.
func (sm *ShardMaster) modifyConfig(command Op, commandType string, operation func(interface{})) {
previousOperationNumber, ok := sm.seen[command.ClientId]
if !ok || previousOperationNumber < command.ClientOperationNumber {
DPrintf("%d: commandType %s, %v, before config length %d: %v", sm.me, commandType, command.Data, len(sm.configs), sm.configs[len(sm.configs)-1])
operation(command.Data)
sm.seen[command.ClientId] = command.ClientOperationNumber
DPrintf("%d: commandType %s, %v, after config length %d: %v", sm.me, commandType, command.Data, len(sm.configs), sm.configs[len(sm.configs)-1])
} else {
DPrintf("%d: skipped message id %d %s from %d as we have already seen it. previous seen operation is %d ", sm.me,
command.ClientOperationNumber, commandType, command.ClientId, previousOperationNumber)
}
}
// processApplyChMessage applies one committed Raft message to the state
// machine. Mutating ops (JOIN/LEAVE/MOVE) are deduplicated via modifyConfig
// and return an empty slice; QUERY returns one config, QUERY_HIGHER all
// configs above the requested number. Non-command messages are skipped.
// Caller must hold sm.mu.
func (sm *ShardMaster) processApplyChMessage(msg raft.ApplyMsg) ([]Config, Err) {
if msg.CommandValid {
DPrintf("%d: Got message; commandIndex: %d, isSnapshot: %v; %v", sm.me, msg.CommandIndex, msg.IsSnapshot, msg)
command := msg.Command.(Op)
commandType := command.OperationType
switch commandType {
case JOIN:
sm.modifyConfig(command, commandType, sm.joinConfig)
case LEAVE:
sm.modifyConfig(command, commandType, sm.leaveConfig)
case MOVE:
sm.modifyConfig(command, commandType, sm.moveConfig)
case QUERY:
var convertedData = command.Data.(int)
// -1 or an out-of-range number means "latest configuration".
if convertedData == -1 || convertedData > len(sm.configs)-1 {
return []Config{sm.configs[len(sm.configs)-1]}, OK
} else {
return []Config{sm.configs[convertedData]}, OK
}
case QUERY_HIGHER:
var convertedData = command.Data.(int)
result := []Config{}
for i := convertedData + 1; i < len(sm.configs); i++ {
result = append(result, sm.configs[i])
}
return result, OK
default:
DPrintf("this should not happen!!!!!!!!!!!!!!: %v", msg)
panic("REALLY BAD")
}
} else {
DPrintf("%d: message skipped: %v", sm.me, msg)
}
return []Config{}, OK
}
// getMessages is the long-running apply loop: it drains committed messages
// from Raft, applies them to the state machine, and wakes any RPC handler
// waiting on that log index. It exits when killCh is closed by Kill().
func (sm *ShardMaster) getMessages() {
	for {
		select {
		case msg := <-sm.applyCh:
			sm.mu.Lock()
			val, err := sm.processApplyChMessage(msg)
			// Guard the assertion: non-command messages (e.g. snapshots,
			// CommandValid == false) need not carry an Op, and the previous
			// unconditional msg.Command.(Op) would panic on them.
			command, isOp := msg.Command.(Op)
			index := msg.CommandIndex
			applyChanMapItem, ok := sm.applyChanMap[index]
			sm.mu.Unlock()
			if ok && isOp {
				sm.sendMessageToApplyChanMap(applyChanMapItem, command, val, err)
			}
		case <-sm.killCh:
			return
		}
	}
}
// sendMessageToApplyChanMap delivers a committed result to the RPC handler
// registered for this log index. If the committed op's client identity does
// not match what the handler registered, a different op won the slot (we lost
// leadership), so wrongLeader is reported instead. The send is non-blocking:
// if the handler already timed out and stopped listening, the result is dropped.
func (sm *ShardMaster) sendMessageToApplyChanMap(applyChanMapItem ApplyChanMapItem, command Op, val []Config, err Err) {
messageCh := applyChanMapItem.ch
expectedClientId := applyChanMapItem.expectedClientId
expectedClientOperationNumber := applyChanMapItem.expectedClientOperationNumber
var msg SMMapItem
if command.ClientId != expectedClientId || command.ClientOperationNumber != expectedClientOperationNumber {
DPrintf("%d: No Longer leader", sm.me)
msg = SMMapItem{val: []Config{}, wrongLeader: true, err: err}
} else {
msg = SMMapItem{val: val, wrongLeader: false, err: err}
}
select {
case messageCh <- msg:
return
default:
DPrintf("%d: tried to send message %v: %v to apply channel, but it was not available for listening", sm.me, expectedClientId, expectedClientOperationNumber)
}
}
//
// the tester calls Kill() when a ShardMaster instance won't
// be needed again. you are not required to do anything
// in Kill(), but it might be convenient to (for example)
// turn off debug output from this instance.
//
func (sm *ShardMaster) Kill() {
sm.rf.Kill()
// Your code here, if desired.
// Mark dead for killed() checks, then close killCh to stop getMessages.
atomic.StoreInt32(&sm.dead, 1)
close(sm.killCh)
}
// killed reports whether Kill has been called on this instance.
func (sm *ShardMaster) killed() bool {
z := atomic.LoadInt32(&sm.dead)
return z == 1
}
// needed by shardkv tester
func (sm *ShardMaster) Raft() *raft.Raft {
return sm.rf
}
//
// servers[] contains the ports of the set of
// servers that will cooperate via Raft to
// form the fault-tolerant shardmaster service.
// me is the index of the current server in servers[].
//
func StartServer(servers []*labrpc.ClientEnd, me int, persister *raft.Persister) *ShardMaster {
sm := new(ShardMaster)
sm.me = me
// Config 0 is the initial empty configuration (no groups, all shards gid 0).
sm.configs = make([]Config, 1)
sm.configs[0].Groups = map[int][]string{}
// Register every concrete type that travels through the Raft log, so
// labgob can encode/decode Op.Data.
labgob.Register(Op{})
labgob.Register(JoinArgs{})
labgob.Register(LeaveArgs{})
labgob.Register(MoveArgs{})
sm.applyCh = make(chan raft.ApplyMsg)
sm.rf = raft.Make(servers, me, persister, sm.applyCh)
// Your code here.
sm.killCh = make(chan bool)
sm.applyChanMap = make(map[int]ApplyChanMapItem)
sm.seen = make(map[int64]int)
// Background apply loop; stopped via killCh in Kill().
go func() {
sm.getMessages()
}()
return sm
}
|
package main
import "fmt"
// main prints a fixed sequence of greeting lines.
func main() {
	fmt.Println("Hello Golang!")
	for i := 2; i <= 8; i++ {
		fmt.Printf("Hello %d!\n", i)
	}
}
|
package hello
import (
"fmt"
"github.com/TeamChii/hello-lambda/common"
"github.com/labstack/echo/v4"
"go.uber.org/zap"
)
// Servicer is the contract for the hello service.
// NOTE(review): this interface declares HelloService(c echo.Context), but the
// concrete Service below implements HelloService(c, req *HelloRequest), so
// *Service does NOT satisfy Servicer as written — confirm which signature the
// callers expect and align the two.
type Servicer interface {
HelloService(c echo.Context) (*HelloResponse, error)
}
// Service implements the hello handlers with structured logging.
type Service struct {
logger *zap.Logger
}
// NewService returns a Service that logs through the given logger.
func NewService(logger *zap.Logger) *Service {
return &Service{logger: logger}
}
// HelloService logs the invocation and returns a success envelope greeting
// the caller by req.Name. It never fails.
func (s *Service) HelloService(c echo.Context, req *HelloRequest) (*HelloResponse, error) {
	s.logger.Info("process HelloService")
	header := common.HeaderResp{
		Code:    common.StatusCdSuccess,
		Message: common.StatusDescMap[common.StatusCdSuccess],
	}
	resp := &HelloResponse{
		HeaderResp: header,
		Msg:        fmt.Sprintf("hello %v", req.Name),
	}
	return resp, nil
}
|
// Original work Copyright 2018 Twitch Interactive, Inc. All Rights Reserved.
// Modified work Copyright 2018 MyGnar, Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may not
// use this file except in compliance with the License. A copy of the License is
// located at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// or in the "license" file accompanying this file. This file is distributed on
// an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
// Adapted from https://github.com/twitchtv/twirp/tree/master/example
// and https://github.com/twitchtv/twirp/issues/70
package main
import (
"context"
"io"
"log"
"time"
"github.com/gnarbox/twirpjs/example/twirper"
"github.com/twitchtv/twirp"
)
// sendRepeatResps streams req.NumRepeats copies of req.Message onto
// respStream, pacing each by req.DelayMs milliseconds. If req.ErrAfter is
// non-zero, a twirp error is emitted after that many messages instead.
// The stream channel is always closed on return.
func sendRepeatResps(ctx context.Context, req *twirper.RepeatReq, respStream chan twirper.RepeatRespOrError) {
defer close(respStream)
repeated := int32(0)
lastTime := time.Now()
for {
// Client asked us to fail after ErrAfter messages.
if req.ErrAfter != 0 && repeated == req.ErrAfter {
err := twirp.NewError(twirp.Unknown, `you wanted this error`)
err = err.WithMeta(`extra_info`, `goes in meta`)
log.Printf("(sendRepeatResps) Client requested an error, returning error %#v", err)
respStream <- twirper.RepeatRespOrError{Err: err}
return
}
if repeated == req.NumRepeats {
log.Printf("(sendRepeatResps) Closing stream")
return
}
repeated++
// With no delay requested, use an already-closed channel so the select
// below fires immediately (still honoring ctx cancellation).
var delay <-chan time.Time
if req.DelayMs == 0 {
dd := make(chan time.Time)
close(dd)
delay = dd
} else {
delay = time.After(time.Duration(req.DelayMs) * time.Millisecond)
}
select {
case <-ctx.Done():
err := errAborted(ctx.Err())
log.Printf(
`(sendRepeatResps) Context canceled, returning error "%+v"`+
" (Note: this error goes nowhere because the connection is closed)\n",
err,
)
respStream <- twirper.RepeatRespOrError{Err: err}
case <-delay:
// DelayedMs reports the observed gap since the previous send.
resp := &twirper.RepeatResp{
Message: req.Message,
DelayedMs: time.Since(lastTime).Nanoseconds() / 1000000,
ID: repeated,
}
lastTime = time.Now()
log.Printf("(sendRepeatResps) Sending %#v", *resp)
respStream <- twirper.RepeatRespOrError{Msg: resp}
}
}
}
// repeatRespStream is the pull-based variant of the Repeat stream: each Next
// call produces one response, with progress tracked between calls.
type repeatRespStream struct {
	req      *twirper.RepeatReq
	lastTime time.Time
	repeated int32
}

// newRepeatRespStream starts a stream for req with the clock set to now.
func newRepeatRespStream(req *twirper.RepeatReq) *repeatRespStream {
	return &repeatRespStream{
		req:      req,
		lastTime: time.Now(),
		repeated: 0,
	}
}
// Next returns the next streamed response, the client-requested twirp error
// once ErrAfter messages have been produced, or io.EOF when NumRepeats is
// reached. Each call waits DelayMs milliseconds unless ctx is canceled first.
func (rs *repeatRespStream) Next(ctx context.Context) (*twirper.RepeatResp, error) {
if rs.req.ErrAfter != 0 && rs.repeated == rs.req.ErrAfter {
err := twirp.NewError(twirp.Unknown, `you wanted this error`)
err = err.WithMeta(`extra_info`, `goes in meta`)
log.Printf("(sendRepeatResps) Client requested an error, returning error %#v", err)
return nil, err
}
if rs.repeated == rs.req.NumRepeats {
log.Printf("(sendRepeatResps) Returning %#v", io.EOF)
return nil, io.EOF
}
rs.repeated++
// With no delay requested, a pre-closed channel makes the select fire
// immediately while still honoring ctx cancellation.
var delay <-chan time.Time
if rs.req.DelayMs == 0 {
dd := make(chan time.Time)
close(dd)
delay = dd
} else {
delay = time.After(time.Duration(rs.req.DelayMs) * time.Millisecond)
}
select {
case <-ctx.Done():
err := errAborted(ctx.Err())
log.Printf(
`(sendRepeatResps) Context canceled, returning error "%+v"`+
" (Note: this error goes nowhere because the connection is closed)\n",
err,
)
return nil, err
// // Things get really weird if you don't return an error...
// return &twirper.RepeatResp{Message: err.Error()}, nil
case <-delay:
// DelayedMs reports the observed gap since the previous response.
resp := &twirper.RepeatResp{
Message: rs.req.Message,
DelayedMs: time.Since(rs.lastTime).Nanoseconds() / 1000000,
ID: rs.repeated,
}
rs.lastTime = time.Now()
log.Printf("(sendRepeatResps) Returning %#v", *resp)
return resp, nil
}
}
// For a sender, End will be called by generated code when we're going to stop
// sending messages for any reason: either we have received nil, io.EOF from a
// call to Next, or we have to shut down for some other reason (like the
// receiver went away).
// [from https://github.com/twitchtv/twirp/issues/70#issuecomment-361454005]
// This implementation only logs the terminating error.
func (rs *repeatRespStream) End(err error) {
log.Printf("(repeatRespStream#End) Stream ended with %#v\n", err)
}
// errAborted wraps err as a twirp Aborted error; a nil cause becomes a
// generic "canceled" error tagged with an unknown cause.
func errAborted(err error) error {
	if err != nil {
		return twirp.NewError(twirp.Aborted, err.Error())
	}
	return twirp.NewError(twirp.Aborted, `canceled`).WithMeta(`cause`, `unknown`)
}
|
package main
import (
"fmt"
"go-taylor/calculator"
)
// main reads a float64 x and prints several Taylor-series evaluations from
// the calculator package, plus tan/cot derived from sin and cos.
func main() {
var x float64
fmt.Print("x = ")
fmt.Scan(&x)
fmt.Print("e ^ x = ")
fmt.Println(calculator.Exp(x))
fmt.Print("ln x = ")
fmt.Println(calculator.Ln(x))
fmt.Print("e ^ (-x^2) = ")
fmt.Println(calculator.Norm(x))
fmt.Print("sin(x) = ")
fmt.Println(calculator.Sin(x))
fmt.Print("cos(x) = ")
fmt.Println(calculator.Cos(x))
// tan/cot are computed as ratios; when the denominator is zero the
// float64 division yields ±Inf or NaN rather than an error.
fmt.Print("tan(x) = ")
fmt.Println(calculator.Sin(x) / calculator.Cos(x))
fmt.Print("cot(x) = ")
fmt.Println(calculator.Cos(x) / calculator.Sin(x))
}
|
package handler
import (
"context"
"errors"
"fmt"
"time"
"github.com/jinmukeji/jiujiantang-services/jinmuid/mysqldb"
proto "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/user/v1"
)
// ModifySecureEmail changes a user's secure (recovery) email address.
// It authenticates the caller from the request token, checks that the old
// email, user id and token all belong to the same user, validates both email
// formats, verifies the code sent to the new address and the verification
// number issued for the old one, then persists the new secure email and marks
// both credentials as consumed. Returns a wrapped service error on any failure.
func (j *JinmuIDService) ModifySecureEmail(ctx context.Context, req *proto.ModifySecureEmailRequest, resp *proto.ModifySecureEmailResponse) error {
	token, ok := TokenFromContext(ctx)
	if !ok {
		return NewError(ErrInvalidUser, errors.New("failed to get token from context"))
	}
	userID, err := j.datastore.FindUserIDByToken(ctx, token)
	if err != nil {
		return NewError(ErrUserUnauthorized, fmt.Errorf("failed to get userID by token: %s", err.Error()))
	}
	user, errFindUserByEmail := j.datastore.FindUserByEmail(ctx, req.OldEmail)
	if errFindUserByEmail != nil {
		return NewError(ErrDatabase, fmt.Errorf("failed to find user by old email %s: %s", req.OldEmail, errFindUserByEmail.Error()))
	}
	// Token, user_id and old email must all resolve to the same user.
	if userID != req.UserId || user.UserID != req.UserId {
		return NewError(ErrInvalidUser, fmt.Errorf("token or phone or userID from request do not belong to current user %d", userID))
	}
	// Validate the old email format.
	if !checkEmailFormat(req.OldEmail) {
		return NewError(ErrInvalidEmailAddress, fmt.Errorf("old email %s format is invalid", req.OldEmail))
	}
	// Validate the new email format.
	if !checkEmailFormat(req.NewEmail) {
		return NewError(ErrInvalidEmailAddress, fmt.Errorf("new email %s format is invalid", req.NewEmail))
	}
	// The new secure email must differ from the old one.
	if req.NewEmail == req.OldEmail {
		return NewError(ErrSameEmail, fmt.Errorf("new email %s shouldn't be same as the old one", req.NewEmail))
	}
	// The old email must be the currently bound secure email of this user.
	user, errFindUserBySecureEmail := j.datastore.FindUserBySecureEmail(ctx, req.OldEmail)
	if errFindUserBySecureEmail != nil {
		return NewError(ErrNoneExistSecureEmail, fmt.Errorf("failed to find user by old email %s: %s", req.OldEmail, errFindUserBySecureEmail.Error()))
	}
	if user.UserID != req.UserId {
		return NewError(ErrSecureEmailAddressNotMatched, fmt.Errorf("secure email doesn't belong to current user %d", req.UserId))
	}
	if !user.HasSetEmail {
		return NewError(ErrSecureEmailNotSet, fmt.Errorf("old email %s has not been set", req.OldEmail))
	}
	if user.SecureEmail == req.NewEmail {
		return NewError(ErrSameEmail, fmt.Errorf("new email %s shouldn't be same as the old one", req.NewEmail))
	}
	// The new email must not already be anyone's secure email.
	hasSetSecureEmailByAnyone, errHasSet := j.datastore.HasSetSecureEmailByAnyone(ctx, req.NewEmail)
	if errHasSet != nil {
		// Previously this error was silently discarded, which made a
		// datastore failure indistinguishable from "email is available".
		return NewError(ErrDatabase, fmt.Errorf("failed to check whether secure email %s is used: %s", req.NewEmail, errHasSet.Error()))
	}
	if hasSetSecureEmailByAnyone {
		return NewError(ErrSecureEmailUsedByOthers, fmt.Errorf("the secure email %s has been used", req.NewEmail))
	}
	// Check the verification code sent to the new email.
	record, errVerifyMVC := j.datastore.VerifyMVCBySecureEmail(ctx, req.NewSerialNumber, req.NewVerificationCode, req.NewEmail)
	if errVerifyMVC != nil {
		return NewError(ErrInvalidVcRecord, fmt.Errorf("failed to verify MVC by secure email %s: %s", req.NewEmail, errVerifyMVC.Error()))
	}
	// The code must be unexpired and unused.
	if record.ExpiredAt.Before(time.Now()) || record.HasUsed {
		return NewError(ErrExpiredVcRecord, errors.New("expired vc record"))
	}
	// Check the verification number issued for the old email.
	isValid, errVerifyVerificationNumberByEmail := j.datastore.VerifyVerificationNumberByEmail(ctx, req.OldVerificationNumber, req.OldEmail)
	if errVerifyVerificationNumberByEmail != nil || !isValid {
		return NewError(ErrInvalidVerificationNumber, fmt.Errorf("failed to verify verification number %s by email %s", req.OldVerificationNumber, req.OldEmail))
	}
	// TODO: the three writes below (set secure email, consume the code,
	// consume the verification number) should run inside one transaction.
	errSetSecureEmailByUserID := j.datastore.SetSecureEmailByUserID(ctx, user.UserID, req.NewEmail)
	if errSetSecureEmailByUserID != nil {
		return NewError(ErrDatabase, fmt.Errorf("failed to set secure email by user %d: %s", user.UserID, errSetSecureEmailByUserID.Error()))
	}
	// Mark the new email's verification code as used.
	errModifyVcRecordStatusByEmail := j.datastore.ModifyVcRecordStatusByEmail(ctx, req.NewEmail, req.NewVerificationCode, req.NewSerialNumber)
	if errModifyVcRecordStatusByEmail != nil {
		return NewError(ErrDatabase, fmt.Errorf("failed to modify vc record status by email %s: %s", req.NewEmail, errModifyVcRecordStatusByEmail.Error()))
	}
	// Mark the old email's verification number as used.
	errSetVerificationNumberAsUsed := j.datastore.SetVerificationNumberAsUsed(ctx, mysqldb.VerificationEmail, req.OldVerificationNumber)
	if errSetVerificationNumberAsUsed != nil {
		return NewError(ErrDatabase, fmt.Errorf("failed to set verification number of email to the status of used %s: %s", req.OldVerificationNumber, errSetVerificationNumberAsUsed.Error()))
	}
	return nil
}
|
// Custom logger for printing error lists together with the filename and line
// number of the originating code, implemented on top of Go's own log package.
package main
import (
"log"
"os"
"strings"
)
// printlnFn names the shape of a log.Logger print function.
// NOTE(review): appears unused in the visible code (Print binds methods
// directly); confirm before removing.
type printlnFn func(*log.Logger, ...interface{})
// codeLogger writes to stderr with no prefix and no timestamp flags.
var codeLogger = log.New(os.Stderr, "", 0)
// Print pretty-prints the given error list.
// Print writes every error in the list to the code logger, indenting
// continuation lines of the position text so they align under the severity
// label. Fatal-severity entries go through Fatalln, which exits the process.
func (e ErrorList) Print() {
	for _, err := range e {
		emit := codeLogger.Println
		if err.sev == ESFatal {
			emit = codeLogger.Fatalln
		}
		sev := err.sev.String()
		indent := "\n" + strings.Repeat(" ", len(sev))
		pos := strings.Replace(err.pos.String(), "\n", indent, -1)
		emit(sev + pos + err.s)
	}
}
|
package ksqlparser
import "fmt"
// Error builds a parse error naming what was expected, with the line/column
// position and the SQL consumed so far (caret marks the failure point).
func (p *parser) Error(expected string) error {
return fmt.Errorf("expected %s at line %d col %d, %s^", expected, p.line, p.col, p.sql[:p.i])
}
// SyntaxError builds a generic syntax error at the current position, showing
// the SQL consumed so far.
func (p *parser) SyntaxError() error {
return fmt.Errorf("syntax error at line %d col %d, %s^", p.line, p.col, p.sql[:p.i])
}
|
package app
import (
"context"
"testing"
"github.com/skos-ninja/truelayer-tech/svc/pokemon/services/pokeapi"
"github.com/skos-ninja/truelayer-tech/svc/pokemon/services/shakespeare/test"
"github.com/stretchr/testify/assert"
)
// TestGetShakespearePokemonDescriptionNotFound: an unknown species surfaces
// pokeapi.ErrSpeciesNotFound and an empty description.
func TestGetShakespearePokemonDescriptionNotFound(t *testing.T) {
	ctx := context.Background()
	app := newTestApp(false, false)
	got, gotErr := app.GetShakespearePokemonDescription(ctx, "test")
	assert.Empty(t, got)
	assert.Equal(t, pokeapi.ErrSpeciesNotFound, gotErr)
}

// TestGetShakespearePokemonDescriptionFailedTranslation: a translation
// failure is propagated and the description stays empty.
func TestGetShakespearePokemonDescriptionFailedTranslation(t *testing.T) {
	ctx := context.Background()
	app := newTestApp(true, false)
	got, gotErr := app.GetShakespearePokemonDescription(ctx, "test")
	assert.Empty(t, got)
	assert.Equal(t, test.ErrExpected, gotErr)
}

// TestGetShakespearePokemonDescriptionSuccess: the happy path returns the
// translated text with no error.
func TestGetShakespearePokemonDescriptionSuccess(t *testing.T) {
	ctx := context.Background()
	app := newTestApp(true, true)
	got, gotErr := app.GetShakespearePokemonDescription(ctx, "test")
	assert.Nil(t, gotErr)
	assert.Equal(t, test.TranslatedText, got)
}
|
package trace
import (
"context"
"github.com/labstack/echo"
opentracing "github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
"net/http"
"net/url"
)
// mwOptions bundles the hooks used by the tracing middleware: how to name an
// operation, how to observe a started span, how to tag the URL, and the
// component name reported on every span.
type mwOptions struct {
opNameFunc func(r *http.Request) string
spanObserver func(span opentracing.Span, r *http.Request)
urlTagFunc func(u *url.URL) string
componentName string
}
// OpenTracing returns echo middleware that opens a span per request, tags it
// with method/URL/component/status, injects the span context into the request
// headers, and stores a span-carrying context on the echo context under
// "SpanContext" for downstream handlers.
func OpenTracing(componentName string) echo.MiddlewareFunc {
	return func(next echo.HandlerFunc) echo.HandlerFunc {
		return func(c echo.Context) error {
			r := c.Request()
			tracer := opentracing.GlobalTracer()
			// NOTE(review): only componentName and urlTagFunc are consulted
			// below; opNameFunc/spanObserver are built but never called.
			opts := mwOptions{
				componentName: componentName,
				opNameFunc: func(r *http.Request) string {
					return "HTTP " + r.Method + " " + r.URL.Path
				},
				spanObserver: func(span opentracing.Span, r *http.Request) {},
				urlTagFunc: func(u *url.URL) string {
					return u.String()
				},
			}
			sp := tracer.StartSpan(r.URL.String())
			defer sp.Finish()
			ext.HTTPMethod.Set(sp, r.Method)
			ext.HTTPUrl.Set(sp, opts.urlTagFunc(r.URL))
			ext.Component.Set(sp, opts.componentName)
			err := tracer.Inject(sp.Context(), opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(r.Header))
			if err != nil {
				// NOTE(review): panicking in middleware takes down the worker;
				// consider logging and continuing instead.
				panic("SpanContext Inject Error!")
			}
			ctx := context.WithValue(context.Background(), "test", "text")
			spanContext := opentracing.ContextWithSpan(ctx, sp)
			r = r.WithContext(spanContext)
			c.SetRequest(r)
			c.Set("SpanContext", opentracing.ContextWithSpan(spanContext, sp))
			sp.SetTag("request.id", r.Header.Get("C-Request-ID"))
			// BUG FIX: the error tag was previously set to false
			// unconditionally after the handler ran, clobbering the true
			// value set on failure. Tag exactly once, based on the outcome.
			if err := next(c); err != nil {
				sp.SetTag("error", true)
				c.Error(err)
			} else {
				sp.SetTag("error", false)
			}
			ext.HTTPStatusCode.Set(sp, uint16(c.Response().Status))
			return nil
		}
	}
}
|
package clair
import (
"strconv"
"strings"
"github.com/coreos/clair/api/v1"
"github.com/coreos/pkg/capnslog"
"github.com/ContinuousSecurityTooling/clairctl/xstrings"
"github.com/spf13/viper"
"net/http"
)
var log = capnslog.NewPackageLogger("github.com/ContinuousSecurityTooling/clairctl", "clair")
// Package-level Clair endpoint state, populated by Config().
var uri string
var headers map[string]string
var host string
var healthURI string
//ImageAnalysis Full image analysis: the image coordinates plus the Clair
// layer envelopes returned for it.
type ImageAnalysis struct {
Registry, ImageName, Tag string
Layers []v1.LayerEnvelope
}
// String renders the analysis as registry/image:tag.
func (imageAnalysis ImageAnalysis) String() string {
return imageAnalysis.Registry + "/" + imageAnalysis.ImageName + ":" + imageAnalysis.Tag
}
//MostRecentLayer returns the most recent layer of an ImageAnalysis object.
// NOTE(review): this indexes Layers[0] unconditionally and will panic on an
// empty analysis — confirm callers guarantee at least one layer.
func (imageAnalysis ImageAnalysis) MostRecentLayer() v1.LayerEnvelope {
return imageAnalysis.Layers[0]
}
// fmtURI appends a non-zero port to u and prefixes "http://" unless the
// string already carries an http/https scheme.
func fmtURI(u string, port int) string {
	uri := u
	if port != 0 {
		uri += ":" + strconv.Itoa(port)
	}
	if strings.HasPrefix(uri, "http://") || strings.HasPrefix(uri, "https://") {
		return uri
	}
	return "http://" + uri
}
// ShortName abbreviates a layer name to its first 12 characters, mirroring
// Docker's short-id convention.
func (imageAnalysis ImageAnalysis) ShortName(l v1.Layer) string {
return xstrings.Substr(l.Name, 0, 12)
}
//Config configure Clair from configFile: it derives the API and health
// endpoints from the configured URI/ports and loads report settings plus the
// extra request headers/host override into package-level state.
func Config() {
uri = fmtURI(viper.GetString("clair.uri"), viper.GetInt("clair.port")) + "/v1"
healthURI = fmtURI(viper.GetString("clair.uri"), viper.GetInt("clair.healthPort")) + "/health"
Report.Path = viper.GetString("clair.report.path")
Report.Format = viper.GetString("clair.report.format")
headers = viper.GetStringMapString("clair.request.headers")
host = viper.GetString("clair.request.host")
}
// SetRequestHeaders applies the configured Host override and extra headers
// to an outgoing Clair request. Config() must have run first.
func SetRequestHeaders(request *http.Request) {
request.Host = host
for name, value := range headers {
request.Header.Add(name, value)
}
}
|
package lark
import "fhyx.online/lark-api-go/client"
// AuthContactResponse is the Lark auth-contact API payload: the embedded
// client.Error status plus the authorized department/employee/open id lists.
type AuthContactResponse struct {
client.Error
Data struct {
AuthedDepartments []string `json:"authed_departments"`
AuthedEmployeeIDs []string `json:"authed_employee_ids"`
AuthedOpenIDs []string `json:"authed_open_ids"`
} `json:"data"`
}
// GetDepartments returns the authorized department ids.
func (acr *AuthContactResponse) GetDepartments() []string {
return acr.Data.AuthedDepartments
}
// GetEmployeeIDs returns the authorized employee ids.
func (acr *AuthContactResponse) GetEmployeeIDs() []string {
return acr.Data.AuthedEmployeeIDs
}
// GetOpenIDs returns the authorized open ids.
func (acr *AuthContactResponse) GetOpenIDs() []string {
return acr.Data.AuthedOpenIDs
}
|
package main
import (
"bytes"
"encoding/binary"
"fmt"
"log"
)
// main demonstrates big- and little-endian encoding of an int64 with
// encoding/binary, printing the byte layout and the round-tripped value.
func main() {
	var num int64 = 1596 // 64-bit integer, 8 bytes (0x063C)
	var buf bytes.Buffer

	// Big-endian: most significant byte first.
	if err := binary.Write(&buf, binary.BigEndian, num); err != nil {
		log.Fatal(err) // was log.Fatal() with no args — the error was dropped
	}
	// Renamed from "bytes", which shadowed the imported bytes package.
	data := buf.Bytes()
	fmt.Println(data) // [0 0 0 0 0 0 6 60]

	var decodedNum int64
	if err := binary.Read(&buf, binary.BigEndian, &decodedNum); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decodedNum) // 1596

	buf.Reset()

	// Little-endian: least significant byte first.
	if err := binary.Write(&buf, binary.LittleEndian, num); err != nil {
		log.Fatal(err)
	}
	data = buf.Bytes()
	fmt.Println(data) // [60 6 0 0 0 0 0 0]
	if err := binary.Read(&buf, binary.LittleEndian, &decodedNum); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decodedNum) // 1596
}
|
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import (
"bytes"
"fmt"
"net/url"
"strconv"
"strings"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/tidb/parser/auth"
"github.com/pingcap/tidb/parser/format"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
)
// Compile-time assertions that every statement type implements StmtNode and
// every auxiliary type implements Node.
var (
_ StmtNode = &AdminStmt{}
_ StmtNode = &AlterUserStmt{}
_ StmtNode = &BeginStmt{}
_ StmtNode = &BinlogStmt{}
_ StmtNode = &CommitStmt{}
_ StmtNode = &CreateUserStmt{}
_ StmtNode = &DeallocateStmt{}
_ StmtNode = &DoStmt{}
_ StmtNode = &ExecuteStmt{}
_ StmtNode = &ExplainStmt{}
_ StmtNode = &GrantStmt{}
_ StmtNode = &PrepareStmt{}
_ StmtNode = &RollbackStmt{}
_ StmtNode = &SetPwdStmt{}
_ StmtNode = &SetRoleStmt{}
_ StmtNode = &SetDefaultRoleStmt{}
_ StmtNode = &SetStmt{}
_ StmtNode = &SetSessionStatesStmt{}
_ StmtNode = &UseStmt{}
_ StmtNode = &FlushStmt{}
_ StmtNode = &KillStmt{}
_ StmtNode = &CreateBindingStmt{}
_ StmtNode = &DropBindingStmt{}
_ StmtNode = &SetBindingStmt{}
_ StmtNode = &ShutdownStmt{}
_ StmtNode = &RestartStmt{}
_ StmtNode = &RenameUserStmt{}
_ StmtNode = &HelpStmt{}
_ StmtNode = &PlanReplayerStmt{}
_ StmtNode = &CompactTableStmt{}
_ StmtNode = &SetResourceGroupStmt{}
_ Node = &PrivElem{}
_ Node = &VariableAssignment{}
)
// Isolation level constants (plus binlog node types PUMP/DRAINER).
const (
ReadCommitted = "READ-COMMITTED"
ReadUncommitted = "READ-UNCOMMITTED"
Serializable = "SERIALIZABLE"
RepeatableRead = "REPEATABLE-READ"
PumpType = "PUMP"
DrainerType = "DRAINER"
)
// Transaction mode constants.
const (
Optimistic = "OPTIMISTIC"
Pessimistic = "PESSIMISTIC"
)
// TypeOpt is used for parsing data type option from SQL.
type TypeOpt struct {
IsUnsigned bool
IsZerofill bool
}
// FloatOpt is used for parsing floating-point type option from SQL.
// See http://dev.mysql.com/doc/refman/5.7/en/floating-point-types.html
type FloatOpt struct {
Flen int // display length
Decimal int // digits after the decimal point
}
// AuthOption is used for parsing create use statement.
type AuthOption struct {
// ByAuthString set as true, if AuthString is used for authorization. Otherwise, authorization is done by HashString.
ByAuthString bool
AuthString string
ByHashString bool
HashString string
AuthPlugin string
}
// Restore implements Node interface. It writes the IDENTIFIED clause of a
// CREATE/ALTER USER statement: optional plugin, then either a plaintext
// password (BY) or a pre-hashed one (AS).
func (n *AuthOption) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("IDENTIFIED")
	if n.AuthPlugin != "" {
		ctx.WriteKeyWord(" WITH ")
		ctx.WriteString(n.AuthPlugin)
	}
	switch {
	case n.ByAuthString:
		ctx.WriteKeyWord(" BY ")
		ctx.WriteString(n.AuthString)
	case n.ByHashString:
		ctx.WriteKeyWord(" AS ")
		ctx.WriteString(n.HashString)
	}
	return nil
}
// TraceStmt is a statement to trace what sql actually does at background.
type TraceStmt struct {
	stmtNode

	// Stmt is the statement being traced.
	Stmt StmtNode
	// Format is the output format of TRACE (e.g. "row", "json").
	Format string

	// TracePlan is set for `TRACE PLAN <stmt>`.
	TracePlan bool
	// TracePlanTarget is the optional TARGET of TRACE PLAN.
	TracePlanTarget string
}
// Restore implements Node interface. The PLAN form takes precedence over the
// FORMAT clause; FORMAT is only emitted when it differs from the default "row".
func (n *TraceStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("TRACE ")
	switch {
	case n.TracePlan:
		ctx.WriteKeyWord("PLAN ")
		if target := n.TracePlanTarget; target != "" {
			ctx.WriteKeyWord("TARGET")
			ctx.WritePlain(" = ")
			ctx.WriteString(target)
			ctx.WritePlain(" ")
		}
	case n.Format != "row":
		ctx.WriteKeyWord("FORMAT")
		ctx.WritePlain(" = ")
		ctx.WriteString(n.Format)
		ctx.WritePlain(" ")
	}
	if err := n.Stmt.Restore(ctx); err != nil {
		return errors.Annotate(err, "An error occurred while restore TraceStmt.Stmt")
	}
	return nil
}
// Accept implements Node Accept interface: it enters this node, then walks
// the traced statement and stores the (possibly rewritten) child back.
func (n *TraceStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	stmt := raw.(*TraceStmt)
	child, ok := stmt.Stmt.Accept(v)
	if !ok {
		return stmt, false
	}
	stmt.Stmt = child.(StmtNode)
	return v.Leave(stmt)
}
// ExplainForStmt is a statement to provide information about how a SQL
// statement is executing in connection #ConnectionID.
// See https://dev.mysql.com/doc/refman/5.7/en/explain.html
type ExplainForStmt struct {
	stmtNode

	// Format is the requested EXPLAIN output format.
	Format string
	// ConnectionID identifies the connection whose running statement is explained.
	ConnectionID uint64
}
// Restore implements Node interface. Emits
// `EXPLAIN FORMAT = '<fmt>' FOR CONNECTION <id>`.
func (n *ExplainForStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("EXPLAIN ")
	ctx.WriteKeyWord("FORMAT ")
	ctx.WritePlain("= ")
	ctx.WriteString(n.Format)
	ctx.WritePlain(" ")
	ctx.WriteKeyWord("FOR ")
	ctx.WriteKeyWord("CONNECTION ")
	// Connection IDs are unsigned, so %d on the uint64 matches
	// strconv.FormatUint(id, 10) exactly.
	ctx.WritePlainf("%d", n.ConnectionID)
	return nil
}
// Accept implements Node Accept interface. This node has no child nodes to walk.
func (n *ExplainForStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	return v.Leave(raw.(*ExplainForStmt))
}
// ExplainStmt is a statement to provide information about how is SQL statement executed
// or get columns information in a table.
// See https://dev.mysql.com/doc/refman/5.7/en/explain.html
type ExplainStmt struct {
	stmtNode

	// Stmt is the statement being explained; a *ShowStmt here is restored as DESC.
	Stmt StmtNode
	// Format is the EXPLAIN output format (e.g. "row", "json").
	Format string
	// Analyze is set for EXPLAIN ANALYZE.
	Analyze bool
}
// Restore implements Node interface. An EXPLAIN over a SHOW statement is
// restored in its DESC form; otherwise the EXPLAIN [ANALYZE] [FORMAT = ...]
// prefix is written before the inner statement.
func (n *ExplainStmt) Restore(ctx *format.RestoreCtx) error {
	if show, isShow := n.Stmt.(*ShowStmt); isShow {
		ctx.WriteKeyWord("DESC ")
		if err := show.Table.Restore(ctx); err != nil {
			return errors.Annotate(err, "An error occurred while restore ExplainStmt.ShowStmt.Table")
		}
		if show.Column == nil {
			return nil
		}
		ctx.WritePlain(" ")
		if err := show.Column.Restore(ctx); err != nil {
			return errors.Annotate(err, "An error occurred while restore ExplainStmt.ShowStmt.Column")
		}
		return nil
	}
	ctx.WriteKeyWord("EXPLAIN ")
	if n.Analyze {
		ctx.WriteKeyWord("ANALYZE ")
	}
	// FORMAT is omitted only for EXPLAIN ANALYZE with the default "row" format.
	if !n.Analyze || strings.ToLower(n.Format) != "row" {
		ctx.WriteKeyWord("FORMAT ")
		ctx.WritePlain("= ")
		ctx.WriteString(n.Format)
		ctx.WritePlain(" ")
	}
	if err := n.Stmt.Restore(ctx); err != nil {
		return errors.Annotate(err, "An error occurred while restore ExplainStmt.Stmt")
	}
	return nil
}
// Accept implements Node Accept interface: it walks the explained statement
// and stores the (possibly rewritten) child back.
func (n *ExplainStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	stmt := raw.(*ExplainStmt)
	child, ok := stmt.Stmt.Accept(v)
	if !ok {
		return stmt, false
	}
	stmt.Stmt = child.(StmtNode)
	return v.Leave(stmt)
}
// PlanReplayerStmt is a statement to dump or load information for recreating plans
type PlanReplayerStmt struct {
	stmtNode

	// Stmt is the statement whose plan is dumped; nil for the SLOW QUERY / file forms.
	Stmt StmtNode
	// Analyze is set for `plan replayer dump explain analyze ...`.
	Analyze bool
	// Load is set for `plan replayer load 'file'`.
	Load bool

	// HistoricalStatsInfo is the optional `WITH STATS AS OF ...` clause.
	HistoricalStatsInfo *AsOfClause

	// Capture indicates 'plan replayer capture <sql_digest> <plan_digest>'
	Capture bool
	// Remove indicates `plan replayer capture remove <sql_digest> <plan_digest>`
	Remove bool

	// SQLDigest and PlanDigest identify the captured statement and its plan.
	SQLDigest  string
	PlanDigest string

	// File is used to store 2 cases:
	// 1. plan replayer load 'file';
	// 2. plan replayer dump explain <analyze> 'file'
	File string

	// Fields below are currently useless.

	// Where is the where clause in select statement.
	Where ExprNode
	// OrderBy is the ordering expression list.
	OrderBy *OrderByClause
	// Limit is the limit clause.
	Limit *Limit
}
// Restore implements Node interface.
// The mutually exclusive statement forms are emitted in priority order:
// LOAD, CAPTURE, CAPTURE REMOVE, then the DUMP form (optionally over a file,
// a SLOW QUERY filter, or an embedded statement).
func (n *PlanReplayerStmt) Restore(ctx *format.RestoreCtx) error {
	if n.Load {
		// Form 1: PLAN REPLAYER LOAD 'file'.
		ctx.WriteKeyWord("PLAN REPLAYER LOAD ")
		ctx.WriteString(n.File)
		return nil
	}
	if n.Capture {
		// Form 2: PLAN REPLAYER CAPTURE '<sql_digest>' '<plan_digest>'.
		ctx.WriteKeyWord("PLAN REPLAYER CAPTURE ")
		ctx.WriteString(n.SQLDigest)
		ctx.WriteKeyWord(" ")
		ctx.WriteString(n.PlanDigest)
		return nil
	}
	if n.Remove {
		// Form 3: PLAN REPLAYER CAPTURE REMOVE '<sql_digest>' '<plan_digest>'.
		ctx.WriteKeyWord("PLAN REPLAYER CAPTURE REMOVE ")
		ctx.WriteString(n.SQLDigest)
		ctx.WriteKeyWord(" ")
		ctx.WriteString(n.PlanDigest)
		return nil
	}
	// Form 4: PLAN REPLAYER DUMP [WITH STATS ...] EXPLAIN [ANALYZE] ...
	ctx.WriteKeyWord("PLAN REPLAYER DUMP ")
	if n.HistoricalStatsInfo != nil {
		ctx.WriteKeyWord("WITH STATS ")
		if err := n.HistoricalStatsInfo.Restore(ctx); err != nil {
			return errors.Annotate(err, "An error occurred while restore PlanReplayerStmt.HistoricalStatsInfo")
		}
		ctx.WriteKeyWord(" ")
	}
	if n.Analyze {
		ctx.WriteKeyWord("EXPLAIN ANALYZE ")
	} else {
		ctx.WriteKeyWord("EXPLAIN ")
	}
	if n.Stmt == nil {
		// No embedded statement: either dump from a file, or dump the slow
		// query log filtered by the optional WHERE / ORDER BY / LIMIT clauses.
		if len(n.File) > 0 {
			ctx.WriteString(n.File)
			return nil
		}
		ctx.WriteKeyWord("SLOW QUERY")
		if n.Where != nil {
			ctx.WriteKeyWord(" WHERE ")
			if err := n.Where.Restore(ctx); err != nil {
				return errors.Annotate(err, "An error occurred while restore PlanReplayerStmt.Where")
			}
		}
		if n.OrderBy != nil {
			ctx.WriteKeyWord(" ")
			if err := n.OrderBy.Restore(ctx); err != nil {
				return errors.Annotate(err, "An error occurred while restore PlanReplayerStmt.OrderBy")
			}
		}
		if n.Limit != nil {
			ctx.WriteKeyWord(" ")
			if err := n.Limit.Restore(ctx); err != nil {
				return errors.Annotate(err, "An error occurred while restore PlanReplayerStmt.Limit")
			}
		}
		return nil
	}
	if err := n.Stmt.Restore(ctx); err != nil {
		return errors.Annotate(err, "An error occurred while restore PlanReplayerStmt.Stmt")
	}
	return nil
}
// Accept implements Node Accept interface.
// Children are only walked for the DUMP forms: LOAD has no children, and when
// Stmt is nil the optional WHERE / ORDER BY / LIMIT clauses are visited instead.
func (n *PlanReplayerStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*PlanReplayerStmt)
	if n.Load {
		// PLAN REPLAYER LOAD carries only a file name — nothing to visit.
		return v.Leave(n)
	}
	if n.HistoricalStatsInfo != nil {
		info, ok := n.HistoricalStatsInfo.Accept(v)
		if !ok {
			return n, false
		}
		n.HistoricalStatsInfo = info.(*AsOfClause)
	}
	if n.Stmt == nil {
		// SLOW QUERY form: walk the filter clauses instead of an embedded statement.
		if n.Where != nil {
			node, ok := n.Where.Accept(v)
			if !ok {
				return n, false
			}
			n.Where = node.(ExprNode)
		}
		if n.OrderBy != nil {
			node, ok := n.OrderBy.Accept(v)
			if !ok {
				return n, false
			}
			n.OrderBy = node.(*OrderByClause)
		}
		if n.Limit != nil {
			node, ok := n.Limit.Accept(v)
			if !ok {
				return n, false
			}
			n.Limit = node.(*Limit)
		}
		return v.Leave(n)
	}
	node, ok := n.Stmt.Accept(v)
	if !ok {
		return n, false
	}
	n.Stmt = node.(StmtNode)
	return v.Leave(n)
}
// CompactReplicaKind selects which replicas an ALTER TABLE ... COMPACT
// statement targets.
type CompactReplicaKind string

const (
	// CompactReplicaKindAll means compacting both TiKV and TiFlash replicas.
	CompactReplicaKindAll = "ALL"

	// CompactReplicaKindTiFlash means compacting TiFlash replicas.
	CompactReplicaKindTiFlash = "TIFLASH"

	// CompactReplicaKindTiKV means compacting TiKV replicas.
	CompactReplicaKindTiKV = "TIKV"
)
// CompactTableStmt is a statement to manually compact a table.
type CompactTableStmt struct {
	stmtNode

	// Table is the table to compact.
	Table *TableName
	// PartitionNames optionally restricts compaction to the listed partitions.
	PartitionNames []model.CIStr
	// ReplicaKind selects which replicas to compact (ALL / TIFLASH / TIKV).
	ReplicaKind CompactReplicaKind
}
// Restore implements Node interface. Emits
// `ALTER TABLE t COMPACT [PARTITION p1,p2] [<kind> REPLICA]`;
// the replica clause is omitted for the default ALL kind.
func (n *CompactTableStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("ALTER TABLE ")
	n.Table.restoreName(ctx)
	ctx.WriteKeyWord(" COMPACT")
	if len(n.PartitionNames) > 0 {
		ctx.WriteKeyWord(" PARTITION ")
		for i := range n.PartitionNames {
			if i > 0 {
				ctx.WritePlain(",")
			}
			ctx.WriteName(n.PartitionNames[i].O)
		}
	}
	if n.ReplicaKind != CompactReplicaKindAll {
		ctx.WriteKeyWord(" ")
		// Note: There is only TiFlash replica available now. TiKV will be added later.
		ctx.WriteKeyWord(string(n.ReplicaKind))
		ctx.WriteKeyWord(" REPLICA")
	}
	return nil
}
// Accept implements Node Accept interface: it walks the target table name
// and stores the (possibly rewritten) child back.
func (n *CompactTableStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	stmt := raw.(*CompactTableStmt)
	child, ok := stmt.Table.Accept(v)
	if !ok {
		return stmt, false
	}
	stmt.Table = child.(*TableName)
	return v.Leave(stmt)
}
// PrepareStmt is a statement to prepares a SQL statement which contains placeholders,
// and it is executed with ExecuteStmt and released with DeallocateStmt.
// See https://dev.mysql.com/doc/refman/5.7/en/prepare.html
type PrepareStmt struct {
	stmtNode

	// Name is the prepared-statement name.
	Name string
	// SQLText is the literal statement text; takes precedence over SQLVar when non-empty.
	SQLText string
	// SQLVar holds the user variable form: PREPARE stmt FROM @var.
	SQLVar *VariableExpr
}
// Restore implements Node interface. The source of the prepared text is
// either a string literal or a user variable; having neither is an error.
func (n *PrepareStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("PREPARE ")
	ctx.WriteName(n.Name)
	ctx.WriteKeyWord(" FROM ")
	switch {
	case n.SQLText != "":
		ctx.WriteString(n.SQLText)
		return nil
	case n.SQLVar != nil:
		if err := n.SQLVar.Restore(ctx); err != nil {
			return errors.Annotate(err, "An error occurred while restore PrepareStmt.SQLVar")
		}
		return nil
	}
	return errors.New("An error occurred while restore PrepareStmt")
}
// Accept implements Node Accept interface: the optional SQLVar child is
// walked when present.
func (n *PrepareStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	stmt := raw.(*PrepareStmt)
	if stmt.SQLVar != nil {
		child, ok := stmt.SQLVar.Accept(v)
		if !ok {
			return stmt, false
		}
		stmt.SQLVar = child.(*VariableExpr)
	}
	return v.Leave(stmt)
}
// DeallocateStmt is a statement to release PreparedStmt.
// See https://dev.mysql.com/doc/refman/5.7/en/deallocate-prepare.html
type DeallocateStmt struct {
	stmtNode

	// Name is the prepared-statement name to release.
	Name string
}
// Restore implements Node interface. Emits `DEALLOCATE PREPARE <name>`.
func (n *DeallocateStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("DEALLOCATE PREPARE ")
	ctx.WriteName(n.Name)
	return nil
}
// Accept implements Node Accept interface. This node has no children.
func (n *DeallocateStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	return v.Leave(raw.(*DeallocateStmt))
}
// Prepared represents a prepared statement.
type Prepared struct {
	// Stmt is the parsed statement with parameter markers.
	Stmt StmtNode
	// StmtType is the statement-type label (e.g. "Select").
	StmtType string
	// Params are the placeholder markers found in Stmt.
	Params []ParamMarkerExpr
	// SchemaVersion records the schema version the plan was built against.
	SchemaVersion int64
	// CachedPlan and CachedNames hold planner-owned cache entries; they are
	// opaque to the parser (interface{} to avoid an import cycle).
	CachedPlan  interface{}
	CachedNames interface{}
}
// ExecuteStmt is a statement to execute PreparedStmt.
// See https://dev.mysql.com/doc/refman/5.7/en/execute.html
type ExecuteStmt struct {
	stmtNode

	// Name is the prepared-statement name to execute.
	Name string
	// UsingVars are the USING arguments.
	UsingVars []ExprNode
	// BinaryArgs carries arguments from the binary protocol; opaque to the parser.
	BinaryArgs interface{}
	// PrepStmt is the corresponding prepared statement; opaque to the parser.
	PrepStmt interface{} // the corresponding prepared statement
	// IdxInMulti is this statement's index within a multi-statement text.
	IdxInMulti int

	// FromGeneralStmt indicates whether this execute-stmt is converted from a general query.
	// e.g. select * from t where a>2 --> execute 'select * from t where a>?' using 2
	FromGeneralStmt bool
}
// Restore implements Node interface. Emits
// `EXECUTE <name> [USING arg1,arg2,...]`.
func (n *ExecuteStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("EXECUTE ")
	ctx.WriteName(n.Name)
	if len(n.UsingVars) == 0 {
		return nil
	}
	ctx.WriteKeyWord(" USING ")
	for i, arg := range n.UsingVars {
		if i > 0 {
			ctx.WritePlain(",")
		}
		if err := arg.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore ExecuteStmt.UsingVars index %d", i)
		}
	}
	return nil
}
// Accept implements Node Accept interface: every USING argument is walked
// and stored back in place.
func (n *ExecuteStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	stmt := raw.(*ExecuteStmt)
	for i := range stmt.UsingVars {
		child, ok := stmt.UsingVars[i].Accept(v)
		if !ok {
			return stmt, false
		}
		stmt.UsingVars[i] = child.(ExprNode)
	}
	return v.Leave(stmt)
}
// BeginStmt is a statement to start a new transaction.
// See https://dev.mysql.com/doc/refman/5.7/en/commit.html
type BeginStmt struct {
	stmtNode

	// Mode is the optional transaction mode (OPTIMISTIC / PESSIMISTIC); empty
	// means the plain START TRANSACTION form is restored.
	Mode string
	// CausalConsistencyOnly is set for START TRANSACTION WITH CAUSAL CONSISTENCY ONLY.
	CausalConsistencyOnly bool
	// ReadOnly is set for START TRANSACTION READ ONLY.
	ReadOnly bool

	// AS OF is used to read the data at a specific point of time.
	// Should only be used when ReadOnly is true.
	AsOf *AsOfClause
}
// Restore implements Node interface. An explicit Mode produces the BEGIN
// form; otherwise one of the START TRANSACTION variants is emitted.
func (n *BeginStmt) Restore(ctx *format.RestoreCtx) error {
	if n.Mode != "" {
		ctx.WriteKeyWord("BEGIN ")
		ctx.WriteKeyWord(n.Mode)
		return nil
	}
	switch {
	case n.ReadOnly:
		ctx.WriteKeyWord("START TRANSACTION READ ONLY")
		if n.AsOf != nil {
			ctx.WriteKeyWord(" ")
			return n.AsOf.Restore(ctx)
		}
	case n.CausalConsistencyOnly:
		ctx.WriteKeyWord("START TRANSACTION WITH CAUSAL CONSISTENCY ONLY")
	default:
		ctx.WriteKeyWord("START TRANSACTION")
	}
	return nil
}
// Accept implements Node Accept interface.
//
// The node returned by Enter is cast to *BeginStmt BEFORE the AsOf child is
// visited. The previous code visited (and reassigned) AsOf on the stale
// receiver first, so a visitor that returned a replacement node from Enter
// would have the old node's child walked while the replacement's child was
// never updated. Casting first matches every other Accept in this file.
func (n *BeginStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*BeginStmt)
	if n.AsOf != nil {
		node, ok := n.AsOf.Accept(v)
		if !ok {
			return n, false
		}
		n.AsOf = node.(*AsOfClause)
	}
	return v.Leave(n)
}
// BinlogStmt is an internal-use statement.
// We just parse and ignore it.
// See http://dev.mysql.com/doc/refman/5.7/en/binlog.html
type BinlogStmt struct {
	stmtNode

	// Str is the base64-encoded event payload following the BINLOG keyword.
	Str string
}
// Restore implements Node interface. Emits `BINLOG '<payload>'`.
func (n *BinlogStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("BINLOG ")
	ctx.WriteString(n.Str)
	return nil
}
// Accept implements Node Accept interface. This node has no children.
func (n *BinlogStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	return v.Leave(raw.(*BinlogStmt))
}
// CompletionType defines completion_type used in COMMIT and ROLLBACK statements
type CompletionType int8

const (
	// CompletionTypeDefault refers to NO_CHAIN
	CompletionTypeDefault CompletionType = iota
	// CompletionTypeChain restores as AND CHAIN.
	CompletionTypeChain
	// CompletionTypeRelease restores as RELEASE.
	CompletionTypeRelease
)
// Restore writes the completion suffix of a COMMIT/ROLLBACK statement.
// The default completion type (NO_CHAIN) produces no output.
func (n CompletionType) Restore(ctx *format.RestoreCtx) error {
	if n == CompletionTypeChain {
		ctx.WriteKeyWord(" AND CHAIN")
	} else if n == CompletionTypeRelease {
		ctx.WriteKeyWord(" RELEASE")
	}
	return nil
}
// CommitStmt is a statement to commit the current transaction.
// See https://dev.mysql.com/doc/refman/5.7/en/commit.html
type CommitStmt struct {
	stmtNode
	// CompletionType overwrites system variable `completion_type` within transaction
	CompletionType CompletionType
}
// Restore implements Node interface. Emits COMMIT plus the optional
// completion suffix (AND CHAIN / RELEASE).
func (n *CommitStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("COMMIT")
	err := n.CompletionType.Restore(ctx)
	if err != nil {
		return errors.Annotate(err, "An error occurred while restore CommitStmt.CompletionType")
	}
	return nil
}
// Accept implements Node Accept interface. This node has no children.
func (n *CommitStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	return v.Leave(raw.(*CommitStmt))
}
// RollbackStmt is a statement to roll back the current transaction.
// See https://dev.mysql.com/doc/refman/5.7/en/commit.html
type RollbackStmt struct {
	stmtNode
	// CompletionType overwrites system variable `completion_type` within transaction
	CompletionType CompletionType
	// SavepointName is the savepoint name.
	SavepointName string
}
// Restore implements Node interface. Emits ROLLBACK, an optional
// `TO <savepoint>` clause, and the completion suffix.
func (n *RollbackStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("ROLLBACK")
	if name := n.SavepointName; name != "" {
		ctx.WritePlain(" TO ")
		ctx.WritePlain(name)
	}
	if err := n.CompletionType.Restore(ctx); err != nil {
		return errors.Annotate(err, "An error occurred while restore RollbackStmt.CompletionType")
	}
	return nil
}
// Accept implements Node Accept interface. This node has no children.
func (n *RollbackStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	return v.Leave(raw.(*RollbackStmt))
}
// UseStmt is a statement to use the DBName database as the current database.
// See https://dev.mysql.com/doc/refman/5.7/en/use.html
type UseStmt struct {
	stmtNode

	// DBName is the database to switch to.
	DBName string
}
// Restore implements Node interface. Emits `USE <db>`.
func (n *UseStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("USE ")
	ctx.WriteName(n.DBName)
	return nil
}
// Accept implements Node Accept interface. This node has no children.
func (n *UseStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	return v.Leave(raw.(*UseStmt))
}
// Sentinel values stored in VariableAssignment.Name to mark the special
// SET NAMES / SET CHARSET statement forms.
const (
	// SetNames is the const for set names stmt.
	// If VariableAssignment.Name == SetNames, it should be set names stmt.
	SetNames = "SetNAMES"
	// SetCharset is the const for set charset stmt.
	SetCharset = "SetCharset"
)
// VariableAssignment is a variable assignment struct.
type VariableAssignment struct {
	node
	// Name is the variable name, or one of the SetNames/SetCharset sentinels.
	Name string
	// Value is the assigned expression.
	Value ExprNode
	// IsGlobal selects @@GLOBAL over @@SESSION for system variables.
	IsGlobal bool
	// IsSystem distinguishes system variables (@@x) from user variables (@x).
	IsSystem bool

	// ExtendValue is a way to store extended info.
	// VariableAssignment should be able to store information for SetCharset/SetPWD Stmt.
	// For SetCharsetStmt, Value is charset, ExtendValue is collation.
	// TODO: Use SetStmt to implement set password statement.
	ExtendValue ValueExpr
}
// Restore implements Node interface. The prefix (@@GLOBAL./@@SESSION./@) is
// written first, then either the NAMES/CHARSET keyword or `name=`, then the
// value and the optional COLLATE clause.
func (n *VariableAssignment) Restore(ctx *format.RestoreCtx) error {
	switch {
	case n.IsSystem:
		ctx.WritePlain("@@")
		if n.IsGlobal {
			ctx.WriteKeyWord("GLOBAL")
		} else {
			ctx.WriteKeyWord("SESSION")
		}
		ctx.WritePlain(".")
	case n.Name != SetNames && n.Name != SetCharset:
		// Plain user variable.
		ctx.WriteKeyWord("@")
	}
	switch n.Name {
	case SetNames:
		ctx.WriteKeyWord("NAMES ")
	case SetCharset:
		ctx.WriteKeyWord("CHARSET ")
	default:
		ctx.WriteName(n.Name)
		ctx.WritePlain("=")
	}
	if err := n.Value.Restore(ctx); err != nil {
		return errors.Annotate(err, "An error occurred while restore VariableAssignment.Value")
	}
	if n.ExtendValue != nil {
		ctx.WriteKeyWord(" COLLATE ")
		if err := n.ExtendValue.Restore(ctx); err != nil {
			return errors.Annotate(err, "An error occurred while restore VariableAssignment.ExtendValue")
		}
	}
	return nil
}
// Accept implements Node interface: the assigned value expression is walked
// and stored back.
func (n *VariableAssignment) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	assignment := raw.(*VariableAssignment)
	child, ok := assignment.Value.Accept(v)
	if !ok {
		return assignment, false
	}
	assignment.Value = child.(ExprNode)
	return v.Leave(assignment)
}
// FlushStmtType is the type for FLUSH statement.
type FlushStmtType int

// Flush statement types.
const (
	// FlushNone is the zero value; restoring it is an error.
	FlushNone FlushStmtType = iota
	FlushTables
	FlushPrivileges
	FlushStatus
	FlushTiDBPlugin
	FlushHosts
	FlushLogs
	FlushClientErrorsSummary
)
// LogType is the log type used in FLUSH statement.
type LogType int8

const (
	// LogTypeDefault restores as plain LOGS; the others as `<KIND> LOGS`.
	LogTypeDefault LogType = iota
	LogTypeBinary
	LogTypeEngine
	LogTypeError
	LogTypeGeneral
	LogTypeSlow
)
// FlushStmt is a statement to flush tables/privileges/optimizer costs and so on.
type FlushStmt struct {
	stmtNode

	Tp              FlushStmtType // Privileges/Tables/...
	// NoWriteToBinLog suppresses binlog replication of the flush.
	NoWriteToBinLog bool
	// LogType selects which logs FLUSH LOGS targets.
	LogType         LogType
	Tables          []*TableName // For FlushTableStmt, if Tables is empty, it means flush all tables.
	// ReadLock is set for FLUSH TABLES ... WITH READ LOCK.
	ReadLock        bool
	// Plugins lists plugin names for FLUSH TIDB PLUGINS.
	Plugins         []string
}
// Restore implements Node interface.
// Each FlushStmtType maps to its own keyword clause; FlushNone (and any
// unknown value) is rejected.
func (n *FlushStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("FLUSH ")
	if n.NoWriteToBinLog {
		ctx.WriteKeyWord("NO_WRITE_TO_BINLOG ")
	}
	switch n.Tp {
	case FlushTables:
		ctx.WriteKeyWord("TABLES")
		// An empty table list means "flush all tables" — nothing more is written.
		for i, v := range n.Tables {
			if i == 0 {
				ctx.WritePlain(" ")
			} else {
				ctx.WritePlain(", ")
			}
			if err := v.Restore(ctx); err != nil {
				return errors.Annotatef(err, "An error occurred while restore FlushStmt.Tables[%d]", i)
			}
		}
		if n.ReadLock {
			ctx.WriteKeyWord(" WITH READ LOCK")
		}
	case FlushPrivileges:
		ctx.WriteKeyWord("PRIVILEGES")
	case FlushStatus:
		ctx.WriteKeyWord("STATUS")
	case FlushTiDBPlugin:
		ctx.WriteKeyWord("TIDB PLUGINS")
		for i, v := range n.Plugins {
			if i == 0 {
				ctx.WritePlain(" ")
			} else {
				ctx.WritePlain(", ")
			}
			ctx.WritePlain(v)
		}
	case FlushHosts:
		ctx.WriteKeyWord("HOSTS")
	case FlushLogs:
		// Pick the LOGS keyword variant from the LogType.
		var logType string
		switch n.LogType {
		case LogTypeDefault:
			logType = "LOGS"
		case LogTypeBinary:
			logType = "BINARY LOGS"
		case LogTypeEngine:
			logType = "ENGINE LOGS"
		case LogTypeError:
			logType = "ERROR LOGS"
		case LogTypeGeneral:
			logType = "GENERAL LOGS"
		case LogTypeSlow:
			logType = "SLOW LOGS"
		}
		ctx.WriteKeyWord(logType)
	case FlushClientErrorsSummary:
		ctx.WriteKeyWord("CLIENT_ERRORS_SUMMARY")
	default:
		return errors.New("Unsupported type of FlushStmt")
	}
	return nil
}
// Accept implements Node Accept interface. Table children are intentionally
// not walked here (matching the original behavior).
func (n *FlushStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	return v.Leave(raw.(*FlushStmt))
}
// KillStmt is a statement to kill a query or connection.
type KillStmt struct {
	stmtNode

	// Query indicates whether terminate a single query on this connection or the whole connection.
	// If Query is true, terminates the statement the connection is currently executing, but leaves the connection itself intact.
	// If Query is false, terminates the connection associated with the given ConnectionID, after terminating any statement the connection is executing.
	Query        bool
	ConnectionID uint64
	// TiDBExtension is used to indicate whether the user knows he is sending kill statement to the right tidb-server.
	// When the SQL grammar is "KILL TIDB [CONNECTION | QUERY] connectionID", TiDBExtension will be set.
	// It's a special grammar extension in TiDB. This extension exists because, when the connection is:
	// client -> LVS proxy -> TiDB, and type Ctrl+C in client, the following action will be executed:
	// new a connection; kill xxx;
	// kill command may send to the wrong TiDB, because the exists of LVS proxy, and kill the wrong session.
	// So, "KILL TIDB" grammar is introduced, and it REQUIRES DIRECT client -> TiDB TOPOLOGY.
	// TODO: The standard KILL grammar will be supported once we have global connectionID.
	TiDBExtension bool

	// Expr is an optional expression form of the target; when set it is
	// restored instead of the numeric ConnectionID.
	Expr ExprNode
}
// Restore implements Node interface. Emits
// `KILL [TIDB] [QUERY] (<expr> | <connection id>)`.
func (n *KillStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("KILL")
	if n.TiDBExtension {
		ctx.WriteKeyWord(" TIDB")
	}
	if n.Query {
		ctx.WriteKeyWord(" QUERY")
	}
	if n.Expr == nil {
		ctx.WritePlainf(" %d", n.ConnectionID)
		return nil
	}
	ctx.WriteKeyWord(" ")
	if err := n.Expr.Restore(ctx); err != nil {
		return errors.Trace(err)
	}
	return nil
}
// Accept implements Node Accept interface. The optional Expr child is
// intentionally not walked (matching the original behavior).
func (n *KillStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	return v.Leave(raw.(*KillStmt))
}
// SavepointStmt is the statement of SAVEPOINT.
type SavepointStmt struct {
	stmtNode
	// Name is the savepoint name.
	Name string
}
// Restore implements Node interface. Emits `SAVEPOINT <name>`.
func (n *SavepointStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("SAVEPOINT ")
	ctx.WritePlain(n.Name)
	return nil
}
// Accept implements Node Accept interface. There are no children, so the
// skip-children flag from Enter is irrelevant and deliberately ignored.
func (n *SavepointStmt) Accept(v Visitor) (Node, bool) {
	raw, _ := v.Enter(n)
	return v.Leave(raw.(*SavepointStmt))
}
// ReleaseSavepointStmt is the statement of RELEASE SAVEPOINT.
type ReleaseSavepointStmt struct {
	stmtNode
	// Name is the savepoint name.
	Name string
}
// Restore implements Node interface. Emits `RELEASE SAVEPOINT <name>`.
func (n *ReleaseSavepointStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("RELEASE SAVEPOINT ")
	ctx.WritePlain(n.Name)
	return nil
}
// Accept implements Node Accept interface. There are no children, so the
// skip-children flag from Enter is irrelevant and deliberately ignored.
func (n *ReleaseSavepointStmt) Accept(v Visitor) (Node, bool) {
	raw, _ := v.Enter(n)
	return v.Leave(raw.(*ReleaseSavepointStmt))
}
// SetStmt is the statement to set variables.
type SetStmt struct {
	stmtNode
	// Variables is the list of variable assignment.
	Variables []*VariableAssignment
}
// Restore implements Node interface. Emits `SET a=1, b=2, ...`.
func (n *SetStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("SET ")
	for i, assignment := range n.Variables {
		if i > 0 {
			ctx.WritePlain(", ")
		}
		if err := assignment.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore SetStmt.Variables[%d]", i)
		}
	}
	return nil
}
// Accept implements Node Accept interface: every variable assignment is
// walked and stored back in place.
func (n *SetStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	stmt := raw.(*SetStmt)
	for i := range stmt.Variables {
		child, ok := stmt.Variables[i].Accept(v)
		if !ok {
			return stmt, false
		}
		stmt.Variables[i] = child.(*VariableAssignment)
	}
	return v.Leave(stmt)
}
// SetConfigStmt is the statement to set cluster configs.
type SetConfigStmt struct {
	stmtNode

	Type     string // TiDB, TiKV, PD
	Instance string // '127.0.0.1:3306'
	Name     string // the variable name
	// Value is the expression assigned to the config item.
	Value ExprNode
}
// Restore implements Node interface.
// Emits `SET CONFIG (<type> | '<instance>') <name> = <value>`; a non-empty
// Type takes precedence over Instance.
func (n *SetConfigStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("SET CONFIG ")
	if n.Type != "" {
		ctx.WriteKeyWord(n.Type)
	} else {
		ctx.WriteString(n.Instance)
	}
	ctx.WritePlain(" ")
	ctx.WriteKeyWord(n.Name)
	ctx.WritePlain(" = ")
	return n.Value.Restore(ctx)
}
// Accept implements Node Accept interface: the value expression is walked
// and stored back.
func (n *SetConfigStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	stmt := raw.(*SetConfigStmt)
	child, ok := stmt.Value.Accept(v)
	if !ok {
		return stmt, false
	}
	stmt.Value = child.(ExprNode)
	return v.Leave(stmt)
}
// SetSessionStatesStmt is a statement to restore session states.
type SetSessionStatesStmt struct {
	stmtNode

	// SessionStates is the serialized session-state payload.
	SessionStates string
}
// Restore implements Node interface. Emits `SET SESSION_STATES '<payload>'`.
func (n *SetSessionStatesStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("SET SESSION_STATES ")
	ctx.WriteString(n.SessionStates)
	return nil
}
// Accept implements Node Accept interface. This node has no children.
func (n *SetSessionStatesStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	return v.Leave(raw.(*SetSessionStatesStmt))
}
/*
// SetCharsetStmt is a statement to assign values to character and collation variables.
// See https://dev.mysql.com/doc/refman/5.7/en/set-statement.html
type SetCharsetStmt struct {
stmtNode
Charset string
Collate string
}
// Accept implements Node Accept interface.
func (n *SetCharsetStmt) Accept(v Visitor) (Node, bool) {
newNode, skipChildren := v.Enter(n)
if skipChildren {
return v.Leave(newNode)
}
n = newNode.(*SetCharsetStmt)
return v.Leave(n)
}
*/
// SetPwdStmt is a statement to assign a password to user account.
// See https://dev.mysql.com/doc/refman/5.7/en/set-password.html
type SetPwdStmt struct {
	stmtNode

	// User is the target account; nil means the current user.
	User *auth.UserIdentity
	// Password is the new plaintext password.
	Password string
}
// Restore implements Node interface. Emits
// `SET PASSWORD [FOR <user>] = '<password>'`.
func (n *SetPwdStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("SET PASSWORD")
	if user := n.User; user != nil {
		ctx.WriteKeyWord(" FOR ")
		if err := user.Restore(ctx); err != nil {
			return errors.Annotate(err, "An error occurred while restore SetPwdStmt.User")
		}
	}
	ctx.WritePlain("=")
	ctx.WriteString(n.Password)
	return nil
}
// SecureText implements SensitiveStatement interface.
// The password itself is never included; fmt's %s also tolerates a nil User.
func (n *SetPwdStmt) SecureText() string {
	return fmt.Sprintf("set password for user %s", n.User)
}
// Accept implements Node Accept interface. This node has no children.
func (n *SetPwdStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	return v.Leave(raw.(*SetPwdStmt))
}
// ChangeStmt changes the state of a binlog node (pump/drainer).
type ChangeStmt struct {
	stmtNode

	// NodeType is the binlog node kind (PUMP / DRAINER).
	NodeType string
	// State is the new node state.
	State string
	// NodeID identifies the node to change.
	NodeID string
}
// Restore implements Node interface. Emits
// `CHANGE <node type> TO NODE_STATE ='<state>' FOR NODE_ID '<id>'`.
func (n *ChangeStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("CHANGE ")
	ctx.WriteKeyWord(n.NodeType)
	ctx.WriteKeyWord(" TO NODE_STATE ")
	ctx.WritePlain("=")
	ctx.WriteString(n.State)
	ctx.WriteKeyWord(" FOR NODE_ID ")
	ctx.WriteString(n.NodeID)
	return nil
}
// SecureText implements SensitiveStatement interface.
// Nothing here is secret; the node type is lowercased for display only.
func (n *ChangeStmt) SecureText() string {
	return fmt.Sprintf("change %s to node_state='%s' for node_id '%s'", strings.ToLower(n.NodeType), n.State, n.NodeID)
}
// Accept implements Node Accept interface. This node has no children.
func (n *ChangeStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	return v.Leave(raw.(*ChangeStmt))
}
// SetRoleStmtType is the type for SET ROLE statement.
type SetRoleStmtType int

// SetRole statement types.
const (
	SetRoleDefault SetRoleStmtType = iota
	SetRoleNone
	SetRoleAll
	SetRoleAllExcept
	// SetRoleRegular means an explicit role list was given.
	SetRoleRegular
)
// SetRoleStmt is the statement of SET ROLE.
type SetRoleStmt struct {
	stmtNode

	// SetRoleOpt selects the SET ROLE variant (DEFAULT/NONE/ALL/ALL EXCEPT/list).
	SetRoleOpt SetRoleStmtType
	// RoleList is the explicit role list (or the exception list for ALL EXCEPT).
	RoleList []*auth.RoleIdentity
}
// Restore implements Node interface. Emits the SET ROLE variant keyword,
// then the comma-separated role list (each role preceded by a space).
func (n *SetRoleStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("SET ROLE")
	switch n.SetRoleOpt {
	case SetRoleDefault:
		ctx.WriteKeyWord(" DEFAULT")
	case SetRoleNone:
		ctx.WriteKeyWord(" NONE")
	case SetRoleAll:
		ctx.WriteKeyWord(" ALL")
	case SetRoleAllExcept:
		ctx.WriteKeyWord(" ALL EXCEPT")
	}
	last := len(n.RoleList) - 1
	for i, role := range n.RoleList {
		ctx.WritePlain(" ")
		if err := role.Restore(ctx); err != nil {
			return errors.Annotate(err, "An error occurred while restore SetRoleStmt.RoleList")
		}
		if i < last {
			ctx.WritePlain(",")
		}
	}
	return nil
}
// Accept implements Node Accept interface. This node has no walkable children.
func (n *SetRoleStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	return v.Leave(raw.(*SetRoleStmt))
}
// SetDefaultRoleStmt is the statement of SET DEFAULT ROLE ... TO ...
type SetDefaultRoleStmt struct {
	stmtNode

	// SetRoleOpt selects NONE, ALL, or an explicit role list.
	SetRoleOpt SetRoleStmtType
	// RoleList is the explicit default-role list.
	RoleList []*auth.RoleIdentity
	// UserList are the accounts whose default roles are being set.
	UserList []*auth.UserIdentity
}
// Restore implements Node interface. Emits
// `SET DEFAULT ROLE (NONE | ALL | r1, r2, ...) TO u1, u2, ...`.
func (n *SetDefaultRoleStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("SET DEFAULT ROLE")
	switch n.SetRoleOpt {
	case SetRoleNone:
		ctx.WriteKeyWord(" NONE")
	case SetRoleAll:
		ctx.WriteKeyWord(" ALL")
	default:
		// Explicit role list: written by the loop below.
	}
	for i, role := range n.RoleList {
		ctx.WritePlain(" ")
		err := role.Restore(ctx)
		if err != nil {
			return errors.Annotate(err, "An error occurred while restore SetDefaultRoleStmt.RoleList")
		}
		if i != len(n.RoleList)-1 {
			ctx.WritePlain(",")
		}
	}
	ctx.WritePlain(" TO")
	for i, user := range n.UserList {
		ctx.WritePlain(" ")
		err := user.Restore(ctx)
		if err != nil {
			return errors.Annotate(err, "An error occurred while restore SetDefaultRoleStmt.UserList")
		}
		if i != len(n.UserList)-1 {
			ctx.WritePlain(",")
		}
	}
	return nil
}
// Accept implements Node Accept interface. This node has no walkable children.
func (n *SetDefaultRoleStmt) Accept(v Visitor) (Node, bool) {
	raw, skip := v.Enter(n)
	if skip {
		return v.Leave(raw)
	}
	return v.Leave(raw.(*SetDefaultRoleStmt))
}
// UserSpec is used for parsing create user statement.
type UserSpec struct {
	// User identifies the account (user@host).
	User *auth.UserIdentity
	// AuthOpt is the optional IDENTIFIED clause; nil means no credentials given.
	AuthOpt *AuthOption
	// IsRole marks the spec as a role rather than a user account.
	IsRole bool
}
// Restore implements Node interface. Writes the user identity followed by
// the optional IDENTIFIED clause.
func (n *UserSpec) Restore(ctx *format.RestoreCtx) error {
	if err := n.User.Restore(ctx); err != nil {
		return errors.Annotate(err, "An error occurred while restore UserSpec.User")
	}
	if n.AuthOpt == nil {
		return nil
	}
	ctx.WritePlain(" ")
	if err := n.AuthOpt.Restore(ctx); err != nil {
		return errors.Annotate(err, "An error occurred while restore UserSpec.AuthOpt")
	}
	return nil
}
// SecurityString formats the UserSpec without password information.
// When any credential (plaintext or hash) is present it is masked.
func (n *UserSpec) SecurityString() string {
	if opt := n.AuthOpt; opt != nil && (len(opt.AuthString) > 0 || len(opt.HashString) > 0) {
		return fmt.Sprintf("{%s password = ***}", n.User)
	}
	return n.User.String()
}
// EncodedPassword returns the encoded password (which is the real data mysql.user).
// The boolean value indicates input's password format is legal or not.
func (n *UserSpec) EncodedPassword() (string, bool) {
	// No IDENTIFIED clause at all: empty password, trivially legal.
	if n.AuthOpt == nil {
		return "", true
	}

	opt := n.AuthOpt
	if opt.ByAuthString {
		// Plaintext password: hash it according to the chosen plugin.
		switch opt.AuthPlugin {
		case mysql.AuthCachingSha2Password, mysql.AuthTiDBSM3Password:
			return auth.NewHashPassword(opt.AuthString, opt.AuthPlugin), true
		case mysql.AuthSocket:
			// auth_socket never stores a password.
			return "", true
		default:
			return auth.EncodePassword(opt.AuthString), true
		}
	}

	// store the LDAP dn directly in the password field
	switch opt.AuthPlugin {
	case mysql.AuthLDAPSimple, mysql.AuthLDAPSASL:
		// TODO: validate the HashString to be a `dn` for LDAP
		// It seems fine to not validate here, and LDAP server will give an error when the client'll try to login this user.
		// The percona server implementation doesn't have a validation for this HashString.
		// However, returning an error for obvious wrong format is more friendly.
		return opt.HashString, true
	}

	// In case we have 'IDENTIFIED WITH <plugin>' but no 'BY <password>' to set an empty password.
	if opt.HashString == "" {
		return opt.HashString, true
	}

	// Validate the pre-hashed credential's format per plugin; a wrong length
	// or prefix means the input is not a legal hash.
	switch opt.AuthPlugin {
	case mysql.AuthCachingSha2Password:
		if len(opt.HashString) != mysql.SHAPWDHashLen {
			return "", false
		}
	case mysql.AuthTiDBSM3Password:
		if len(opt.HashString) != mysql.SM3PWDHashLen {
			return "", false
		}
	case "", mysql.AuthNativePassword:
		// mysql_native_password hashes are '*' followed by 40 hex digits.
		if len(opt.HashString) != (mysql.PWDHashLen+1) || !strings.HasPrefix(opt.HashString, "*") {
			return "", false
		}
	case mysql.AuthSocket:
		// auth_socket accepts any stored string (it is unused).
	default:
		return "", false
	}
	return opt.HashString, true
}
// AuthTokenOrTLSOption holds one REQUIRE clause item (TLS requirement or
// token issuer) from a CREATE/ALTER USER or GRANT statement.
type AuthTokenOrTLSOption struct {
	Type  AuthTokenOrTLSOptionType
	Value string // payload for the value-carrying kinds (CIPHER, ISSUER, SUBJECT, SAN, TOKEN_ISSUER)
}

// Restore implements Node interface.
func (t *AuthTokenOrTLSOption) Restore(ctx *format.RestoreCtx) error {
	switch t.Type {
	case TlsNone:
		ctx.WriteKeyWord("NONE")
	case Ssl:
		ctx.WriteKeyWord("SSL")
	case X509:
		ctx.WriteKeyWord("X509")
	case Cipher:
		ctx.WriteKeyWord("CIPHER ")
		ctx.WriteString(t.Value)
	case Issuer:
		ctx.WriteKeyWord("ISSUER ")
		ctx.WriteString(t.Value)
	case Subject:
		ctx.WriteKeyWord("SUBJECT ")
		ctx.WriteString(t.Value)
	case SAN:
		ctx.WriteKeyWord("SAN ")
		ctx.WriteString(t.Value)
	case TokenIssuer:
		ctx.WriteKeyWord("TOKEN_ISSUER ")
		ctx.WriteString(t.Value)
	default:
		return errors.Errorf("Unsupported AuthTokenOrTLSOption.Type %d", t.Type)
	}
	return nil
}
// AuthTokenOrTLSOptionType enumerates the kinds of REQUIRE clause items.
type AuthTokenOrTLSOptionType int

// The REQUIRE clause item kinds.
const (
	TlsNone AuthTokenOrTLSOptionType = iota
	Ssl
	X509
	Cipher
	Issuer
	Subject
	SAN
	TokenIssuer
)

// String returns the SQL keyword spelling of the option type, or "UNKNOWN"
// for values outside the enumeration.
func (t AuthTokenOrTLSOptionType) String() string {
	names := [...]string{
		TlsNone:     "NONE",
		Ssl:         "SSL",
		X509:        "X509",
		Cipher:      "CIPHER",
		Issuer:      "ISSUER",
		Subject:     "SUBJECT",
		SAN:         "SAN",
		TokenIssuer: "TOKEN_ISSUER",
	}
	if t >= 0 && int(t) < len(names) {
		return names[t]
	}
	return "UNKNOWN"
}
// Resource-limit option kinds for the WITH clause of CREATE/ALTER USER.
const (
	MaxQueriesPerHour = iota + 1
	MaxUpdatesPerHour
	MaxConnectionsPerHour
	MaxUserConnections
)

// ResourceOption is one WITH-clause resource limit, e.g. MAX_QUERIES_PER_HOUR 10.
type ResourceOption struct {
	Type  int   // one of the Max* constants above
	Count int64 // the numeric limit
}
// Restore implements Node interface.
func (r *ResourceOption) Restore(ctx *format.RestoreCtx) error {
	// Resolve the keyword first, then emit keyword + count in one place.
	var keyword string
	switch r.Type {
	case MaxQueriesPerHour:
		keyword = "MAX_QUERIES_PER_HOUR "
	case MaxUpdatesPerHour:
		keyword = "MAX_UPDATES_PER_HOUR "
	case MaxConnectionsPerHour:
		keyword = "MAX_CONNECTIONS_PER_HOUR "
	case MaxUserConnections:
		keyword = "MAX_USER_CONNECTIONS "
	default:
		return errors.Errorf("Unsupported ResourceOption.Type %d", r.Type)
	}
	ctx.WriteKeyWord(keyword)
	ctx.WritePlainf("%d", r.Count)
	return nil
}
// Option kinds shared by password-policy, account-lock, comment/attribute and
// resource-group clauses of CREATE/ALTER USER.
const (
	PasswordExpire = iota + 1
	PasswordExpireDefault
	PasswordExpireNever
	PasswordExpireInterval
	PasswordHistory
	PasswordHistoryDefault
	PasswordReuseInterval
	PasswordReuseDefault
	Lock
	Unlock
	FailedLoginAttempts
	PasswordLockTime
	PasswordLockTimeUnbounded
	UserCommentType
	UserAttributeType
	UserResourceGroupName
)

// PasswordOrLockOption is one password-policy or account-lock clause item.
type PasswordOrLockOption struct {
	Type  int   // one of the constants above (password/lock subset)
	Count int64 // numeric argument for the kinds that take one (intervals, attempts, history)
}
// Restore implements Node interface.
// It writes the SQL text for a single password-policy or account-lock clause.
func (p *PasswordOrLockOption) Restore(ctx *format.RestoreCtx) error {
	switch p.Type {
	case PasswordExpire:
		ctx.WriteKeyWord("PASSWORD EXPIRE")
	case PasswordExpireDefault:
		ctx.WriteKeyWord("PASSWORD EXPIRE DEFAULT")
	case PasswordExpireNever:
		ctx.WriteKeyWord("PASSWORD EXPIRE NEVER")
	case PasswordExpireInterval:
		ctx.WriteKeyWord("PASSWORD EXPIRE INTERVAL")
		ctx.WritePlainf(" %d", p.Count)
		ctx.WriteKeyWord(" DAY")
	case Lock:
		ctx.WriteKeyWord("ACCOUNT LOCK")
	case Unlock:
		ctx.WriteKeyWord("ACCOUNT UNLOCK")
	case FailedLoginAttempts:
		ctx.WriteKeyWord("FAILED_LOGIN_ATTEMPTS")
		ctx.WritePlainf(" %d", p.Count)
	case PasswordLockTime:
		ctx.WriteKeyWord("PASSWORD_LOCK_TIME")
		ctx.WritePlainf(" %d", p.Count)
	case PasswordLockTimeUnbounded:
		ctx.WriteKeyWord("PASSWORD_LOCK_TIME UNBOUNDED")
	case PasswordHistory:
		ctx.WriteKeyWord("PASSWORD HISTORY")
		ctx.WritePlainf(" %d", p.Count)
	case PasswordHistoryDefault:
		ctx.WriteKeyWord("PASSWORD HISTORY DEFAULT")
	case PasswordReuseInterval:
		ctx.WriteKeyWord("PASSWORD REUSE INTERVAL")
		ctx.WritePlainf(" %d", p.Count)
		ctx.WriteKeyWord(" DAY")
	case PasswordReuseDefault:
		ctx.WriteKeyWord("PASSWORD REUSE INTERVAL DEFAULT")
	default:
		// Comment/attribute/resource-group kinds are handled by their own node types.
		return errors.Errorf("Unsupported PasswordOrLockOption.Type %d", p.Type)
	}
	return nil
}
// CommentOrAttributeOption carries a COMMENT 'text' or ATTRIBUTE 'json'
// clause of CREATE/ALTER USER.
type CommentOrAttributeOption struct {
	Type  int    // UserCommentType or UserAttributeType
	Value string // the comment text or attribute JSON
}

// Restore implements Node interface. Unknown types are silently skipped,
// matching the original behavior.
func (c *CommentOrAttributeOption) Restore(ctx *format.RestoreCtx) error {
	switch c.Type {
	case UserCommentType:
		ctx.WriteKeyWord(" COMMENT ")
		ctx.WriteString(c.Value)
	case UserAttributeType:
		ctx.WriteKeyWord(" ATTRIBUTE ")
		ctx.WriteString(c.Value)
	}
	return nil
}
// ResourceGroupNameOption carries the RESOURCE GROUP clause of CREATE/ALTER USER.
type ResourceGroupNameOption struct {
	Value string // resource group name
}

// Restore implements Node interface.
func (c *ResourceGroupNameOption) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord(" RESOURCE GROUP ")
	ctx.WriteName(c.Value)
	return nil
}
// CreateUserStmt creates user account.
// See https://dev.mysql.com/doc/refman/8.0/en/create-user.html
type CreateUserStmt struct {
	stmtNode

	IsCreateRole             bool // true for CREATE ROLE, false for CREATE USER
	IfNotExists              bool
	Specs                    []*UserSpec              // the accounts to create, with auth options
	AuthTokenOrTLSOptions    []*AuthTokenOrTLSOption  // REQUIRE clause items
	ResourceOptions          []*ResourceOption        // WITH clause resource limits
	PasswordOrLockOptions    []*PasswordOrLockOption  // password policy / account lock clauses
	CommentOrAttributeOption *CommentOrAttributeOption
	ResourceGroupNameOption  *ResourceGroupNameOption
}
// Restore implements Node interface.
func (n *CreateUserStmt) Restore(ctx *format.RestoreCtx) error {
	if n.IsCreateRole {
		ctx.WriteKeyWord("CREATE ROLE ")
	} else {
		ctx.WriteKeyWord("CREATE USER ")
	}
	if n.IfNotExists {
		ctx.WriteKeyWord("IF NOT EXISTS ")
	}
	// Comma-separated list of user specs.
	for i, v := range n.Specs {
		if i != 0 {
			ctx.WritePlain(", ")
		}
		if err := v.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore CreateUserStmt.Specs[%d]", i)
		}
	}
	// REQUIRE items are joined with AND.
	if len(n.AuthTokenOrTLSOptions) != 0 {
		ctx.WriteKeyWord(" REQUIRE ")
	}
	for i, option := range n.AuthTokenOrTLSOptions {
		if i != 0 {
			ctx.WriteKeyWord(" AND ")
		}
		if err := option.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore CreateUserStmt.AuthTokenOrTLSOptions[%d]", i)
		}
	}
	// WITH introduces the space-separated resource limits.
	if len(n.ResourceOptions) != 0 {
		ctx.WriteKeyWord(" WITH")
	}
	for i, v := range n.ResourceOptions {
		ctx.WritePlain(" ")
		if err := v.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore CreateUserStmt.ResourceOptions[%d]", i)
		}
	}
	for i, v := range n.PasswordOrLockOptions {
		ctx.WritePlain(" ")
		if err := v.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore CreateUserStmt.PasswordOrLockOptions[%d]", i)
		}
	}
	if n.CommentOrAttributeOption != nil {
		if err := n.CommentOrAttributeOption.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore CreateUserStmt.CommentOrAttributeOption")
		}
	}
	if n.ResourceGroupNameOption != nil {
		if err := n.ResourceGroupNameOption.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore CreateUserStmt.ResourceGroupNameOption")
		}
	}
	return nil
}
// Accept implements Node Accept interface.
// Child nodes are not visited; the statement itself is the only target.
func (n *CreateUserStmt) Accept(v Visitor) (Node, bool) {
	node, skip := v.Enter(n)
	if skip {
		return v.Leave(node)
	}
	return v.Leave(node.(*CreateUserStmt))
}
// SecureText implements SensitiveStatement interface.
// The rendered text masks any passwords via UserSpec.SecurityString.
func (n *CreateUserStmt) SecureText() string {
	var b strings.Builder
	b.WriteString("create user")
	for _, user := range n.Specs {
		b.WriteByte(' ')
		b.WriteString(user.SecurityString())
	}
	return b.String()
}
// AlterUserStmt modifies user account.
// See https://dev.mysql.com/doc/refman/8.0/en/alter-user.html
type AlterUserStmt struct {
	stmtNode

	IfExists                 bool
	CurrentAuth              *AuthOption // set for ALTER USER USER() ..., i.e. the current user
	Specs                    []*UserSpec
	AuthTokenOrTLSOptions    []*AuthTokenOrTLSOption
	ResourceOptions          []*ResourceOption
	PasswordOrLockOptions    []*PasswordOrLockOption
	CommentOrAttributeOption *CommentOrAttributeOption
	ResourceGroupNameOption  *ResourceGroupNameOption
}
// Restore implements Node interface.
func (n *AlterUserStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("ALTER USER ")
	if n.IfExists {
		ctx.WriteKeyWord("IF EXISTS ")
	}
	// ALTER USER USER() ... form: alter the current session user.
	if n.CurrentAuth != nil {
		ctx.WriteKeyWord("USER")
		ctx.WritePlain("() ")
		if err := n.CurrentAuth.Restore(ctx); err != nil {
			return errors.Annotate(err, "An error occurred while restore AlterUserStmt.CurrentAuth")
		}
	}
	for i, v := range n.Specs {
		if i != 0 {
			ctx.WritePlain(", ")
		}
		if err := v.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore AlterUserStmt.Specs[%d]", i)
		}
	}
	// REQUIRE items are joined with AND.
	if len(n.AuthTokenOrTLSOptions) != 0 {
		ctx.WriteKeyWord(" REQUIRE ")
	}
	for i, option := range n.AuthTokenOrTLSOptions {
		if i != 0 {
			ctx.WriteKeyWord(" AND ")
		}
		if err := option.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore AlterUserStmt.AuthTokenOrTLSOptions[%d]", i)
		}
	}
	if len(n.ResourceOptions) != 0 {
		ctx.WriteKeyWord(" WITH")
	}
	for i, v := range n.ResourceOptions {
		ctx.WritePlain(" ")
		if err := v.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore AlterUserStmt.ResourceOptions[%d]", i)
		}
	}
	for i, v := range n.PasswordOrLockOptions {
		ctx.WritePlain(" ")
		if err := v.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore AlterUserStmt.PasswordOrLockOptions[%d]", i)
		}
	}
	if n.CommentOrAttributeOption != nil {
		if err := n.CommentOrAttributeOption.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore AlterUserStmt.CommentOrAttributeOption")
		}
	}
	if n.ResourceGroupNameOption != nil {
		if err := n.ResourceGroupNameOption.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore AlterUserStmt.ResourceGroupNameOption")
		}
	}
	return nil
}
// SecureText implements SensitiveStatement interface.
// The rendered text masks any passwords via UserSpec.SecurityString.
func (n *AlterUserStmt) SecureText() string {
	var b strings.Builder
	b.WriteString("alter user")
	for _, user := range n.Specs {
		b.WriteByte(' ')
		b.WriteString(user.SecurityString())
	}
	return b.String()
}
// Accept implements Node Accept interface.
// Child nodes are not visited; the statement itself is the only target.
func (n *AlterUserStmt) Accept(v Visitor) (Node, bool) {
	node, skip := v.Enter(n)
	if skip {
		return v.Leave(node)
	}
	return v.Leave(node.(*AlterUserStmt))
}
// AlterInstanceStmt modifies instance.
// See https://dev.mysql.com/doc/refman/8.0/en/alter-instance.html
type AlterInstanceStmt struct {
	stmtNode

	ReloadTLS         bool // ALTER INSTANCE RELOAD TLS
	NoRollbackOnError bool // ... NO ROLLBACK ON ERROR
}
// Restore implements Node interface.
func (n *AlterInstanceStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("ALTER INSTANCE")
	if n.ReloadTLS {
		ctx.WriteKeyWord(" RELOAD TLS")
	}
	if n.NoRollbackOnError {
		ctx.WriteKeyWord(" NO ROLLBACK ON ERROR")
	}
	return nil
}
// Accept implements Node Accept interface.
func (n *AlterInstanceStmt) Accept(v Visitor) (Node, bool) {
	node, skip := v.Enter(n)
	if skip {
		return v.Leave(node)
	}
	return v.Leave(node.(*AlterInstanceStmt))
}
// DropUserStmt removes one or more user accounts or roles (DROP USER / DROP ROLE).
// See http://dev.mysql.com/doc/refman/5.7/en/drop-user.html
type DropUserStmt struct {
	stmtNode

	IfExists   bool
	IsDropRole bool // true for DROP ROLE, false for DROP USER
	UserList   []*auth.UserIdentity
}
// Restore implements Node interface.
func (n *DropUserStmt) Restore(ctx *format.RestoreCtx) error {
	if n.IsDropRole {
		ctx.WriteKeyWord("DROP ROLE ")
	} else {
		ctx.WriteKeyWord("DROP USER ")
	}
	if n.IfExists {
		ctx.WriteKeyWord("IF EXISTS ")
	}
	// Comma-separated identity list.
	for i, v := range n.UserList {
		if i != 0 {
			ctx.WritePlain(", ")
		}
		if err := v.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore DropUserStmt.UserList[%d]", i)
		}
	}
	return nil
}
// Accept implements Node Accept interface.
func (n *DropUserStmt) Accept(v Visitor) (Node, bool) {
	node, skip := v.Enter(n)
	if skip {
		return v.Leave(node)
	}
	return v.Leave(node.(*DropUserStmt))
}
// CreateBindingStmt creates sql binding hint.
type CreateBindingStmt struct {
	stmtNode

	GlobalScope bool     // GLOBAL vs SESSION scope
	OriginNode  StmtNode // the statement being bound; nil when binding by plan digest
	HintedNode  StmtNode // the hinted variant of OriginNode
	PlanDigest  string   // used only when OriginNode is nil
}

// Restore implements Node interface.
func (n *CreateBindingStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("CREATE ")
	if n.GlobalScope {
		ctx.WriteKeyWord("GLOBAL ")
	} else {
		ctx.WriteKeyWord("SESSION ")
	}
	if n.OriginNode == nil {
		// Binding created from a historical plan identified by its digest.
		ctx.WriteKeyWord("BINDING FROM HISTORY USING PLAN DIGEST ")
		ctx.WriteString(n.PlanDigest)
	} else {
		ctx.WriteKeyWord("BINDING FOR ")
		if err := n.OriginNode.Restore(ctx); err != nil {
			return errors.Trace(err)
		}
		ctx.WriteKeyWord(" USING ")
		if err := n.HintedNode.Restore(ctx); err != nil {
			return errors.Trace(err)
		}
	}
	return nil
}
// Accept implements Node Accept interface.
// Children are visited only for the FOR ... USING ... form; the plan-digest
// form (OriginNode == nil) has no child statements.
func (n *CreateBindingStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*CreateBindingStmt)
	if n.OriginNode != nil {
		origNode, ok := n.OriginNode.Accept(v)
		if !ok {
			return n, false
		}
		n.OriginNode = origNode.(StmtNode)
		hintedNode, ok := n.HintedNode.Accept(v)
		if !ok {
			return n, false
		}
		n.HintedNode = hintedNode.(StmtNode)
	}
	return v.Leave(n)
}
// DropBindingStmt deletes sql binding hint.
type DropBindingStmt struct {
	stmtNode

	GlobalScope bool     // GLOBAL vs SESSION scope
	OriginNode  StmtNode // the bound statement; nil when dropping by SQL digest
	HintedNode  StmtNode // optional hinted variant
	SQLDigest   string   // used only when OriginNode is nil
}

// Restore implements Node interface.
func (n *DropBindingStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("DROP ")
	if n.GlobalScope {
		ctx.WriteKeyWord("GLOBAL ")
	} else {
		ctx.WriteKeyWord("SESSION ")
	}
	ctx.WriteKeyWord("BINDING FOR ")
	if n.OriginNode == nil {
		ctx.WriteKeyWord("SQL DIGEST ")
		ctx.WriteString(n.SQLDigest)
	} else {
		if err := n.OriginNode.Restore(ctx); err != nil {
			return errors.Trace(err)
		}
		if n.HintedNode != nil {
			ctx.WriteKeyWord(" USING ")
			if err := n.HintedNode.Restore(ctx); err != nil {
				return errors.Trace(err)
			}
		}
	}
	return nil
}
// Accept implements Node Accept interface.
func (n *DropBindingStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*DropBindingStmt)
	if n.OriginNode != nil {
		// OriginNode is nil means we build drop binding by sql digest
		origNode, ok := n.OriginNode.Accept(v)
		if !ok {
			return n, false
		}
		n.OriginNode = origNode.(StmtNode)
		if n.HintedNode != nil {
			hintedNode, ok := n.HintedNode.Accept(v)
			if !ok {
				return n, false
			}
			n.HintedNode = hintedNode.(StmtNode)
		}
	}
	return v.Leave(n)
}
// BindingStatusType defines the status type for the binding
type BindingStatusType int8

// Binding status types.
const (
	BindingStatusTypeEnabled BindingStatusType = iota
	BindingStatusTypeDisabled
)

// SetBindingStmt sets sql binding status.
type SetBindingStmt struct {
	stmtNode

	BindingStatusType BindingStatusType // ENABLED or DISABLED
	OriginNode        StmtNode          // the bound statement; nil when addressed by SQL digest
	HintedNode        StmtNode          // optional hinted variant
	SQLDigest         string            // used only when OriginNode is nil
}
// Restore implements Node interface.
func (n *SetBindingStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("SET ")
	ctx.WriteKeyWord("BINDING ")
	switch n.BindingStatusType {
	case BindingStatusTypeEnabled:
		ctx.WriteKeyWord("ENABLED ")
	case BindingStatusTypeDisabled:
		ctx.WriteKeyWord("DISABLED ")
	}
	ctx.WriteKeyWord("FOR ")
	if n.OriginNode == nil {
		// Addressed by digest rather than by statement text.
		ctx.WriteKeyWord("SQL DIGEST ")
		ctx.WriteString(n.SQLDigest)
	} else {
		if err := n.OriginNode.Restore(ctx); err != nil {
			return errors.Trace(err)
		}
		if n.HintedNode != nil {
			ctx.WriteKeyWord(" USING ")
			if err := n.HintedNode.Restore(ctx); err != nil {
				return errors.Trace(err)
			}
		}
	}
	return nil
}
// Accept implements Node Accept interface.
func (n *SetBindingStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*SetBindingStmt)
	if n.OriginNode != nil {
		// OriginNode is nil means we set binding stmt by sql digest
		origNode, ok := n.OriginNode.Accept(v)
		if !ok {
			return n, false
		}
		n.OriginNode = origNode.(StmtNode)
		if n.HintedNode != nil {
			hintedNode, ok := n.HintedNode.Accept(v)
			if !ok {
				return n, false
			}
			n.HintedNode = hintedNode.(StmtNode)
		}
	}
	return v.Leave(n)
}
// Extended statistics types.
const (
	StatsTypeCardinality uint8 = iota
	StatsTypeDependency
	StatsTypeCorrelation
)

// StatisticsSpec is the specification for ADD /DROP STATISTICS.
type StatisticsSpec struct {
	StatsName string
	StatsType uint8 // one of the StatsType* constants
	Columns   []*ColumnName
}
// CreateStatisticsStmt is a statement to create extended statistics.
// Examples:
//
//	CREATE STATISTICS stats1 (cardinality) ON t(a, b, c);
//	CREATE STATISTICS stats2 (dependency) ON t(a, b);
//	CREATE STATISTICS stats3 (correlation) ON t(a, b);
type CreateStatisticsStmt struct {
	stmtNode

	IfNotExists bool
	StatsName   string
	StatsType   uint8 // one of the StatsType* constants
	Table       *TableName
	Columns     []*ColumnName
}
// Restore implements Node interface.
func (n *CreateStatisticsStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("CREATE STATISTICS ")
	if n.IfNotExists {
		ctx.WriteKeyWord("IF NOT EXISTS ")
	}
	ctx.WriteName(n.StatsName)
	switch n.StatsType {
	case StatsTypeCardinality:
		ctx.WriteKeyWord(" (cardinality) ")
	case StatsTypeDependency:
		ctx.WriteKeyWord(" (dependency) ")
	case StatsTypeCorrelation:
		ctx.WriteKeyWord(" (correlation) ")
	}
	ctx.WriteKeyWord("ON ")
	if err := n.Table.Restore(ctx); err != nil {
		return errors.Annotate(err, "An error occurred while restore CreateStatisticsStmt.Table")
	}
	// Parenthesized, comma-separated column list.
	ctx.WritePlain("(")
	for i, col := range n.Columns {
		if i != 0 {
			ctx.WritePlain(", ")
		}
		if err := col.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore CreateStatisticsStmt.Columns: [%v]", i)
		}
	}
	ctx.WritePlain(")")
	return nil
}
// Accept implements Node Accept interface.
// Visits the table and every column child, rewriting them in place.
func (n *CreateStatisticsStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*CreateStatisticsStmt)
	node, ok := n.Table.Accept(v)
	if !ok {
		return n, false
	}
	n.Table = node.(*TableName)
	for i, col := range n.Columns {
		node, ok = col.Accept(v)
		if !ok {
			return n, false
		}
		n.Columns[i] = node.(*ColumnName)
	}
	return v.Leave(n)
}
// DropStatisticsStmt is a statement to drop extended statistics.
// Examples:
//
//	DROP STATISTICS stats1;
type DropStatisticsStmt struct {
	stmtNode

	StatsName string
}

// Restore implements Node interface.
func (n *DropStatisticsStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("DROP STATISTICS ")
	ctx.WriteName(n.StatsName)
	return nil
}
// Accept implements Node Accept interface.
func (n *DropStatisticsStmt) Accept(v Visitor) (Node, bool) {
	node, skip := v.Enter(n)
	if skip {
		return v.Leave(node)
	}
	return v.Leave(node.(*DropStatisticsStmt))
}
// DoStmt is the struct for DO statement.
type DoStmt struct {
	stmtNode

	Exprs []ExprNode // expressions to evaluate and discard
}

// Restore implements Node interface.
func (n *DoStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("DO ")
	for i, v := range n.Exprs {
		if i != 0 {
			ctx.WritePlain(", ")
		}
		if err := v.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore DoStmt.Exprs[%d]", i)
		}
	}
	return nil
}
// Accept implements Node Accept interface.
// Visits every expression child, rewriting it in place.
func (n *DoStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*DoStmt)
	for i, val := range n.Exprs {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Exprs[i] = node.(ExprNode)
	}
	return v.Leave(n)
}
// AdminStmtType is the type for admin statement.
type AdminStmtType int

// Admin statement types.
// NOTE: the order is load-bearing for any serialized values; append new
// entries at the end rather than inserting.
const (
	AdminShowDDL = iota + 1
	AdminCheckTable
	AdminShowDDLJobs
	AdminCancelDDLJobs
	AdminPauseDDLJobs
	AdminResumeDDLJobs
	AdminCheckIndex
	AdminRecoverIndex
	AdminCleanupIndex
	AdminCheckIndexRange
	AdminShowDDLJobQueries
	AdminShowDDLJobQueriesWithRange
	AdminChecksumTable
	AdminShowSlow
	AdminShowNextRowID
	AdminReloadExprPushdownBlacklist
	AdminReloadOptRuleBlacklist
	AdminPluginDisable
	AdminPluginEnable
	AdminFlushBindings
	AdminCaptureBindings
	AdminEvolveBindings
	AdminReloadBindings
	AdminShowTelemetry
	AdminResetTelemetryID
	AdminReloadStatistics
	AdminFlushPlanCache
)
// HandleRange represents a range where handle value >= Begin and < End.
type HandleRange struct {
	Begin int64
	End   int64
}

// StatementScope is the scope of a flush-like admin statement.
type StatementScope int

// Statement scopes.
const (
	StatementScopeNone StatementScope = iota
	StatementScopeSession
	StatementScopeInstance
	StatementScopeGlobal
)
// ShowSlowType defines the type for SlowSlow statement.
type ShowSlowType int

const (
	// ShowSlowTop is a ShowSlowType constant.
	ShowSlowTop ShowSlowType = iota
	// ShowSlowRecent is a ShowSlowType constant.
	ShowSlowRecent
)

// ShowSlowKind defines the kind for SlowSlow statement when the type is ShowSlowTop.
type ShowSlowKind int

const (
	// ShowSlowKindDefault is a ShowSlowKind constant.
	ShowSlowKindDefault ShowSlowKind = iota
	// ShowSlowKindInternal is a ShowSlowKind constant.
	ShowSlowKindInternal
	// ShowSlowKindAll is a ShowSlowKind constant.
	ShowSlowKindAll
)
// ShowSlow is used for the following command:
//
//	admin show slow top [ internal | all] N
//	admin show slow recent N
type ShowSlow struct {
	Tp    ShowSlowType
	Count uint64       // the N in the commands above
	Kind  ShowSlowKind // only meaningful when Tp == ShowSlowTop
}
// Restore implements Node interface.
func (n *ShowSlow) Restore(ctx *format.RestoreCtx) error {
	switch n.Tp {
	case ShowSlowRecent:
		ctx.WriteKeyWord("RECENT ")
	case ShowSlowTop:
		ctx.WriteKeyWord("TOP ")
		switch n.Kind {
		case ShowSlowKindDefault:
			// do nothing
		case ShowSlowKindInternal:
			ctx.WriteKeyWord("INTERNAL ")
		case ShowSlowKindAll:
			ctx.WriteKeyWord("ALL ")
		default:
			return errors.New("Unsupported kind of ShowSlowTop")
		}
	default:
		return errors.New("Unsupported type of ShowSlow")
	}
	ctx.WritePlainf("%d", n.Count)
	return nil
}
// LimitSimple is the struct for Admin statement limit option.
type LimitSimple struct {
	Count  uint64
	Offset uint64
}

// AdminStmt is the struct for Admin statement.
// Which fields are meaningful depends on Tp; unrelated fields are left zero.
type AdminStmt struct {
	stmtNode

	Tp        AdminStmtType
	Index     string       // index name for the CHECK/RECOVER/CLEANUP INDEX variants
	Tables    []*TableName // target tables for table-oriented variants
	JobIDs    []int64      // DDL job ids for the job-oriented variants
	JobNumber int64        // optional row count for SHOW DDL JOBS

	HandleRanges   []HandleRange // ranges for CHECK INDEX ... RANGE
	ShowSlow       *ShowSlow     // payload for SHOW SLOW
	Plugins        []string      // plugin names for PLUGINS ENABLE/DISABLE
	Where          ExprNode      // optional WHERE for SHOW DDL JOBS
	StatementScope StatementScope
	LimitSimple    LimitSimple // LIMIT for SHOW DDL JOB QUERIES
}
// Restore implements Node interface.
// Each AdminStmtType maps to one fixed SQL spelling; the helpers below render
// the shared comma-separated table and job-id lists.
func (n *AdminStmt) Restore(ctx *format.RestoreCtx) error {
	// restoreTables writes the comma-separated target table list.
	restoreTables := func() error {
		for i, v := range n.Tables {
			if i != 0 {
				ctx.WritePlain(", ")
			}
			if err := v.Restore(ctx); err != nil {
				return errors.Annotatef(err, "An error occurred while restore AdminStmt.Tables[%d]", i)
			}
		}
		return nil
	}
	// restoreJobIDs writes the comma-separated DDL job id list.
	restoreJobIDs := func() {
		for i, v := range n.JobIDs {
			if i != 0 {
				ctx.WritePlain(", ")
			}
			ctx.WritePlainf("%d", v)
		}
	}

	ctx.WriteKeyWord("ADMIN ")
	switch n.Tp {
	case AdminShowDDL:
		ctx.WriteKeyWord("SHOW DDL")
	case AdminShowDDLJobs:
		ctx.WriteKeyWord("SHOW DDL JOBS")
		if n.JobNumber != 0 {
			ctx.WritePlainf(" %d", n.JobNumber)
		}
		if n.Where != nil {
			ctx.WriteKeyWord(" WHERE ")
			if err := n.Where.Restore(ctx); err != nil {
				return errors.Annotate(err, "An error occurred while restore ShowStmt.Where")
			}
		}
	case AdminShowNextRowID:
		ctx.WriteKeyWord("SHOW ")
		if err := restoreTables(); err != nil {
			return err
		}
		ctx.WriteKeyWord(" NEXT_ROW_ID")
	case AdminCheckTable:
		ctx.WriteKeyWord("CHECK TABLE ")
		if err := restoreTables(); err != nil {
			return err
		}
	case AdminCheckIndex:
		ctx.WriteKeyWord("CHECK INDEX ")
		if err := restoreTables(); err != nil {
			return err
		}
		ctx.WritePlainf(" %s", n.Index)
	case AdminRecoverIndex:
		ctx.WriteKeyWord("RECOVER INDEX ")
		if err := restoreTables(); err != nil {
			return err
		}
		ctx.WritePlainf(" %s", n.Index)
	case AdminCleanupIndex:
		ctx.WriteKeyWord("CLEANUP INDEX ")
		if err := restoreTables(); err != nil {
			return err
		}
		ctx.WritePlainf(" %s", n.Index)
	case AdminCheckIndexRange:
		ctx.WriteKeyWord("CHECK INDEX ")
		if err := restoreTables(); err != nil {
			return err
		}
		ctx.WritePlainf(" %s", n.Index)
		// Optional (begin,end) handle ranges.
		if n.HandleRanges != nil {
			ctx.WritePlain(" ")
			for i, v := range n.HandleRanges {
				if i != 0 {
					ctx.WritePlain(", ")
				}
				ctx.WritePlainf("(%d,%d)", v.Begin, v.End)
			}
		}
	case AdminChecksumTable:
		ctx.WriteKeyWord("CHECKSUM TABLE ")
		if err := restoreTables(); err != nil {
			return err
		}
	case AdminCancelDDLJobs:
		ctx.WriteKeyWord("CANCEL DDL JOBS ")
		restoreJobIDs()
	case AdminPauseDDLJobs:
		ctx.WriteKeyWord("PAUSE DDL JOBS ")
		restoreJobIDs()
	case AdminResumeDDLJobs:
		ctx.WriteKeyWord("RESUME DDL JOBS ")
		restoreJobIDs()
	case AdminShowDDLJobQueries:
		ctx.WriteKeyWord("SHOW DDL JOB QUERIES ")
		restoreJobIDs()
	case AdminShowDDLJobQueriesWithRange:
		ctx.WriteKeyWord("SHOW DDL JOB QUERIES LIMIT ")
		ctx.WritePlainf("%d, %d", n.LimitSimple.Offset, n.LimitSimple.Count)
	case AdminShowSlow:
		ctx.WriteKeyWord("SHOW SLOW ")
		if err := n.ShowSlow.Restore(ctx); err != nil {
			return errors.Annotate(err, "An error occurred while restore AdminStmt.ShowSlow")
		}
	case AdminReloadExprPushdownBlacklist:
		ctx.WriteKeyWord("RELOAD EXPR_PUSHDOWN_BLACKLIST")
	case AdminReloadOptRuleBlacklist:
		ctx.WriteKeyWord("RELOAD OPT_RULE_BLACKLIST")
	case AdminPluginEnable:
		ctx.WriteKeyWord("PLUGINS ENABLE")
		for i, v := range n.Plugins {
			if i == 0 {
				ctx.WritePlain(" ")
			} else {
				ctx.WritePlain(", ")
			}
			ctx.WritePlain(v)
		}
	case AdminPluginDisable:
		ctx.WriteKeyWord("PLUGINS DISABLE")
		for i, v := range n.Plugins {
			if i == 0 {
				ctx.WritePlain(" ")
			} else {
				ctx.WritePlain(", ")
			}
			ctx.WritePlain(v)
		}
	case AdminFlushBindings:
		ctx.WriteKeyWord("FLUSH BINDINGS")
	case AdminCaptureBindings:
		ctx.WriteKeyWord("CAPTURE BINDINGS")
	case AdminEvolveBindings:
		ctx.WriteKeyWord("EVOLVE BINDINGS")
	case AdminReloadBindings:
		ctx.WriteKeyWord("RELOAD BINDINGS")
	case AdminShowTelemetry:
		ctx.WriteKeyWord("SHOW TELEMETRY")
	case AdminResetTelemetryID:
		ctx.WriteKeyWord("RESET TELEMETRY_ID")
	case AdminReloadStatistics:
		ctx.WriteKeyWord("RELOAD STATS_EXTENDED")
	case AdminFlushPlanCache:
		// NOTE(review): StatementScopeNone falls through and writes nothing
		// after "ADMIN " — presumably the parser never produces that
		// combination; confirm before relying on it.
		if n.StatementScope == StatementScopeSession {
			ctx.WriteKeyWord("FLUSH SESSION PLAN_CACHE")
		} else if n.StatementScope == StatementScopeInstance {
			ctx.WriteKeyWord("FLUSH INSTANCE PLAN_CACHE")
		} else if n.StatementScope == StatementScopeGlobal {
			ctx.WriteKeyWord("FLUSH GLOBAL PLAN_CACHE")
		}
	default:
		return errors.New("Unsupported AdminStmt type")
	}
	return nil
}
// Accept implements Node Accept interface.
// Visits table children and the optional WHERE expression.
func (n *AdminStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*AdminStmt)
	for i, val := range n.Tables {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Tables[i] = node.(*TableName)
	}
	if n.Where != nil {
		node, ok := n.Where.Accept(v)
		if !ok {
			return n, false
		}
		n.Where = node.(ExprNode)
	}
	return v.Leave(n)
}
// RoleOrPriv is a temporary structure to be further processed into auth.RoleIdentity or PrivElem
type RoleOrPriv struct {
	Symbols string      // hold undecided symbols
	Node    interface{} // hold auth.RoleIdentity or PrivElem that can be sure when parsing
}

// ToRole converts the value to a role identity. A bare symbol is treated as a
// role name with the wildcard host "%".
func (n *RoleOrPriv) ToRole() (*auth.RoleIdentity, error) {
	if n.Node != nil {
		if r, ok := n.Node.(*auth.RoleIdentity); ok {
			return r, nil
		}
		return nil, errors.Errorf("can't convert to RoleIdentity, type %T", n.Node)
	}
	return &auth.RoleIdentity{Username: n.Symbols, Hostname: "%"}, nil
}

// ToPriv converts the value to a privilege element. A bare symbol becomes a
// dynamic (extended) privilege named by the symbol.
func (n *RoleOrPriv) ToPriv() (*PrivElem, error) {
	if n.Node != nil {
		if p, ok := n.Node.(*PrivElem); ok {
			return p, nil
		}
		return nil, errors.Errorf("can't convert to PrivElem, type %T", n.Node)
	}
	if len(n.Symbols) == 0 {
		return nil, errors.New("symbols should not be length 0")
	}
	return &PrivElem{Priv: mysql.ExtendedPriv, Name: n.Symbols}, nil
}
// PrivElem is the privilege type and optional column list.
type PrivElem struct {
	node

	Priv mysql.PrivilegeType
	Cols []*ColumnName // optional column-level privilege columns
	Name string        // dynamic privilege name, used when Priv == mysql.ExtendedPriv
}

// Restore implements Node interface.
func (n *PrivElem) Restore(ctx *format.RestoreCtx) error {
	if n.Priv == mysql.AllPriv {
		ctx.WriteKeyWord("ALL")
	} else if n.Priv == mysql.ExtendedPriv {
		// Dynamic privileges are spelled by name.
		ctx.WriteKeyWord(n.Name)
	} else {
		str, ok := mysql.Priv2Str[n.Priv]
		if !ok {
			return errors.New("Undefined privilege type")
		}
		ctx.WriteKeyWord(str)
	}
	// Optional parenthesized column list, e.g. SELECT (a,b).
	if n.Cols != nil {
		ctx.WritePlain(" (")
		for i, v := range n.Cols {
			if i != 0 {
				ctx.WritePlain(",")
			}
			if err := v.Restore(ctx); err != nil {
				return errors.Annotatef(err, "An error occurred while restore PrivElem.Cols[%d]", i)
			}
		}
		ctx.WritePlain(")")
	}
	return nil
}
// Accept implements Node Accept interface.
// Visits every column child, rewriting it in place.
func (n *PrivElem) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*PrivElem)
	for i, val := range n.Cols {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Cols[i] = node.(*ColumnName)
	}
	return v.Leave(n)
}
// ObjectTypeType is the type for object type.
type ObjectTypeType int

const (
	// ObjectTypeNone is for empty object type.
	ObjectTypeNone ObjectTypeType = iota + 1
	// ObjectTypeTable means the following object is a table.
	ObjectTypeTable
	// ObjectTypeFunction means the following object is a stored function.
	ObjectTypeFunction
	// ObjectTypeProcedure means the following object is a stored procedure.
	ObjectTypeProcedure
)

// Restore implements Node interface.
func (n ObjectTypeType) Restore(ctx *format.RestoreCtx) error {
	switch n {
	case ObjectTypeNone:
		// do nothing
	case ObjectTypeTable:
		ctx.WriteKeyWord("TABLE")
	case ObjectTypeFunction:
		ctx.WriteKeyWord("FUNCTION")
	case ObjectTypeProcedure:
		ctx.WriteKeyWord("PROCEDURE")
	default:
		return errors.New("Unsupported object type")
	}
	return nil
}
// GrantLevelType is the type for grant level.
type GrantLevelType int

const (
	// GrantLevelNone is the dummy const for default value.
	GrantLevelNone GrantLevelType = iota + 1
	// GrantLevelGlobal means the privileges are administrative or apply to all databases on a given server.
	GrantLevelGlobal
	// GrantLevelDB means the privileges apply to all objects in a given database.
	GrantLevelDB
	// GrantLevelTable means the privileges apply to all columns in a given table.
	GrantLevelTable
)

// GrantLevel is used for store the privilege scope.
type GrantLevel struct {
	Level     GrantLevelType
	DBName    string
	TableName string
}

// Restore implements Node interface.
// Renders `*`/`db.*`/`*.*`/`[db.]table` depending on the level.
func (n *GrantLevel) Restore(ctx *format.RestoreCtx) error {
	switch n.Level {
	case GrantLevelDB:
		if n.DBName == "" {
			// Current database: bare `*`.
			ctx.WritePlain("*")
		} else {
			ctx.WriteName(n.DBName)
			ctx.WritePlain(".*")
		}
	case GrantLevelGlobal:
		ctx.WritePlain("*.*")
	case GrantLevelTable:
		if n.DBName != "" {
			ctx.WriteName(n.DBName)
			ctx.WritePlain(".")
		}
		ctx.WriteName(n.TableName)
	}
	return nil
}
// RevokeStmt is the struct for REVOKE statement.
type RevokeStmt struct {
	stmtNode

	Privs      []*PrivElem
	ObjectType ObjectTypeType
	Level      *GrantLevel
	Users      []*UserSpec
}

// Restore implements Node interface.
func (n *RevokeStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("REVOKE ")
	for i, v := range n.Privs {
		if i != 0 {
			ctx.WritePlain(", ")
		}
		if err := v.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore RevokeStmt.Privs[%d]", i)
		}
	}
	ctx.WriteKeyWord(" ON ")
	if n.ObjectType != ObjectTypeNone {
		if err := n.ObjectType.Restore(ctx); err != nil {
			return errors.Annotate(err, "An error occurred while restore RevokeStmt.ObjectType")
		}
		ctx.WritePlain(" ")
	}
	if err := n.Level.Restore(ctx); err != nil {
		return errors.Annotate(err, "An error occurred while restore RevokeStmt.Level")
	}
	ctx.WriteKeyWord(" FROM ")
	for i, v := range n.Users {
		if i != 0 {
			ctx.WritePlain(", ")
		}
		if err := v.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore RevokeStmt.Users[%d]", i)
		}
	}
	return nil
}
// Accept implements Node Accept interface.
// Visits privilege-element children, rewriting them in place.
func (n *RevokeStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*RevokeStmt)
	for i, val := range n.Privs {
		node, ok := val.Accept(v)
		if !ok {
			return n, false
		}
		n.Privs[i] = node.(*PrivElem)
	}
	return v.Leave(n)
}
// RevokeRoleStmt is the struct for revoking roles from users
// (REVOKE role [, role] ... FROM user [, user] ...).
type RevokeRoleStmt struct {
	stmtNode

	Roles []*auth.RoleIdentity
	Users []*auth.UserIdentity
}

// Restore implements Node interface.
func (n *RevokeRoleStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("REVOKE ")
	for i, role := range n.Roles {
		if i != 0 {
			ctx.WritePlain(", ")
		}
		if err := role.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore RevokeRoleStmt.Roles[%d]", i)
		}
	}
	ctx.WriteKeyWord(" FROM ")
	for i, v := range n.Users {
		if i != 0 {
			ctx.WritePlain(", ")
		}
		if err := v.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore RevokeRoleStmt.Users[%d]", i)
		}
	}
	return nil
}

// Accept implements Node Accept interface.
func (n *RevokeRoleStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*RevokeRoleStmt)
	return v.Leave(n)
}
// GrantStmt is the struct for GRANT statement.
type GrantStmt struct {
	stmtNode

	Privs                 []*PrivElem
	ObjectType            ObjectTypeType
	Level                 *GrantLevel
	Users                 []*UserSpec
	AuthTokenOrTLSOptions []*AuthTokenOrTLSOption
	WithGrant             bool // WITH GRANT OPTION
}

// Restore implements Node interface.
func (n *GrantStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("GRANT ")
	for i, v := range n.Privs {
		// NOTE(review): a zero Priv gets a plain space instead of ", " —
		// presumably to splice role names into GRANT role TO user; confirm
		// against the grammar before changing.
		if i != 0 && v.Priv != 0 {
			ctx.WritePlain(", ")
		} else if v.Priv == 0 {
			ctx.WritePlain(" ")
		}
		if err := v.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore GrantStmt.Privs[%d]", i)
		}
	}
	ctx.WriteKeyWord(" ON ")
	if n.ObjectType != ObjectTypeNone {
		if err := n.ObjectType.Restore(ctx); err != nil {
			return errors.Annotate(err, "An error occurred while restore GrantStmt.ObjectType")
		}
		ctx.WritePlain(" ")
	}
	if err := n.Level.Restore(ctx); err != nil {
		return errors.Annotate(err, "An error occurred while restore GrantStmt.Level")
	}
	ctx.WriteKeyWord(" TO ")
	for i, v := range n.Users {
		if i != 0 {
			ctx.WritePlain(", ")
		}
		if err := v.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore GrantStmt.Users[%d]", i)
		}
	}
	if n.AuthTokenOrTLSOptions != nil {
		if len(n.AuthTokenOrTLSOptions) != 0 {
			ctx.WriteKeyWord(" REQUIRE ")
		}
		for i, option := range n.AuthTokenOrTLSOptions {
			if i != 0 {
				ctx.WriteKeyWord(" AND ")
			}
			if err := option.Restore(ctx); err != nil {
				return errors.Annotatef(err, "An error occurred while restore GrantStmt.AuthTokenOrTLSOptions[%d]", i)
			}
		}
	}
	if n.WithGrant {
		ctx.WriteKeyWord(" WITH GRANT OPTION")
	}
	return nil
}
// SecureText implements SensitiveStatement interface. Everything from the
// first "identified" keyword onwards is stripped so passwords never reach
// logs.
func (n *GrantStmt) SecureText() string {
	text := n.text
	if idx := strings.Index(strings.ToLower(text), "identified"); idx > 0 {
		text = text[:idx]
	}
	return text
}
// Accept implements the Node Accept interface; privilege elements are visited
// as children and replacements are stored back in place.
func (n *GrantStmt) Accept(v Visitor) (Node, bool) {
	node, skip := v.Enter(n)
	if skip {
		return v.Leave(node)
	}
	n = node.(*GrantStmt)
	for i := range n.Privs {
		visited, ok := n.Privs[i].Accept(v)
		if !ok {
			return n, false
		}
		n.Privs[i] = visited.(*PrivElem)
	}
	return v.Leave(n)
}
// GrantProxyStmt is the struct for GRANT PROXY statement.
type GrantProxyStmt struct {
	stmtNode
	LocalUser *auth.UserIdentity // the user being proxied
	ExternalUsers []*auth.UserIdentity // users granted the right to proxy as LocalUser
	WithGrant bool // true when WITH GRANT OPTION is present
}
// Accept implements Node Accept interface. The statement has no child nodes.
func (n *GrantProxyStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*GrantProxyStmt)
	return v.Leave(n)
}
// Restore implements Node interface.
// Output shape: GRANT PROXY ON <local user> TO <user>[, <user>]...
// [WITH GRANT OPTION].
func (n *GrantProxyStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("GRANT PROXY ON ")
	if err := n.LocalUser.Restore(ctx); err != nil {
		// Annotate, not Annotatef: the message carries no format verbs
		// (matches the other no-index annotations in this file and keeps
		// `go vet`'s printf check quiet).
		return errors.Annotate(err, "An error occurred while restore GrantProxyStmt.LocalUser")
	}
	ctx.WriteKeyWord(" TO ")
	for i, v := range n.ExternalUsers {
		if i != 0 {
			ctx.WritePlain(", ")
		}
		if err := v.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore GrantProxyStmt.ExternalUsers[%d]", i)
		}
	}
	if n.WithGrant {
		ctx.WriteKeyWord(" WITH GRANT OPTION")
	}
	return nil
}
// GrantRoleStmt is the struct for GRANT TO statement.
type GrantRoleStmt struct {
	stmtNode
	Roles []*auth.RoleIdentity // roles being granted
	Users []*auth.UserIdentity // grantees
}
// Accept implements Node Accept interface. The statement has no child nodes.
func (n *GrantRoleStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*GrantRoleStmt)
	return v.Leave(n)
}
// Restore implements Node interface.
// Output shape: GRANT <role>[, <role>]... TO <user>[, <user>]...
func (n *GrantRoleStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("GRANT ")
	// Ranging over an empty slice is a no-op, so no explicit length guard is
	// needed around the loop.
	for i, role := range n.Roles {
		if i != 0 {
			ctx.WritePlain(", ")
		}
		if err := role.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while restore GrantRoleStmt.Roles[%d]", i)
		}
	}
	ctx.WriteKeyWord(" TO ")
	for i, v := range n.Users {
		if i != 0 {
			ctx.WritePlain(", ")
		}
		if err := v.Restore(ctx); err != nil {
			// Fixed: this was annotated as "GrantStmt.Users", misattributing
			// the failing statement type in error chains.
			return errors.Annotatef(err, "An error occurred while restore GrantRoleStmt.Users[%d]", i)
		}
	}
	return nil
}
// SecureText implements SensitiveStatement interface. Text from the first
// "identified" keyword onwards is dropped so password material is not logged.
func (n *GrantRoleStmt) SecureText() string {
	text := n.text
	if idx := strings.Index(strings.ToLower(text), "identified"); idx > 0 {
		text = text[:idx]
	}
	return text
}
// ShutdownStmt is a statement to stop the TiDB server.
// See https://dev.mysql.com/doc/refman/5.7/en/shutdown.html
type ShutdownStmt struct {
	stmtNode
}
// Restore implements Node interface. The statement is the bare keyword.
func (n *ShutdownStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("SHUTDOWN")
	return nil
}
// Accept implements Node Accept interface. The statement has no child nodes.
func (n *ShutdownStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*ShutdownStmt)
	return v.Leave(n)
}
// RestartStmt is a statement to restart the TiDB server.
// See https://dev.mysql.com/doc/refman/8.0/en/restart.html
type RestartStmt struct {
	stmtNode
}
// Restore implements Node interface. The statement is the bare keyword.
func (n *RestartStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("RESTART")
	return nil
}
// Accept implements Node Accept interface. The statement has no child nodes.
func (n *RestartStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*RestartStmt)
	return v.Leave(n)
}
// HelpStmt is a statement for server side help
// See https://dev.mysql.com/doc/refman/8.0/en/help.html
type HelpStmt struct {
	stmtNode
	Topic string // the help topic to look up
}
// Restore implements Node interface. The topic is written as a quoted string.
func (n *HelpStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("HELP ")
	ctx.WriteString(n.Topic)
	return nil
}
// Accept implements Node Accept interface. The statement has no child nodes.
func (n *HelpStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*HelpStmt)
	return v.Leave(n)
}
// RenameUserStmt is a statement to rename a user.
// See http://dev.mysql.com/doc/refman/5.7/en/rename-user.html
type RenameUserStmt struct {
	stmtNode
	UserToUsers []*UserToUser // one old-name/new-name pair per renamed user
}

// Restore implements Node interface.
// Output shape: RENAME USER <old> TO <new>[, <old> TO <new>]...
func (n *RenameUserStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("RENAME USER ")
	for index, user2user := range n.UserToUsers {
		if index != 0 {
			ctx.WritePlain(", ")
		}
		if err := user2user.Restore(ctx); err != nil {
			// Include the failing pair's index, matching every other
			// slice-restore annotation in this file (was a plain Annotate
			// that hid which pair failed).
			return errors.Annotatef(err, "An error occurred while restore RenameUserStmt.UserToUsers[%d]", index)
		}
	}
	return nil
}
// Accept implements the Node Accept interface; each old/new user pair is
// visited as a child and replacements are stored back in place.
func (n *RenameUserStmt) Accept(v Visitor) (Node, bool) {
	node, skip := v.Enter(n)
	if skip {
		return v.Leave(node)
	}
	n = node.(*RenameUserStmt)
	for i := range n.UserToUsers {
		visited, ok := n.UserToUsers[i].Accept(v)
		if !ok {
			return n, false
		}
		n.UserToUsers[i] = visited.(*UserToUser)
	}
	return v.Leave(n)
}
// UserToUser represents renaming old user to new user used in RenameUserStmt.
type UserToUser struct {
	node
	OldUser *auth.UserIdentity // name before the rename
	NewUser *auth.UserIdentity // name after the rename
}
// Restore implements Node interface. Output shape: <old user> TO <new user>.
func (n *UserToUser) Restore(ctx *format.RestoreCtx) error {
	if err := n.OldUser.Restore(ctx); err != nil {
		return errors.Annotate(err, "An error occurred while restore UserToUser.OldUser")
	}
	ctx.WriteKeyWord(" TO ")
	if err := n.NewUser.Restore(ctx); err != nil {
		return errors.Annotate(err, "An error occurred while restore UserToUser.NewUser")
	}
	return nil
}
// Accept implements Node Accept interface. The pair has no child nodes.
func (n *UserToUser) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*UserToUser)
	return v.Leave(n)
}
// BRIEKind identifies which BACKUP/RESTORE/log-stream statement a BRIEStmt is.
type BRIEKind uint8
// BRIEOptionType identifies a single option attached to a BRIE statement.
type BRIEOptionType uint16
const (
	BRIEKindBackup BRIEKind = iota
	BRIEKindCancelJob
	BRIEKindStreamStart
	BRIEKindStreamMetaData
	BRIEKindStreamStatus
	BRIEKindStreamPause
	BRIEKindStreamResume
	BRIEKindStreamStop
	BRIEKindStreamPurge
	BRIEKindRestore
	BRIEKindRestorePIT
	BRIEKindShowJob
	BRIEKindShowQuery
	BRIEKindShowBackupMeta
	// common BRIE options
	// NOTE(review): iota keeps counting across the BRIEKind entries above, so
	// these numeric values depend on how many kinds are declared — presumably
	// only identity matters, but confirm nothing serializes the raw numbers
	// before reordering anything in this const block.
	BRIEOptionRateLimit BRIEOptionType = iota + 1
	BRIEOptionConcurrency
	BRIEOptionChecksum
	BRIEOptionSendCreds
	BRIEOptionCheckpoint
	BRIEOptionStartTS
	BRIEOptionUntilTS
	// backup options
	BRIEOptionBackupTimeAgo
	BRIEOptionBackupTS
	BRIEOptionBackupTSO
	BRIEOptionLastBackupTS
	BRIEOptionLastBackupTSO
	BRIEOptionGCTTL
	// restore options
	BRIEOptionOnline
	BRIEOptionFullBackupStorage
	BRIEOptionRestoredTS
	// import options
	BRIEOptionAnalyze
	BRIEOptionBackend
	BRIEOptionOnDuplicate
	BRIEOptionSkipSchemaFiles
	BRIEOptionStrictFormat
	BRIEOptionTiKVImporter
	BRIEOptionResume
	// CSV options
	BRIEOptionCSVBackslashEscape
	BRIEOptionCSVDelimiter
	BRIEOptionCSVHeader
	BRIEOptionCSVNotNull
	BRIEOptionCSVNull
	BRIEOptionCSVSeparator
	BRIEOptionCSVTrimLastSeparators
	// BRIECSVHeaderIsColumns is the sentinel UintValue meaning CSV_HEADER = COLUMNS.
	BRIECSVHeaderIsColumns = ^uint64(0)
)
// BRIEOptionLevel is a tri-state option value (OFF / REQUIRED / OPTIONAL).
type BRIEOptionLevel uint64
const (
	BRIEOptionLevelOff BRIEOptionLevel = iota // equals FALSE
	BRIEOptionLevelRequired // equals TRUE
	BRIEOptionLevelOptional
)
// String returns the SQL keyword spelling of the BRIE statement kind, or ""
// for an unknown kind. Cases are listed in const-declaration order.
func (kind BRIEKind) String() string {
	switch kind {
	case BRIEKindBackup:
		return "BACKUP"
	case BRIEKindCancelJob:
		return "CANCEL BR JOB"
	case BRIEKindStreamStart:
		return "BACKUP LOGS"
	case BRIEKindStreamMetaData:
		return "SHOW BACKUP LOGS METADATA"
	case BRIEKindStreamStatus:
		return "SHOW BACKUP LOGS STATUS"
	case BRIEKindStreamPause:
		return "PAUSE BACKUP LOGS"
	case BRIEKindStreamResume:
		return "RESUME BACKUP LOGS"
	case BRIEKindStreamStop:
		return "STOP BACKUP LOGS"
	case BRIEKindStreamPurge:
		return "PURGE BACKUP LOGS"
	case BRIEKindRestore:
		return "RESTORE"
	case BRIEKindRestorePIT:
		return "RESTORE POINT"
	case BRIEKindShowJob:
		return "SHOW BR JOB"
	case BRIEKindShowQuery:
		return "SHOW BR JOB QUERY"
	case BRIEKindShowBackupMeta:
		return "SHOW BACKUP METADATA"
	default:
		return ""
	}
}
// String returns the option's SQL keyword spelling, or "" for an unknown
// value. Several option types deliberately share one keyword (the SNAPSHOT
// and LAST_BACKUP groups below).
func (kind BRIEOptionType) String() string {
	switch kind {
	case BRIEOptionRateLimit:
		return "RATE_LIMIT"
	case BRIEOptionConcurrency:
		return "CONCURRENCY"
	case BRIEOptionChecksum:
		return "CHECKSUM"
	case BRIEOptionSendCreds:
		return "SEND_CREDENTIALS_TO_TIKV"
	case BRIEOptionBackupTimeAgo, BRIEOptionBackupTS, BRIEOptionBackupTSO:
		return "SNAPSHOT"
	case BRIEOptionLastBackupTS, BRIEOptionLastBackupTSO:
		return "LAST_BACKUP"
	case BRIEOptionOnline:
		return "ONLINE"
	case BRIEOptionCheckpoint:
		return "CHECKPOINT"
	case BRIEOptionAnalyze:
		return "ANALYZE"
	case BRIEOptionBackend:
		return "BACKEND"
	case BRIEOptionOnDuplicate:
		return "ON_DUPLICATE"
	case BRIEOptionSkipSchemaFiles:
		return "SKIP_SCHEMA_FILES"
	case BRIEOptionStrictFormat:
		return "STRICT_FORMAT"
	case BRIEOptionTiKVImporter:
		return "TIKV_IMPORTER"
	case BRIEOptionResume:
		return "RESUME"
	case BRIEOptionCSVBackslashEscape:
		return "CSV_BACKSLASH_ESCAPE"
	case BRIEOptionCSVDelimiter:
		return "CSV_DELIMITER"
	case BRIEOptionCSVHeader:
		return "CSV_HEADER"
	case BRIEOptionCSVNotNull:
		return "CSV_NOT_NULL"
	case BRIEOptionCSVNull:
		return "CSV_NULL"
	case BRIEOptionCSVSeparator:
		return "CSV_SEPARATOR"
	case BRIEOptionCSVTrimLastSeparators:
		return "CSV_TRIM_LAST_SEPARATORS"
	case BRIEOptionFullBackupStorage:
		return "FULL_BACKUP_STORAGE"
	case BRIEOptionRestoredTS:
		return "RESTORED_TS"
	case BRIEOptionStartTS:
		return "START_TS"
	case BRIEOptionUntilTS:
		return "UNTIL_TS"
	case BRIEOptionGCTTL:
		return "GC_TTL"
	default:
		return ""
	}
}
// String returns the keyword form of the option level ("OFF", "REQUIRED",
// "OPTIONAL"), or "" for an unknown level.
func (level BRIEOptionLevel) String() string {
	switch level {
	case BRIEOptionLevelOff:
		return "OFF"
	case BRIEOptionLevelRequired:
		return "REQUIRED"
	case BRIEOptionLevelOptional:
		return "OPTIONAL"
	default:
		return ""
	}
}
// BRIEOption is one option attached to a BRIE statement; exactly one of
// StrValue/UintValue is meaningful depending on Tp.
type BRIEOption struct {
	Tp BRIEOptionType
	StrValue string // used by string-valued options
	UintValue uint64 // used by numeric/boolean/level-valued options
}
// Restore writes the option as `KEYWORD = value`, with the value format
// chosen per option type.
func (opt *BRIEOption) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord(opt.Tp.String())
	ctx.WritePlain(" = ")
	switch opt.Tp {
	case BRIEOptionBackupTS, BRIEOptionLastBackupTS, BRIEOptionBackend, BRIEOptionOnDuplicate, BRIEOptionTiKVImporter, BRIEOptionCSVDelimiter, BRIEOptionCSVNull, BRIEOptionCSVSeparator, BRIEOptionFullBackupStorage, BRIEOptionRestoredTS, BRIEOptionStartTS, BRIEOptionUntilTS, BRIEOptionGCTTL:
		ctx.WriteString(opt.StrValue)
	case BRIEOptionBackupTimeAgo:
		// NOTE(review): the stored value is divided by 1000 before being
		// labeled MICROSECOND, so UintValue is presumably nanoseconds —
		// confirm against the parser side.
		ctx.WritePlainf("%d ", opt.UintValue/1000)
		ctx.WriteKeyWord("MICROSECOND AGO")
	case BRIEOptionRateLimit:
		// Stored in bytes/second; printed as MB/SECOND (1048576 = 1 MiB).
		ctx.WritePlainf("%d ", opt.UintValue/1048576)
		ctx.WriteKeyWord("MB")
		ctx.WritePlain("/")
		ctx.WriteKeyWord("SECOND")
	case BRIEOptionCSVHeader:
		if opt.UintValue == BRIECSVHeaderIsColumns {
			ctx.WriteKeyWord("COLUMNS")
		} else {
			ctx.WritePlainf("%d", opt.UintValue)
		}
	case BRIEOptionChecksum, BRIEOptionAnalyze:
		// BACKUP/RESTORE doesn't support OPTIONAL value for now, should warn at executor
		ctx.WriteKeyWord(BRIEOptionLevel(opt.UintValue).String())
	default:
		ctx.WritePlainf("%d", opt.UintValue)
	}
	return nil
}
// BRIEStmt is a statement for backup, restore, import and export.
type BRIEStmt struct {
	stmtNode
	Kind BRIEKind
	Schemas []string // database names; used when Tables is empty
	Tables []*TableName // explicit table list; takes precedence over Schemas
	Storage string // external storage URL (TO/FROM target)
	JobID int64 // used by the job-management kinds (cancel/show)
	Options []*BRIEOption // trailing options
}
// Accept implements the Node Accept interface; table names are visited as
// children and replacements are stored back in place.
func (n *BRIEStmt) Accept(v Visitor) (Node, bool) {
	node, skip := v.Enter(n)
	if skip {
		return v.Leave(node)
	}
	n = node.(*BRIEStmt)
	for i := range n.Tables {
		visited, ok := n.Tables[i].Accept(v)
		if !ok {
			return n, false
		}
		n.Tables[i] = visited.(*TableName)
	}
	return v.Leave(n)
}
// Restore implements Node interface.
// Layout: KIND [TABLE t, ... | DATABASE d, ... | DATABASE *] [TO|FROM 'url'] [opts...].
func (n *BRIEStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord(n.Kind.String())
	switch n.Kind {
	case BRIEKindRestore, BRIEKindBackup:
		switch {
		case len(n.Tables) != 0:
			ctx.WriteKeyWord(" TABLE ")
			for index, table := range n.Tables {
				if index != 0 {
					ctx.WritePlain(", ")
				}
				if err := table.Restore(ctx); err != nil {
					return errors.Annotatef(err, "An error occurred while restore BRIEStmt.Tables[%d]", index)
				}
			}
		case len(n.Schemas) != 0:
			ctx.WriteKeyWord(" DATABASE ")
			for index, schema := range n.Schemas {
				if index != 0 {
					ctx.WritePlain(", ")
				}
				ctx.WriteName(schema)
			}
		default:
			// Neither tables nor schemas given: whole cluster ("DATABASE *").
			ctx.WriteKeyWord(" DATABASE")
			ctx.WritePlain(" *")
		}
		// BACKUP writes TO the storage URL; RESTORE reads FROM it.
		if n.Kind == BRIEKindBackup {
			ctx.WriteKeyWord(" TO ")
			ctx.WriteString(n.Storage)
		} else {
			ctx.WriteKeyWord(" FROM ")
			ctx.WriteString(n.Storage)
		}
	case BRIEKindCancelJob, BRIEKindShowJob, BRIEKindShowQuery:
		ctx.WritePlainf(" %d", n.JobID)
	case BRIEKindStreamStart:
		ctx.WriteKeyWord(" TO ")
		ctx.WriteString(n.Storage)
	case BRIEKindRestorePIT, BRIEKindStreamMetaData, BRIEKindShowBackupMeta, BRIEKindStreamPurge:
		ctx.WriteKeyWord(" FROM ")
		ctx.WriteString(n.Storage)
	}
	for _, opt := range n.Options {
		ctx.WritePlain(" ")
		if err := opt.Restore(ctx); err != nil {
			return err
		}
	}
	return nil
}
// RedactURL redacts the secret tokens in the URL. only S3 url need redaction for now.
// if the url is not a valid url, return the original string.
func RedactURL(str string) string {
	// FIXME: this solution is not scalable, and duplicates some logic from BR.
	u, err := url.Parse(str)
	if err != nil {
		return str
	}
	scheme := u.Scheme
	// Test hook: force the S3 redaction path regardless of the real scheme.
	failpoint.Inject("forceRedactURL", func() {
		scheme = "s3"
	})
	if strings.ToLower(scheme) == "s3" {
		values := u.Query()
		for k := range values {
			// see below on why we normalize key
			// https://github.com/pingcap/tidb/blob/a7c0d95f16ea2582bb569278c3f829403e6c3a7e/br/pkg/storage/parse.go#L163
			normalizedKey := strings.ToLower(strings.ReplaceAll(k, "_", "-"))
			if normalizedKey == "access-key" || normalizedKey == "secret-access-key" {
				values[k] = []string{"xxxxxx"}
			}
		}
		// NOTE(review): Encode() sorts parameters, so the redacted URL may
		// reorder its query string; also only the two keys above are masked
		// (e.g. session tokens are not) — confirm that is the full secret set.
		u.RawQuery = values.Encode()
	}
	return u.String()
}
// SecureText implements SensitiveStmtNode. The storage URL may embed S3
// credentials, so the statement is re-restored from a shallow copy whose URL
// has been passed through RedactURL.
func (n *BRIEStmt) SecureText() string {
	clone := &BRIEStmt{
		Kind:    n.Kind,
		Schemas: n.Schemas,
		Tables:  n.Tables,
		Storage: RedactURL(n.Storage),
		Options: n.Options,
	}
	var b strings.Builder
	_ = clone.Restore(format.NewRestoreCtx(format.DefaultRestoreFlags, &b))
	return b.String()
}
// LoadDataActionTp distinguishes the PAUSE/RESUME/CANCEL/DROP variants of the
// LOAD DATA JOB management statement.
type LoadDataActionTp int
const (
	LoadDataPause LoadDataActionTp = iota
	LoadDataResume
	LoadDataCancel
	LoadDataDrop
)
// LoadDataActionStmt represent PAUSE/RESUME/CANCEL/DROP LOAD DATA JOB statement.
type LoadDataActionStmt struct {
	stmtNode
	Tp LoadDataActionTp
	JobID int64 // identifier of the job being managed
}
// Accept implements the Node Accept interface. The statement has no child
// nodes, so the entered node is passed straight to Leave.
func (n *LoadDataActionStmt) Accept(v Visitor) (Node, bool) {
	newNode, _ := v.Enter(n)
	return v.Leave(newNode)
}
// Restore implements the Node interface: it emits the action keyword for the
// statement's type followed by the job ID, or errors on an unknown type.
func (n *LoadDataActionStmt) Restore(ctx *format.RestoreCtx) error {
	var keyword string
	switch n.Tp {
	case LoadDataPause:
		keyword = "PAUSE LOAD DATA JOB "
	case LoadDataResume:
		keyword = "RESUME LOAD DATA JOB "
	case LoadDataCancel:
		keyword = "CANCEL LOAD DATA JOB "
	case LoadDataDrop:
		keyword = "DROP LOAD DATA JOB "
	default:
		return errors.Errorf("invalid load data action type: %d", n.Tp)
	}
	ctx.WriteKeyWord(keyword)
	ctx.WritePlainf("%d", n.JobID)
	return nil
}
// ImportIntoActionTp names an IMPORT INTO job management action.
type ImportIntoActionTp string
const (
	ImportIntoCancel ImportIntoActionTp = "cancel"
)
// ImportIntoActionStmt represent CANCEL IMPORT INTO JOB statement.
// will support pause/resume/drop later.
type ImportIntoActionStmt struct {
	stmtNode
	Tp ImportIntoActionTp
	JobID int64 // identifier of the job being cancelled
}
// Accept implements the Node Accept interface. The statement has no child
// nodes, so the entered node is passed straight to Leave.
func (n *ImportIntoActionStmt) Accept(v Visitor) (Node, bool) {
	newNode, _ := v.Enter(n)
	return v.Leave(newNode)
}
// Restore implements the Node interface; only the cancel action is supported.
func (n *ImportIntoActionStmt) Restore(ctx *format.RestoreCtx) error {
	if n.Tp != ImportIntoCancel {
		return errors.Errorf("invalid IMPORT INTO action type: %s", n.Tp)
	}
	ctx.WriteKeyWord("CANCEL IMPORT JOB ")
	ctx.WritePlainf("%d", n.JobID)
	return nil
}
// Ident is the table identifier composed of schema name and table name.
type Ident struct {
	Schema model.CIStr
	Name model.CIStr
}
// String implements fmt.Stringer interface.
// An empty schema yields just the table name; otherwise "schema.name".
func (i Ident) String() string {
	if i.Schema.O == "" {
		return i.Name.O
	}
	// NOTE(review): this relies on model.CIStr's own Stringer for both parts;
	// confirm it prints the original-case form (O) as the branch above does.
	return fmt.Sprintf("%s.%s", i.Schema, i.Name)
}
// SelectStmtOpts wrap around select hints and switches
type SelectStmtOpts struct {
	Distinct bool
	SQLBigResult bool
	SQLBufferResult bool
	SQLCache bool
	SQLSmallResult bool
	CalcFoundRows bool
	StraightJoin bool
	Priority mysql.PriorityEnum
	TableHints []*TableOptimizerHint
	ExplicitAll bool
}
// TableOptimizerHint is Table level optimizer hint
type TableOptimizerHint struct {
	node
	// HintName is the name or alias of the table(s) which the hint will affect.
	// Table hints has no schema info
	// It allows only table name or alias (if table has an alias)
	HintName model.CIStr
	// HintData is the payload of the hint. The actual type of this field
	// is defined differently as according `HintName`. Define as following:
	//
	// Statement Execution Time Optimizer Hints
	// See https://dev.mysql.com/doc/refman/5.7/en/optimizer-hints.html#optimizer-hints-execution-time
	// - MAX_EXECUTION_TIME => uint64
	// - MEMORY_QUOTA => int64
	// - QUERY_TYPE => model.CIStr
	//
	// Time Range is used to hint the time range of inspection tables
	// e.g: select /*+ time_range('','') */ * from information_schema.inspection_result.
	// - TIME_RANGE => ast.HintTimeRange
	// - READ_FROM_STORAGE => model.CIStr
	// - USE_TOJA => bool
	// - NTH_PLAN => int64
	HintData interface{}
	// QBName is the default effective query block of this hint.
	QBName model.CIStr
	Tables []HintTable // tables the hint applies to, if any
	Indexes []model.CIStr // index names for index-selection hints
}
// HintTimeRange is the payload of `TIME_RANGE` hint
type HintTimeRange struct {
	From string // inclusive start of the inspected range
	To string // inclusive end of the inspected range
}
// HintSetVar is the payload of `SET_VAR` hint
type HintSetVar struct {
	VarName string // system variable to override for this statement
	Value string // value the variable is set to
}
// HintTable is table in the hint. It may have query block info.
type HintTable struct {
	DBName model.CIStr // optional schema qualifier
	TableName model.CIStr
	QBName model.CIStr // optional query-block qualifier (written as @name)
	PartitionList []model.CIStr // optional PARTITION(...) list
}
// Restore writes the hint table reference in the form
// [db.]table[@qb][ PARTITION(p1, ...)].
func (ht *HintTable) Restore(ctx *format.RestoreCtx) {
	if ht.DBName.L != "" {
		ctx.WriteName(ht.DBName.String())
		ctx.WriteKeyWord(".")
	}
	ctx.WriteName(ht.TableName.String())
	if ht.QBName.L != "" {
		ctx.WriteKeyWord("@")
		ctx.WriteName(ht.QBName.String())
	}
	if len(ht.PartitionList) == 0 {
		return
	}
	ctx.WriteKeyWord(" PARTITION")
	ctx.WritePlain("(")
	for idx, part := range ht.PartitionList {
		if idx > 0 {
			ctx.WritePlain(", ")
		}
		ctx.WriteName(part.String())
	}
	ctx.WritePlain(")")
}
// Restore implements Node interface.
// A hint restores as NAME([@qb ]args...); the argument syntax varies per hint
// name and is handled case-by-case below.
func (n *TableOptimizerHint) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord(n.HintName.String())
	ctx.WritePlain("(")
	if n.QBName.L != "" {
		// qb_name(...) takes the block name as a plain first argument; all
		// other hints prefix the query-block reference with '@'.
		if n.HintName.L != "qb_name" {
			ctx.WriteKeyWord("@")
		}
		ctx.WriteName(n.QBName.String())
	}
	if n.HintName.L == "qb_name" && len(n.Tables) == 0 {
		ctx.WritePlain(")")
		return nil
	}
	// Hints without args except query block.
	switch n.HintName.L {
	case "mpp_1phase_agg", "mpp_2phase_agg", "hash_agg", "stream_agg", "agg_to_cop", "read_consistent_replica", "no_index_merge", "ignore_plan_cache", "limit_to_cop", "straight_join", "merge", "no_decorrelate":
		ctx.WritePlain(")")
		return nil
	}
	if n.QBName.L != "" {
		ctx.WritePlain(" ")
	}
	// Hints with args except query block.
	switch n.HintName.L {
	case "max_execution_time":
		ctx.WritePlainf("%d", n.HintData.(uint64))
	case "tidb_kv_read_timeout":
		ctx.WritePlainf("%d", n.HintData.(uint64))
	case "resource_group":
		ctx.WriteName(n.HintData.(string))
	case "nth_plan":
		ctx.WritePlainf("%d", n.HintData.(int64))
	case "tidb_hj", "tidb_smj", "tidb_inlj", "hash_join", "hash_join_build", "hash_join_probe", "merge_join", "inl_join", "broadcast_join", "shuffle_join", "inl_hash_join", "inl_merge_join", "leading":
		for i, table := range n.Tables {
			if i != 0 {
				ctx.WritePlain(", ")
			}
			table.Restore(ctx)
		}
	case "use_index", "ignore_index", "use_index_merge", "force_index", "order_index", "no_order_index":
		// First table, then the comma-separated index list.
		n.Tables[0].Restore(ctx)
		ctx.WritePlain(" ")
		for i, index := range n.Indexes {
			if i != 0 {
				ctx.WritePlain(", ")
			}
			ctx.WriteName(index.String())
		}
	case "qb_name":
		if len(n.Tables) > 0 {
			ctx.WritePlain(", ")
			for i, table := range n.Tables {
				if i != 0 {
					// NOTE(review): tables here are separated with ". " rather
					// than ", " — looks like a possible typo, but it may match
					// the qb_name grammar; verify against the parser tests
					// before changing.
					ctx.WritePlain(". ")
				}
				table.Restore(ctx)
			}
		}
	case "use_toja", "use_cascades":
		if n.HintData.(bool) {
			ctx.WritePlain("TRUE")
		} else {
			ctx.WritePlain("FALSE")
		}
	case "query_type":
		ctx.WriteKeyWord(n.HintData.(model.CIStr).String())
	case "memory_quota":
		// Stored in bytes; printed in MB.
		ctx.WritePlainf("%d MB", n.HintData.(int64)/1024/1024)
	case "read_from_storage":
		ctx.WriteKeyWord(n.HintData.(model.CIStr).String())
		for i, table := range n.Tables {
			// Bracket the table list: "[" before the first, "]" after the last.
			if i == 0 {
				ctx.WritePlain("[")
			}
			table.Restore(ctx)
			if i == len(n.Tables)-1 {
				ctx.WritePlain("]")
			} else {
				ctx.WritePlain(", ")
			}
		}
	case "time_range":
		hintData := n.HintData.(HintTimeRange)
		ctx.WriteString(hintData.From)
		ctx.WritePlain(", ")
		ctx.WriteString(hintData.To)
	case "set_var":
		hintData := n.HintData.(HintSetVar)
		ctx.WritePlain(hintData.VarName)
		ctx.WritePlain(" = ")
		ctx.WritePlain(hintData.Value)
	}
	ctx.WritePlain(")")
	return nil
}
// Accept implements the Node Accept interface. The hint carries no child AST
// nodes to visit.
func (n *TableOptimizerHint) Accept(v Visitor) (Node, bool) {
	node, skip := v.Enter(n)
	if skip {
		return v.Leave(node)
	}
	n = node.(*TableOptimizerHint)
	return v.Leave(n)
}
// TextString represent a string, it can be a binary literal.
type TextString struct {
	Value string
	IsBinaryLiteral bool // true when Value came from a hex/bit literal
}
// BinaryLiteral is implemented by driver-provided hex/bit literal values.
type BinaryLiteral interface {
	ToString() string
}
// NewDecimal creates a types.Decimal value, it's provided by parser driver.
var NewDecimal func(string) (interface{}, error)
// NewHexLiteral creates a types.HexLiteral value, it's provided by parser driver.
var NewHexLiteral func(string) (interface{}, error)
// NewBitLiteral creates a types.BitLiteral value, it's provided by parser driver.
var NewBitLiteral func(string) (interface{}, error)
// SetResourceGroupStmt is a statement to set the resource group name for current session.
type SetResourceGroupStmt struct {
	stmtNode
	Name model.CIStr // target resource group
}
// Restore implements the Node interface; the group name is written as an
// identifier.
func (n *SetResourceGroupStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("SET RESOURCE GROUP ")
	ctx.WriteName(n.Name.O)
	return nil
}
// Accept implements Node Accept interface. The statement has no child nodes.
func (n *SetResourceGroupStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*SetResourceGroupStmt)
	return v.Leave(n)
}
// CalibrateResourceType is the type for CalibrateResource statement.
type CalibrateResourceType int
// calibrate resource [ workload < TPCC | OLTP_READ_WRITE | OLTP_READ_ONLY | OLTP_WRITE_ONLY> ]
const (
	WorkloadNone CalibrateResourceType = iota
	TPCC
	OLTPREADWRITE
	OLTPREADONLY
	OLTPWRITEONLY
)
// Restore writes the optional WORKLOAD clause; WorkloadNone (and any unknown
// value) produces no output.
func (n CalibrateResourceType) Restore(ctx *format.RestoreCtx) error {
	switch n {
	case TPCC:
		ctx.WriteKeyWord(" WORKLOAD TPCC")
	case OLTPREADWRITE:
		ctx.WriteKeyWord(" WORKLOAD OLTP_READ_WRITE")
	case OLTPREADONLY:
		ctx.WriteKeyWord(" WORKLOAD OLTP_READ_ONLY")
	case OLTPWRITEONLY:
		ctx.WriteKeyWord(" WORKLOAD OLTP_WRITE_ONLY")
	}
	return nil
}
// CalibrateResourceStmt is a statement to fetch the cluster RU capacity
type CalibrateResourceStmt struct {
	stmtNode
	DynamicCalibrateResourceOptionList []*DynamicCalibrateResourceOption // optional time-window options
	Tp CalibrateResourceType // optional WORKLOAD selector
}
// Restore implements Node interface.
func (n *CalibrateResourceStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("CALIBRATE RESOURCE")
	if err := n.Tp.Restore(ctx); err != nil {
		return errors.Annotate(err, "An error occurred while restore CalibrateResourceStmt.CalibrateResourceType")
	}
	for i, option := range n.DynamicCalibrateResourceOptionList {
		ctx.WritePlain(" ")
		if err := option.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while splicing DynamicCalibrateResourceOption: [%v]", i)
		}
	}
	return nil
}
// Accept implements Node Accept interface.
func (n *CalibrateResourceStmt) Accept(v Visitor) (Node, bool) {
	newNode, skipChildren := v.Enter(n)
	if skipChildren {
		return v.Leave(newNode)
	}
	n = newNode.(*CalibrateResourceStmt)
	for _, val := range n.DynamicCalibrateResourceOptionList {
		// NOTE(review): the node returned by Accept is discarded, so a visitor
		// cannot replace an option in place (unlike the Privs handling earlier
		// in this file) — confirm this is intentional.
		_, ok := val.Accept(v)
		if !ok {
			return n, false
		}
	}
	return v.Leave(n)
}
// DynamicCalibrateType distinguishes the time-window options of CALIBRATE RESOURCE.
type DynamicCalibrateType int
const (
	// specific time
	// NOTE(review): these constants are untyped (no DynamicCalibrateType on the
	// first entry), so they are plain ints — harmless today but worth confirming.
	CalibrateStartTime = iota
	CalibrateEndTime
	CalibrateDuration
)
// DynamicCalibrateResourceOption is one START_TIME/END_TIME/DURATION option.
type DynamicCalibrateResourceOption struct {
	stmtNode
	Tp DynamicCalibrateType
	StrValue string // literal duration string, used by DURATION 'xxx'
	Ts ExprNode // timestamp (or interval count) expression
	Unit TimeUnitType // interval unit, used by DURATION INTERVAL n UNIT
}
// Restore writes the option; DURATION accepts either a string literal or an
// INTERVAL expression with a unit.
func (n *DynamicCalibrateResourceOption) Restore(ctx *format.RestoreCtx) error {
	switch n.Tp {
	case CalibrateStartTime:
		ctx.WriteKeyWord("START_TIME ")
		if err := n.Ts.Restore(ctx); err != nil {
			return errors.Annotate(err, "An error occurred while splicing DynamicCalibrateResourceOption StartTime")
		}
	case CalibrateEndTime:
		ctx.WriteKeyWord("END_TIME ")
		if err := n.Ts.Restore(ctx); err != nil {
			return errors.Annotate(err, "An error occurred while splicing DynamicCalibrateResourceOption EndTime")
		}
	case CalibrateDuration:
		ctx.WriteKeyWord("DURATION ")
		if len(n.StrValue) > 0 {
			ctx.WriteString(n.StrValue)
		} else {
			ctx.WriteKeyWord("INTERVAL ")
			if err := n.Ts.Restore(ctx); err != nil {
				return errors.Annotate(err, "An error occurred while restore DynamicCalibrateResourceOption DURATION TS")
			}
			ctx.WritePlain(" ")
			ctx.WriteKeyWord(n.Unit.String())
		}
	default:
		return errors.Errorf("invalid DynamicCalibrateResourceOption: %d", n.Tp)
	}
	return nil
}
// Accept implements the Node Accept interface; the optional timestamp
// expression is visited as a child.
func (n *DynamicCalibrateResourceOption) Accept(v Visitor) (Node, bool) {
	node, skip := v.Enter(n)
	if skip {
		return v.Leave(node)
	}
	n = node.(*DynamicCalibrateResourceOption)
	if n.Ts == nil {
		return v.Leave(n)
	}
	child, ok := n.Ts.Accept(v)
	if !ok {
		return n, false
	}
	n.Ts = child.(ExprNode)
	return v.Leave(n)
}
// DropQueryWatchStmt is a statement to drop a runaway watch item.
type DropQueryWatchStmt struct {
	stmtNode
	IntValue int64 // identifier of the watch item to remove
}
// Restore writes "QUERY WATCH REMOVE <id>".
func (n *DropQueryWatchStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("QUERY WATCH REMOVE ")
	ctx.WritePlainf("%d", n.IntValue)
	return nil
}
// Accept implements Node Accept interface. The statement has no child nodes.
func (n *DropQueryWatchStmt) Accept(v Visitor) (Node, bool) {
	newNode, _ := v.Enter(n)
	n = newNode.(*DropQueryWatchStmt)
	return v.Leave(n)
}
// AddQueryWatchStmt is a statement to add a runaway watch item.
type AddQueryWatchStmt struct {
	stmtNode
	QueryWatchOptionList []*QueryWatchOption // the watch item's options
}
// Restore writes "QUERY WATCH ADD" followed by each option.
func (n *AddQueryWatchStmt) Restore(ctx *format.RestoreCtx) error {
	ctx.WriteKeyWord("QUERY WATCH ADD")
	for i, option := range n.QueryWatchOptionList {
		ctx.WritePlain(" ")
		if err := option.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while splicing QueryWatchOptionList: [%v]", i)
		}
	}
	return nil
}
// Accept implements Node Accept interface.
func (n *AddQueryWatchStmt) Accept(v Visitor) (Node, bool) {
	newNode, _ := v.Enter(n)
	n = newNode.(*AddQueryWatchStmt)
	for _, val := range n.QueryWatchOptionList {
		// NOTE(review): the node returned by Accept is discarded, so a visitor
		// cannot replace an option in place — confirm this is intentional.
		_, ok := val.Accept(v)
		if !ok {
			return n, false
		}
	}
	return v.Leave(n)
}
// QueryWatchOptionType distinguishes the option kinds of QUERY WATCH ADD.
type QueryWatchOptionType int
const (
	QueryWatchResourceGroup QueryWatchOptionType = iota
	QueryWatchAction
	QueryWatchType
)
// QueryWatchOption is used for parsing manual management of watching runaway queries option.
type QueryWatchOption struct {
	stmtNode
	Tp QueryWatchOptionType
	StrValue model.CIStr // resource group name when given as an identifier
	IntValue int32 // action or watch-type enum value
	ExprValue ExprNode // expression form of the value, when present
	BoolValue bool // for QueryWatchType: true means the SQL TEXT form
}
// Restore writes one QUERY WATCH option. The exact shape depends on Tp and,
// for QueryWatchType, on whether the SQL TEXT form (BoolValue) is used.
func (n *QueryWatchOption) Restore(ctx *format.RestoreCtx) error {
	switch n.Tp {
	case QueryWatchResourceGroup:
		ctx.WriteKeyWord("RESOURCE GROUP ")
		// The group may be given either as an expression or as an identifier.
		if n.ExprValue != nil {
			if err := n.ExprValue.Restore(ctx); err != nil {
				return errors.Annotatef(err, "An error occurred while splicing ExprValue: [%v]", n.ExprValue)
			}
		} else {
			ctx.WriteName(n.StrValue.O)
		}
	case QueryWatchAction:
		ctx.WriteKeyWord("ACTION ")
		ctx.WritePlain("= ")
		ctx.WriteKeyWord(model.RunawayActionType(n.IntValue).String())
	case QueryWatchType:
		if n.BoolValue {
			ctx.WriteKeyWord("SQL TEXT ")
			ctx.WriteKeyWord(model.RunawayWatchType(n.IntValue).String())
			ctx.WriteKeyWord(" TO ")
		} else {
			switch n.IntValue {
			case int32(model.WatchSimilar):
				ctx.WriteKeyWord("SQL DIGEST ")
			case int32(model.WatchPlan):
				ctx.WriteKeyWord("PLAN DIGEST ")
			}
		}
		// Both forms end with the watched value expression.
		if err := n.ExprValue.Restore(ctx); err != nil {
			return errors.Annotatef(err, "An error occurred while splicing ExprValue: [%v]", n.ExprValue)
		}
	}
	return nil
}
// Accept implements the Node Accept interface; the optional value expression
// is visited as a child.
func (n *QueryWatchOption) Accept(v Visitor) (Node, bool) {
	node, skip := v.Enter(n)
	if skip {
		return v.Leave(node)
	}
	n = node.(*QueryWatchOption)
	if n.ExprValue == nil {
		return v.Leave(n)
	}
	child, ok := n.ExprValue.Accept(v)
	if !ok {
		return n, false
	}
	n.ExprValue = child.(ExprNode)
	return v.Leave(n)
}
// CheckQueryWatchAppend reports whether newOp may be appended to ops: each
// option type may appear at most once, so a duplicate type rejects the append.
func CheckQueryWatchAppend(ops []*QueryWatchOption, newOp *QueryWatchOption) bool {
	for _, existing := range ops {
		if existing.Tp == newOp.Tp {
			return false
		}
	}
	return true
}
|
package main
import (
"fmt"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/rwcarlsen/goexif/exif"
"github.com/rwcarlsen/goexif/tiff"
)
// bucketName is the hard-coded S3 bucket this program scans.
var bucketName = "waldo-recruiting"
// PhotoReader lists photos in an S3 bucket and extracts their EXIF tags.
type PhotoReader struct {
	svc *s3.S3 // S3 API client
	bucket *string // bucket name, in the pointer form the SDK expects
}
// NewReader builds a PhotoReader backed by a fresh AWS session.
// NOTE(review): session.NewSession() with no explicit config relies entirely
// on environment / shared config for region and credentials — confirm the
// deployment provides them.
func NewReader() (*PhotoReader, error) {
	sess, err := session.NewSession()
	if err != nil {
		return nil, err
	}
	svc := s3.New(sess)
	return &PhotoReader{svc: svc, bucket: &bucketName}, nil
}
// List returns the objects currently stored in the reader's bucket.
// SDK Ref: http://docs.aws.amazon.com/sdk-for-go/api/service/s3/#ListObjectsOutput
func (r *PhotoReader) List() ([]*s3.Object, error) {
	params := &s3.ListObjectsInput{Bucket: r.bucket}
	resp, err := r.svc.ListObjects(params)
	if err != nil {
		// Previously resp.Contents was dereferenced before checking err; on a
		// failed call resp may be nil, which would panic. Check err first.
		return nil, err
	}
	// NOTE(review): ListObjects returns at most 1000 keys per call; buckets
	// larger than that need pagination (ListObjectsPages) — confirm bucket size.
	return resp.Contents, nil
}
// Walker accumulates EXIF fields into a map; it implements exif's Walk callback.
type Walker struct {
	tags map[string]string // field name -> string value collected so far
}
// Walk records one EXIF field. The value receiver is fine here because the
// map is shared by reference. The StringVal error is deliberately ignored:
// non-string-convertible tags yield "" and are skipped.
func (w Walker) Walk(name exif.FieldName, tag *tiff.Tag) error {
	val, _ := tag.StringVal()
	if val != "" {
		w.tags[string(name)] = val
	}
	return nil
}
// PhotoResult pairs an S3 object key with its extracted EXIF tags.
type PhotoResult struct {
	tags map[string]string
	key string
}
// worker drains keys from jobs and sends one PhotoResult per key to out.
// NOTE(review): the FetchEXIF error is discarded, so a failed photo sends a
// nil result downstream — consumers must tolerate nil (confirm), otherwise
// they will panic dereferencing p.tags.
func (r *PhotoReader) worker(id int, jobs chan string, out chan *PhotoResult) {
	for k := range jobs {
		fmt.Println("worker", id, "reading photo", k)
		p, _ := r.FetchEXIF(k)
		out <- p
	}
}
// FetchEXIF downloads one object from S3 and parses its EXIF block,
// returning the collected tags keyed by EXIF field name.
func (r *PhotoReader) FetchEXIF(key string) (*PhotoResult, error) {
	//Read
	params := &s3.GetObjectInput{
		Bucket: r.bucket, // Required
		Key:    &key,     // Required
	}
	resp, e := r.svc.GetObject(params)
	if e != nil {
		fmt.Println("Error photo ", key, "Details: ", e.Error())
		return nil, e
	}
	// The body is a network stream; it was previously never closed, leaking
	// the underlying HTTP connection on every call.
	defer resp.Body.Close()
	//Parse EXIF Data
	x, err := exif.Decode(resp.Body)
	if err != nil {
		fmt.Println("Error photo ", key, "Details: ", err.Error())
		return nil, err
	}
	w := &Walker{tags: make(map[string]string)}
	x.Walk(w)
	// err is necessarily nil here; return nil explicitly for clarity.
	return &PhotoResult{tags: w.tags, key: key}, nil
}
|
package lib
import (
"errors"
"io"
"log"
"sync"
)
// Pool manages a buffered channel of shared resources (e.g. network or
// database connections). Any number of goroutines may acquire a resource,
// use it, and release it back to the pool.
type Pool struct {
	m sync.Mutex //guards closed so concurrent Close/Release stay consistent
	res chan io.Closer //buffered channel holding the pooled resources
	factory func() (io.Closer, error) //creates a new resource when the pool is empty
	closed bool //set once the pool is closed; later use is an error
}
// ErrorPoolClosed is returned by Acquire once the pool has been closed.
var ErrorPoolClosed = errors.New("资源已经关闭")
// New creates a resource pool that buffers at most size resources produced
// by fn. It rejects a zero size, since such a pool could never hold anything.
func New(fn func() (io.Closer, error), size uint) (*Pool, error) {
	// size is unsigned, so zero is the only invalid value; the previous
	// "size <= 0" was equivalent but misleading for a uint.
	if size == 0 {
		return nil, errors.New("资源池太小了")
	}
	return &Pool{
		res:     make(chan io.Closer, size),
		factory: fn,
	}, nil
}
//Acquire returns a resource from the pool, falling back to factory when the
//pool is empty. After Close, the drained channel reports !ok and
//ErrorPoolClosed is returned.
func (p *Pool) Acquire() (io.Closer, error) {
	select {
	case r, ok := <-p.res:
		log.Println("Acquire:共享资源")
		if !ok {
			return nil, ErrorPoolClosed
		}
		return r, nil
	default:
		// NOTE(review): no lock is held here, so an Acquire racing with Close
		// can still create a brand-new resource after the pool is closed; it
		// is only cleaned up when the caller Releases it. Confirm acceptable.
		log.Println("Aquire: 新生资源")
		return p.factory()
	}
}
// Close shuts the pool down: it marks the pool closed, closes the
// channel and closes every idle resource still buffered in it.
// It is safe to call more than once.
func (p *Pool) Close() {
	p.m.Lock()
	defer p.m.Unlock()
	// Already closed: nothing to do.
	if p.closed {
		return
	}
	p.closed = true
	// Close the channel before draining it; otherwise the range below
	// would block forever waiting for more resources.
	close(p.res)
	for r := range p.res {
		r.Close()
	}
}
// Release puts a resource back into the pool. If the pool is already
// closed, or the buffer is full, the resource is closed instead.
func (p *Pool) Release(r io.Closer) {
	// Lock so Release and Close cannot interleave: only one of them
	// may act on the channel at a time.
	p.m.Lock()
	defer p.m.Unlock()
	if p.closed {
		// The pool was closed while this resource was in use; the
		// channel is already closed, so just close the resource.
		r.Close()
		return
	}
	select {
	case p.res <- r:
		log.Println("资源池释放到池子里了")
	default:
		// Buffer is full — discard the resource.
		log.Println("资源满了,释放这个资源吧")
		r.Close()
	}
}
|
// Package argsort implements a variant of the sort function that returns a slice of indices that would sort the array.
//
// The name comes from the popular Python numpy.Argsort function.
package argsort
import (
"reflect"
"sort"
)
// SortInto populates indices with the permutation that would sort s.
// Note: s itself is NOT reordered — argsorter.Swap permutes only the
// index slice, never the underlying data. indices must have length
// s.Len(). The sort is stable.
func SortInto(s sort.Interface, indices []int) {
	for i := 0; i < s.Len(); i++ {
		indices[i] = i
	}
	sort.Stable(argsorter{s: s, m: indices})
}
// Sort returns the indices that would sort s; s itself is left untouched.
func Sort(s sort.Interface) []int {
	out := make([]int, s.Len())
	SortInto(s, out)
	return out
}
// SortSliceInto populates indices with the permutation that would sort
// slice according to less. The slice itself is not modified; indices
// must have the same length as the slice.
func SortSliceInto(slice interface{}, indices []int, less func(i, j int) bool) {
	SortInto(dyn{slice, less}, indices)
}
// SortSlice returns the indices that would sort a slice given a
// comparison function. slice must be a value reflect can take a length
// of (slice, array, string, ...); it is not modified.
func SortSlice(slice interface{}, less func(i, j int) bool) []int {
	v := reflect.ValueOf(slice)
	indices := make([]int, v.Len())
	SortSliceInto(slice, indices, less)
	return indices
}
// argsorter adapts a sort.Interface so that sorting reorders only the
// index slice m: comparisons read the underlying data through m, but
// swaps touch m alone, leaving s untouched.
type argsorter struct {
	s sort.Interface
	m []int
}

// Less compares the underlying elements the two indices point at.
func (a argsorter) Less(i, j int) bool { return a.s.Less(a.m[i], a.m[j]) }

// Len returns the length of the underlying data.
func (a argsorter) Len() int { return a.s.Len() }

// Swap exchanges the indices only — the data itself never moves.
func (a argsorter) Swap(i, j int) { a.m[i], a.m[j] = a.m[j], a.m[i] }
// dyn wraps an arbitrary slice plus a comparison closure as a
// sort.Interface, using reflection only for the length.
type dyn struct {
	slice interface{}
	less func(i, j int) bool
}

// Less delegates to the user-supplied comparison.
func (d dyn) Less(i, j int) bool { return d.less(i, j) }

// Len reflects over the wrapped slice to get its length.
func (d dyn) Len() int { return reflect.ValueOf(d.slice).Len() }

// Swap is never reached: argsorter.Swap permutes only the index slice,
// so this data-level Swap is intentionally unimplemented.
func (d dyn) Swap(i, j int) { panic("unnecessary") }
|
package main
import (
"fmt"
"github.com/go-macaron/binding"
"github.com/go-macaron/cache"
"github.com/go-macaron/session"
"gopkg.in/macaron.v1"
"html/template"
"net/http"
)
// HTTPConfig has webserver config options: the listen port plus the
// single admin credential pair checked by myLoginHandler.
type HTTPConfig struct {
	Port int `toml:"port"`
	AdminUser string `toml:"adminuser"`
	AdminPassword string `toml:"adminpassword"`
}
// UserLogin is the form payload for /session/create; both fields are
// required by the binding middleware.
type UserLogin struct {
	UserName string `form:"username" binding:"Required"`
	Password string `form:"password" binding:"Required"`
}
// webServer configures and runs the Macaron HTTP server on the given
// port: static assets, in-memory sessions, template rendering, caching,
// the login endpoint and the REST API groups for config entities and
// runtime operations. It blocks in ListenAndServe until the server stops.
func webServer(port int) {
	bind := binding.Bind
	/* jwtMiddleware := jwtmiddleware.New(jwtmiddleware.Options{
	ValidationKeyGetter: func(token *jwt.Token) (interface{}, error) {
	return []byte("My Secret"), nil
	},
	// When set, the middleware verifies that tokens are signed with the specific signing algorithm
	// If the signing method is not constant the ValidationKeyGetter callback can be used to implement additional checks
	// Important to avoid security issues described here: https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/
	SigningMethod: jwt.SigningMethodHS256,
	})*/
	// initiate the app
	m := macaron.Classic()
	// register middleware
	m.Use(macaron.Recovery())
	// m.Use(gzip.Gziper())
	m.Use(macaron.Static("public",
		macaron.StaticOptions{
			// Prefix is the optional prefix used to serve the static directory content. Default is empty string.
			Prefix: "public",
			// SkipLogging will disable [Static] log messages when a static file is served. Default is false.
			SkipLogging: true,
			// IndexFile defines which file to serve as index if it exists. Default is "index.html".
			IndexFile: "index.html",
			// Expires defines which user-defined function to use for producing a HTTP Expires Header. Default is nil.
			// https://developers.google.com/speed/docs/insights/LeverageBrowserCaching
			Expires: func() string { return "max-age=0" },
		}))
	m.Use(session.Sessioner(session.Options{
		// Name of provider. Default is "memory".
		Provider: "memory",
		// Provider configuration, it's corresponding to provider.
		ProviderConfig: "",
		// Cookie name to save session ID. Default is "MacaronSession".
		CookieName: "snmpcollector-session",
		// Cookie path to store. Default is "/".
		CookiePath: "/",
		// GC interval time in seconds. Default is 3600.
		Gclifetime: 3600,
		// Max life time in seconds. Default is whatever GC interval time is.
		Maxlifetime: 3600,
		// Use HTTPS only. Default is false.
		Secure: false,
		// Cookie life time. Default is 0.
		CookieLifeTime: 0,
		// Cookie domain name. Default is empty.
		Domain: "",
		// Session ID length. Default is 16.
		IDLength: 16,
		// Configuration section name. Default is "session".
		Section: "session",
	}))
	m.Use(macaron.Renderer(macaron.RenderOptions{
		// Directory to load templates. Default is "templates".
		Directory: "pkg/templates",
		// Extensions to parse template files from. Defaults are [".tmpl", ".html"].
		Extensions: []string{".tmpl", ".html"},
		// Funcs is a slice of FuncMaps to apply to the template upon compilation. Default is [].
		Funcs: []template.FuncMap{map[string]interface{}{
			"AppName": func() string {
				return "snmpcollector"
			},
			"AppVer": func() string {
				return "0.5.1"
			},
		}},
		// Delims sets the action delimiters to the specified strings. Defaults are ["{{", "}}"].
		Delims: macaron.Delims{"{{", "}}"},
		// Appends the given charset to the Content-Type header. Default is "UTF-8".
		Charset: "UTF-8",
		// Outputs human readable JSON. Default is false.
		IndentJSON: true,
		// Outputs human readable XML. Default is false.
		IndentXML: true,
		// Prefixes the JSON output with the given bytes. Default is no prefix.
		// PrefixJSON: []byte("macaron"),
		// Prefixes the XML output with the given bytes. Default is no prefix.
		// PrefixXML: []byte("macaron"),
		// Allows changing of output to XHTML instead of HTML. Default is "text/html".
		HTMLContentType: "text/html",
	}))
	m.Use(cache.Cacher(cache.Options{
		// Name of adapter. Default is "memory".
		Adapter: "memory",
		// Adapter configuration, it's corresponding to adapter.
		AdapterConfig: "",
		// GC interval time in seconds. Default is 60.
		Interval: 60,
		// Configuration section name. Default is "cache".
		Section: "cache",
	}))
	// Login endpoint; credentials are bound from the posted form.
	m.Post("/session/create", bind(UserLogin{}), myLoginHandler)
	// CRUD API groups for each configuration entity.
	m.Group("/metric", func() {
		m.Get("/", GetMetrics)
		m.Post("/", bind(SnmpMetricCfg{}), AddMetric)
		m.Put("/:id", bind(SnmpMetricCfg{}), UpdateMetric)
		m.Delete("/:id", DeleteMetric)
		m.Get("/:id", GetMetricByID)
	})
	m.Group("/measurement", func() {
		m.Get("/", GetMeas)
		m.Post("/", bind(InfluxMeasurementCfg{}), AddMeas)
		m.Put("/:id", bind(InfluxMeasurementCfg{}), UpdateMeas)
		m.Delete("/:id", DeleteMeas)
		m.Get("/:id", GetMeasByID)
	})
	m.Group("/measgroups", func() {
		m.Get("/", GetMeasGroup)
		m.Post("/", bind(MGroupsCfg{}), AddMeasGroup)
		m.Put("/:id", bind(MGroupsCfg{}), UpdateMeasGroup)
		m.Delete("/:id", DeleteMeasGroup)
		m.Get("/:id", GetMeasGroupByID)
	})
	m.Group("/measfilters", func() {
		m.Get("/", GetMeasFilter)
		m.Post("/", bind(MeasFilterCfg{}), AddMeasFilter)
		m.Put("/:id", bind(MeasFilterCfg{}), UpdateMeasFilter)
		m.Delete("/:id", DeleteMeasFilter)
		m.Get("/:id", GetMeasFilterByID)
	})
	m.Group("/influxservers", func() {
		m.Get("/", GetInfluxServer)
		m.Post("/", bind(InfluxCfg{}), AddInfluxServer)
		m.Put("/:id", bind(InfluxCfg{}), UpdateInfluxServer)
		m.Delete("/:id", DeleteInfluxServer)
		m.Get("/:id", GetInfluxServerByID)
		// NOTE(review): "ckeckondel" looks like a typo for "checkondel",
		// but the frontend calls this exact path — confirm before renaming.
		m.Get("/ckeckondel/:id", GetInfluxAffectOnDel)
	})
	// Data sources
	m.Group("/snmpdevice", func() {
		m.Get("/", GetSNMPDevices)
		m.Post("/", bind(SnmpDeviceCfg{}), AddSNMPDevice)
		m.Put("/:id", bind(SnmpDeviceCfg{}), UpdateSNMPDevice)
		m.Delete("/:id", DeleteSNMPDevice)
		m.Get("/:id", GetSNMPDeviceByID)
	})
	// Runtime (non-persisted) device operations.
	m.Group("/runtime", func() {
		m.Get("/version/", RTGetVersion)
		m.Get("/info/", RTGetInfo)
		m.Get("/info/:id", RTGetInfo)
		m.Put("/activatedev/:id", RTActivateDev)
		m.Put("/deactivatedev/:id", RTDeactivateDev)
		m.Put("/actsnmpdbg/:id", RTActSnmpDebugDev)
		m.Put("/deactsnmpdbg/:id", RTDeactSnmpDebugDev)
		m.Put("/setloglevel/:id/:level", RTSetLogLevelDev)
	})
	log.Printf("Server is running on localhost:%d...", port)
	httpServer := fmt.Sprintf("0.0.0.0:%d", port)
	log.Println(http.ListenAndServe(httpServer, m))
}
/****************/
/*Runtime Info
/****************/
// RTSetLogLevelDev sets the runtime log level (:level) of the running
// device given by :id; 404 if no such device is running.
// (Fixed copy-pasted doc comment that previously said RTActivateDev.)
func RTSetLogLevelDev(ctx *macaron.Context) {
	id := ctx.Params(":id")
	level := ctx.Params(":level")
	if dev, ok := devices[id]; !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	} else {
		log.Infof("set runtime log level from device id %s : %s", id, level)
		dev.RTSetLogLevel(level)
		ctx.JSON(200, dev)
	}
}
// RTActivateDev activates runtime processing for the running device
// given by :id; 404 if no such device is running.
func RTActivateDev(ctx *macaron.Context) {
	id := ctx.Params(":id")
	dev, ok := devices[id]
	if !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	}
	log.Infof("activating runtime on device %s", id)
	dev.RTActivate(true)
	ctx.JSON(200, dev)
}
// RTDeactivateDev deactivates runtime processing for the running device
// given by :id; 404 if no such device is running.
func RTDeactivateDev(ctx *macaron.Context) {
	id := ctx.Params(":id")
	dev, ok := devices[id]
	if !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	}
	log.Infof("deactivating runtime on device %s", id)
	dev.RTActivate(false)
	ctx.JSON(200, dev)
}
// RTActSnmpDebugDev enables SNMP debugging on the running device given
// by :id; 404 if no such device is running.
func RTActSnmpDebugDev(ctx *macaron.Context) {
	id := ctx.Params(":id")
	dev, ok := devices[id]
	if !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	}
	log.Infof("activating snmpdebug %s", id)
	dev.RTActSnmpDebug(true)
	ctx.JSON(200, dev)
}
// RTDeactSnmpDebugDev disables SNMP debugging on the running device
// given by :id; 404 if no such device is running.
func RTDeactSnmpDebugDev(ctx *macaron.Context) {
	id := ctx.Params(":id")
	dev, ok := devices[id]
	if !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	}
	log.Infof("deactivating snmpdebug %s", id)
	dev.RTActSnmpDebug(false)
	ctx.JSON(200, dev)
}
// RTGetInfo returns runtime data: for one device when :id is given (404
// if it is not running), or for every device when :id is empty.
func RTGetInfo(ctx *macaron.Context) {
	id := ctx.Params(":id")
	if len(id) == 0 {
		// No id: report every running device.
		ctx.JSON(200, &devices)
		return
	}
	// Single-device lookup.
	dev, ok := devices[id]
	if !ok {
		ctx.JSON(404, fmt.Errorf("there is not any device with id %s running", id))
		return
	}
	log.Infof("get runtime data from id %s", id)
	ctx.JSON(200, dev)
}
// RInfo carries the instance and build identification that RTGetVersion
// reports to the frontend.
type RInfo struct {
	InstanceID string
	Version string
	Commit string
	Branch string
	BuildStamp string
}
// RTGetVersion returns the instance ID and build information as JSON.
func RTGetVersion(ctx *macaron.Context) {
	ctx.JSON(200, &RInfo{
		InstanceID: cfg.General.InstanceID,
		Version:    version,
		Commit:     commit,
		Branch:     branch,
		BuildStamp: buildstamp,
	})
}
/****************/
/*SNMP DEVICES
/****************/
// GetSNMPDevices returns the snmpdevice config list to the frontend.
func GetSNMPDevices(ctx *macaron.Context) {
	list, err := cfg.Database.GetSnmpDeviceCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Devices :%+s", err)
		return
	}
	log.Debugf("Getting DEVICEs %+v", &list)
	ctx.JSON(200, &list)
}
// AddSNMPDevice inserts a new snmpdevice config into the internal DB.
func AddSNMPDevice(ctx *macaron.Context, dev SnmpDeviceCfg) {
	log.Printf("ADDING DEVICE %+v", dev)
	affected, err := cfg.Database.AddSnmpDeviceCfg(dev)
	if err != nil {
		log.Warningf("Error on insert for device %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return data or affected
	ctx.JSON(200, &dev)
}
// UpdateSNMPDevice updates the snmpdevice config identified by :id with
// the posted body; 404 with the DB error on failure.
func UpdateSNMPDevice(ctx *macaron.Context, dev SnmpDeviceCfg) {
	id := ctx.Params(":id")
	// Fixed log-message typo "Tying" -> "Trying".
	log.Debugf("Trying to update: %+v", dev)
	affected, err := cfg.Database.UpdateSnmpDeviceCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update for device %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return device data
	ctx.JSON(200, &dev)
}
// DeleteSNMPDevice removes the snmpdevice config identified by :id;
// 404 with the DB error on failure.
func DeleteSNMPDevice(ctx *macaron.Context) {
	id := ctx.Params(":id")
	// Fixed log-message typo "Tying" -> "Trying".
	log.Debugf("Trying to delete: %+v", id)
	affected, err := cfg.Database.DelSnmpDeviceCfg(id)
	if err != nil {
		log.Warningf("Error on delete1 for device %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, "deleted")
}
// GetSNMPDeviceByID returns one snmpdevice config by :id, or 404 with
// the lookup error.
func GetSNMPDeviceByID(ctx *macaron.Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetSnmpDeviceCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Device for device %s , error: %s", id, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, &dev)
}
/****************/
/*SNMP METRICS
/****************/
// GetMetrics returns the metric config list to the frontend.
func GetMetrics(ctx *macaron.Context) {
	list, err := cfg.Database.GetSnmpMetricCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Metrics :%+s", err)
		return
	}
	log.Debugf("Getting Metrics %+v", &list)
	ctx.JSON(200, &list)
}
// AddMetric inserts a new metric config into the internal DB.
func AddMetric(ctx *macaron.Context, dev SnmpMetricCfg) {
	log.Printf("ADDING Metric %+v", dev)
	affected, err := cfg.Database.AddSnmpMetricCfg(dev)
	if err != nil {
		log.Warningf("Error on insert Metric %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return data or affected
	ctx.JSON(200, &dev)
}
// UpdateMetric updates the metric config identified by :id with the
// posted body; 404 with the DB error on failure.
func UpdateMetric(ctx *macaron.Context, dev SnmpMetricCfg) {
	id := ctx.Params(":id")
	// Fixed log-message typo "Tying" -> "Trying".
	log.Debugf("Trying to update: %+v", dev)
	affected, err := cfg.Database.UpdateSnmpMetricCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update Metric %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return device data
	ctx.JSON(200, &dev)
}
// DeleteMetric removes the metric config identified by :id; 404 with
// the DB error on failure.
func DeleteMetric(ctx *macaron.Context) {
	id := ctx.Params(":id")
	// Fixed log-message typo "Tying" -> "Trying".
	log.Debugf("Trying to delete: %+v", id)
	affected, err := cfg.Database.DelSnmpMetricCfg(id)
	if err != nil {
		log.Warningf("Error on delete Metric %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, "deleted")
}
// GetMetricByID returns one metric config by :id, or 404 with the
// lookup error.
func GetMetricByID(ctx *macaron.Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetSnmpMetricCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Metric for device %s , error: %s", id, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, &dev)
}
/****************/
/*INFLUX MEASUREMENTS
/****************/
// GetMeas returns the influx measurement config list to the frontend.
func GetMeas(ctx *macaron.Context) {
	list, err := cfg.Database.GetInfluxMeasurementCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Influx Measurements :%+s", err)
		return
	}
	log.Debugf("Getting Measurements %+v", &list)
	ctx.JSON(200, &list)
}
// AddMeas inserts a new measurement config into the internal DB.
func AddMeas(ctx *macaron.Context, dev InfluxMeasurementCfg) {
	log.Printf("ADDING Measurement %+v", dev)
	affected, err := cfg.Database.AddInfluxMeasurementCfg(dev)
	if err != nil {
		log.Warningf("Error on insert Measurement %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return data or affected
	ctx.JSON(200, &dev)
}
// UpdateMeas updates the measurement config identified by :id with the
// posted body; 404 with the DB error on failure.
func UpdateMeas(ctx *macaron.Context, dev InfluxMeasurementCfg) {
	id := ctx.Params(":id")
	// Fixed log-message typo "Tying" -> "Trying".
	log.Debugf("Trying to update: %+v", dev)
	affected, err := cfg.Database.UpdateInfluxMeasurementCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update Measurement %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return device data
	ctx.JSON(200, &dev)
}
// DeleteMeas removes the measurement config identified by :id; 404 with
// the DB error on failure.
func DeleteMeas(ctx *macaron.Context) {
	id := ctx.Params(":id")
	// Fixed log-message typo "Tying" -> "Trying".
	log.Debugf("Trying to delete: %+v", id)
	affected, err := cfg.Database.DelInfluxMeasurementCfg(id)
	if err != nil {
		log.Warningf("Error on delete Measurement %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, "deleted")
}
// GetMeasByID returns one measurement config by :id, or 404 with the
// lookup error.
func GetMeasByID(ctx *macaron.Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetInfluxMeasurementCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Measurement for device %s , error: %s", id, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, &dev)
}
/****************/
/*MEASUREMENT GROUPS
/****************/
// GetMeasGroup returns the measurement-group config list to the frontend.
func GetMeasGroup(ctx *macaron.Context) {
	list, err := cfg.Database.GetMGroupsCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Measurement Group :%+s", err)
		return
	}
	log.Debugf("Getting Meas Group %+v", &list)
	ctx.JSON(200, &list)
}
// AddMeasGroup inserts a new measurement-group config into the internal DB.
func AddMeasGroup(ctx *macaron.Context, dev MGroupsCfg) {
	log.Printf("ADDING Measurement Group %+v", dev)
	affected, err := cfg.Database.AddMGroupsCfg(dev)
	if err != nil {
		log.Warningf("Error on insert Measurement Group %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return data or affected
	ctx.JSON(200, &dev)
}
// UpdateMeasGroup updates the measurement-group config identified by
// :id; 404 with the DB error on failure.
func UpdateMeasGroup(ctx *macaron.Context, dev MGroupsCfg) {
	id := ctx.Params(":id")
	// Fixed log-message typo "Tying" -> "Trying".
	log.Debugf("Trying to update: %+v", dev)
	affected, err := cfg.Database.UpdateMGroupsCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update Measurement Group %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return device data
	ctx.JSON(200, &dev)
}
// DeleteMeasGroup removes the measurement-group config identified by
// :id; 404 with the DB error on failure.
func DeleteMeasGroup(ctx *macaron.Context) {
	id := ctx.Params(":id")
	// Fixed log-message typo "Tying" -> "Trying".
	log.Debugf("Trying to delete: %+v", id)
	affected, err := cfg.Database.DelMGroupsCfg(id)
	if err != nil {
		log.Warningf("Error on delete Measurement Group %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, "deleted")
}
// GetMeasGroupByID returns one measurement-group config by :id, or 404
// with the lookup error.
func GetMeasGroupByID(ctx *macaron.Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetMGroupsCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Measurement Group for device %s , error: %s", id, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, &dev)
}
/********************/
/*MEASUREMENT FILTERS
/********************/
// GetMeasFilter returns the measurement-filter config list to the frontend.
func GetMeasFilter(ctx *macaron.Context) {
	list, err := cfg.Database.GetMeasFilterCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Measurement Filter :%+s", err)
		return
	}
	log.Debugf("Getting Measurement Filter %+v", &list)
	ctx.JSON(200, &list)
}
// AddMeasFilter inserts a new measurement-filter config into the
// internal DB; 404 with the DB error on failure.
func AddMeasFilter(ctx *macaron.Context, dev MeasFilterCfg) {
	log.Printf("ADDING measurement Filter %+v", dev)
	affected, err := cfg.Database.AddMeasFilterCfg(dev)
	if err != nil {
		// Fixed log-message typo "Measurment" -> "Measurement".
		log.Warningf("Error on insert Measurement Filter %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return data or affected
	ctx.JSON(200, &dev)
}
// UpdateMeasFilter updates the measurement-filter config identified by
// :id; 404 with the DB error on failure.
func UpdateMeasFilter(ctx *macaron.Context, dev MeasFilterCfg) {
	id := ctx.Params(":id")
	// Fixed log-message typos "Tying" -> "Trying", "Measurment" -> "Measurement".
	log.Debugf("Trying to update: %+v", dev)
	affected, err := cfg.Database.UpdateMeasFilterCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update Measurement Filter %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return device data
	ctx.JSON(200, &dev)
}
// DeleteMeasFilter removes the measurement-filter config identified by
// :id; 404 with the DB error on failure.
func DeleteMeasFilter(ctx *macaron.Context) {
	id := ctx.Params(":id")
	// Fixed log-message typo "Tying" -> "Trying".
	log.Debugf("Trying to delete: %+v", id)
	affected, err := cfg.Database.DelMeasFilterCfg(id)
	if err != nil {
		log.Warningf("Error on delete Measurement Filter %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, "deleted")
}
// GetMeasFilterByID returns one measurement-filter config by :id, or
// 404 with the lookup error.
func GetMeasFilterByID(ctx *macaron.Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetMeasFilterCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Measurement Filter for device %s , error: %s", id, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, &dev)
}
/****************/
/* INFLUX SERVERS
/****************/
// GetInfluxServer returns the influx backend config list to the frontend.
func GetInfluxServer(ctx *macaron.Context) {
	list, err := cfg.Database.GetInfluxCfgArray("")
	if err != nil {
		ctx.JSON(404, err)
		log.Errorf("Error on get Influx db :%+s", err)
		return
	}
	// Fixed copy-pasted log message (previously "Getting DEVICEs").
	log.Debugf("Getting Influx Servers %+v", &list)
	ctx.JSON(200, &list)
}
// AddInfluxServer inserts a new influx backend config into the internal DB.
func AddInfluxServer(ctx *macaron.Context, dev InfluxCfg) {
	log.Printf("ADDING Influx Backend %+v", dev)
	affected, err := cfg.Database.AddInfluxCfg(dev)
	if err != nil {
		log.Warningf("Error on insert new Backend %s , affected : %+v , error: %s", dev.ID, affected, err)
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return data or affected
	ctx.JSON(200, &dev)
}
// UpdateInfluxServer updates the influx backend config identified by
// :id; 404 with the DB error on failure.
func UpdateInfluxServer(ctx *macaron.Context, dev InfluxCfg) {
	id := ctx.Params(":id")
	// Fixed log-message typo "Tying" -> "Trying".
	log.Debugf("Trying to update: %+v", dev)
	affected, err := cfg.Database.UpdateInfluxCfg(id, dev)
	if err != nil {
		log.Warningf("Error on update Influx db %s , affected : %+v , error: %s", dev.ID, affected, err)
		// Fix: the error path previously sent no HTTP response at all,
		// leaving the client without an answer. Mirror the other Update
		// handlers and return 404 with the error.
		ctx.JSON(404, err)
		return
	}
	//TODO: review if needed return device data
	ctx.JSON(200, &dev)
}
// DeleteInfluxServer removes the influx backend config identified by
// :id; 404 with the DB error on failure.
func DeleteInfluxServer(ctx *macaron.Context) {
	id := ctx.Params(":id")
	// Fixed log-message typo "Tying" -> "Trying".
	log.Debugf("Trying to delete: %+v", id)
	affected, err := cfg.Database.DelInfluxCfg(id)
	if err != nil {
		log.Warningf("Error on delete influx db %s , affected : %+v , error: %s", id, affected, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, "deleted")
}
// GetInfluxServerByID returns one influx backend config by :id, or 404
// with the lookup error.
func GetInfluxServerByID(ctx *macaron.Context) {
	id := ctx.Params(":id")
	dev, err := cfg.Database.GetInfluxCfgByID(id)
	if err != nil {
		log.Warningf("Error on get Influx db data for device %s , error: %s", id, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, &dev)
}
// GetInfluxAffectOnDel returns the objects that would be affected by
// deleting the influx backend :id, or 404 with the lookup error.
func GetInfluxAffectOnDel(ctx *macaron.Context) {
	id := ctx.Params(":id")
	affected, err := cfg.Database.GetInfluxCfgAffectOnDel(id)
	if err != nil {
		log.Warningf("Error on get object array for influx device %s , error: %s", id, err)
		ctx.JSON(404, err)
		return
	}
	ctx.JSON(200, &affected)
}
/****************/
/*LOGIN
/****************/

// myLoginHandler checks the posted credentials against the configured
// admin user/password and answers 200 "OK" or 404 "ERROR".
func myLoginHandler(ctx *macaron.Context, user UserLogin) {
	// Security fix: the previous %#v dump printed the submitted password
	// AND the configured admin credentials to stdout on every attempt.
	fmt.Printf("USER LOGIN: USER: %s", user.UserName)
	if user.UserName == cfg.HTTP.AdminUser && user.Password == cfg.HTTP.AdminPassword {
		fmt.Println("OK")
		ctx.JSON(200, "OK")
		return
	}
	fmt.Println("ERROR")
	// NOTE(review): 401 would be the conventional status for a failed
	// login, but the frontend may depend on 404 — confirm before changing.
	ctx.JSON(404, "ERROR")
}
|
package product
import (
"database/sql"
"fmt"
"github.com/atang152/go_webapp/config"
"net/http"
)
// Index handles GET requests by loading every product and rendering
// index.html; any other method gets a 405 and DB failures a 500.
func Index(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
		return
	}
	products, err := AllProduct()
	if err != nil {
		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
		return
	}
	config.TPL.ExecuteTemplate(w, "index.html", products)
}
// Show handles GET requests for a single product page: 404 when the
// product does not exist, 500 on other lookup errors, 405 for non-GET.
func Show(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodGet {
		http.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
		return
	}
	p, err := OneProduct(r)
	if err == sql.ErrNoRows {
		// Unknown product id.
		http.NotFound(w, r)
		return
	}
	if err != nil {
		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
		return
	}
	config.TPL.ExecuteTemplate(w, "product.html", p)
}
func AddToCart(w http.ResponseWriter, r *http.Request) {
if r.Method == "POST" {
//Form submitted
r.ParseForm()
fmt.Println(r.Form["product-size"])
fmt.Println(r.Form["product-color"])
} else {
http.Error(w, http.StatusText(405), http.StatusMethodNotAllowed)
return
}
}
|
package api
import (
"fmt"
"net/http"
"github.com/Sirupsen/logrus"
"github.com/gorilla/mux"
"github.com/pkg/errors"
"github.com/rancher/go-rancher/api"
"github.com/rancher/longhorn-manager/types"
)
// BackupsHandlers serves the backup-related HTTP endpoints on top of a
// VolumeManager.
type BackupsHandlers struct {
	man types.VolumeManager
}
// ListVolume lists all backup volumes at the configured backup target
// and writes them as a collection resource.
func (bh *BackupsHandlers) ListVolume(w http.ResponseWriter, req *http.Request) error {
	apiContext := api.GetApiContext(req)
	settings, err := bh.man.Settings().GetSettings()
	if err != nil {
		// Fix: the underlying error was discarded; keep it in the chain.
		// (Checked separately from settings == nil because Wrap(nil, ...)
		// returns nil.)
		return errors.Wrap(err, "cannot backup: unable to read settings")
	}
	if settings == nil {
		return errors.New("cannot backup: unable to read settings")
	}
	backupTarget := settings.BackupTarget
	if backupTarget == "" {
		return errors.New("cannot backup: backupTarget not set")
	}
	backups := bh.man.ManagerBackupOps(backupTarget)
	volumes, err := backups.ListVolumes()
	if err != nil {
		return errors.Wrapf(err, "error listing backups, backupTarget '%s'", backupTarget)
	}
	logrus.Debugf("success: list backup volumes, backupTarget '%s'", backupTarget)
	apiContext.Write(toBackupVolumeCollection(volumes, apiContext))
	return nil
}
// GetVolume returns the backup volume named by the volName route
// variable from the configured backup target.
func (bh *BackupsHandlers) GetVolume(w http.ResponseWriter, req *http.Request) error {
	apiContext := api.GetApiContext(req)
	volName := mux.Vars(req)["volName"]
	settings, err := bh.man.Settings().GetSettings()
	if err != nil {
		// Fix: preserve the underlying settings error in the chain.
		return errors.Wrap(err, "cannot backup: unable to read settings")
	}
	if settings == nil {
		return errors.New("cannot backup: unable to read settings")
	}
	backupTarget := settings.BackupTarget
	if backupTarget == "" {
		return errors.New("cannot backup: backupTarget not set")
	}
	backups := bh.man.ManagerBackupOps(backupTarget)
	bv, err := backups.GetVolume(volName)
	if err != nil {
		return errors.Wrapf(err, "error get backup volume, backupTarget '%s', volume '%s'", backupTarget, volName)
	}
	logrus.Debugf("success: get backup volume, volume '%s', backupTarget '%s'", volName, backupTarget)
	apiContext.Write(toBackupVolumeResource(bv, apiContext))
	return nil
}
// List returns all backups of the volume named by the volName route
// variable from the configured backup target.
func (bh *BackupsHandlers) List(w http.ResponseWriter, req *http.Request) error {
	volName := mux.Vars(req)["volName"]
	settings, err := bh.man.Settings().GetSettings()
	if err != nil {
		// Fix: preserve the underlying settings error in the chain.
		return errors.Wrap(err, "cannot backup: unable to read settings")
	}
	if settings == nil {
		return errors.New("cannot backup: unable to read settings")
	}
	backupTarget := settings.BackupTarget
	if backupTarget == "" {
		return errors.New("cannot backup: backupTarget not set")
	}
	backups := bh.man.ManagerBackupOps(backupTarget)
	bs, err := backups.List(volName)
	if err != nil {
		return errors.Wrapf(err, "error listing backups, backupTarget '%s', volume '%s'", backupTarget, volName)
	}
	logrus.Debugf("success: list backups, volume '%s', backupTarget '%s'", volName, backupTarget)
	api.GetApiContext(req).Write(toBackupCollection(bs))
	return nil
}
// backupURL builds the backup location string
// "<target>?backup=<name>&volume=<vol>".
// NOTE(review): the name/volume components are not URL-escaped — assumed
// safe identifiers; confirm against the naming rules before relying on
// special characters.
func backupURL(backupTarget, backupName, volName string) string {
	return backupTarget + "?backup=" + backupName + "&volume=" + volName
}
// Get reads a BackupInput from the request, locates the named backup of
// the volName route variable at the configured backup target, and writes
// it as a resource. A missing backup answers 404 with no body.
func (bh *BackupsHandlers) Get(w http.ResponseWriter, req *http.Request) error {
	var input BackupInput
	apiContext := api.GetApiContext(req)
	if err := apiContext.Read(&input); err != nil {
		return err
	}
	if input.Name == "" {
		return errors.Errorf("empty backup name is not allowed")
	}
	volName := mux.Vars(req)["volName"]
	settings, err := bh.man.Settings().GetSettings()
	if err != nil {
		// Fix: preserve the underlying settings error in the chain.
		return errors.Wrap(err, "cannot backup: unable to read settings")
	}
	if settings == nil {
		return errors.New("cannot backup: unable to read settings")
	}
	backupTarget := settings.BackupTarget
	if backupTarget == "" {
		return errors.New("cannot backup: backupTarget not set")
	}
	backups := bh.man.ManagerBackupOps(backupTarget)
	url := backupURL(backupTarget, input.Name, volName)
	backup, err := backups.Get(url)
	if err != nil {
		return errors.Wrapf(err, "error getting backup '%s'", url)
	}
	if backup == nil {
		logrus.Warnf("not found: backup '%s'", url)
		w.WriteHeader(http.StatusNotFound)
		return nil
	}
	logrus.Debugf("success: got backup '%s'", url)
	apiContext.Write(toBackupResource(backup))
	return nil
}
// Delete reads a BackupInput from the request and removes the named
// backup of the volName route variable from the configured backup target.
func (bh *BackupsHandlers) Delete(w http.ResponseWriter, req *http.Request) error {
	var input BackupInput
	apiContext := api.GetApiContext(req)
	if err := apiContext.Read(&input); err != nil {
		return err
	}
	if input.Name == "" {
		return errors.Errorf("empty backup name is not allowed")
	}
	volName := mux.Vars(req)["volName"]
	settings, err := bh.man.Settings().GetSettings()
	if err != nil {
		// Fix: preserve the underlying settings error in the chain.
		return errors.Wrap(err, "cannot backup: unable to read settings")
	}
	if settings == nil {
		return errors.New("cannot backup: unable to read settings")
	}
	backupTarget := settings.BackupTarget
	if backupTarget == "" {
		return errors.New("cannot backup: backupTarget not set")
	}
	backups := bh.man.ManagerBackupOps(backupTarget)
	url := backupURL(backupTarget, input.Name, volName)
	if err := backups.Delete(url); err != nil {
		return errors.Wrapf(err, "error deleting backup '%s'", url)
	}
	logrus.Debugf("success: removed backup '%s'", url)
	apiContext.Write(&Empty{})
	return nil
}
|
package main
import "fmt"
// main prints a greeting, the sum of all integers below 28123, and the
// sum (duplicates included) of all pairwise sums of abundant numbers.
func main() {
	fmt.Println("Greg is cool!")
	fmt.Println(sum_all_integers())
	//fmt.Println(sum(abundant_numbers()))
	//fmt.Println(proper_divisors(28))
	//fmt.Println(sum(proper_divisors(28)))
	//fmt.Println(sum_all_integers() - sum(abundant_numbers()))
	fmt.Println(sum(abundant_sums(abundant_numbers())))
}
func abundant_sums(aN []int) []int{
counter := 0
zzz := 0
aS := make([]int, 1000000)
for i := range aN {
//for i := 0; i <= len(aN); i ++ {
for j := range aN {
//for j := 0; j <= len(aN); j ++ {
fmt.Println("aN[i] = ", aN[i])
fmt.Println("aN[j] = ", aN[j])
zzz = aN[i] + aN[j]
if zzz < 28123 {
aS[counter] = zzz
counter += 1
}
}
}
return aS[:counter]
}
// abundant_numbers returns all abundant numbers (sum of proper divisors
// exceeds the number itself) up to the search limit.
// NOTE(review): the limit is 281 here, but Project Euler 23 uses 28123 —
// confirm whether 281 is a deliberate test value.
func abundant_numbers() []int {
	number := 281
	// Fix: the counter previously started at 1, so L[0] was never
	// assigned and the returned slice began with a spurious 0.
	L := make([]int, 0, number)
	for i := 1; i <= number; i++ {
		if sum(proper_divisors(i)) > i {
			L = append(L, i)
		}
	}
	return L
}
// sum returns the total of all elements in pd (0 for an empty slice).
func sum(pd []int) int {
	total := 0
	for _, v := range pd {
		total += v
	}
	return total
}
// proper_divisors returns every divisor of number smaller than number
// itself; candidates above number/2 cannot divide number, so the scan
// stops there.
func proper_divisors(number int) (factors []int) {
	divisors := make([]int, 0, number/2+1)
	for d := 1; d <= number/2; d++ {
		if number%d == 0 {
			divisors = append(divisors, d)
		}
	}
	return divisors
}
// sum_all_integers returns the sum of the integers 1..28122, i.e.
// everything below the Project Euler 23 limit of 28123.
func sum_all_integers() int {
	total := 0
	for n := 28122; n > 0; n-- {
		total += n
	}
	return total
}
|
// Copyright 2016 Martin Hebnes Pedersen (LA5NTA). All rights reserved.
// Use of this source code is governed by the MIT-license that can be
// found in the LICENSE file.
// A portable Winlink client for amateur radio email.
package main
import (
"context"
"fmt"
"io"
"log"
"net"
"os"
"os/exec"
"os/signal"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/la5nta/pat/cfg"
"github.com/la5nta/pat/internal/buildinfo"
"github.com/la5nta/pat/internal/debug"
"github.com/la5nta/pat/internal/directories"
"github.com/la5nta/pat/internal/forms"
"github.com/la5nta/pat/internal/gpsd"
"github.com/la5nta/wl2k-go/catalog"
"github.com/la5nta/wl2k-go/fbb"
"github.com/la5nta/wl2k-go/mailbox"
"github.com/la5nta/wl2k-go/rigcontrol/hamlib"
"github.com/spf13/pflag"
)
// Connection-method identifiers accepted by the connect/listen machinery.
const (
	MethodArdop = "ardop"
	MethodTelnet = "telnet"
	MethodPactor = "pactor"
	MethodVaraHF = "varahf"
	MethodVaraFM = "varafm"
	MethodAX25 = "ax25"
	// AX.25 engine variants are expressed as "ax25+<engine>".
	MethodAX25AGWPE = MethodAX25 + "+agwpe"
	MethodAX25Linux = MethodAX25 + "+linux"
	MethodAX25SerialTNC = MethodAX25 + "+serial-tnc"
	// TODO: Remove after some release cycles (2023-05-21)
	MethodSerialTNCDeprecated = "serial-tnc"
)
// commands defines every CLI subcommand: its name and aliases, help text,
// recognized options, and handler. MayConnect marks commands that may open
// a session (main then loads rigs and starts the exchange loop); LongLived
// marks commands that keep running (main then starts listeners and the
// scheduler).
var commands = []Command{
	{
		Str:        "connect",
		Desc:       "Connect to a remote station.",
		HandleFunc: connectHandle,
		Usage:      UsageConnect,
		Example:    ExampleConnect,
		MayConnect: true,
	},
	{
		Str:   "interactive",
		Desc:  "Run interactive mode.",
		Usage: "[options]",
		Options: map[string]string{
			"--http, -h": "Start http server for web UI in the background.",
		},
		HandleFunc: InteractiveHandle,
		MayConnect: true,
		LongLived:  true,
	},
	{
		Str:   "http",
		Desc:  "Run http server for web UI.",
		Usage: "[options]",
		Options: map[string]string{
			"--addr, -a": "Listen address. Default is :8080.",
		},
		HandleFunc: httpHandle,
		MayConnect: true,
		LongLived:  true,
	},
	{
		Str:  "compose",
		Desc: "Compose a new message.",
		Usage: "[options]\n" +
			"\tIf no options are passed, composes interactively.\n" +
			"\tIf options are passed, reads message from stdin similar to mail(1).",
		Options: map[string]string{
			"--from, -r":        "Address to send from. Default is your call from config or --mycall, but can be specified to use tactical addresses.",
			"--subject, -s":     "Subject",
			"--attachment , -a": "Attachment path (may be repeated)",
			"--cc, -c":          "CC Address(es) (may be repeated)",
			"--p2p-only":        "Send over peer to peer links only (avoid CMS)",
			"":                  "Recipient address (may be repeated)",
		},
		HandleFunc: composeMessage,
	},
	{
		Str:  "read",
		Desc: "Read messages.",
		HandleFunc: func(ctx context.Context, args []string) {
			readMail(ctx)
		},
	},
	{
		Str:     "composeform",
		Aliases: []string{"formPath"},
		Desc:    "Post form-based report.",
		Usage:   "[options]",
		Options: map[string]string{
			"--template": "path to the form template file. Uses the --forms directory as root. Defaults to 'ICS USA Forms/ICS213.txt'",
		},
		HandleFunc: composeFormReport,
	},
	{
		Str:     "position",
		Aliases: []string{"pos"},
		Desc:    "Post a position report (GPSd or manual entry).",
		Usage:   "[options]",
		Options: map[string]string{
			"--latlon":      "latitude,longitude in decimal degrees for manual entry. Will use GPSd if this is empty.",
			"--comment, -c": "Comment to be included in the position report.",
		},
		Example:    ExamplePosition,
		HandleFunc: posReportHandle,
	},
	{
		Str:        "extract",
		Desc:       "Extract attachments from a message file.",
		Usage:      "file",
		HandleFunc: extractMessageHandle,
	},
	{
		Str:   "rmslist",
		Desc:  "Print/search in list of RMS nodes.",
		Usage: "[options] [search term]",
		Options: map[string]string{
			"--mode, -m":           "Mode filter.",
			"--band, -b":           "Band filter (e.g. '80m').",
			"--force-download, -d": "Force download of latest list from winlink.org.",
			"--sort-distance, -s":  "Sort by distance",
		},
		HandleFunc: rmsListHandle,
	},
	{
		Str:  "updateforms",
		Desc: "Download the latest form templates from winlink.org.",
		HandleFunc: func(ctx context.Context, args []string) {
			if _, err := formsMgr.UpdateFormTemplates(ctx); err != nil {
				log.Printf("%v", err)
			}
		},
	},
	{
		Str:        "configure",
		Desc:       "Open configuration file for editing.",
		HandleFunc: configureHandle,
	},
	{
		Str:  "version",
		Desc: "Print the application version.",
		HandleFunc: func(_ context.Context, args []string) {
			fmt.Printf("%s %s\n", buildinfo.AppName, buildinfo.VersionString())
		},
	},
	{
		Str:        "env",
		Desc:       "List environment variables.",
		HandleFunc: envHandle,
	},
	{
		// "help" has no HandleFunc; main dispatches it to helpHandle directly.
		Str:  "help",
		Desc: "Print detailed help for a given command.",
		// Avoid initialization loop by invoking helpHandler in main
	},
}
// Process-wide state, initialized in init() and main().
var (
	config       cfg.Config            // Parsed configuration (LoadConfig in main)
	rigs         map[string]hamlib.VFO // Hamlib rigs by name; loaded only when the command may connect
	logWriter    io.Writer             // Log destination: log file + stdout (see main)
	eventLog     *EventLogger          // JSON event log (NewEventLogger in main)
	exchangeChan chan ex               // The channel that the exchange loop is listening on
	exchangeConn net.Conn              // Pointer to the active session connection (exchange)
	mbox         *mailbox.DirHandler   // The mailbox
	listenHub    *ListenerHub
	promptHub    *PromptHub
	formsMgr     *forms.Manager // Forms subsystem; initialized in main
)

// fOptions holds the values of the global command line flags defined in
// optionsSet(); unset values may be backfilled from the config file in main.
var fOptions struct {
	IgnoreBusy bool // Move to connect?
	SendOnly   bool // Move to connect?
	RadioOnly  bool
	Robust     bool
	MyCall     string
	Listen     string
	MailboxPath  string
	ConfigPath   string
	LogPath      string
	EventLogPath string
	FormsPath    string
}
// optionsSet builds the global flag set shared by every subcommand, with
// path defaults derived from the platform-specific directory layout.
func optionsSet() *pflag.FlagSet {
	set := pflag.NewFlagSet("options", pflag.ExitOnError)

	set.StringVar(&fOptions.MyCall, "mycall", "", "Your callsign (winlink user).")
	set.StringVarP(&fOptions.Listen, "listen", "l", "", "Comma-separated list of methods to listen on (e.g. ardop,telnet,ax25).")
	set.BoolVarP(&fOptions.SendOnly, "send-only", "s", false, "Download inbound messages later, send only.")
	set.BoolVarP(&fOptions.RadioOnly, "radio-only", "", false, "Radio Only mode (Winlink Hybrid RMS only).")
	set.BoolVar(&fOptions.IgnoreBusy, "ignore-busy", false, "Don't wait for clear channel before connecting to a node.")

	var (
		defaultMBox         = filepath.Join(directories.DataDir(), "mailbox")
		defaultFormsPath    = filepath.Join(directories.DataDir(), "Standard_Forms")
		defaultConfigPath   = filepath.Join(directories.ConfigDir(), "config.json")
		defaultLogPath      = filepath.Join(directories.StateDir(), strings.ToLower(buildinfo.AppName+".log"))
		defaultEventLogPath = filepath.Join(directories.StateDir(), "eventlog.json")
	)
	set.StringVar(&fOptions.MailboxPath, "mbox", defaultMBox, "Path to mailbox directory.")
	set.StringVar(&fOptions.FormsPath, "forms", defaultFormsPath, "Path to forms directory.")
	set.StringVar(&fOptions.ConfigPath, "config", defaultConfigPath, "Path to config file.")
	set.StringVar(&fOptions.LogPath, "log", defaultLogPath, "Path to log file. The file is truncated on each startup.")
	set.StringVar(&fOptions.EventLogPath, "event-log", defaultEventLogPath, "Path to event log file.")

	return set
}
// init wires up the global listener/prompt hubs and installs a custom pflag
// usage function that prints the command list followed by the global options.
func init() {
	listenHub = NewListenerHub()
	promptHub = NewPromptHub()

	pflag.Usage = func() {
		fmt.Fprintf(os.Stderr, "%s is a client for the Winlink 2000 Network.\n\n", buildinfo.AppName)
		fmt.Fprintf(os.Stderr, "Usage:\n %s [options] command [arguments]\n", os.Args[0])
		fmt.Fprintln(os.Stderr, "\nCommands:")
		for _, cmd := range commands {
			fmt.Fprintf(os.Stderr, " %-15s %s\n", cmd.Str, cmd.Desc)
		}
		fmt.Fprintln(os.Stderr, "\nOptions:")
		optionsSet().PrintDefaults()
		fmt.Fprint(os.Stderr, "\n")
	}
}
// main parses the command line, performs global initialization (signal
// handling, config, logging, mailbox, forms), and dispatches to the selected
// command's handler. Initialization order matters: logging must be up before
// config-dependent steps log anything, and cleanup() is deferred before any
// resource-opening step that follows it.
func main() {
	cmd, args := parseFlags(os.Args)

	debug.Printf("Version: %s", buildinfo.VersionString())
	debug.Printf("Command: %s %v", cmd.Str, args)

	// Normalize all user-supplied paths.
	fOptions.MailboxPath = filepath.Clean(fOptions.MailboxPath)
	fOptions.FormsPath = filepath.Clean(fOptions.FormsPath)
	fOptions.ConfigPath = filepath.Clean(fOptions.ConfigPath)
	fOptions.LogPath = filepath.Clean(fOptions.LogPath)
	fOptions.EventLogPath = filepath.Clean(fOptions.EventLogPath)
	debug.Printf("Mailbox dir is\t'%s'", fOptions.MailboxPath)
	debug.Printf("Forms dir is\t'%s'", fOptions.FormsPath)
	debug.Printf("Config file is\t'%s'", fOptions.ConfigPath)
	debug.Printf("Log file is \t'%s'", fOptions.LogPath)
	debug.Printf("Event log file is\t'%s'", fOptions.EventLogPath)

	directories.MigrateLegacyDataDir()

	// Graceful shutdown by cancelling background context on interrupt.
	//
	// If we have an active connection, cancel that instead.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	go func() {
		dirtyDisconnectNext := false // So we can do a dirty disconnect on the second interrupt
		for {
			<-sig
			if ok := abortActiveConnection(dirtyDisconnectNext); ok {
				dirtyDisconnectNext = !dirtyDisconnectNext
			} else {
				break
			}
		}
		cancel()
	}()

	// Skip initialization for some commands
	switch cmd.Str {
	case "help":
		helpHandle(args)
		return
	case "configure", "version":
		cmd.HandleFunc(ctx, args)
		return
	}

	// Enable the GZIP extension experiment by default
	if _, ok := os.LookupEnv("GZIP_EXPERIMENT"); !ok {
		os.Setenv("GZIP_EXPERIMENT", "1")
	}

	// Parse configuration file
	var err error
	config, err = LoadConfig(fOptions.ConfigPath, cfg.DefaultConfig)
	if err != nil {
		log.Fatalf("Unable to load/write config: %s", err)
	}

	// Initialize logger
	f, err := os.Create(fOptions.LogPath)
	if err != nil {
		log.Fatal(err)
	}
	logWriter = io.MultiWriter(f, os.Stdout)
	log.SetOutput(logWriter)
	eventLog, err = NewEventLogger(fOptions.EventLogPath)
	if err != nil {
		log.Fatal("Unable to open event log file:", err)
	}

	// Read command line options from config if unset
	if fOptions.MyCall == "" && config.MyCall == "" {
		fmt.Fprint(os.Stderr, "Missing mycall\n")
		os.Exit(1)
	} else if fOptions.MyCall == "" {
		fOptions.MyCall = config.MyCall
	}

	// Ensure mycall is all upper case.
	fOptions.MyCall = strings.ToUpper(fOptions.MyCall)

	// Don't use config password if we don't use config mycall
	if !strings.EqualFold(fOptions.MyCall, config.MyCall) {
		config.SecureLoginPassword = ""
	}

	// Replace placeholders in connect aliases
	for k, v := range config.ConnectAliases {
		config.ConnectAliases[k] = strings.ReplaceAll(v, cfg.PlaceholderMycall, fOptions.MyCall)
	}

	if fOptions.Listen == "" && len(config.Listen) > 0 {
		fOptions.Listen = strings.Join(config.Listen, ",")
	}

	// init forms subsystem
	formsMgr = forms.NewManager(forms.Config{
		FormsPath:  fOptions.FormsPath,
		MyCall:     fOptions.MyCall,
		Locator:    config.Locator,
		AppVersion: buildinfo.VersionStringShort(),
		UserAgent:  buildinfo.UserAgent(),
		LineReader: readLine,
		GPSd:       config.GPSd,
	})

	// Make sure we clean up on exit, closing any open resources etc.
	defer cleanup()

	// Load the mailbox handler
	loadMBox()

	if cmd.MayConnect {
		rigs = loadHamlibRigs()
		exchangeChan = exchangeLoop()

		// Periodic version reporting, unless disabled in config.
		go func() {
			if config.VersionReportingDisabled {
				return
			}
			for { // Check every 6 hours, but it won't post more frequent than 24h.
				postVersionUpdate() // Ignore errors
				time.Sleep(6 * time.Hour)
			}
		}()
	}

	if cmd.LongLived {
		if fOptions.Listen != "" {
			Listen(fOptions.Listen)
		}
		scheduleLoop()
	}

	// Start command execution
	cmd.HandleFunc(ctx, args)
}
// configureHandle opens the configuration file in the user's editor,
// writing a default config first when none exists yet.
func configureHandle(ctx context.Context, args []string) {
	// Ensure there is a file for the editor to open.
	if _, err := ReadConfig(fOptions.ConfigPath); os.IsNotExist(err) {
		if werr := WriteConfig(cfg.DefaultConfig, fOptions.ConfigPath); werr != nil {
			log.Fatalf("Unable to write default config: %s", werr)
		}
	}

	editor := exec.CommandContext(ctx, EditorName(), fOptions.ConfigPath)
	editor.Stdin, editor.Stdout, editor.Stderr = os.Stdin, os.Stdout, os.Stderr
	if err := editor.Run(); err != nil {
		log.Fatalf("Unable to start editor: %s", err)
	}
}
// InteractiveHandle runs the interactive prompt. When --http is given, the
// web UI server is also started in the background first.
func InteractiveHandle(ctx context.Context, args []string) {
	set := pflag.NewFlagSet("interactive", pflag.ExitOnError)
	var httpAddr string
	set.StringVar(&httpAddr, "http", "", "HTTP listen address")
	set.Lookup("http").NoOptDefVal = config.HTTPAddr // bare --http uses the configured address
	set.Parse(args)

	if httpAddr == "" {
		Interactive(ctx)
		return
	}

	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	go func() {
		if err := ListenAndServe(ctx, httpAddr); err != nil {
			log.Println(err)
		}
	}()
	// Give the http server a moment to come up before entering the prompt.
	time.Sleep(time.Second)
	Interactive(ctx)
}
// httpHandle runs the web UI http server in the foreground, listening on
// --addr (default: the configured address, falling back to :8080).
func httpHandle(ctx context.Context, args []string) {
	addr := config.HTTPAddr
	if addr == "" {
		addr = ":8080" // For backwards compatibility (remove in future)
	}

	flags := pflag.NewFlagSet("http", pflag.ExitOnError)
	flags.StringVarP(&addr, "addr", "a", addr, "Listen address.")
	flags.Parse(args)
	if addr == "" {
		flags.Usage()
		os.Exit(1)
	}

	// No terminal prompts while running as a pure web server.
	promptHub.OmitTerminal(true)

	if err := ListenAndServe(ctx, addr); err != nil {
		log.Println(err)
	}
}
// connectHandle connects to the remote station named by the first argument,
// exiting non-zero when the connection fails.
//
// Bug fix: the missing-argument case previously only printed a hint and then
// fell through to Connect("") anyway — and an empty args slice paniced on
// args[0]. Both cases now exit early with status 1.
func connectHandle(_ context.Context, args []string) {
	if len(args) == 0 || args[0] == "" {
		fmt.Println("Missing argument, try 'connect help'.")
		os.Exit(1)
	}
	if success := Connect(args[0]); !success {
		os.Exit(1)
	}
}
func helpHandle(args []string) {
arg := args[0]
var cmd *Command
for _, c := range commands {
if c.Str == arg {
cmd = &c
break
}
}
if arg == "" || cmd == nil {
pflag.Usage()
return
}
cmd.PrintUsage()
}
// cleanup releases every open resource before exit: any active connection,
// all listeners, each TNC/modem that was initialized, and the event log.
// Close failures are logged but do not stop the remaining steps.
func cleanup() {
	debug.Printf("Starting cleanup")
	defer debug.Printf("Cleanup done")
	abortActiveConnection(false)
	listenHub.Close()

	// Each modem/TNC global is nil unless it was opened during this run.
	if adTNC != nil {
		if err := adTNC.Close(); err != nil {
			log.Printf("Failure to close ardop TNC: %s", err)
		}
	}
	if pModem != nil {
		if err := pModem.Close(); err != nil {
			log.Printf("Failure to close pactor modem: %s", err)
		}
	}
	if varaFMModem != nil {
		if err := varaFMModem.Close(); err != nil {
			log.Printf("Failure to close varafm modem: %s", err)
		}
	}
	if varaHFModem != nil {
		if err := varaHFModem.Close(); err != nil {
			log.Printf("Failure to close varahf modem: %s", err)
		}
	}
	if agwpeTNC != nil {
		if err := agwpeTNC.Close(); err != nil {
			log.Printf("Failure to close AGWPE TNC: %s", err)
		}
	}

	eventLog.Close()
}
// loadMBox initializes the global mailbox handler rooted at
// <mailbox path>/<callsign>, terminating the program if the directory
// structure cannot be prepared.
func loadMBox() {
	handler := mailbox.NewDirHandler(
		filepath.Join(fOptions.MailboxPath, fOptions.MyCall),
		fOptions.SendOnly,
	)

	// Ensure the mailbox directories exist before anything uses them.
	if err := handler.Prepare(); err != nil {
		log.Fatal(err)
	}
	mbox = handler
}
// loadHamlibRigs opens every rig from config.HamlibRigs and resolves the
// configured VFO for each. Misconfigured or unreachable rigs are logged and
// skipped rather than aborting startup; the dial frequency of each usable
// rig is logged as a connectivity check.
func loadHamlibRigs() map[string]hamlib.VFO {
	rigs := make(map[string]hamlib.VFO, len(config.HamlibRigs))
	for name, conf := range config.HamlibRigs {
		// A rig without an address cannot be opened.
		if conf.Address == "" {
			log.Printf("Missing address-field for rig '%s', skipping.", name)
			continue
		}

		rig, err := hamlib.Open(conf.Network, conf.Address)
		if err != nil {
			log.Printf("Initialization hamlib rig %s failed: %s.", name, err)
			continue
		}

		// Select the VFO named in the config; empty means "whatever is current".
		var vfo hamlib.VFO
		switch strings.ToUpper(conf.VFO) {
		case "A", "VFOA":
			vfo, err = rig.VFOA()
		case "B", "VFOB":
			vfo, err = rig.VFOB()
		case "":
			vfo = rig.CurrentVFO()
		default:
			log.Printf("Cannot load rig '%s': Unrecognized VFO identifier '%s'", name, conf.VFO)
			continue
		}
		if err != nil {
			log.Printf("Cannot load rig '%s': Unable to select VFO: %s", name, err)
			continue
		}

		// Probe the rig; a frequency read failure is logged but not fatal.
		f, err := vfo.GetFreq()
		if err != nil {
			log.Printf("Unable to get frequency from rig %s: %s.", name, err)
		} else {
			log.Printf("%s ready. Dial frequency is %s.", name, Frequency(f))
		}

		rigs[name] = vfo
	}
	return rigs
}
// extractMessageHandle prints the message stored in the given file and
// writes each of its attachments to the current directory.
//
// Bug fix: the error from os.Open was previously discarded (file, _ := ...),
// which produced a confusing failure from ReadFrom on a nil file instead of
// a clear "no such file" error.
func extractMessageHandle(_ context.Context, args []string) {
	if len(args) == 0 || args[0] == "" {
		panic("TODO: usage")
	}

	file, err := os.Open(args[0])
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()

	msg := new(fbb.Message)
	if err := msg.ReadFrom(file); err != nil {
		log.Fatal(err)
	}

	fmt.Println(msg)
	for _, f := range msg.Files() {
		// 0o664 keeps the original group-writable attachment permissions.
		if err := os.WriteFile(f.Name(), f.Data(), 0o664); err != nil {
			log.Fatal(err)
		}
	}
}
func EditorName() string {
if e := os.Getenv("EDITOR"); e != "" {
return e
} else if e := os.Getenv("VISUAL"); e != "" {
return e
}
switch runtime.GOOS {
case "windows":
return "notepad"
case "linux":
if path, err := exec.LookPath("editor"); err == nil {
return path
}
}
return "vi"
}
// posReportHandle posts a position report built either from the --latlon
// flag (manual entry) or, when that is empty, from a single GPSd fix.
// Having neither a --latlon value nor a configured GPSd address is fatal.
func posReportHandle(ctx context.Context, args []string) {
	var latlon, comment string

	set := pflag.NewFlagSet("position", pflag.ExitOnError)
	set.StringVar(&latlon, "latlon", "", "")
	set.StringVarP(&comment, "comment", "c", "", "")
	set.Parse(args)

	report := catalog.PosReport{Comment: comment}

	if latlon != "" {
		// Manual entry: parse "latitude,longitude" in decimal degrees.
		parts := strings.Split(latlon, ",")
		if len(parts) != 2 {
			log.Fatal(`Invalid position format. Expected "latitude,longitude".`)
		}

		lat, err := strconv.ParseFloat(parts[0], 64)
		if err != nil {
			log.Fatal(err)
		}
		report.Lat = &lat

		lon, err := strconv.ParseFloat(parts[1], 64)
		if err != nil {
			log.Fatal(err)
		}
		report.Lon = &lon
	} else if config.GPSd.Addr != "" {
		// GPSd entry: wait (cancellable via ctx) for a single position fix.
		conn, err := gpsd.Dial(config.GPSd.Addr)
		if err != nil {
			log.Fatalf("GPSd daemon: %s", err)
		}
		defer conn.Close()

		conn.Watch(true)

		posChan := make(chan gpsd.Position)
		go func() {
			defer close(posChan)
			pos, err := conn.NextPos()
			if err != nil {
				log.Printf("GPSd: %s", err)
				return
			}
			posChan <- pos
		}()

		log.Println("Waiting for position from GPSd...") // TODO: Spinning bar?
		var pos gpsd.Position
		select {
		case p := <-posChan:
			pos = p
		case <-ctx.Done():
			log.Println("Cancelled")
			return
		}

		report.Lat = &pos.Lat
		report.Lon = &pos.Lon
		if config.GPSd.UseServerTime {
			report.Date = time.Now()
		} else {
			report.Date = pos.Time
		}

		// Course and speed is part of the spec, but does not seem to be
		// supported by winlink.org anymore. Ignore it for now.
		if false && pos.Track != 0 {
			course := CourseFromFloat64(pos.Track, false)
			report.Course = &course
		}
	} else {
		fmt.Println("No position available. See --help")
		os.Exit(1)
	}

	// Manual entries carry no timestamp; default to "now".
	if report.Date.IsZero() {
		report.Date = time.Now()
	}

	postMessage(report.Message(fOptions.MyCall))
}
// CourseFromFloat64 converts a course in degrees to the catalog's
// zero-padded three-digit ASCII form (e.g. 5.0 -> "005").
func CourseFromFloat64(f float64, magnetic bool) catalog.Course {
	digits := fmt.Sprintf("%03.0f", f)
	c := catalog.Course{Magnetic: magnetic}
	copy(c.Digits[:], digits[:3])
	return c
}
// postMessage places msg in the outbox, terminating the program if the
// mailbox write fails. A validation failure is reported but does not block
// posting.
func postMessage(msg *fbb.Message) {
	err := msg.Validate()
	if err != nil {
		fmt.Printf("WARNING - Message does not validate: %s\n", err)
	}

	err = mbox.AddOut(msg)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Message posted")
}
|
package service
import (
"github.com/Highway-Project/highway/config"
"github.com/Highway-Project/highway/pkg/service"
)
// NewBackends converts backend specs from the configuration into
// service.Backend values, each starting in the Available state.
//
// The result is pre-allocated to len(specs) to avoid repeated slice growth.
// The error return is currently always nil; it is kept for interface
// stability.
func NewBackends(specs []config.BackendSpec) ([]service.Backend, error) {
	backends := make([]service.Backend, 0, len(specs))
	for _, spec := range specs {
		backends = append(backends, service.Backend{
			Name:   spec.BackendName,
			Addr:   spec.Address,
			Weight: spec.Weight,
			Status: service.Available,
		})
	}
	return backends, nil
}
|
package log
import (
"io/ioutil"
"os"
"path/filepath"
"time"
"github.com/sirupsen/logrus"
)
const (
	LIMITS    = 5                          // Max number of old log files kept in DIRECTORY before one is removed
	PREFIX    = "application-csv2geojson-" // Log file name prefix
	EXTENSION = ".log"                     // Log file name extension
	DIRECTORY = "tmp"                      // Log directory, relative to the working directory
)

var (
	// AppLogger is the process-wide logger, configured by init()/Init().
	AppLogger = Logger{}
)
// init prepares the on-disk log directory and opens a fresh timestamped log
// file, then configures the global AppLogger to write to it. Any failure
// here is fatal (panic), since the package cannot work without a log file.
//
// When the directory already holds LIMITS or more files, the first entry
// returned by ReadDir is removed — lexicographically first, i.e. the oldest,
// since the names embed a timestamp.
// NOTE(review): only one file is removed per startup, so the directory can
// stay above LIMITS if it was over the cap by more than one.
func init() {
	wd, err := os.Getwd()
	if err != nil {
		panic(err)
	}

	path := filepath.Join(wd, DIRECTORY)
	if _, err := os.Stat(path); os.IsNotExist(err) {
		if err := os.Mkdir(DIRECTORY, os.ModePerm); err != nil {
			panic(err)
		}
	} else {
		if info, err := ioutil.ReadDir(path); err != nil {
			panic(err)
		} else if len(info) >= LIMITS {
			target := filepath.Join(path, info[0].Name())
			if err := os.Remove(target); err != nil {
				panic(err)
			}
		}
	}

	// Open a new file named after the current timestamp. The handle is
	// intentionally kept open for the lifetime of the process.
	path = filepath.Join(path, PREFIX+time.Now().Format("20060102150405")+EXTENSION)
	fp, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
	if err != nil {
		panic(err)
	}

	Init(fp, "app", false)
}
// Logger is a named wrapper around an embedded logrus.Logger.
type Logger struct {
	Name string // Human-readable logger name, set via Init or SetName
	logrus.Logger
}
// Init configures the global AppLogger: its name, output destination, a
// plain text format (full timestamps, no colors, quoted empty fields), and
// debug level when verbose is set (info level otherwise).
func Init(out *os.File, name string, verbose bool) {
	AppLogger.Name = name
	AppLogger.Out = out

	AppLogger.Formatter = &logrus.TextFormatter{
		FullTimestamp:    true,
		DisableColors:    true,
		QuoteEmptyFields: true,
	}

	level := logrus.InfoLevel
	if verbose {
		level = logrus.DebugLevel
	}
	AppLogger.SetLevel(level)
}
// SetName updates the logger's name and returns the receiver for chaining.
func (l *Logger) SetName(name string) *Logger {
	l.Name = name
	return l
}
// SetVerbose switches the log level between debug (verbose) and info, and
// returns the receiver for chaining.
//
// Bug fix: the method previously set the level on the global AppLogger
// instead of on its receiver, so calling it on any other Logger instance
// silently adjusted the wrong logger. It now operates on l.
func (l *Logger) SetVerbose(verbose bool) *Logger {
	if verbose {
		l.SetLevel(logrus.DebugLevel)
	} else {
		l.SetLevel(logrus.InfoLevel)
	}
	return l
}
|
package hls
import (
"common/httputils"
"fmt"
"io"
"net/url"
"os"
"path"
"sort"
"strings"
"sync"
"time"
)
const (
	StreamTsCountMax    = 200 // Max number of ts entries kept in TsMap before cleanup
	StreamTsCountReduce = 100 // Number of entries evicted in one cleanup pass
)

// Error codes reported through OnStreamError.
const (
	ErrorCodeBase = iota + 1000
	ErrorCodeM3u8DownloadFail
	ErrorCodeM3u8FormatError
	ErrorCodeTsDownloadRetry
	ErrorCodeTsDownloadFail
)

// Error describes a stream failure: a code from the ErrorCode* set, optional
// contextual data (e.g. the *Ts or *M3u8 involved), and a message.
type Error struct {
	Code int
	Data interface{}
	Err  string
}

// Callback types invoked while pulling a stream; see StreamCallback.
type OnStreamError func(stream *Stream, err Error)
type OnStreamM3u8New func(stream *Stream, m3u8 *M3u8)
type OnStreamM3u8TsDownloaded func(stream *Stream, m3u8 *M3u8)
type OnStreamTsNew func(stream *Stream, ts *Ts)

// StreamCallback bundles the optional event hooks; nil members are skipped.
type StreamCallback struct {
	OnError            OnStreamError
	OnM3u8New          OnStreamM3u8New
	OnM3u8TsDownloaded OnStreamM3u8TsDownloaded
	OnTsNew            OnStreamTsNew
}

// Stream represents one m3u8 playlist being pulled. A top-level (master)
// playlist spawns one child Stream per variant; a second-level playlist
// downloads ts segments.
type Stream struct {
	Key            string     // Caller-supplied identifier, shared with child streams
	M3u8Url        string     // Remote playlist URL
	M3u8Name       string     // Playlist file name
	LocalPath      string     // Local directory for downloads; "" disables saving
	LocalFile      string     // LocalPath + "/" + M3u8Name, or "" when LocalPath is empty
	Timeout        int        // HTTP timeout passed to httputils (presumably seconds — confirm)
	RetryCount     int        // Max download attempts per ts segment
	RetryWait      int        // NOTE(review): set via SetRetryWait but not referenced in this file
	LastM3u8String string     // Content of the previously processed playlist (change detection)
	IsTop          bool       // Set once a top-level playlist has been seen
	Lock           sync.Mutex // Guards Streams and TsMap
	// Child streams of a top-level m3u8, keyed by playlist name.
	Streams map[string]*Stream
	// Segments of a second-level m3u8, keyed by ts name.
	TsMap map[string]*Ts
	// Callbacks invoked during processing; nil members are skipped.
	Callback StreamCallback
}
// NewStream creates a Stream for the playlist at m3u8Url, identified by key.
// localPath may be empty (downloads are then not written to disk); leading/
// trailing whitespace and a single trailing "/" are stripped from it.
// Defaults: 5s timeout, 3 retries, 2s retry wait.
func NewStream(key, m3u8Url, m3u8Name, localPath string) *Stream {
	localPath = strings.TrimSuffix(strings.TrimSpace(localPath), "/")

	stream := &Stream{
		Key:        key,
		M3u8Url:    m3u8Url,
		M3u8Name:   m3u8Name,
		LocalPath:  localPath,
		Timeout:    5,
		RetryCount: 3,
		RetryWait:  2,
		Streams:    make(map[string]*Stream),
		TsMap:      make(map[string]*Ts),
	}
	if localPath != "" {
		stream.LocalFile = localPath + "/" + m3u8Name
	}
	return stream
}
// SetCallback installs the event hooks and returns the stream for chaining.
func (stream *Stream) SetCallback(callback StreamCallback) *Stream {
	stream.Callback = callback
	return stream
}

// SetTimeout sets the per-request HTTP timeout passed to httputils
// (presumably seconds — confirm) and returns the stream for chaining.
func (stream *Stream) SetTimeout(timeout int) *Stream {
	stream.Timeout = timeout
	return stream
}

// SetRetryCount sets how many times each ts download is attempted and
// returns the stream for chaining.
func (stream *Stream) SetRetryCount(retryCount int) *Stream {
	stream.RetryCount = retryCount
	return stream
}

// SetRetryWait stores the retry wait value and returns the stream for
// chaining. NOTE(review): this field is not referenced by the download loop
// in this file — confirm whether it is consumed elsewhere.
func (stream *Stream) SetRetryWait(retryWait int) *Stream {
	stream.RetryWait = retryWait
	return stream
}
// onError invokes the OnError callback, if set, with a formatted Error value.
func (stream *Stream) onError(code int, data interface{}, format string, args ...interface{}) {
	if stream.Callback.OnError != nil {
		stream.Callback.OnError(stream, Error{code, data, fmt.Sprintf(format, args...)})
	}
}

// onM3u8New notifies, if a hook is set, that a new/changed playlist was
// downloaded.
func (stream *Stream) onM3u8New(m3u8 *M3u8) {
	if stream.Callback.OnM3u8New != nil {
		stream.Callback.OnM3u8New(stream, m3u8)
	}
}

// onTsNew notifies, if a hook is set, that a ts segment finished downloading
// successfully.
func (stream *Stream) onTsNew(ts *Ts) {
	if stream.Callback.OnTsNew != nil {
		stream.Callback.OnTsNew(stream, ts)
	}
}

// onM3u8TsDownloaded notifies, if a hook is set, that every new segment of a
// second-level playlist has finished (successfully or not).
func (stream *Stream) onM3u8TsDownloaded(m3u8 *M3u8) {
	if stream.Callback.OnM3u8TsDownloaded != nil {
		stream.Callback.OnM3u8TsDownloaded(stream, m3u8)
	}
}
// FindStream returns the child stream registered under m3u8Name, or nil if
// none exists. Safe for concurrent use.
func (stream *Stream) FindStream(m3u8Name string) *Stream {
	stream.Lock.Lock()
	defer stream.Lock.Unlock()
	return stream.Streams[m3u8Name]
}

// AddStream registers (or replaces) a child stream under m3u8Name that
// inherits this stream's key, local path, timeout, retry settings and
// callbacks, and returns it. Safe for concurrent use.
func (stream *Stream) AddStream(m3u8Url, m3u8Name string) *Stream {
	stream.Lock.Lock()
	defer stream.Lock.Unlock()
	stream.Streams[m3u8Name] = NewStream(stream.Key, m3u8Url, m3u8Name, stream.LocalPath).
		SetTimeout(stream.Timeout).
		SetRetryCount(stream.RetryCount).
		SetRetryWait(stream.RetryWait).
		SetCallback(stream.Callback)
	return stream.Streams[m3u8Name]
}
// doDownloadTs fetches one segment to tsLocalFile and returns the number of
// bytes written; on failure the size is reported as 0 alongside the error.
func (stream *Stream) doDownloadTs(tsUrl, tsLocalFile string) (size int64, err error) {
	size, err = httputils.DownloadFile(tsUrl, tsLocalFile, stream.Timeout)
	if err != nil {
		return 0, err
	}
	return size, nil
}
// downloadTs fetches one segment, retrying up to stream.RetryCount times,
// and reports the final *Ts (Status TsStatusOk or TsStatusFail) on results.
// Each failed attempt is surfaced through onError with ErrorCodeTsDownloadRetry.
func (stream *Stream) downloadTs(ts *Ts, results chan<- *Ts) {
	// Segments live next to the playlist on the remote side and are stored
	// flat under LocalPath locally.
	ts.TsUrl = GetRemoteFilePath(stream.M3u8Url) + "/" + ts.Name
	ts.LocalFile = stream.LocalPath + "/" + ts.Name
	retryCount := 0
	for retryCount < stream.RetryCount {
		size, err := stream.doDownloadTs(ts.TsUrl, ts.LocalFile)
		if err == nil {
			// Success: record size and notify before leaving the loop.
			ts.Status = TsStatusOk
			ts.Size = size
			stream.onTsNew(ts)
			break
		}
		retryCount++
		stream.onError(ErrorCodeTsDownloadRetry, ts, "TsDownload downloading err=%s tsName=%s tsUrl=%s tsLocalFile=%s timeout=%d retryCount=%d",
			err.Error(), ts.Name, ts.TsUrl, ts.LocalFile, stream.Timeout, retryCount)
		if retryCount >= stream.RetryCount {
			break
		}
	}
	// Anything that never reached TsStatusOk is marked failed.
	if ts.Status != TsStatusOk {
		ts.Status = TsStatusFail
	}
	results <- ts
}
// tsExists reports whether a segment named tsName is already tracked.
// Safe for concurrent use.
func (stream *Stream) tsExists(tsName string) bool {
	stream.Lock.Lock()
	defer stream.Lock.Unlock()
	_, ok := stream.TsMap[tsName]
	return ok
}
// tsAdd records a segment in TsMap, evicting the StreamTsCountReduce oldest
// entries (by name order) once the map reaches StreamTsCountMax so memory
// use stays bounded. Segment names are assumed to sort chronologically.
// Safe for concurrent use.
//
// Cleanups over the original: `for key, _ := range` (flagged by gofmt/vet)
// is now `for key := range`, the key slice is pre-allocated, and the
// Chinese comment is translated.
func (stream *Stream) tsAdd(ts *Ts) {
	stream.Lock.Lock()
	defer stream.Lock.Unlock()

	// Cleanup: drop the oldest entries when the cap is reached.
	if len(stream.TsMap) >= StreamTsCountMax {
		keys := make([]string, 0, len(stream.TsMap))
		for key := range stream.TsMap {
			keys = append(keys, key)
		}
		sort.Strings(keys)
		for i, key := range keys {
			if i >= StreamTsCountReduce {
				break
			}
			delete(stream.TsMap, key)
		}
	}

	stream.TsMap[ts.Name] = ts
}
// downloadM3u8Ts downloads every segment of m3u8 that is not already
// tracked, in parallel, and blocks until all started downloads have
// reported back (ok or failed). Failures are surfaced via onError with
// ErrorCodeTsDownloadFail.
//
// Bug fix: the results channel was previously created with capacity
// `tsCount` *before* tsCount was counted, i.e. always unbuffered — it only
// worked because this function keeps receiving. The channel is now sized to
// the worst case so workers can never block on send. The redundant
// single-case select and the duplicate exit check are removed as well.
func (stream *Stream) downloadM3u8Ts(m3u8 *M3u8) {
	results := make(chan *Ts, len(m3u8.TsEntries))

	started := 0
	for _, ts := range m3u8.TsEntries {
		if stream.tsExists(ts.Name) {
			continue
		}
		stream.tsAdd(ts)
		go stream.downloadTs(ts, results)
		started++
	}

	// Wait for every started download to finish.
	for finished := 0; finished < started; finished++ {
		ts := <-results
		if ts.Status != TsStatusOk {
			stream.onError(ErrorCodeTsDownloadFail, ts, "TsDownload fail err=ts downlad fail! tsName=%s retryCount=%d",
				ts.Name, stream.RetryCount)
		}
	}
}
// Pull starts pulling the stream in a background goroutine. It returns
// immediately; the polling loop runs forever (there is no cancellation
// mechanism in this file).
func (stream *Stream) Pull() {
	go stream.pull()
}
// DownloadM3u8 fetches and parses the stream's playlist. A playlist that is
// neither a top-level nor a second-level m3u8 is rejected. When LocalFile
// is set, the raw playlist text is also written to disk.
func (stream *Stream) DownloadM3u8() (*M3u8, error) {
	body, err := httputils.HttpGet(stream.M3u8Url, stream.Timeout)
	if err != nil {
		return nil, err
	}

	parsed := NewM3u8()
	parsed.M3u8Url = stream.M3u8Url
	parsed.Parse(body)
	if !parsed.IsTop() && !parsed.IsSecond() {
		return nil, fmt.Errorf("invalid m3u8 format!")
	}

	// Persist a copy when a local target is configured.
	// NOTE(review): SaveFile's error is ignored here — a failed save does
	// not fail the download. Confirm this is intended.
	if stream.LocalFile != "" {
		SaveFile(body, stream.LocalFile)
		parsed.LocalFile = stream.LocalFile
	}
	return parsed, nil
}
// pullM3u8 performs one poll cycle: download the playlist, skip it when its
// content is unchanged since the previous cycle, then either spawn child
// streams for each new variant (top-level playlist) or download new ts
// segments (second-level playlist).
func (stream *Stream) pullM3u8() {
	m3u8, err := stream.DownloadM3u8()
	if err != nil {
		stream.onError(ErrorCodeM3u8DownloadFail, nil, "M3u8Download err=%s", err.Error())
		return
	}
	// Skip processing when the playlist content is identical to last time.
	if m3u8.M3u8String == stream.LastM3u8String {
		return
	}
	if !m3u8.IsTop() && !m3u8.IsSecond() {
		stream.onError(ErrorCodeM3u8FormatError, m3u8, "M3u8Format err=unknown m3u8 format!")
		return
	}
	stream.LastM3u8String = m3u8.M3u8String
	stream.onM3u8New(m3u8)
	if m3u8.IsTop() {
		stream.IsTop = true
		// Resolve each variant playlist relative to the top playlist's URL
		// and start a child stream for every variant not already running.
		urlInfo, _ := url.Parse(stream.M3u8Url)
		urlDir := urlInfo.Scheme + "://" + urlInfo.Host + path.Dir(urlInfo.Path)
		for _, entry := range m3u8.M3u8Entries {
			m3u8Url := urlDir + "/" + entry.Name
			secondStream := stream.FindStream(entry.Name)
			if secondStream != nil {
				continue
			}
			secondStream = stream.AddStream(m3u8Url, entry.Name)
			secondStream.Pull()
		}
	} else if m3u8.IsSecond() {
		stream.downloadM3u8Ts(m3u8)
		stream.onM3u8TsDownloaded(m3u8)
	}
}
// pull polls the playlist once per second, forever.
// NOTE(review): each tick spawns pullM3u8 in a fresh goroutine without
// waiting for the previous one, so slow cycles can overlap — confirm this
// is the intended behavior.
func (stream *Stream) pull() {
	for {
		go stream.pullM3u8()
		time.Sleep(time.Second)
	}
}
// GetRemoteFilePath returns everything before the final "/" of url, i.e. the
// remote directory of a file URL without the trailing slash
// (e.g. "/20171226/700/20171226T143444.ts" -> "/20171226/700").
// It returns "" when url contains no "/".
//
// Cleanup: the redundant string(...) conversion of a string slice is
// removed, and the original Chinese comment is translated.
func GetRemoteFilePath(url string) string {
	index := strings.LastIndex(url, "/")
	if index == -1 {
		return ""
	}
	return url[:index]
}
func SaveFile(content, localFile string) error {
localPath := path.Dir(localFile)
err := os.MkdirAll(localPath, os.ModePerm)
if err != nil {
return err
}
localFileTmp := localFile + ".tmp"
file, err := os.Create(localFileTmp)
if err != nil {
return err
}
io.WriteString(file, content)
file.Close()
os.Rename(localFileTmp, localFile)
return nil
}
|
package oss
// Bucket represents an OSS bucket. It currently carries no state; fields
// and behavior are presumably defined elsewhere or planned — confirm before
// extending.
type Bucket struct {
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fixcontrol
import (
"strconv"
"strings"
)
// Known fix-control IDs. Each constant is named after the GitHub issue that
// introduced the corresponding behavior switch.
const (
	// Fix44262 controls whether to allow to use dynamic-mode to access partitioning tables without global-stats (#44262).
	Fix44262 uint64 = 44262
	// Fix44389 controls whether to consider non-point ranges of some CNF item when building ranges.
	Fix44389 uint64 = 44389
	// Fix44830 controls whether to allow to cache Batch/PointGet from some complex scenarios.
	// See #44830 for more details.
	Fix44830 uint64 = 44830
	// Fix44823 controls the maximum number of parameters for a query that can be cached in the Plan Cache.
	Fix44823 uint64 = 44823
	// Fix44855 controls whether to use a more accurate upper bound when estimating row count of index
	// range scan under inner side of index join.
	Fix44855 uint64 = 44855
)
// GetStr fetches the given key from the fix control map as a string type.
// exists is false when the map is nil or the key is absent.
func GetStr(fixControlMap map[uint64]string, key uint64) (value string, exists bool) {
	if fixControlMap == nil {
		return "", false
	}
	value, exists = fixControlMap[key]
	return value, exists
}
// GetStrWithDefault fetches the given key from the fix control map as a
// string type, falling back to defaultVal when the map is nil or the key
// is absent. (The lookup is inlined rather than delegated to GetStr.)
func GetStrWithDefault(fixControlMap map[uint64]string, key uint64, defaultVal string) string {
	if fixControlMap == nil {
		return defaultVal
	}
	if value, ok := fixControlMap[key]; ok {
		return value
	}
	return defaultVal
}
// GetBool fetches the given key from the fix control map as a boolean type.
// A value is true when it equals "ON" (case-insensitively) or "1" — the
// same rule as TiDBOptOn in sessionctx/variable. exists is false when the
// map is nil or the key is absent.
func GetBool(fixControlMap map[uint64]string, key uint64) (value bool, exists bool) {
	// A lookup on a nil map is safe and simply misses.
	raw, ok := fixControlMap[key]
	if !ok {
		return false, false
	}
	return strings.EqualFold(raw, "ON") || raw == "1", true
}
// GetBoolWithDefault fetches the given key from the fix control map as a
// boolean type, falling back to defaultVal when the map is nil or the key
// is absent. (The lookup is inlined rather than delegated to GetBool.)
func GetBoolWithDefault(fixControlMap map[uint64]string, key uint64, defaultVal bool) bool {
	raw, ok := fixControlMap[key]
	if !ok {
		return defaultVal
	}
	// The same truthiness rule as TiDBOptOn in sessionctx/variable.
	return strings.EqualFold(raw, "ON") || raw == "1"
}
// GetInt fetches the given key from the fix control map as an int64,
// parsing it the same way as TidbOptInt64 in sessionctx/variable.
// exists is false when the map is nil or the key is absent; parseErr is
// non-nil when the value is present but not a valid base-10 integer.
func GetInt(fixControlMap map[uint64]string, key uint64) (value int64, exists bool, parseErr error) {
	raw, ok := fixControlMap[key]
	if !ok {
		return 0, false, nil
	}
	value, parseErr = strconv.ParseInt(raw, 10, 64)
	return value, true, parseErr
}
// GetIntWithDefault fetches the given key from the fix control map as an
// int64, falling back to defaultVal when the map is nil, the key is absent,
// or the value cannot be parsed. (The lookup is inlined rather than
// delegated to GetInt.)
func GetIntWithDefault(fixControlMap map[uint64]string, key uint64, defaultVal int64) int64 {
	raw, ok := fixControlMap[key]
	if !ok {
		return defaultVal
	}
	value, err := strconv.ParseInt(raw, 10, 64)
	if err != nil {
		return defaultVal
	}
	return value
}
// GetFloat fetches the given key from the fix control map as a float64,
// parsing it the same way as tidbOptFloat64 in sessionctx/variable.
// exists is false when the map is nil or the key is absent; parseErr is
// non-nil when the value is present but not a valid float.
func GetFloat(fixControlMap map[uint64]string, key uint64) (value float64, exists bool, parseErr error) {
	raw, ok := fixControlMap[key]
	if !ok {
		return 0, false, nil
	}
	value, parseErr = strconv.ParseFloat(raw, 64)
	return value, true, parseErr
}
// GetFloatWithDefault fetches the given key from the fix control map as a
// float64, falling back to defaultVal when the map is nil, the key is
// absent, or the value cannot be parsed. (The lookup is inlined rather
// than delegated to GetFloat.)
func GetFloatWithDefault(fixControlMap map[uint64]string, key uint64, defaultVal float64) float64 {
	raw, ok := fixControlMap[key]
	if !ok {
		return defaultVal
	}
	value, err := strconv.ParseFloat(raw, 64)
	if err != nil {
		return defaultVal
	}
	return value
}
|
package driveapicollector
import (
"fmt"
"github.com/scjalliance/drivestream/resource"
drive "google.golang.org/api/drive/v3"
)
// MarshalDrive marshals the given team drive as a resource. The drive's
// CreatedTime must parse as an RFC 3339 timestamp.
//
// Improvement: the parse error is wrapped with %w (instead of %v) so
// callers can inspect it with errors.Is/errors.As.
func MarshalDrive(d *drive.TeamDrive) (resource.Drive, error) {
	created, err := parseRFC3339(d.CreatedTime)
	if err != nil {
		return resource.Drive{}, fmt.Errorf("invalid creation time: %w", err)
	}
	return resource.Drive{
		ID: resource.ID(d.Id),
		DriveData: resource.DriveData{
			Name:    d.Name,
			Created: created,
		},
	}, nil
}
|
// Copyright 2018 Diego Bernardes. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flare
import (
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TestDocumentNewer exercises Document.Newer in both directions: a document
// is newer than a nil reference or a reference with a smaller Revision, and
// not newer than one with a larger Revision.
func TestDocumentNewer(t *testing.T) {
	Convey("Feature: Check if a document is newer", t, func() {
		Convey("Given a list of newer documents", func() {
			// A nil reference must be treated as "target is newer".
			tests := []struct {
				reference *Document
				target    Document
			}{
				{
					nil,
					Document{},
				},
				{
					&Document{Revision: 1},
					Document{Revision: 2},
				},
			}

			Convey("Should be newer", func() {
				for _, tt := range tests {
					So(tt.target.Newer(tt.reference), ShouldBeTrue)
				}
			})
		})

		Convey("Given a list of older documents", func() {
			tests := []struct {
				reference *Document
				target    Document
			}{
				{
					&Document{Revision: 2},
					Document{Revision: 1},
				},
			}

			Convey("Should be older", func() {
				for _, tt := range tests {
					So(tt.target.Newer(tt.reference), ShouldBeFalse)
				}
			})
		})
	})
}
|
package main
import (
"fmt"
"math"
)
// isprime reports whether n is a prime number.
//
// Fixes over the original: n < 2 (including 1, 0 and negatives) is no
// longer reported as prime; trial division stops at sqrt(n) instead of
// n-1; and the floating-point math.Mod is replaced by the integer %
// operator (math.Sqrt keeps the file's math import in use).
func isprime(n int) bool {
	if n < 2 {
		return false
	}
	limit := int(math.Sqrt(float64(n)))
	for x := 2; x <= limit; x++ {
		if n%x == 0 {
			return false
		}
	}
	return true
}
// main sums every prime up to and including 1000000 and prints the result.
func main() {
	total := 0
	for n := 2; n <= 1000000; n++ {
		if isprime(n) {
			total += n
		}
	}
	fmt.Println(total)
}
|
// Copyright 2019-2023 The sakuracloud_exporter Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"fmt"
"github.com/alexflint/go-arg"
)
// Rate-limit bounds for SakuraCloud API calls.
const (
	maximumRateLimit = 10 // upper cap for the RateLimit setting
	defaultRateLimit = 5  // presumably applied when RateLimit is unset — confirm in the consumer
)
// Config gets its content from env and passes it on to different packages
type Config struct {
Trace bool `arg:"env:TRACE" help:"Enable output of trace log of Sakura cloud API call"`
Debug bool `arg:"env:DEBUG" help:"Enable output of debug level log"`
FakeMode string `arg:"--fake-mode,env:FAKE_MODE" help:"File path to fetch/store fake data. If this flag is specified, enable fake-mode"`
Token string `arg:"required,env:SAKURACLOUD_ACCESS_TOKEN" help:"Token for using the SakuraCloud API"`
Secret string `arg:"required,env:SAKURACLOUD_ACCESS_TOKEN_SECRET" help:"Secret for using the SakuraCloud API"`
Zones []string `arg:"-"` // TODO zones parameter is not implements.
WebAddr string `arg:"env:WEB_ADDR"`
WebPath string `arg:"env:WEB_PATH"`
RateLimit int `arg:"env:SAKURACLOUD_RATE_LIMIT" help:"Rate limit per second for SakuraCloud API calls"`
NoCollectorAutoBackup bool `arg:"--no-collector.auto-backup" help:"Disable the AutoBackup collector"`
NoCollectorBill bool `arg:"--no-collector.bill" help:"Disable the Bill collector"`
NoCollectorCoupon bool `arg:"--no-collector.coupon" help:"Disable the Coupon collector"`
NoCollectorDatabase bool `arg:"--no-collector.database" help:"Disable the Database collector"`
NoCollectorESME bool `arg:"--no-collector.esme" help:"Disable the ESME collector"`
NoCollectorInternet bool `arg:"--no-collector.internet" help:"Disable the Internet(Switch+Router) collector"`
NoCollectorLoadBalancer bool `arg:"--no-collector.load-balancer" help:"Disable the LoadBalancer collector"`
NoCollectorLocalRouter bool `arg:"--no-collector.local-router" help:"Disable the LocalRouter collector"`
NoCollectorMobileGateway bool `arg:"--no-collector.mobile-gateway" help:"Disable the MobileGateway collector"`
NoCollectorNFS bool `arg:"--no-collector.nfs" help:"Disable the NFS collector"`
NoCollectorProxyLB bool `arg:"--no-collector.proxy-lb" help:"Disable the ProxyLB(Enhanced LoadBalancer) collector"`
NoCollectorServer bool `arg:"--no-collector.server" help:"Disable the Server collector"`
NoCollectorServerExceptMaintenance bool `arg:"--no-collector.server.except-maintenance" help:"Disable the Server collector except for maintenance information"`
NoCollectorSIM bool `arg:"--no-collector.sim" help:"Disable the SIM collector"`
NoCollectorVPCRouter bool `arg:"--no-collector.vpc-router" help:"Disable the VPCRouter collector"`
NoCollectorZone bool `arg:"--no-collector.zone" help:"Disable the Zone collector"`
NoCollectorWebAccel bool `arg:"--no-collector.webaccel" help:"Disable the WebAccel collector"`
}
// InitConfig builds a Config from command-line flags and environment
// variables, applies defaults and validates the result.
//
// Defaults: WebPath "/metrics", WebAddr ":9542", all known zones, and
// RateLimit 5. An error is returned when the token or secret is missing,
// the rate limit exceeds maximumRateLimit, or mutually exclusive
// collector flags are both set.
func InitConfig() (Config, error) {
	c := Config{
		WebPath:   "/metrics",
		WebAddr:   ":9542",
		Zones:     []string{"is1a", "is1b", "tk1a", "tk1b", "tk1v"},
		RateLimit: defaultRateLimit,
	}
	// MustParse exits the process on malformed flags; the checks below
	// handle value-level validation that go-arg cannot express.
	arg.MustParse(&c)
	if c.Token == "" {
		return c, errors.New("SakuraCloud API Token is required")
	}
	if c.Secret == "" {
		return c, errors.New("SakuraCloud API Secret is required")
	}
	if c.RateLimit <= 0 {
		// Non-positive values silently fall back to the default.
		c.RateLimit = defaultRateLimit
	}
	if c.RateLimit > maximumRateLimit {
		return c, fmt.Errorf("--ratelimit must be 1 to %d", maximumRateLimit)
	}
	if c.NoCollectorServerExceptMaintenance && c.NoCollectorServer {
		// Fixed message: the original repeated "enabled" and misspelled
		// the flag as "--no-collector-server".
		return c, fmt.Errorf("--no-collector.server and --no-collector.server.except-maintenance cannot both be enabled")
	}
	return c, nil
}
|
package status
import (
"github.com/gin-gonic/gin"
"github.com/wajox/gobase/internal/app/build"
"github.com/wajox/gobase/internal/web/controllers/apiv1"
"github.com/wajox/gobase/internal/web/render"
"net/http"
)
var (
	// Compile-time check that *Controller satisfies the apiv1.Controller
	// interface.
	_ apiv1.Controller = (*Controller)(nil)
)

// Controller is a controller implementation for status checks
type Controller struct {
	apiv1.BaseController
	// buildInfo is reported back to clients in the status response.
	buildInfo *build.Info
}
// NewController creates a new status controller that reports the given
// build information.
func NewController(bi *build.Info) *Controller {
	c := &Controller{}
	c.buildInfo = bi
	return c
}
// GetStatus godoc
// @Summary Get Application Status
// @Description get status
// @ID get-status
// @Accept json
// @Produce json
// @Success 200 {object} ResponseDoc
// @Router /api/v1/status [get]
func (ctrl *Controller) GetStatus(ctx *gin.Context) {
	// Report HTTP 200 with the human-readable status text and build info.
	resp := &Response{
		Status: http.StatusText(http.StatusOK),
		Build:  ctrl.buildInfo,
	}
	render.JSONAPIPayload(ctx, http.StatusOK, resp)
}
// DefineRoutes adds controller routes to the router.
func (ctrl *Controller) DefineRoutes(r gin.IRouter) {
	const statusPath = "/api/v1/status"
	r.GET(statusPath, ctrl.GetStatus)
}
|
// Copyright © 2018-2020 Wei Shen <shenwei356@gmail.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cmd
import (
"fmt"
"io"
"regexp"
"runtime"
"sort"
"strconv"
"github.com/shenwei356/bio/seq"
"github.com/shenwei356/bio/seqio/fastx"
"github.com/shenwei356/unikmer"
"github.com/spf13/cobra"
)
// countCmd implements the "count" subcommand: it reads FASTA/Q files,
// enumerates k-mers (optionally canonical, optionally on a circular
// genome), deduplicates them, and writes the result in .unik format.
// Optional modes: sorted output, taxid tagging (fixed via -t or parsed
// from the sequence header via -T/-r), and counting only repeated k-mers.
var countCmd = &cobra.Command{
	Use:   "count",
	Short: "Count k-mers from FASTA/Q sequences",
	Long: `Count k-mers from FASTA/Q sequences
`,
	Run: func(cmd *cobra.Command, args []string) {
		opt := getOptions(cmd)
		runtime.GOMAXPROCS(opt.NumCPUs)
		// Skip sequence alphabet validation for speed.
		seq.ValidateSeq = false
		var err error
		// ---- flag parsing and validation ----
		outFile := getFlagString(cmd, "out-prefix")
		circular := getFlagBool(cmd, "circular")
		k := getFlagPositiveInt(cmd, "kmer-len")
		// k-mers are packed into a uint64 (2 bits per base), so k is capped at 32.
		if k > 32 {
			checkError(fmt.Errorf("k > 32 not supported"))
		}
		canonical := getFlagBool(cmd, "canonical")
		sortKmers := getFlagBool(cmd, "sort")
		taxid := getFlagUint32(cmd, "taxid")
		parseTaxid := getFlagBool(cmd, "parse-taxid")
		parseTaxidRegexp := getFlagString(cmd, "parse-taxid-regexp")
		repeated := getFlagBool(cmd, "repeated")
		var reParseTaxid *regexp.Regexp
		if parseTaxid {
			// -t (global taxid) and -T (per-sequence taxid) are mutually exclusive.
			if taxid > 0 {
				checkError(fmt.Errorf("flag -t/--taxid and -T/--parse-taxid can not given simultaneously"))
			}
			if parseTaxidRegexp == "" {
				checkError(fmt.Errorf("flag -r/--parse-taxid-regexp needed when given flag -T/--parse-taxid"))
			}
			// The regexp must contain a capture group for the taxid.
			if !regexp.MustCompile(`\(.+\)`).MatchString(parseTaxidRegexp) {
				checkError(fmt.Errorf(`value of -r/--parse-taxid-regexp must contains "(" and ")" to capture taxid`))
			}
			reParseTaxid, err = regexp.Compile(parseTaxidRegexp)
			if err != nil {
				checkError(fmt.Errorf("invalid regular express: %s", parseTaxidRegexp))
			}
		}
		if opt.Verbose {
			log.Info("checking input files ...")
		}
		// Input files come from positional args and/or --infile-list;
		// an empty list means stdin.
		files := getFileListFromArgsAndFile(cmd, args, true, "infile-list", true)
		if opt.Verbose {
			if len(files) == 1 && isStdin(files[0]) {
				log.Info("no files given, reading from stdin")
			} else {
				log.Infof("%d input file(s) given", len(files))
			}
		}
		if !isStdout(outFile) {
			outFile += extDataFile
		}
		outfh, gw, w, err := outStream(outFile, opt.Compress, opt.CompressionLevel)
		checkError(err)
		defer func() {
			outfh.Flush()
			if gw != nil {
				gw.Close()
			}
			w.Close()
		}()
		var mode uint32
		var writer *unikmer.Writer
		// A streaming writer is created up front only when k-mers can be
		// written as they are discovered (no sorting, no taxid parsing);
		// otherwise the writer is created after collection, below.
		// NOTE(review): inside this branch sortKmers and parseTaxid are
		// both false, so the UNIK_SORTED and UNIK_INCLUDETAXID
		// sub-branches here are dead code — confirm against upstream.
		if !parseTaxid && !sortKmers {
			if sortKmers {
				mode |= unikmer.UNIK_SORTED
			} else if opt.Compact {
				mode |= unikmer.UNIK_COMPACT
			}
			if canonical {
				mode |= unikmer.UNIK_CANONICAL
			}
			if parseTaxid {
				mode |= unikmer.UNIK_INCLUDETAXID
			}
			writer, err = unikmer.NewWriter(outfh, k, mode)
			checkError(err)
			writer.SetMaxTaxid(opt.MaxTaxid)
			if taxid > 0 {
				checkError(writer.SetGlobalTaxid(taxid))
			}
		}
		// m: set of seen k-mer codes (non-taxid mode).
		// mt: k-mer code -> taxid, merged via LCA (taxid mode).
		var m map[uint64]struct{}
		var taxondb *unikmer.Taxonomy
		var mt map[uint64]uint32
		// could use bloom filter
		// a key exists means it appear once, value of true means it's appeared more than once.
		var marks map[uint64]bool
		if parseTaxid {
			mt = make(map[uint64]uint32, mapInitSize)
			taxondb = loadTaxonomy(opt, false)
		} else {
			m = make(map[uint64]struct{}, mapInitSize)
		}
		if repeated {
			marks = make(map[uint64]bool, mapInitSize)
		}
		var sequence, kmer, preKmer []byte
		var originalLen, l, end, e int
		var record *fastx.Record
		var fastxReader *fastx.Reader
		var kcode, preKcode unikmer.KmerCode
		var first bool
		var i, j, iters int
		var ok bool
		var n int64
		var founds [][][]byte
		var val uint64
		var lca uint32
		var mark bool
		var nseq int64
		for _, file := range files {
			if opt.Verbose {
				log.Infof("reading sequence file: %s", file)
			}
			fastxReader, err = fastx.NewDefaultReader(file)
			checkError(err)
			for {
				record, err = fastxReader.Read()
				if err != nil {
					if err == io.EOF {
						break
					}
					checkError(err)
					break
				}
				if parseTaxid {
					// Extract the taxid from the sequence header via the
					// user-supplied capture group.
					founds = reParseTaxid.FindAllSubmatch(record.Name, 1)
					if len(founds) == 0 {
						checkError(fmt.Errorf("failed to parse taxid in header: %s", record.Name))
					}
					// NOTE(review): the ParseUint error is not checked
					// before use — a non-numeric capture silently yields
					// taxid 0. TODO confirm upstream behavior.
					val, err = strconv.ParseUint(string(founds[0][1]), 10, 32)
					taxid = uint32(val)
				}
				nseq++
				if opt.Verbose {
					if parseTaxid {
						log.Infof("processing sequence #%d: %s, taxid: %d", nseq, record.ID, taxid)
					} else {
						log.Infof("processing sequence #%d: %s", nseq, record.ID)
					}
				}
				// With canonical k-mers one pass suffices; otherwise the
				// reverse complement is scanned in a second pass.
				if canonical {
					iters = 1
				} else {
					iters = 2
				}
				for j = 0; j < iters; j++ {
					if j == 0 { // sequence
						sequence = record.Seq.Seq
					} else { // reverse complement sequence
						sequence = record.Seq.RevComInplace().Seq
					}
					originalLen = len(record.Seq.Seq)
					l = len(sequence)
					end = l - 1
					if end < 0 {
						end = 0
					}
					first = true
					for i = 0; i <= end; i++ {
						e = i + k
						if e > originalLen {
							if circular {
								// Wrap the window around the origin.
								e = e - originalLen
								kmer = sequence[i:]
								kmer = append(kmer, sequence[0:e]...)
							} else {
								break
							}
						} else {
							kmer = sequence[i : i+k]
						}
						// Encode incrementally: after the first window,
						// derive the next code from the previous one.
						if first {
							kcode, err = unikmer.NewKmerCode(kmer)
							first = false
						} else {
							kcode, err = unikmer.NewKmerCodeMustFromFormerOne(kmer, preKmer, preKcode)
						}
						if err != nil {
							checkError(fmt.Errorf("fail to encode '%s': %s", kmer, err))
						}
						preKmer, preKcode = kmer, kcode
						if canonical {
							kcode = kcode.Canonical()
						}
						if parseTaxid {
							if repeated {
								// Record taxid only for k-mers seen at
								// least twice; marks tracks first sightings.
								if mark, ok = marks[kcode.Code]; !ok {
									marks[kcode.Code] = false
								} else if !mark {
									if lca, ok = mt[kcode.Code]; !ok {
										mt[kcode.Code] = taxid
									} else {
										mt[kcode.Code] = taxondb.LCA(lca, taxid) // update with LCA
									}
									marks[kcode.Code] = true
								}
								continue
							}
							if lca, ok = mt[kcode.Code]; !ok {
								mt[kcode.Code] = taxid
							} else {
								mt[kcode.Code] = taxondb.LCA(lca, taxid) // update with LCA
							}
							continue
						}
						if repeated {
							// Emit/collect only k-mers seen at least twice.
							if mark, ok = marks[kcode.Code]; !ok {
								marks[kcode.Code] = false
							} else if !mark {
								if !sortKmers {
									writer.WriteCode(kcode.Code)
									n++
								} else {
									m[kcode.Code] = struct{}{}
								}
								marks[kcode.Code] = true
							}
							continue
						}
						// Plain mode: write (or collect) first sightings only.
						if _, ok = m[kcode.Code]; !ok {
							m[kcode.Code] = struct{}{}
							if !sortKmers {
								writer.WriteCode(kcode.Code)
								n++
							}
						}
					}
				}
			}
		}
		// Deferred-writer path: everything was collected in m/mt above,
		// so create the writer now with the final mode and known count.
		if sortKmers || parseTaxid {
			var mode uint32
			if canonical {
				mode |= unikmer.UNIK_CANONICAL
			}
			if parseTaxid {
				mode |= unikmer.UNIK_INCLUDETAXID
			}
			if sortKmers {
				mode |= unikmer.UNIK_SORTED
			}
			writer, err = unikmer.NewWriter(outfh, k, mode)
			checkError(err)
			writer.SetMaxTaxid(opt.MaxTaxid)
			if taxid > 0 {
				checkError(writer.SetGlobalTaxid(taxid))
			}
			if parseTaxid {
				n = int64(len(mt))
			} else {
				n = int64(len(m))
			}
			writer.Number = int64(n)
		}
		var code uint64
		if !sortKmers {
			// Unsorted taxid mode: dump the map in arbitrary order.
			if parseTaxid {
				for code, taxid = range mt {
					writer.WriteCodeWithTaxid(code, taxid)
				}
				n = int64(len(mt))
			}
		} else {
			if parseTaxid {
				// Sort (code, taxid) pairs before writing.
				codesTaxids := make([]unikmer.CodeTaxid, len(mt))
				i := 0
				for code, taxid := range mt {
					codesTaxids[i] = unikmer.CodeTaxid{Code: code, Taxid: taxid}
					i++
				}
				if opt.Verbose {
					log.Infof("sorting %d k-mers", len(codesTaxids))
				}
				sort.Sort(unikmer.CodeTaxidSlice(codesTaxids))
				if opt.Verbose {
					log.Infof("done sorting")
				}
				for _, codeT := range codesTaxids {
					writer.WriteCodeWithTaxid(codeT.Code, codeT.Taxid)
				}
				n = int64(len(mt))
			} else {
				// Sort plain codes before writing.
				codes := make([]uint64, len(m))
				i := 0
				for code = range m {
					codes[i] = code
					i++
				}
				if opt.Verbose {
					log.Infof("sorting %d k-mers", len(codes))
				}
				sort.Sort(unikmer.CodeSlice(codes))
				if opt.Verbose {
					log.Infof("done sorting")
				}
				for _, code := range codes {
					writer.WriteCode(code)
				}
				n = int64(len(m))
			}
		}
		checkError(writer.Flush())
		if opt.Verbose {
			log.Infof("%d unique k-mers saved to %s", n, outFile)
		}
	},
}
// init registers the count subcommand and declares its flags.
func init() {
	RootCmd.AddCommand(countCmd)
	countCmd.Flags().StringP("out-prefix", "o", "-", `out file prefix ("-" for stdout)`)
	countCmd.Flags().IntP("kmer-len", "k", 0, "k-mer length")
	countCmd.Flags().BoolP("circular", "", false, "circular genome")
	countCmd.Flags().BoolP("canonical", "K", false, "only keep the canonical k-mers")
	countCmd.Flags().BoolP("sort", "s", false, helpSort)
	countCmd.Flags().Uint32P("taxid", "t", 0, "taxid")
	countCmd.Flags().BoolP("parse-taxid", "T", false, `parse taxid from FASTA/Q header`)
	// Fixed help-text typo: "passing taxid" -> "parsing taxid".
	countCmd.Flags().StringP("parse-taxid-regexp", "r", "", `regular expression for parsing taxid`)
	countCmd.Flags().BoolP("repeated", "d", false, `only count duplicated k-mers, for removing singleton in FASTQ`)
}
|
package main
import (
"crypto/hmac"
"crypto/sha512"
"encoding/base64"
"encoding/json"
"math"
"math/rand"
)
// Ability score keys. The numeric prefix gives a stable lexical ordering
// when keys are sorted.
var (
	STRENGTH     = "1_strength"
	INTELLIGENCE = "2_intelligence"
	WISDOM       = "3_wisdom"
	DEXTERITY    = "4_dexterity"
	CONSTITUTION = "5_constitution"
	CHARISMA     = "6_charisma"
)

// Trait holds the heritable characteristics of a person: ability scores
// keyed by the constants above, genetic alleles, and derived physical
// attributes.
type Trait struct {
	Name       string
	Abilities  map[string]float64
	Alleles    map[string]Allele
	Gender     string
	EyeColor   string
	HairColor  string
	Handedness string
	Vision     string
	Height     Height
	AvgScore   float64
}

// Height is a person's height in imperial units.
type Height struct {
	Feet   int
	Inches int
}
// GetHash returns a base64-encoded HMAC-SHA512 digest of the person's
// JSON representation, computed with an empty (nil) key.
func (p *Person) GetHash() string {
	body, _ := json.Marshal(p) // NOTE(review): marshal error is silently ignored
	mac := hmac.New(sha512.New, nil)
	mac.Write(body)
	return base64.StdEncoding.EncodeToString(mac.Sum(nil))
}
// setAbilityAverage returns the mean of all ability scores.
// Returns 0 when the person has no abilities; the original divided by
// zero in that case, producing NaN.
func (p *Person) setAbilityAverage() float64 {
	if len(p.Abilities) == 0 {
		return 0
	}
	total := 0.0
	for _, score := range p.Abilities {
		total += score
	}
	return total / float64(len(p.Abilities))
}
// Person embeds its own Trait plus the traits inherited from each parent.
type Person struct {
	Trait
	Father Trait
	Mother Trait
}
// Procreate produces a new person named name from the contribution
// traits of the two parents.
func Procreate(name string, father Person, mother Person) (Person, error) {
	return GeneratePerson(name, father.GetContributionTraits(), mother.GetContributionTraits())
}
// GetContributionTraits builds the trait set this person passes to a
// child: for each ability a 50/50 pick between the father's and
// mother's value, the person's own alleles, and a gender ("X" parents
// always contribute "X"; otherwise a random gender).
//
// Fixed: removed the dead `make(map[string]Allele)` that was immediately
// overwritten by `result.Alleles = p.Alleles`.
// NOTE(review): only keys present in p.Father.Abilities are considered;
// abilities existing solely on the mother's side are dropped — confirm
// this is intended.
func (p *Person) GetContributionTraits() Trait {
	result := Trait{}
	result.Abilities = make(map[string]float64)
	for key := range p.Father.Abilities {
		if rand.Float64() < .5 {
			result.Abilities[key] = p.Father.Abilities[key]
		} else {
			result.Abilities[key] = p.Mother.Abilities[key]
		}
	}
	result.Alleles = p.Alleles
	if p.Gender == "X" {
		result.Gender = "X"
	} else {
		result.Gender = GetGender()
	}
	return result
}
// setHeight derives the person's height from strength and constitution:
// 48 inches base plus both scores, scaled by 0.9 for gender "X", then
// split into whole feet and remaining inches.
func (p *Person) setHeight() {
	total := 48. + p.Abilities[STRENGTH] + p.Abilities[CONSTITUTION]
	if p.Gender == "X" {
		total *= 0.9
	}
	feet, _ := math.Modf(total / 12.0)
	p.Height = Height{
		Feet:   int(feet),
		Inches: int(math.Mod(total, 12.0)),
	}
}
// selectClasses returns the names of every class whose ability
// requirements this person meets.
func (p *Person) selectClasses() []string {
	qualified := make([]string, 0)
	for _, class := range buildClassRequirements() {
		meetsAll := true
		for _, req := range class.Requirements {
			if p.Abilities[req.AbilityName] < req.MininumScore {
				meetsAll = false
			}
		}
		if meetsAll {
			qualified = append(qualified, class.Name)
		}
	}
	return qualified
}
|
/*
In this challenge, sort a list containing a series of dates given as strings. Each date is given in the format DD-MM-YYYY_HH:MM:
"12-02-2012_13:44"
The priority of criteria used for sorting will be:
Year
Month
Day
Hours
Minutes
Given a list lst and a string mode, implement a function that returns:
if mode is equal to "ASC", the list lst sorted in ascending order.
if mode is equal to "DSC", the list lst sorted in descending order.
Examples
sort_dates(["10-02-2018_12:30", "10-02-2016_12:30", "10-02-2018_12:15"], "ASC") ➞ ["10-02-2016_12:30", "10-02-2018_12:15", "10-02-2018_12:30"]
sort_dates(["10-02-2018_12:30", "10-02-2016_12:30", "10-02-2018_12:15"], "DSC") ➞ ["10-02-2018_12:30", "10-02-2018_12:15", "10-02-2016_12:30"]
sort_dates(["09-02-2000_10:03", "10-02-2000_18:29", "01-01-1999_00:55"], "ASC") ➞ ["01-01-1999_00:55", "09-02-2000_10:03", "10-02-2000_18:29"]
Notes
Remember: the date is in the format DD-MM-YYYY_HH:MM.
You can expect only valid formatted dates, without exceptions to handle.
*/
package main
import (
"fmt"
"reflect"
"sort"
"time"
)
// main checks sortdates against three fixed datasets, each in both
// ascending ("ASC") and descending ("DSC") mode; assert panics on any
// mismatch.
func main() {
	// Dataset 1 with its expected ASC (r1_1) and DSC (r1_2) orderings.
	s1 := []string{
		"18-10-2016_12:09", "01-12-2017_20:32", "18-10-2016_12:04",
		"19-10-2017_16:20", "18-10-2017_16:19", "18-10-2016_16:19",
	}
	r1_1 := []string{
		"18-10-2016_12:04", "18-10-2016_12:09", "18-10-2016_16:19",
		"18-10-2017_16:19", "19-10-2017_16:20", "01-12-2017_20:32",
	}
	r1_2 := []string{
		"01-12-2017_20:32", "19-10-2017_16:20", "18-10-2017_16:19",
		"18-10-2016_16:19", "18-10-2016_12:09", "18-10-2016_12:04",
	}
	// Dataset 2: r2_1 is the DSC expectation, r2_2 the ASC expectation.
	s2 := []string{
		"11-02-2011_00:01", "21-04-2013_11:43", "02-09-2018_11:00",
		"02-09-2018_10:00", "02-09-2018_10:30", "11-02-2011_00:00",
	}
	r2_1 := []string{
		"02-09-2018_11:00", "02-09-2018_10:30", "02-09-2018_10:00",
		"21-04-2013_11:43", "11-02-2011_00:01", "11-02-2011_00:00",
	}
	r2_2 := []string{
		"11-02-2011_00:00", "11-02-2011_00:01", "21-04-2013_11:43",
		"02-09-2018_10:00", "02-09-2018_10:30", "02-09-2018_11:00",
	}
	// Dataset 3 with its expected ASC (r3_1) and DSC (r3_2) orderings.
	s3 := []string{
		"03-04-1982_12:11", "09-08-2010_09:11", "14-12-2004_10:32",
		"08-12-2004_08:00", "08-12-2004_08:45", "12-02-1985_00:58",
	}
	r3_1 := []string{
		"03-04-1982_12:11", "12-02-1985_00:58", "08-12-2004_08:00",
		"08-12-2004_08:45", "14-12-2004_10:32", "09-08-2010_09:11",
	}
	r3_2 := []string{
		"09-08-2010_09:11", "14-12-2004_10:32", "08-12-2004_08:45",
		"08-12-2004_08:00", "12-02-1985_00:58", "03-04-1982_12:11",
	}
	test(s1, "ASC", r1_1)
	test(s1, "DSC", r1_2)
	test(s2, "DSC", r2_1)
	test(s2, "ASC", r2_2)
	test(s3, "ASC", r3_1)
	test(s3, "DSC", r3_2)
}
// test sorts s in mode m, prints the result, and asserts it equals r.
func test(s []string, m string, r []string) {
	got := sortdates(s, m)
	fmt.Println(got)
	assert(reflect.DeepEqual(got, r))
}
// assert panics when x is false; it is a no-op otherwise.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// sortdates sorts date strings of the form DD-MM-YYYY_HH:MM.
// Mode "ASC" sorts ascending; any other mode (callers pass "DSC")
// sorts descending. Returns nil if any entry fails to parse.
func sortdates(s []string, m string) []string {
	const layout = "02-01-2006_15:04"
	t := make([]time.Time, len(s))
	for i, v := range s {
		parsed, err := time.Parse(layout, v)
		if err != nil {
			return nil
		}
		t[i] = parsed
	}
	// Hoisted out of the comparator: the original compared m on every
	// sort comparison.
	asc := m == "ASC"
	sort.Slice(t, func(i, j int) bool {
		if asc {
			return t[i].Before(t[j])
		}
		return t[i].After(t[j])
	})
	r := make([]string, len(t))
	for i := range t {
		r[i] = t[i].Format(layout)
	}
	return r
}
|
package utils
import "github.com/go-gomail/gomail"
// SendMailParam 邮件参数
// SendMailParam holds the recipient and content of an outgoing email.
type SendMailParam struct {
	ToMail  string // recipient address
	ToName  string // recipient display name
	Title   string // email subject
	Content string // HTML body
}
// SendMail 发邮件
func SendMail(p SendMailParam) (err error) {
m := gomail.NewMessage()
m.SetHeader("From", "545397649@qq.com") // 发件人
m.SetHeader("To", // 收件人
m.FormatAddress(p.ToMail, p.ToName),
)
m.SetHeader("Subject", p.Title) // 主题
m.SetBody("text/html", p.Content) // 正文
d := gomail.NewDialer("smtp.qq.com", 465, "545397649@qq.com", "ookdjcmikjorbgaj")
if e := d.DialAndSend(m); e != nil {
err = e
panic(e)
}
return
}
|
package rest
import (
"github.com/jinmukeji/jiujiantang-services/pkg/rest"
proto "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/user/v1"
"github.com/kataras/iris/v12"
)
// ModifySecureEmailRequest 修改安全邮箱请求
type ModifySecureEmailRequest struct {
NewEmail string `json:"new_email"` // 新的邮箱
NewVerificationCode string `json:"new_verification_code"` // 验证码
NewSerialNumber string `json:"new_serial_number"` // 序列号
OldEmail string `json:"old_email"` // 旧的邮箱
OldVerificationNumber string `json:"old_verification_number"` // 旧的邮箱验证号
}
// ModifySecureEmail changes a user's secure email address.
//
// Expected flow (translated from the original notes):
//  1. After login, a verification code is sent to the old mailbox via
//     Post("/notification/email/logged"), which returns a serial_number.
//  2. The user's code is validated via
//     Post("/user/validate_email_verification_code"), which on success
//     returns a verification_number.
//  3. A verification code is then sent to the new mailbox via
//     Post("/notification/email/logged"), returning another serial_number.
//  4. Finally this endpoint, Post("/user/{user_id}/modify_secure_email"),
//     is called with the new email, its verification code and serial
//     number, plus the old email and its verification_number.
func (h *webHandler) ModifySecureEmail(ctx iris.Context) {
	// user_id comes from the URL path.
	userID, err := ctx.Params().GetInt("user_id")
	if err != nil {
		writeError(ctx, wrapError(ErrInvalidValue, "", err), false)
		return
	}
	var reqModifySecureEmail ModifySecureEmailRequest
	errReadJSON := ctx.ReadJSON(&reqModifySecureEmail)
	if errReadJSON != nil {
		writeError(ctx, wrapError(ErrParsingRequestFailed, "", errReadJSON), false)
		return
	}
	// Map the REST request onto the RPC request message.
	req := new(proto.ModifySecureEmailRequest)
	req.UserId = int32(userID)
	req.NewEmail = reqModifySecureEmail.NewEmail
	req.NewVerificationCode = reqModifySecureEmail.NewVerificationCode
	req.NewSerialNumber = reqModifySecureEmail.NewSerialNumber
	req.OldEmail = reqModifySecureEmail.OldEmail
	req.OldVerificationNumber = reqModifySecureEmail.OldVerificationNumber
	_, errSetModifyEmail := h.rpcSvc.ModifySecureEmail(newRPCContext(ctx), req)
	if errSetModifyEmail != nil {
		writeRpcInternalError(ctx, errSetModifyEmail, false)
		return
	}
	rest.WriteOkJSON(ctx, nil)
}
|
package main
/**
109. 有序链表转换二叉搜索树
给定一个单链表,其中的元素按升序排序,将其转换为高度平衡的二叉搜索树。
本题中,一个高度平衡二叉树是指一个二叉树每个节点 的左右两个子树的高度差的绝对值不超过 1。
示例:
```
给定的有序链表: [-10, -3, 0, 5, 9],
一个可能的答案是:[0, -3, 9, -10, null, 5], 它可以表示下面这个高度平衡二叉搜索树:
0
/ \
-3 9
/ /
-10 5
```
*/
/*
Why are linked-list problems always so annoying?
ERROR — the author left this implementation unfinished.
*/
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
/**
* Definition for singly-linked list.
* type ListNode struct {
* Val int
* Next *ListNode
* }
*/
/**
* Definition for a binary tree node.
* type TreeNode struct {
* Val int
* Left *TreeNode
* Right *TreeNode
* }
*/
var stack []int
func SortedListToBST(head *ListNode) *TreeNode {
w(head)
return nil
}
func bst(begin, end int) {
}
func w(root *ListNode) {
if root == nil {
return
}
stack = append(stack, root.Val)
w(root.Next)
}
type ListNode struct {
Val int
Next *ListNode
}
type TreeNode struct {
Val int
Left *TreeNode
Right *TreeNode
}
|
package main
import (
"flag"
"fmt"
"os"
"github.com/FactomProject/factom"
)
// main generates n entry-credit addresses, funds each one from the
// given factoid address, and appends the secrets, public keys and
// transaction IDs to the output file.
//
// Fixed: the output file was never closed; it is now closed via defer.
func main() {
	var (
		faAddress = flag.String("fa", "", "Factoid public key")
		n         = flag.Int("n", 100, "Number of addresses")
		filename  = flag.String("file", "addresses.txt", "File to output addresses")
		amount    = flag.Int("a", 100, "Amount of ec to send")
		force     = flag.Bool("f", true, "Force send")
		host      = flag.String("h", "localhost:8088", "factomd host")
		wallet    = flag.String("w", "localhost:8089", "wallet host")
	)
	flag.Parse()
	factom.SetFactomdServer(*host)
	factom.SetWalletServer(*wallet)
	file, err := os.OpenFile(*filename, os.O_CREATE|os.O_RDWR, 0666)
	if err != nil {
		panic(err)
	}
	defer file.Close()
	file.WriteString("-- EC List --\n")
	for i := 0; i < *n; i++ {
		ec, err := newECAddress()
		if err != nil {
			panic(err)
		}
		trans, err := buyEC(*faAddress, ec.PubString(), uint64(*amount), *force)
		if err != nil {
			panic(err)
		}
		// Five-space indent for the continuation lines of each record.
		space := fmt.Sprintf("%5s", "")
		fmt.Fprintf(file,
			"%d: ECs: %d\n%sSecret: %s\n%sPublic: %s\n%sTransaction: %s\n",
			i, *amount, space, ec.SecString(), space, ec.PubString(), space, trans.TxID)
	}
}
// newECAddress generates a fresh entry-credit address via the wallet.
func newECAddress() (*factom.ECAddress, error) {
	return factom.GenerateECAddress()
}

// buyEC converts `amount` factoshis from factoid address fa into entry
// credits for address ec; force skips the wallet's confirmation.
func buyEC(fa, ec string, amount uint64, force bool) (*factom.Transaction, error) {
	return factom.BuyExactEC(fa, ec, amount, force)
}
|
// it should print only those items which don't have the same successor
// 'a' 'b' 'a' 'a' 'a' 'c' 'd' 'e' 'f' 'g' => 'a' 'b' 'a' 'c' 'd' 'e' 'f' 'g'
package main
import "fmt"
// main prints each element of the sample slice, skipping any element
// equal to its immediate predecessor (runs collapse to one item).
// Output format matches the original: items are printed back-to-back
// with no separator.
func main() {
	items := []string{"a", "b", "a", "a", "a", "c", "d", "e", "f", "g"}
	prev := ""
	for _, cur := range items {
		if cur == prev {
			continue
		}
		fmt.Printf("%v", cur)
		prev = cur
	}
}
|
package main
import (
"bufio"
"encoding/json"
"fmt"
"net"
"os"
"sync/atomic"
)
// main is the request loop of a function-compute style runner: it reads
// framed JSON requests from stdin, dispatches them to handle, and
// writes framed JSON responses to the real stdout. os.Args[1] names a
// unix socket used as a log side-channel.
//
// Fixed: the argument-count check printed an error but fell through to
// os.Args[1], panicking with an index-out-of-range; it now returns.
func main() {
	stdin := os.Stdin
	reader := bufio.NewReader(stdin)
	// Keep the real stdout for protocol frames and redirect the global
	// os.Stdout to stderr so stray prints cannot corrupt the stream.
	stdout := os.Stdout
	os.Stdout = os.Stderr
	if len(os.Args) < 2 {
		fmt.Fprintf(os.Stderr, "argument invalid")
		return
	}
	logApi := os.Args[1]
	conn, err := net.Dial("unix", logApi)
	if err != nil {
		fmt.Fprintf(os.Stderr, "connect log api failed %v", err)
		return
	}
	logConn = conn
	atomic.AddInt64(&inited, 1)
	defer conn.Close()
	for {
		// ReadMessage blocks for the next frame; -1 means no timeout.
		msg, seqId, err := ReadMessage(reader, -1)
		if err != nil {
			fmt.Fprintf(os.Stderr, "%v", err)
			return
		}
		req := map[string]interface{}{}
		err = json.Unmarshal(msg, &req)
		if err != nil {
			fmt.Fprintf(os.Stderr, "%v", "input not json")
			return
		}
		res := map[string]interface{}{}
		event, _ := req["event"].(map[string]interface{})
		context, _ := req["context"].(map[string]interface{})
		// Propagate the caller's trace id, when present, to the shared
		// trace-id holder used by the logger.
		traceId, ok := event["trace_id"].(string)
		if ok && traceId != "" {
			traceIdVal.Store(traceId)
		}
		result, err := handle(event, context)
		if err != nil {
			res["code"] = -4
			res["message"] = err.Error()
		} else {
			res["code"] = 0
		}
		res["result"] = result
		os.Stdout = os.Stderr
		res["trace_id"] = traceId
		resStr, err := json.Marshal(res)
		if err != nil {
			resStr = []byte(`{"error":"cannot marshal res to json"}`)
		}
		// Echo the request's sequence id so the caller can correlate.
		err = WriteMessage(stdout, resStr, int64(seqId))
		if err != nil {
			fmt.Fprintf(os.Stderr, "%v", err)
			return
		}
	}
}
|
package inmemory_test
import (
"testing"
"github.com/Tinee/go-graphql-chat/inmemory"
"github.com/Tinee/go-graphql-chat/domain"
)
// Test_messagesInMemory_Create verifies that a message created through
// the in-memory repository can subsequently be found by its ID.
func Test_messagesInMemory_Create(t *testing.T) {
	// NOTE(review): NewClient is unqualified — presumably a helper defined
	// in another file of this test package; confirm it is not meant to be
	// inmemory.NewClient.
	c := NewClient()
	repo := c.MessageRepository()
	m, err := repo.Create(domain.Message{
		ReceiverID: "Foo",
		SenderID:   "Bar",
		Text:       "FooBar",
	})
	if err != nil {
		t.Errorf("Expected not an error but got: %v", err)
	}
	// Look the message up again; only a not-found error counts as failure.
	_, err = repo.Find(m.ID)
	if err == inmemory.ErrProfileNotFound {
		t.Error("Expected to find the entity, but I didn't.")
	}
}
|
//go:generate go run generate.go insertionsort.go
//go:generate goimports -w ../../insertionsort/
//go:generate gofmt -w ../../insertionsort/
package main
// PACKAGE is the name of the generated package.
const PACKAGE = "insertionsort"

// TEMPLATE is the text/template body for one insertion-sort function;
// {{.FuncName}} and {{.Name}} are filled per element type by generate.go.
const TEMPLATE = `
// {{.FuncName}} sorting slice of {{.Name}}
func {{.FuncName}}(in []{{.Name}}) {
	var i, j int
	var key {{.Name}}
	for i = 1; i < len(in); i++ {
		key = in[i]
		for j = i - 1; j >= 0 && in[j] > key; j-- {
			in[j+1] = in[j]
		}
		in[j+1] = key
	}
}
`

// TEMPLATE_NUMERIC_TEST_FUNC is the generated smoke test for the int
// variant; it relies on testutil fixtures in the generated package.
const TEMPLATE_NUMERIC_TEST_FUNC = `
func TestInts(t *testing.T) {
	input := testutil.InputInts()
	Ints(input)
	if !testutil.IsSortedInts(input) {
		t.Fail()
	}
}
`
|
package service
import (
"context"
"go.uber.org/zap"
"mix/test/pb/core/transaction"
)
// Account RPC endpoints. Every exported method follows the same pattern:
// open a fresh DB session, attach a caller-tagged logger, close the
// session on return, and delegate to the unexported implementation with
// a combined Context.

// CreateAccount creates a new account.
func (p *Transaction) CreateAccount(ctx context.Context, in *transaction.CreateAccountInput, out *transaction.AccountOutput) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "CreateAccount"))
	defer db.Close()
	return p.createAccount(NewContext(ctx, db, logger), in, out)
}

// GetAccount fetches one account.
func (p *Transaction) GetAccount(ctx context.Context, in *transaction.GetAccountInput, out *transaction.AccountOutput) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "GetAccount"))
	defer db.Close()
	return p.getAccount(NewContext(ctx, db, logger), in, out)
}

// GetAccountByOpenId fetches one account by its open id.
func (p *Transaction) GetAccountByOpenId(ctx context.Context, in *transaction.GetAccountByOpenIdInput, out *transaction.AccountOutput) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "GetAccountByOpenId"))
	defer db.Close()
	return p.getAccountByOpenId(NewContext(ctx, db, logger), in, out)
}

// GetAccountList lists all accounts.
func (p *Transaction) GetAccountList(ctx context.Context, in *transaction.Empty, out *transaction.AccountListOutput) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "GetAccountList"))
	defer db.Close()
	return p.getAccountList(NewContext(ctx, db, logger), in, out)
}

// RemoveAccount deletes one account.
func (p *Transaction) RemoveAccount(ctx context.Context, in *transaction.RemoveAccountInput, out *transaction.Empty) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "RemoveAccount"))
	defer db.Close()
	return p.removeAccount(NewContext(ctx, db, logger), in, out)
}

// RemoveAccountByOpenId deletes one account by its open id.
func (p *Transaction) RemoveAccountByOpenId(ctx context.Context, in *transaction.RemoveAccountByOpenIdInput, out *transaction.Empty) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "RemoveAccountByOpenId"))
	defer db.Close()
	return p.removeAccountByOpenId(NewContext(ctx, db, logger), in, out)
}

// UpdateAccount updates one account.
func (p *Transaction) UpdateAccount(ctx context.Context, in *transaction.UpdateAccountInput, out *transaction.AccountOutput) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "UpdateAccount"))
	defer db.Close()
	return p.updateAccount(NewContext(ctx, db, logger), in, out)
}

// UpdateAccountByOpenId updates one account by its open id.
func (p *Transaction) UpdateAccountByOpenId(ctx context.Context, in *transaction.UpdateAccountByOpenIdInput, out *transaction.AccountOutput) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "UpdateAccountByOpenId"))
	defer db.Close()
	return p.updateAccountByOpenId(NewContext(ctx, db, logger), in, out)
}
// Address RPC endpoints; same session/logger/delegate pattern as the
// account methods above.

// CreateAddress creates a new address.
func (p *Transaction) CreateAddress(ctx context.Context, in *transaction.CreateAddressInput, out *transaction.AddressOutput) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "CreateAddress"))
	defer db.Close()
	return p.createAddress(NewContext(ctx, db, logger), in, out)
}

// GetAddress fetches one address.
func (p *Transaction) GetAddress(ctx context.Context, in *transaction.GetAddressInput, out *transaction.AddressOutput) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "GetAddress"))
	defer db.Close()
	return p.getAddress(NewContext(ctx, db, logger), in, out)
}

// GetAddressByAccountId fetches one address by its owning account id.
func (p *Transaction) GetAddressByAccountId(ctx context.Context, in *transaction.GetAddressByAccountIdInput, out *transaction.AddressOutput) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "GetAddressByAccountId"))
	defer db.Close()
	return p.getAddressByAccountId(NewContext(ctx, db, logger), in, out)
}

// GetAddressList lists all addresses.
func (p *Transaction) GetAddressList(ctx context.Context, in *transaction.Empty, out *transaction.AddressListOutput) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "GetAddressList"))
	defer db.Close()
	return p.getAddressList(NewContext(ctx, db, logger), in, out)
}

// GetAddressListByChain lists addresses filtered by chain.
func (p *Transaction) GetAddressListByChain(ctx context.Context, in *transaction.GetAddressListByChainInput, out *transaction.AddressListOutput) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "GetAddressListByChain"))
	defer db.Close()
	return p.getAddressListByChain(NewContext(ctx, db, logger), in, out)
}

// RemoveAddress deletes one address.
func (p *Transaction) RemoveAddress(ctx context.Context, in *transaction.RemoveAddressInput, out *transaction.Empty) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "RemoveAddress"))
	defer db.Close()
	return p.removeAddress(NewContext(ctx, db, logger), in, out)
}

// RemoveAddressByAccountId deletes addresses by their owning account id.
func (p *Transaction) RemoveAddressByAccountId(ctx context.Context, in *transaction.RemoveAddressByAccountIdInput, out *transaction.Empty) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "RemoveAddressByAccountId"))
	defer db.Close()
	return p.removeAddressByAccountId(NewContext(ctx, db, logger), in, out)
}

// RemoveAddressListByChain deletes addresses filtered by chain.
func (p *Transaction) RemoveAddressListByChain(ctx context.Context, in *transaction.RemoveAddressListByChainInput, out *transaction.Empty) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "RemoveAddressListByChain"))
	defer db.Close()
	return p.removeAddressListByChain(NewContext(ctx, db, logger), in, out)
}

// UpdateAddress updates one address.
func (p *Transaction) UpdateAddress(ctx context.Context, in *transaction.UpdateAddressInput, out *transaction.AddressOutput) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "UpdateAddress"))
	defer db.Close()
	return p.updateAddress(NewContext(ctx, db, logger), in, out)
}

// UpdateAddressByAccountId updates an address by its owning account id.
func (p *Transaction) UpdateAddressByAccountId(ctx context.Context, in *transaction.UpdateAddressByAccountIdInput, out *transaction.AddressOutput) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "UpdateAddressByAccountId"))
	defer db.Close()
	return p.updateAddressByAccountId(NewContext(ctx, db, logger), in, out)
}

// UpdateAddressListByChain updates addresses filtered by chain.
func (p *Transaction) UpdateAddressListByChain(ctx context.Context, in *transaction.UpdateAddressListByChainInput, out *transaction.Empty) error {
	db := p.db.NewSession()
	logger := p.logger.With(zap.String("caller", "UpdateAddressListByChain"))
	defer db.Close()
	return p.updateAddressListByChain(NewContext(ctx, db, logger), in, out)
}
// CreateAudit creates an audit record by delegating to the
// session-scoped private handler with a caller-tagged logger.
func (p *Transaction) CreateAudit(ctx context.Context, in *transaction.CreateAuditInput, out *transaction.AuditOutput) error {
	session := p.db.NewSession()
	defer session.Close()
	l := p.logger.With(zap.String("caller", "CreateAudit"))
	return p.createAudit(NewContext(ctx, session, l), in, out)
}
func (p *Transaction) GetAudit(ctx context.Context, in *transaction.GetAuditInput, out *transaction.AuditOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetAudit"))
defer db.Close()
return p.getAudit(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetAuditByAddressId(ctx context.Context, in *transaction.GetAuditByAddressIdInput, out *transaction.AuditOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetAuditByAddressId"))
defer db.Close()
return p.getAuditByAddressId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetAuditList(ctx context.Context, in *transaction.Empty, out *transaction.AuditListOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetAuditList"))
defer db.Close()
return p.getAuditList(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveAudit(ctx context.Context, in *transaction.RemoveAuditInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveAudit"))
defer db.Close()
return p.removeAudit(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveAuditByAddressId(ctx context.Context, in *transaction.RemoveAuditByAddressIdInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveAuditByAddressId"))
defer db.Close()
return p.removeAuditByAddressId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateAudit(ctx context.Context, in *transaction.UpdateAuditInput, out *transaction.AuditOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateAudit"))
defer db.Close()
return p.updateAudit(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateAuditByAddressId(ctx context.Context, in *transaction.UpdateAuditByAddressIdInput, out *transaction.AuditOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateAuditByAddressId"))
defer db.Close()
return p.updateAuditByAddressId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) CreateBalance(ctx context.Context, in *transaction.CreateBalanceInput, out *transaction.BalanceOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "CreateBalance"))
defer db.Close()
return p.createBalance(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetBalance(ctx context.Context, in *transaction.GetBalanceInput, out *transaction.BalanceOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetBalance"))
defer db.Close()
return p.getBalance(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetBalanceByAddressId(ctx context.Context, in *transaction.GetBalanceByAddressIdInput, out *transaction.BalanceOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetBalanceByAddressId"))
defer db.Close()
return p.getBalanceByAddressId(NewContext(ctx, db, logger), in, out)
}
// GetBalanceList returns every balance record by delegating to the
// session-scoped private handler with a caller-tagged logger.
func (p *Transaction) GetBalanceList(ctx context.Context, in *transaction.Empty, out *transaction.BalanceListOutput) error {
	session := p.db.NewSession()
	defer session.Close()
	l := p.logger.With(zap.String("caller", "GetBalanceList"))
	return p.getBalanceList(NewContext(ctx, session, l), in, out)
}
func (p *Transaction) RemoveBalance(ctx context.Context, in *transaction.RemoveBalanceInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveBalance"))
defer db.Close()
return p.removeBalance(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveBalanceByAddressId(ctx context.Context, in *transaction.RemoveBalanceByAddressIdInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveBalanceByAddressId"))
defer db.Close()
return p.removeBalanceByAddressId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateBalance(ctx context.Context, in *transaction.UpdateBalanceInput, out *transaction.BalanceOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateBalance"))
defer db.Close()
return p.updateBalance(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateBalanceByAddressId(ctx context.Context, in *transaction.UpdateBalanceByAddressIdInput, out *transaction.BalanceOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateBalanceByAddressId"))
defer db.Close()
return p.updateBalanceByAddressId(NewContext(ctx, db, logger), in, out)
}
// CreateCallback creates a callback record by delegating to the
// session-scoped private handler with a caller-tagged logger.
func (p *Transaction) CreateCallback(ctx context.Context, in *transaction.CreateCallbackInput, out *transaction.CallbackOutput) error {
	session := p.db.NewSession()
	defer session.Close()
	l := p.logger.With(zap.String("caller", "CreateCallback"))
	return p.createCallback(NewContext(ctx, session, l), in, out)
}
func (p *Transaction) GetCallback(ctx context.Context, in *transaction.GetCallbackInput, out *transaction.CallbackOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetCallback"))
defer db.Close()
return p.getCallback(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetCallbackList(ctx context.Context, in *transaction.Empty, out *transaction.CallbackListOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetCallbackList"))
defer db.Close()
return p.getCallbackList(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetCallbackListByRequestId(ctx context.Context, in *transaction.GetCallbackListByRequestIdInput, out *transaction.CallbackListOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetCallbackListByRequestId"))
defer db.Close()
return p.getCallbackListByRequestId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveCallback(ctx context.Context, in *transaction.RemoveCallbackInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveCallback"))
defer db.Close()
return p.removeCallback(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveCallbackListByRequestId(ctx context.Context, in *transaction.RemoveCallbackListByRequestIdInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveCallbackListByRequestId"))
defer db.Close()
return p.removeCallbackListByRequestId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateCallback(ctx context.Context, in *transaction.UpdateCallbackInput, out *transaction.CallbackOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateCallback"))
defer db.Close()
return p.updateCallback(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateCallbackListByRequestId(ctx context.Context, in *transaction.UpdateCallbackListByRequestIdInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateCallbackListByRequestId"))
defer db.Close()
return p.updateCallbackListByRequestId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) CreateFromAddress(ctx context.Context, in *transaction.CreateFromAddressInput, out *transaction.FromAddressOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "CreateFromAddress"))
defer db.Close()
return p.createFromAddress(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetFromAddress(ctx context.Context, in *transaction.GetFromAddressInput, out *transaction.FromAddressOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetFromAddress"))
defer db.Close()
return p.getFromAddress(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetFromAddressList(ctx context.Context, in *transaction.Empty, out *transaction.FromAddressListOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetFromAddressList"))
defer db.Close()
return p.getFromAddressList(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveFromAddress(ctx context.Context, in *transaction.RemoveFromAddressInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveFromAddress"))
defer db.Close()
return p.removeFromAddress(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateFromAddress(ctx context.Context, in *transaction.UpdateFromAddressInput, out *transaction.FromAddressOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateFromAddress"))
defer db.Close()
return p.updateFromAddress(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) CreateHotAccount(ctx context.Context, in *transaction.CreateHotAccountInput, out *transaction.HotAccountOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "CreateHotAccount"))
defer db.Close()
return p.createHotAccount(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetHotAccount(ctx context.Context, in *transaction.GetHotAccountInput, out *transaction.HotAccountOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetHotAccount"))
defer db.Close()
return p.getHotAccount(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetHotAccountList(ctx context.Context, in *transaction.Empty, out *transaction.HotAccountListOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetHotAccountList"))
defer db.Close()
return p.getHotAccountList(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveHotAccount(ctx context.Context, in *transaction.RemoveHotAccountInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveHotAccount"))
defer db.Close()
return p.removeHotAccount(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateHotAccount(ctx context.Context, in *transaction.UpdateHotAccountInput, out *transaction.HotAccountOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateHotAccount"))
defer db.Close()
return p.updateHotAccount(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) CreateHotWithdraw(ctx context.Context, in *transaction.CreateHotWithdrawInput, out *transaction.HotWithdrawOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "CreateHotWithdraw"))
defer db.Close()
return p.createHotWithdraw(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetHotWithdraw(ctx context.Context, in *transaction.GetHotWithdrawInput, out *transaction.HotWithdrawOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetHotWithdraw"))
defer db.Close()
return p.getHotWithdraw(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetHotWithdrawByMerchantId(ctx context.Context, in *transaction.GetHotWithdrawByMerchantIdInput, out *transaction.HotWithdrawOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetHotWithdrawByMerchantId"))
defer db.Close()
return p.getHotWithdrawByMerchantId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetHotWithdrawList(ctx context.Context, in *transaction.Empty, out *transaction.HotWithdrawListOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetHotWithdrawList"))
defer db.Close()
return p.getHotWithdrawList(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveHotWithdraw(ctx context.Context, in *transaction.RemoveHotWithdrawInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveHotWithdraw"))
defer db.Close()
return p.removeHotWithdraw(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveHotWithdrawByMerchantId(ctx context.Context, in *transaction.RemoveHotWithdrawByMerchantIdInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveHotWithdrawByMerchantId"))
defer db.Close()
return p.removeHotWithdrawByMerchantId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateHotWithdraw(ctx context.Context, in *transaction.UpdateHotWithdrawInput, out *transaction.HotWithdrawOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateHotWithdraw"))
defer db.Close()
return p.updateHotWithdraw(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateHotWithdrawByMerchantId(ctx context.Context, in *transaction.UpdateHotWithdrawByMerchantIdInput, out *transaction.HotWithdrawOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateHotWithdrawByMerchantId"))
defer db.Close()
return p.updateHotWithdrawByMerchantId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) CreateMember(ctx context.Context, in *transaction.CreateMemberInput, out *transaction.MemberOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "CreateMember"))
defer db.Close()
return p.createMember(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetMember(ctx context.Context, in *transaction.GetMemberInput, out *transaction.MemberOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetMember"))
defer db.Close()
return p.getMember(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetMemberList(ctx context.Context, in *transaction.Empty, out *transaction.MemberListOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetMemberList"))
defer db.Close()
return p.getMemberList(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveMember(ctx context.Context, in *transaction.RemoveMemberInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveMember"))
defer db.Close()
return p.removeMember(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateMember(ctx context.Context, in *transaction.UpdateMemberInput, out *transaction.MemberOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateMember"))
defer db.Close()
return p.updateMember(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) CreateMerchant(ctx context.Context, in *transaction.CreateMerchantInput, out *transaction.MerchantOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "CreateMerchant"))
defer db.Close()
return p.createMerchant(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetMerchant(ctx context.Context, in *transaction.GetMerchantInput, out *transaction.MerchantOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetMerchant"))
defer db.Close()
return p.getMerchant(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetMerchantByClientId(ctx context.Context, in *transaction.GetMerchantByClientIdInput, out *transaction.MerchantOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetMerchantByClientId"))
defer db.Close()
return p.getMerchantByClientId(NewContext(ctx, db, logger), in, out)
}
// GetMerchantByOpenId looks a merchant up by open id, delegating to the
// session-scoped private handler with a caller-tagged logger.
func (p *Transaction) GetMerchantByOpenId(ctx context.Context, in *transaction.GetMerchantByOpenIdInput, out *transaction.MerchantOutput) error {
	session := p.db.NewSession()
	defer session.Close()
	l := p.logger.With(zap.String("caller", "GetMerchantByOpenId"))
	return p.getMerchantByOpenId(NewContext(ctx, session, l), in, out)
}
func (p *Transaction) GetMerchantList(ctx context.Context, in *transaction.Empty, out *transaction.MerchantListOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetMerchantList"))
defer db.Close()
return p.getMerchantList(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveMerchant(ctx context.Context, in *transaction.RemoveMerchantInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveMerchant"))
defer db.Close()
return p.removeMerchant(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveMerchantByClientId(ctx context.Context, in *transaction.RemoveMerchantByClientIdInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveMerchantByClientId"))
defer db.Close()
return p.removeMerchantByClientId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveMerchantByOpenId(ctx context.Context, in *transaction.RemoveMerchantByOpenIdInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveMerchantByOpenId"))
defer db.Close()
return p.removeMerchantByOpenId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateMerchant(ctx context.Context, in *transaction.UpdateMerchantInput, out *transaction.MerchantOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateMerchant"))
defer db.Close()
return p.updateMerchant(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateMerchantByClientId(ctx context.Context, in *transaction.UpdateMerchantByClientIdInput, out *transaction.MerchantOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateMerchantByClientId"))
defer db.Close()
return p.updateMerchantByClientId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateMerchantByOpenId(ctx context.Context, in *transaction.UpdateMerchantByOpenIdInput, out *transaction.MerchantOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateMerchantByOpenId"))
defer db.Close()
return p.updateMerchantByOpenId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) CreateMerchantToken(ctx context.Context, in *transaction.CreateMerchantTokenInput, out *transaction.MerchantTokenOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "CreateMerchantToken"))
defer db.Close()
return p.createMerchantToken(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetMerchantToken(ctx context.Context, in *transaction.GetMerchantTokenInput, out *transaction.MerchantTokenOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetMerchantToken"))
defer db.Close()
return p.getMerchantToken(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetMerchantTokenByMerchantId(ctx context.Context, in *transaction.GetMerchantTokenByMerchantIdInput, out *transaction.MerchantTokenOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetMerchantTokenByMerchantId"))
defer db.Close()
return p.getMerchantTokenByMerchantId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetMerchantTokenList(ctx context.Context, in *transaction.Empty, out *transaction.MerchantTokenListOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetMerchantTokenList"))
defer db.Close()
return p.getMerchantTokenList(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveMerchantToken(ctx context.Context, in *transaction.RemoveMerchantTokenInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveMerchantToken"))
defer db.Close()
return p.removeMerchantToken(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveMerchantTokenByMerchantId(ctx context.Context, in *transaction.RemoveMerchantTokenByMerchantIdInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveMerchantTokenByMerchantId"))
defer db.Close()
return p.removeMerchantTokenByMerchantId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateMerchantToken(ctx context.Context, in *transaction.UpdateMerchantTokenInput, out *transaction.MerchantTokenOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateMerchantToken"))
defer db.Close()
return p.updateMerchantToken(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateMerchantTokenByMerchantId(ctx context.Context, in *transaction.UpdateMerchantTokenByMerchantIdInput, out *transaction.MerchantTokenOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateMerchantTokenByMerchantId"))
defer db.Close()
return p.updateMerchantTokenByMerchantId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) CreateNetworkFee(ctx context.Context, in *transaction.CreateNetworkFeeInput, out *transaction.NetworkFeeOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "CreateNetworkFee"))
defer db.Close()
return p.createNetworkFee(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetNetworkFee(ctx context.Context, in *transaction.GetNetworkFeeInput, out *transaction.NetworkFeeOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetNetworkFee"))
defer db.Close()
return p.getNetworkFee(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetNetworkFeeByTransactionId(ctx context.Context, in *transaction.GetNetworkFeeByTransactionIdInput, out *transaction.NetworkFeeOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetNetworkFeeByTransactionId"))
defer db.Close()
return p.getNetworkFeeByTransactionId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetNetworkFeeList(ctx context.Context, in *transaction.Empty, out *transaction.NetworkFeeListOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetNetworkFeeList"))
defer db.Close()
return p.getNetworkFeeList(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveNetworkFee(ctx context.Context, in *transaction.RemoveNetworkFeeInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveNetworkFee"))
defer db.Close()
return p.removeNetworkFee(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveNetworkFeeByTransactionId(ctx context.Context, in *transaction.RemoveNetworkFeeByTransactionIdInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveNetworkFeeByTransactionId"))
defer db.Close()
return p.removeNetworkFeeByTransactionId(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateNetworkFee(ctx context.Context, in *transaction.UpdateNetworkFeeInput, out *transaction.NetworkFeeOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateNetworkFee"))
defer db.Close()
return p.updateNetworkFee(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateNetworkFeeByTransactionId(ctx context.Context, in *transaction.UpdateNetworkFeeByTransactionIdInput, out *transaction.NetworkFeeOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateNetworkFeeByTransactionId"))
defer db.Close()
return p.updateNetworkFeeByTransactionId(NewContext(ctx, db, logger), in, out)
}
// CreateTicket creates a ticket record by delegating to the
// session-scoped private handler with a caller-tagged logger.
func (p *Transaction) CreateTicket(ctx context.Context, in *transaction.CreateTicketInput, out *transaction.TicketOutput) error {
	session := p.db.NewSession()
	defer session.Close()
	l := p.logger.With(zap.String("caller", "CreateTicket"))
	return p.createTicket(NewContext(ctx, session, l), in, out)
}
func (p *Transaction) DeleteTicket(ctx context.Context, in *transaction.DeleteTicketInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "DeleteTicket"))
defer db.Close()
return p.deleteTicket(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetTicket(ctx context.Context, in *transaction.GetTicketInput, out *transaction.TicketOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetTicket"))
defer db.Close()
return p.getTicket(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetTicketList(ctx context.Context, in *transaction.Empty, out *transaction.TicketListOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetTicketList"))
defer db.Close()
return p.getTicketList(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveTicket(ctx context.Context, in *transaction.RemoveTicketInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveTicket"))
defer db.Close()
return p.removeTicket(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateTicket(ctx context.Context, in *transaction.UpdateTicketInput, out *transaction.TicketOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateTicket"))
defer db.Close()
return p.updateTicket(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) CreateToken(ctx context.Context, in *transaction.CreateTokenInput, out *transaction.TokenOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "CreateToken"))
defer db.Close()
return p.createToken(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetToken(ctx context.Context, in *transaction.GetTokenInput, out *transaction.TokenOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetToken"))
defer db.Close()
return p.getToken(NewContext(ctx, db, logger), in, out)
}
// GetTokenBySymbol looks a token up by its symbol, delegating to the
// session-scoped private handler with a caller-tagged logger.
func (p *Transaction) GetTokenBySymbol(ctx context.Context, in *transaction.GetTokenBySymbolInput, out *transaction.TokenOutput) error {
	session := p.db.NewSession()
	defer session.Close()
	l := p.logger.With(zap.String("caller", "GetTokenBySymbol"))
	return p.getTokenBySymbol(NewContext(ctx, session, l), in, out)
}
func (p *Transaction) GetTokenList(ctx context.Context, in *transaction.Empty, out *transaction.TokenListOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetTokenList"))
defer db.Close()
return p.getTokenList(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveToken(ctx context.Context, in *transaction.RemoveTokenInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveToken"))
defer db.Close()
return p.removeToken(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveTokenBySymbol(ctx context.Context, in *transaction.RemoveTokenBySymbolInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveTokenBySymbol"))
defer db.Close()
return p.removeTokenBySymbol(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateToken(ctx context.Context, in *transaction.UpdateTokenInput, out *transaction.TokenOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateToken"))
defer db.Close()
return p.updateToken(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateTokenBySymbol(ctx context.Context, in *transaction.UpdateTokenBySymbolInput, out *transaction.TokenOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateTokenBySymbol"))
defer db.Close()
return p.updateTokenBySymbol(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) CreateTransaction(ctx context.Context, in *transaction.CreateTransactionInput, out *transaction.TransactionOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "CreateTransaction"))
defer db.Close()
return p.createTransaction(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetTransaction(ctx context.Context, in *transaction.GetTransactionInput, out *transaction.TransactionOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetTransaction"))
defer db.Close()
return p.getTransaction(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetTransactionByChainTxidUnique(ctx context.Context, in *transaction.GetTransactionByChainTxidUniqueInput, out *transaction.TransactionOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetTransactionByChainTxidUnique"))
defer db.Close()
return p.getTransactionByChainTxidUnique(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) GetTransactionList(ctx context.Context, in *transaction.Empty, out *transaction.TransactionListOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "GetTransactionList"))
defer db.Close()
return p.getTransactionList(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveTransaction(ctx context.Context, in *transaction.RemoveTransactionInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveTransaction"))
defer db.Close()
return p.removeTransaction(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) RemoveTransactionByChainTxidUnique(ctx context.Context, in *transaction.RemoveTransactionByChainTxidUniqueInput, out *transaction.Empty) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "RemoveTransactionByChainTxidUnique"))
defer db.Close()
return p.removeTransactionByChainTxidUnique(NewContext(ctx, db, logger), in, out)
}
func (p *Transaction) UpdateTransaction(ctx context.Context, in *transaction.UpdateTransactionInput, out *transaction.TransactionOutput) error {
db := p.db.NewSession()
logger := p.logger.With(zap.String("caller", "UpdateTransaction"))
defer db.Close()
return p.updateTransaction(NewContext(ctx, db, logger), in, out)
}
// UpdateTransactionByChainTxidUnique updates a transaction addressed by its
// unique (chain, txid) key, delegating to the session-scoped private handler
// with a caller-tagged logger.
func (p *Transaction) UpdateTransactionByChainTxidUnique(ctx context.Context, in *transaction.UpdateTransactionByChainTxidUniqueInput, out *transaction.TransactionOutput) error {
	session := p.db.NewSession()
	defer session.Close()
	l := p.logger.With(zap.String("caller", "UpdateTransactionByChainTxidUnique"))
	return p.updateTransactionByChainTxidUnique(NewContext(ctx, session, l), in, out)
}
|
package main
import "fmt"
import s "strings"
// main reads one line from stdin, passes it through a channel to a responder
// goroutine that prints "PONG" when the message is "ping" (case-insensitive),
// then waits for a second stdin line before exiting.
//
// Fix: the original only waited on the second Scanln, so the process could
// reach the end of main before the responder goroutine ran and printed PONG.
// A done channel now guarantees the responder finishes first.
func main() {
	messages := make(chan string)
	done := make(chan struct{})

	var message string
	fmt.Scanln(&message)

	go func() { messages <- message }()
	go func() {
		defer close(done) // signal completion even if no PONG is printed
		msg := <-messages
		if s.EqualFold(msg, "ping") {
			fmt.Println("PONG")
		}
	}()

	// Wait for the responder before accepting the exit keypress, so the
	// output cannot be lost to a premature process exit.
	<-done

	var exit string
	fmt.Scanln(&exit)
}
|
package main
import (
"context"
"flag"
"fmt"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/sirupsen/logrus"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"
"google.golang.org/grpc/reflection"
"google.golang.org/protobuf/types/known/timestamppb"
"net/http"
"strings"
"time"
"github.com/kkweon/grpc-rest-via-gateway/gen/go/blog/v1"
"google.golang.org/grpc"
)
// port is the TCP port the combined gRPC/HTTP gateway server listens on.
var port = flag.Int("port", 80, "--port 80")
// blogImpl is an in-memory implementation of the BlogService gRPC API;
// posts live only for the lifetime of the process.
//
// NOTE(review): posts is read and mutated by concurrent RPC handlers with
// no locking — gRPC serves requests on separate goroutines, so this looks
// like a data race; confirm and guard with a sync.Mutex.
type blogImpl struct {
	posts []*v1.Post
	v1.UnimplementedBlogServiceServer
}
// CreatePost stores a new post with a generated id and the current
// timestamp, and returns the stored post to the caller.
//
// NOTE(review): b.posts is appended without synchronization; concurrent
// RPCs would race here — confirm and guard with a mutex on blogImpl.
func (b *blogImpl) CreatePost(ctx context.Context, request *v1.CreatePostRequest) (*v1.CreatePostResponse, error) {
	created := &v1.Post{
		Id:        b.getNewId(),
		Content:   request.GetContent(),
		CreatedAt: timestamppb.Now(),
	}
	b.posts = append(b.posts, created)
	resp := &v1.CreatePostResponse{Post: created}
	return resp, nil
}
// GetPosts returns every stored post, or — when a positive post_id is
// supplied — only the matching post. Requesting an unknown id is an error.
func (b *blogImpl) GetPosts(ctx context.Context, request *v1.GetPostsRequest) (*v1.GetPostsResponse, error) {
	id := request.GetPostId()
	if id <= 0 {
		return &v1.GetPostsResponse{Posts: b.posts}, nil
	}
	for _, p := range b.posts {
		if p.GetId() == id {
			return &v1.GetPostsResponse{Posts: []*v1.Post{p}}, nil
		}
	}
	return nil, fmt.Errorf("unable to find post_id = %d", id)
}
// DeletePost removes the post with the requested id from the in-memory
// slice; deleting an unknown id is an error.
func (b *blogImpl) DeletePost(ctx context.Context, request *v1.DeletePostRequest) (*v1.DeletePostResponse, error) {
	for i := range b.posts {
		if b.posts[i].GetId() != request.GetPostId() {
			continue
		}
		b.posts = append(b.posts[:i], b.posts[i+1:]...)
		return &v1.DeletePostResponse{}, nil
	}
	return nil, fmt.Errorf("unable to find post_id = %d", request.GetPostId())
}
// getNewId derives a unique-enough post id from the current wall clock
// (nanoseconds since the Unix epoch).
func (b *blogImpl) getNewId() int64 {
	now := time.Now()
	return now.UnixNano()
}
// allHandler multiplexes gRPC and plain-HTTP traffic on a single listener:
// HTTP/2 requests carrying a gRPC content type go to grpcServer, anything
// else to httpHandler. Wrapping in h2c allows HTTP/2 without TLS.
func allHandler(grpcServer *grpc.Server, httpHandler http.Handler) http.Handler {
	dispatch := func(w http.ResponseWriter, r *http.Request) {
		logrus.WithField("request", fmt.Sprintf("%+v", r)).Info("hit Handler")
		isGRPC := r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc")
		if isGRPC {
			grpcServer.ServeHTTP(w, r)
			return
		}
		httpHandler.ServeHTTP(w, r)
	}
	return h2c.NewHandler(http.HandlerFunc(dispatch), &http2.Server{})
}
// main serves three things on one port: the gRPC BlogService, a
// grpc-gateway REST proxy for it, and static swagger assets.
func main() {
	flag.Parse()
	addr := fmt.Sprintf(":%d", *port)
	logrus.WithField("addr", addr).Info("flag parsed")
	// gRPC server with reflection enabled (for grpcurl and friends).
	grpcServer := grpc.NewServer()
	v1.RegisterBlogServiceServer(grpcServer, &blogImpl{})
	reflection.Register(grpcServer)
	// REST gateway that dials back into this same address over gRPC.
	gwmux := runtime.NewServeMux()
	err := v1.RegisterBlogServiceHandlerFromEndpoint(context.Background(), gwmux, addr, []grpc.DialOption{grpc.WithInsecure()})
	if err != nil {
		panic(err)
	}
	// Plain HTTP mux: swagger spec, swagger UI, and the gateway as fallback.
	mux := http.NewServeMux()
	mux.HandleFunc("/swagger.json", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, "gen/openapiv2/blog/v1/blog.swagger.json")
	})
	mux.Handle("/swagger-ui/", http.StripPrefix("/swagger-ui/", http.FileServer(http.Dir("swagger-ui/dist"))))
	mux.Handle("/", gwmux)
	// allHandler routes gRPC vs. HTTP requests on the shared listener.
	err = http.ListenAndServe(addr, allHandler(grpcServer, mux))
	if err != nil {
		panic(err)
	}
}
|
package api
import (
"bytes"
"fmt"
"io"
"net/http"
"strconv"
"time"
"github.com/BurntSushi/toml"
"github.com/hashicorp/raft"
"github.com/robustirc/robustirc/internal/config"
"github.com/robustirc/robustirc/internal/robust"
)
// configRevision returns the revision number of the currently active
// network configuration, holding the config read-lock for the read.
func (api *HTTP) configRevision() uint64 {
	srv := api.ircServer()
	srv.ConfigMu.RLock()
	defer srv.ConfigMu.RUnlock()
	return srv.Config.Revision
}
// applyConfig applies body as the new network configuration, but only when
// the caller's revision matches the active one (optimistic concurrency).
// The applied message carries the next revision number.
func (api *HTTP) applyConfig(revision uint64, body string) error {
	current := api.configRevision()
	if revision != current {
		return fmt.Errorf("Revision mismatch (got %d, want %d). Try again.", revision, current)
	}
	return api.applyMessageWait(&robust.Message{
		Type:     robust.Config,
		Data:     body,
		Revision: revision + 1,
	}, 10*time.Second)
}
// handlePostConfig accepts a new network configuration via POST. The client
// must echo the current revision in X-RobustIRC-Config-Revision; the body
// must be valid TOML. Non-leader nodes proxy the request to the leader.
func (api *HTTP) handlePostConfig(w http.ResponseWriter, r *http.Request) {
	revision, err := strconv.ParseUint(r.Header.Get("X-RobustIRC-Config-Revision"), 0, 64)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Validate the TOML while teeing the raw bytes into body, so the exact
	// client payload can be applied or proxied afterwards.
	var unused config.Network
	var body bytes.Buffer
	if _, err := toml.DecodeReader(io.TeeReader(r.Body, &body), &unused); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	if api.raftNode.State() != raft.Leader {
		api.maybeProxyToLeader(w, r, nopCloser{&body})
		return
	}
	if err := api.applyConfig(revision, body.String()); err != nil {
		// Leadership may have been lost between the check above and the
		// apply; retry via the (new) leader in that case.
		if err == raft.ErrNotLeader {
			api.maybeProxyToLeader(w, r, nopCloser{&body})
			return
		}
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
}
|
package config
import (
"fmt"
)
var (
	// defaults holds fallback values registered via SetDefault; it is
	// consulted when no config engine defines a name.
	defaults = NewValues("defaults", nil)
)
// SetDefault registers a fallback value to use when name is not found in
// any config engine. It fails when name is already defined, whether by a
// config engine or by an earlier SetDefault call.
func SetDefault(name string, defaultValue interface{}) error {
	if definedValue, ok := defined.Get(name); ok {
		return fmt.Errorf("cannot set default for %s=(%T)%+v (already defined)", name, definedValue, definedValue)
	}
	if err := defaults.Set(name, defaultValue); err != nil {
		// BUG FIX: wrap with %w (was %v) so callers can errors.Is/As the cause.
		return fmt.Errorf("failed to set default in defaults: %w", err)
	}
	return nil
}
|
package ankaboot
// Greatly inspired by Rob Pike's talk on Lexical Analysis in Go
// and borrowed a lot of the code from slide, to get it to work.
// Will require to modify the code to be more suitable for my own
// use case.
//
// itemType identifies the kind of token the lexer emits.
type itemType int

// Define all of the 'items' the lexer will need to lex.
const (
	itemError   itemType = iota // Error
	itemEOF                     // End Of File
	itemLTag                    // Left Tag <
	itemRTag                    // Right Tag >
	itemRCTag                   // Right Close tag /> [ <input />]
	itemLCTag                   // Left Close tag </ [ <a></a>]
	itemDocType                 // <!DOCTYPE html >
	itemHTML                    // <HTML>
	itemHead                    // <head>
	itemMeta                    // <meta >
	itemLink                    // <link />
	itemTitle                   // <title>
	itemStyle                   // <style>
	itemBody                    // <body>
	itemScript                  // <script>
	itemFooter                  // <footer>
	itemTable                   // <table>
	itemTHead                   // <thead>
	itemTBody                   // <tbody>
	itemTh                      // <th>
	itemTr                      // <tr>
	itemTd                      // <td>
	itemDiv                     // <div>
	itemH1                      // <h1>
	itemH2                      // <h2>
	itemH3                      // <h3>
	itemH4                      // <h4>
	itemH5                      // <h5>
	itemH6                      // <h6>
	itemP                       // <p>
	itemSpan                    // <span>
	itemA                       // <a>
	itemImg                     // <img />
	itemUl                      // <ul>
	itemOl                      // <ol>
	itemLi                      // <li>
	itemBr                      // <br />
	itemHr                      // <hr />
	itemText                    // the values wrapped by the elements
	itemAttrId                  // id attribute
	itemAttrClass               // class attribute
	itemAttrHref                // href attribute
	itemAttrSrc                 // src attribute
	itemAttrType                // type attribute
	itemAttrName                // name attribute
	itemAttrValue               // value attribute
	itemAttrCol                 // col attribute
	itemAttrRow                 // row attribute
)
// item struct that will be passed through lexer items channel to the
// parser.
type item struct {
	typ itemType // kind of token
	val string   // literal text of the token
}
// lexer holds the scanning state for one input string.
type lexer struct {
	name  string    // name of the input; intended for error reports
	input string    // the string being scanned
	start int       // start position of the current item
	pos   int       // current read position in the input
	width int       // width of the last rune read (enables backup)
	items chan item // channel on which scanned items are delivered
}
//stateFn represents a state of the scanner
// as a function that returns the next state (rob pike).
type stateFn func(*lexer) stateFn
// lex creates a lexer for input, starts its run loop in a goroutine, and
// returns both the lexer and the channel on which items will arrive.
func lex(name, input string) (*lexer, chan item) {
	lx := &lexer{name: name, input: input, items: make(chan item)}
	go lx.run()
	return lx, lx.items
}
// run drives the state machine (not yet implemented) and then closes the
// items channel.
// FIXME(review): `close` below resolves to the no-op package-level func,
// which shadows the builtin — so l.items is never actually closed and
// receivers ranging over it will block forever.
func (l *lexer) run() {
	// here will run
	close(l.items)
}
// emit sends the text between start and pos as an item of type t on the
// items channel, then advances start past the consumed text.
func (l *lexer) emit(t itemType) {
	l.items <- item{t, l.input[l.start:l.pos]}
	l.start = l.pos
}
//next moves to the next item in the slice
// TODO(review): stub — presumably should advance pos by the width of the
// next rune and record that width for backup; confirm intended semantics.
func (l *lexer) next() {
	// next
}
//peek looks ahead 1 in a slice and then backs up.
// TODO(review): stub — not yet implemented.
func (l *lexer) peek() {
}
//backup goes back 1 in a slice
// TODO(review): stub — presumably should rewind pos by width.
func (l *lexer) backup() {
}
//close open channel
// FIXME(review): declaring a package-level func named `close` shadows the
// builtin for the whole package, and this stub does nothing — so the items
// channel is never closed (see run). The builtin cannot be reached once
// shadowed; this helper (and its call site in run) should be renamed.
func close(items chan item) {
	// close channel
}
//lexInsideElement
// TODO(review): stub — intended to lex attributes/text inside an element.
func lexInsideElement() {
	// lex inside the element
}
|
package api
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
)
// Book is a single catalog entry, serialized to and from JSON.
type Book struct {
	Title  string `json:"title"`
	Author string `json:"author"`
	ISBN   string `json:"isbn"`
}

// books is the package-wide in-memory store, keyed by ISBN (seeded in init).
var books map[string]Book

// ToJSON serializes a Book to its JSON representation.
func ToJSON(b Book) ([]byte, error) {
	return json.Marshal(b)
}

// FromJSON parses JSON bytes into a newly allocated Book.
func FromJSON(bJson []byte) (*Book, error) {
	var parsed Book
	if err := json.Unmarshal(bJson, &parsed); err != nil {
		return nil, err
	}
	return &parsed, nil
}
// AddBook stores b keyed by its ISBN; it refuses to overwrite an existing
// entry and reports that as an error.
func AddBook(b *Book) error {
	_, exists := books[b.ISBN]
	if exists {
		return errors.New("Book with ISBN already exists")
	}
	books[b.ISBN] = *b
	return nil
}
// AllBooks returns the stored books as a slice, in no particular order
// (map iteration order is randomized).
func AllBooks() []Book {
	// Pre-size the slice: the final length is known up front.
	ret := make([]Book, 0, len(books))
	for _, book := range books {
		ret = append(ret, book)
	}
	return ret
}
// UpdateBook stores book under isbn, forcing the record's ISBN field to
// match the key. Note: the caller's struct is mutated as a side effect.
func UpdateBook(isbn string, book *Book) {
	book.ISBN = isbn
	books[isbn] = *book
}
// BooksHandler serves the book collection: GET returns all books as JSON,
// POST creates a new book from the JSON request body (409 on a duplicate
// ISBN). Any other method yields 400.
func BooksHandler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case http.MethodGet:
		booksJson, _ := json.Marshal(books)
		w.Header().Add("content-type", "application/json")
		w.Header().Add("charset", "utf-8")
		w.Write(booksJson)
	case http.MethodPost:
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			// BUG FIX: the original fell through after writing the status,
			// later writing a Location header and a second status code.
			return
		}
		b, err := FromJSON(body)
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		if err := AddBook(b); err != nil {
			fmt.Printf("BooksHandler: Method: http.POST. isbn: %v already exists\n", b.ISBN)
			w.WriteHeader(http.StatusConflict)
			return
		}
		w.Header().Add("Location", "/api/books/"+b.ISBN)
		w.WriteHeader(http.StatusCreated)
	default:
		w.WriteHeader(http.StatusBadRequest)
	}
}
// BookHandler serves a single book addressed by the ISBN in the URL path
// (/api/book/{isbn}): GET returns it, PUT replaces it, DELETE removes it.
// An unknown ISBN yields 404; unsupported methods yield 400.
func BookHandler(w http.ResponseWriter, r *http.Request) {
	isbn := r.URL.Path[len("/api/book/"):]
	switch r.Method {
	case http.MethodGet:
		book, ok := books[isbn]
		if !ok {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		bookJson, _ := json.Marshal(book)
		w.Header().Add("content-type", "application/json")
		w.Header().Add("charset", "utf-8")
		w.Write(bookJson)
	case http.MethodPut:
		if _, ok := books[isbn]; !ok {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		body, err := ioutil.ReadAll(r.Body)
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			// BUG FIX: the original continued after writing the status,
			// producing superfluous WriteHeader calls on bad input.
			return
		}
		b, err := FromJSON(body)
		if err != nil {
			w.WriteHeader(http.StatusBadRequest)
			return
		}
		UpdateBook(isbn, b)
		w.WriteHeader(http.StatusOK)
	case http.MethodDelete:
		if _, ok := books[isbn]; !ok {
			w.WriteHeader(http.StatusNotFound)
			return
		}
		delete(books, isbn)
		w.WriteHeader(http.StatusOK)
	default:
		w.WriteHeader(http.StatusBadRequest)
	}
}
// init seeds the in-memory store with two sample books.
func init() {
	books = map[string]Book{
		"100001": {Title: "Basics of C++", Author: "Balaguruswamy", ISBN: "100001"},
		"100002": {Title: "Data Structure and Algorithms", Author: "Kosaraju", ISBN: "100002"},
	}
}
|
package models
import (
. "2019_2_IBAT/pkg/pkg/models"
"github.com/google/uuid"
)
// InChatMessage is an incoming websocket/chat payload; OwnerInfo is filled
// from the auth layer, not from the wire.
// FIXME(review): ChatID, Timestamp and Text all carry db:"id" — this looks
// like a copy-paste error; confirm the real column names before relying on
// these tags for DB scanning.
type InChatMessage struct {
	ChatID    uuid.UUID        `json:"chat_id" db:"id"`
	OwnerInfo AuthStorageValue `json:"-" db:"-"`
	Timestamp string           `json:"timestamp" db:"id"`
	Text      string           `json:"content" db:"id"`
}

// OutChatMessage is the message shape sent back to clients, including who
// wrote it and whether it belongs to the requesting user.
type OutChatMessage struct {
	ChatID     uuid.UUID `json:"chat_id"`
	OwnerId    uuid.UUID `json:"owner_id"`
	OwnerName  string    `json:"owner_name"`
	Timestamp  string    `json:"created_at"`
	Text       string    `json:"content"`
	IsNotYours bool      `json:"is_not_yours"`
}

// Chat describes one conversation from the perspective of the current
// user: the chat id plus the other participant.
type Chat struct {
	ChatID        uuid.UUID `json:"chat_id"`
	CompanionName string    `json:"companion_name"`
	CompanionID   uuid.UUID `json:"companion_id"`
}
|
package main
import (
"testing"
"github.com/ubinte/livego/app"
"github.com/ubinte/livego/protocol/rtmp"
)
// TestStartHttpflvServer boots an RTMP ingest server and an HTTP-FLV pull
// server sharing one stream registry. It blocks serving HTTP on :80, so it
// is a manual/integration harness rather than a self-terminating unit test.
func TestStartHttpflvServer(t *testing.T) {
	app.AddApp("live").AddChannelKey("insecure_channel_key", "movie")
	stream := rtmp.NewRtmpStream()
	go StartRtmpServer(stream, ":1935") // push rtmp://127.0.0.1:1935/live/insecure_channel_key
	StartHttpflvServer(stream, ":80")   // pull http://127.0.0.1/live/movie.flv
}
|
package cmd
import (
"github.com/danhale-git/craft/craft"
"github.com/danhale-git/craft/internal/logger"
"github.com/spf13/cobra"
)
// NewListCmd returns the list command which lists running and backed up servers.
func NewListCmd() *cobra.Command {
	run := func(cmd *cobra.Command, args []string) {
		showAll, err := cmd.Flags().GetBool("all")
		if err != nil {
			// Programmer error: the flag is registered below, so a lookup
			// failure means the command was miswired.
			panic(err)
		}
		if err := craft.PrintServers(showAll); err != nil {
			logger.Error.Fatal(err)
		}
	}
	c := &cobra.Command{
		Use:   "list <server>",
		Short: "List servers",
		Args:  cobra.NoArgs,
		Run:   run,
	}
	c.Flags().BoolP("all", "a", false,
		"Show all servers. The Default is to show only running servers.")
	return c
}
|
package mutexrw
import (
"fmt"
"log"
"math/rand"
"runtime"
"sync"
"testing"
"time"
)
// counter is a uint counter guarded by a read/write lock.
type counter struct {
	num uint         // current count
	mu  sync.RWMutex // guards num
}

// number returns the current count under the read lock.
func (c *counter) number() uint {
	c.mu.RLock()
	n := c.num
	c.mu.RUnlock()
	return n
}

// add increases the count by increment under the write lock and returns
// the updated value.
func (c *counter) add(increment uint) uint {
	c.mu.Lock()
	c.num += increment
	n := c.num
	c.mu.Unlock()
	return n
}
// count starts one goroutine that increments c and two goroutines that
// repeatedly sample it, then blocks until all three have finished.
func count(c *counter) {
	// sign carries one completion signal per goroutine.
	sign := make(chan struct{}, 3)
	// Writer: bumps the counter 11 times, ~600ms apart.
	go func() {
		defer func() {
			sign <- struct{}{}
		}()
		for i := 0; i <= 10; i++ {
			time.Sleep(time.Millisecond * 600)
			c.add(1)
		}
	}()
	// Reader 1: samples the counter 5 times, ~300ms apart.
	go func() {
		defer func() {
			sign <- struct{}{}
		}()
		for j := 1; j <= 5; j++ {
			time.Sleep(time.Millisecond * 300)
			log.Printf("The number is counter:%d [%d-%d]", c.number(), 1, j)
		}
	}()
	// Reader 2: identical to reader 1, tagged with id 2.
	go func() {
		defer func() {
			sign <- struct{}{}
		}()
		for k := 1; k <= 5; k++ {
			time.Sleep(time.Millisecond * 300)
			log.Printf("The number is counter:%d [%d-%d]", c.number(), 2, k)
		}
	}()
	// Wait for all three goroutines to signal completion.
	<-sign
	<-sign
	<-sign
}
// redundantUnlock demonstrates which unlock sequences panic on an RWMutex.
// The commented-out calls would each panic if enabled.
func redundantUnlock() {
	var rwMu sync.RWMutex
	// Case 1: unlocking a mutex that was never locked panics immediately.
	// rwMu.Unlock()
	// Case 2: same for releasing a read lock that is not held.
	// rwMu.RUnlock()
	// Case 3: one RLock paired with exactly one RUnlock is fine.
	rwMu.RLock()
	// rwMu.RUnlock() — a second RUnlock here would panic
	rwMu.RUnlock()
	// Case 4: one Lock paired with exactly one Unlock is fine.
	rwMu.Lock()
	// rwMu.Unlock()
	rwMu.Unlock()
}
// TestMuntexRW (sic — "Mutex" is misspelled in the name) exercises the
// counter under concurrent readers/writers and the unlock demos above.
func TestMuntexRW(t *testing.T) {
	c := counter{}
	count(&c)
	redundantUnlock()
}
// TestRWLock shows readers blocking a writer: three goroutines hold read
// locks for ~2s each, so the final Lock() waits until all of them release.
// NOTE(review): the test returns while still holding the write lock, and
// the final Printf lacks a trailing newline.
func TestRWLock(t *testing.T) {
	var rwm sync.RWMutex
	for i := 0; i < 3; i++ {
		go func(i int) {
			fmt.Printf("Try to lock for reading... [%d]\n", i)
			rwm.RLock()
			fmt.Printf("Locked for reading. [%d]\n", i)
			time.Sleep(time.Second * 2)
			fmt.Printf("Try to unlock for reading... [%d]\n", i)
			rwm.RUnlock()
			fmt.Printf("Unlocked for reading. [%d]\n", i)
		}(i)
	}
	// Give the readers a head start before requesting the write lock.
	time.Sleep(time.Millisecond * 100)
	fmt.Println("Try to lock from writing...")
	rwm.Lock()
	fmt.Printf("Locked for writing.")
}
|
package reddit
import (
"net/http"
"io/ioutil"
"strings"
"encoding/json"
"log"
"fmt"
"os"
)
const (
	// RedditTokenEndpoint is reddit's OAuth2 token URL used for the
	// client_credentials grant.
	RedditTokenEndpoint = "https://www.reddit.com/api/v1/access_token"
)
// Application holds the reddit API credentials plus the User-Agent string
// to send with every request.
type Application struct {
	ClientId     string
	ClientSecret string
	UserAgent    string
}

// NewApp builds an Application from the given credentials and user agent.
func NewApp(clientId string, clientSecret string, userAgent string) *Application {
	app := &Application{}
	app.ClientId = clientId
	app.ClientSecret = clientSecret
	app.UserAgent = userAgent
	return app
}
// Auth exchanges the application's client credentials for an OAuth access
// token and returns a Reddit client carrying that token. Any failure is
// fatal via log.Fatal, matching the rest of this package.
func (app *Application) Auth() *Reddit {
	// Prepare the token request (client_credentials grant).
	grantType := strings.NewReader("grant_type=client_credentials")
	req, err := http.NewRequest("POST", RedditTokenEndpoint, grantType)
	if err != nil {
		// BUG FIX: err was previously ignored; a bad request would have
		// nil-dereferenced req on the next line.
		log.Fatal(err)
	}
	req.SetBasicAuth(app.ClientId, app.ClientSecret)
	req.Header.Set("User-Agent", app.UserAgent)
	client := &http.Client{}
	// Send request
	res, err := client.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	// BUG FIX: the response body was never closed, leaking the connection.
	defer res.Body.Close()
	// Check for valid response
	if res.StatusCode != 200 {
		log.Fatal("unable to auth client")
	}
	// Extract request body
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		log.Fatal(err)
	}
	var reddit Reddit
	err = json.Unmarshal(body, &reddit)
	if err != nil {
		log.Fatal(err)
	}
	// Pass along the user agent so API calls identify themselves.
	reddit.UserAgent = app.UserAgent
	return &reddit
}
// Reddit is an authenticated API client: the bearer token returned by the
// token endpoint plus the User-Agent to send with each request.
type Reddit struct {
	AccessToken string `json:"access_token"`
	UserAgent   string
}
// GetPostComments fetches the top-level comments of a reddit post via the
// OAuth API. Request-building and network failures exit the process with
// status 2; JSON failures are fatal via log.Fatal (original behavior).
func (r *Reddit) GetPostComments(postId string) []Comment {
	url := fmt.Sprintf("https://oauth.reddit.com/comments/%s?depth=0&sort=top&showmore=true", postId)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err.Error())
		os.Exit(2)
	}
	req.Header.Set("Authorization", "bearer "+r.AccessToken)
	req.Header.Set("User-Agent", r.UserAgent)
	client := &http.Client{}
	res, err := client.Do(req)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err.Error())
		os.Exit(2)
	}
	// BUG FIX: the response body was never closed, leaking the connection.
	defer res.Body.Close()
	body, err := ioutil.ReadAll(res.Body)
	if err != nil {
		fmt.Fprintf(os.Stderr, "%s\n", err.Error())
		os.Exit(2)
	}
	var jsonBody interface{}
	err = json.Unmarshal(body, &jsonBody)
	if err != nil {
		log.Fatal(err)
	}
	comments := parseCommentJson(jsonBody)
	return comments
}
// parseCommentJson walks the decoded JSON returned by the reddit comments
// endpoint and collects every "t1" (comment) child into a []Comment.
// BUG FIX: the original used unchecked type assertions and panicked on any
// response that didn't match the expected shape; malformed entries are now
// skipped instead. Marshal/unmarshal errors remain fatal (original style).
func parseCommentJson(raw interface{}) []Comment {
	comments := []Comment{}
	array, ok := raw.([]interface{})
	if !ok {
		return comments
	}
	for _, list := range array {
		listMap, ok := list.(map[string]interface{})
		if !ok {
			continue
		}
		data, ok := listMap["data"].(map[string]interface{})
		if !ok {
			continue
		}
		children, ok := data["children"].([]interface{})
		if !ok {
			continue
		}
		for _, child := range children {
			childMap, ok := child.(map[string]interface{})
			if !ok {
				continue
			}
			if kind, _ := childMap["kind"].(string); kind != "t1" {
				continue
			}
			commentData, ok := childMap["data"].(map[string]interface{})
			if !ok {
				continue
			}
			commentDataString, err := json.Marshal(commentData)
			if err != nil {
				log.Fatal(err)
			}
			var comment Comment
			err = json.Unmarshal(commentDataString, &comment)
			if err != nil {
				log.Fatal(err)
			}
			comments = append(comments, comment)
		}
	}
	return comments
}
|
package quacktors
import (
"github.com/Azer0s/quacktors/mailbox"
"github.com/Azer0s/quacktors/metrics"
"github.com/opentracing/opentracing-go"
"sync"
)
//The Actor interface defines the methods a struct has to implement
//so it can be spawned by quacktors.
type Actor interface {
	//Init is called when an Actor is initialized. It is
	//guaranteed to be called before an Actor has been registered
	//or even started. Typically, Init is used to start monitors
	//to other actors or do some setup work. The caller
	//function provides a Context to the Init function.
	//Context can be used to interact with other actors
	//(e.g. send, monitor, etc) or modify the current Actor
	//(e.g. quit, defer actions, etc).
	Init(ctx *Context)
	//Run is called when an Actor receives a Message. The caller
	//function provides both a Context as well as the actual
	//Message to the Run function. Context can then be used to
	//interact with other actors (e.g. send, monitor, etc) or
	//modify the current Actor (e.g. quit, defer actions, etc).
	//Run is invoked once per received message, in arrival order.
	Run(ctx *Context, message Message)
}
//The StatelessActor struct is the Actor implementation that is
//used when using Spawn or SpawnWithInit. As the name implies,
//the StatelessActor doesn't have a state and just requires one
//anonymous function as the initializer (for Init) and another one
//as the run function (for Run) to work.
//ReceiveFunction can be nil at construction time (it may be assigned
//inside InitFunction), InitFunction has to be set by the caller.
type StatelessActor struct {
	InitFunction    func(ctx *Context)
	ReceiveFunction func(ctx *Context, message Message)
}
//Init initializes the StatelessActor by calling InitFunction if it
//is not nil. Init panics if ReceiveFunction is not set.
//Note: the nil-check deliberately runs after InitFunction, so the
//initializer itself may assign ReceiveFunction.
func (s *StatelessActor) Init(ctx *Context) {
	if s.InitFunction != nil {
		s.InitFunction(ctx)
	}
	if s.ReceiveFunction == nil {
		panic("ReceiveFunction of a StatelessActor cannot be nil")
	}
}
//Run forwards both the Message and the Context to the ReceiveFunction
//when the StatelessActor receives a message.
func (s *StatelessActor) Run(ctx *Context, message Message) {
	s.ReceiveFunction(ctx, message)
}
// doSend delivers message to the actor behind to, routing either to a
// remote machine's connection or to the local actor's message channel.
// It blocks until the hand-off goroutine has finished (or the message was
// dropped), which preserves per-sender message ordering.
func doSend(to *Pid, message Message, spanContext opentracing.SpanContext) {
	returnChan := make(chan bool)
	go func() {
		defer func() {
			if r := recover(); r != nil {
				//This happens if we write to the messageChan while the actor or remote connection is being closed
				metrics.RecordUnhandled(to.Id)
			}
			//As soon as we have put the message into the buffered messageChan, return
			//This is to preserve message ordering
			returnChan <- true
		}()
		if to.MachineId != machineId {
			//Pid is not on this machine
			m, ok := getMachine(to.MachineId)
			if ok && m.connected {
				m.messageChan <- remoteMessageTuple{
					To:          to,
					Message:     message,
					SpanContext: spanContext,
				}
				metrics.RecordSendRemote(to.Id)
			}
			// Unknown or disconnected machine: the message is silently dropped.
			return
		}
		//If the actor has already quit, do nothing
		if to.messageChan == nil {
			//Maybe the current pid instance is just empty but the pid actually does exist on our local machine
			//This can happen when you send the pid to a remote machine and receive it back
			p, ok := getByPidId(to.Id)
			if ok {
				p.messageChan <- localMessage{
					message:     message,
					spanContext: spanContext,
				}
				metrics.RecordSendLocal(p.Id)
			}
			return
		}
		to.messageChan <- localMessage{
			message:     message,
			spanContext: spanContext,
		}
		metrics.RecordSendLocal(to.Id)
	}()
	<-returnChan
}
// recordDroppedMessages marks any messages still queued in mb as dropped
// in the metrics, keyed by the owning pid.
func recordDroppedMessages(pidId string, mb *mailbox.Mailbox) {
	if mb.Len() == 0 {
		return
	}
	// Anything still pending when the actor dies counts as dropped.
	metrics.RecordDrop(pidId, mb.Len())
}
// startActor spins up the goroutine for actor: it creates the mailbox and
// control channels, runs Init synchronously, then enters the receive loop
// until the actor quits, panics, or is poisoned. It returns the new Pid.
func startActor(actor Actor) *Pid {
	quitChan := make(chan bool)      //channel to quit
	mb := mailbox.New()              //message mailbox
	monitorChan := make(chan *Pid)   //channel to notify the actor of who wants to monitor it
	demonitorChan := make(chan *Pid) //channel to notify the actor of who wants to unmonitor it
	scheduled := make(map[string]chan bool)
	monitorQuitChannels := make(map[string]chan bool)
	pid := createPid(quitChan, mb.In(), monitorChan, demonitorChan, scheduled, monitorQuitChannels)
	ctx := &Context{
		self:      pid,
		Logger:    contextLogger{pid: pid.Id},
		sendLock:  &sync.Mutex{},
		deferred:  make([]func(), 0),
		traceFork: opentracing.FollowsFrom,
	}
	//Initialize the actor
	actor.Init(ctx)
	//If the init was successful, record the spawn
	metrics.RecordSpawn(pid.Id)
	logger.Info("starting actor",
		"pid", pid.Id)
	messageChan := mb.Out()
	go func() {
		defer func() {
			//We don't want to forward a panic
			if r := recover(); r != nil {
				if _, ok := r.(quitAction); ok {
					logger.Info("actor quit",
						"pid", pid.Id)
				} else {
					//if we did pick up a panic, log it
					logger.Warn("actor quit due to panic",
						"pid", pid.Id,
						"panic", r)
				}
			}
			//We don't really care how the actor died, we just wanna know that it did
			metrics.RecordDie(pid.Id)
			recordDroppedMessages(pid.Id, mb)
			// Deferred actions run last; each is isolated so one panicking
			// action cannot prevent the others from running.
			if len(ctx.deferred) != 0 {
				ctx.Logger.Debug("executing deferred actor actions")
				for _, action := range ctx.deferred {
					func() {
						defer func() {
							if r := recover(); r != nil {
								//action failed but we want to ignore that
							}
						}()
						action()
					}()
				}
				ctx.deferred = make([]func(), 0)
			}
			pid.cleanup()
		}()
		for {
			select {
			case <-quitChan:
				logger.Info("actor received quit event",
					"pid", pid.Id)
				return
			case mi := <-messageChan:
				metrics.RecordReceive(pid.Id)
				m := mi.(localMessage)
				if _, ok := m.message.(PoisonPill); ok && !ctx.passthroughPoisonPill {
					logger.Info("actor received poison pill",
						"pid", pid.Id)
					//Quit actor on PoisonPill message
					return
				}
				ctx.span = nil
				func() {
					// Start a tracing span only when the sender supplied a
					// parent context and the actor opted into tracing.
					if m.spanContext != nil && ctx.traceName != "" {
						span := opentracing.GlobalTracer().StartSpan(ctx.traceName,
							ctx.traceFork(m.spanContext))
						span.SetTag("pid", pid.Id)
						span.SetTag("machine_id", pid.MachineId)
						ctx.span = span
						defer span.Finish()
					}
					actor.Run(ctx, m.message)
				}()
				//Clean after run so the span won't be sent in any defers if the actor goes down right after
				ctx.span = nil
			case monitor := <-monitorChan:
				logger.Info("actor received monitor request",
					"pid", pid.Id,
					"monitor_gpid", monitor.String())
				pid.setupMonitor(monitor)
			case monitor := <-demonitorChan:
				logger.Info("actor received demonitor request",
					"pid", pid.Id,
					"monitor_gpid", monitor.String())
				pid.removeMonitor(monitor)
			}
		}
	}()
	return pid
}
|
package session
import (
"go/internal/pkg/api/app/request"
"go/internal/pkg/response"
"net/http"
"github.com/gin-gonic/gin"
)
// DeleteHandler removes session data for the authenticated user. The JSON
// body is bound first, then the email from the auth claims is forced onto
// the request, so a client cannot act on another user's session by
// supplying a different email in the payload.
func (h *sessionHandler) DeleteHandler(ctx *gin.Context) {
	var req request.SessionRequest
	if err := ctx.ShouldBindJSON(&req); err != nil {
		response.NewErrorResponse(ctx, http.StatusBadRequest, err, "failed to delete session data")
		return
	}
	// BUG FIX: the claim email was previously assigned before binding, so
	// ShouldBindJSON could overwrite it with a client-supplied value.
	req.Email = ctx.GetString("userEmail")
	resp, err := h.uc.DeleteUseCase(req)
	if err != nil {
		response.NewErrorResponse(ctx, http.StatusUnprocessableEntity, err, "failed to delete session data")
		return
	}
	response.NewSuccessResponse(ctx, http.StatusOK, resp)
}
|
package templatescompiler_test
import (
"errors"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
boshlog "github.com/cloudfoundry/bosh-agent/logger"
fakeblobs "github.com/cloudfoundry/bosh-agent/blobstore/fakes"
fakecmd "github.com/cloudfoundry/bosh-agent/platform/commands/fakes"
fakesys "github.com/cloudfoundry/bosh-agent/system/fakes"
fakebmdepl "github.com/cloudfoundry/bosh-micro-cli/deployment/fakes"
fakebmtemp "github.com/cloudfoundry/bosh-micro-cli/templatescompiler/fakes"
bmdepl "github.com/cloudfoundry/bosh-micro-cli/deployment"
bmrel "github.com/cloudfoundry/bosh-micro-cli/release"
. "github.com/cloudfoundry/bosh-micro-cli/templatescompiler"
)
// TemplatesCompiler suite: compiling job templates renders them into a
// temp dir, tars the result, uploads the tarball to the blobstore, and
// records the blob in the templates repo. One Context per failure mode.
var _ = Describe("TemplatesCompiler", func() {
	var (
		templatesCompiler TemplatesCompiler
		jobRenderer       *fakebmtemp.FakeJobRenderer
		compressor        *fakecmd.FakeCompressor
		blobstore         *fakeblobs.FakeBlobstore
		templatesRepo     *fakebmtemp.FakeTemplatesRepo
		fs                *fakesys.FakeFileSystem
		compileDir        string
		jobs              []bmrel.Job
		deployment        bmdepl.Deployment
		logger            boshlog.Logger
	)
	BeforeEach(func() {
		// All collaborators are fakes; compression yields a fixed tarball path.
		jobRenderer = fakebmtemp.NewFakeJobRenderer()
		compressor = fakecmd.NewFakeCompressor()
		compressor.CompressFilesInDirTarballPath = "fake-tarball-path"
		blobstore = fakeblobs.NewFakeBlobstore()
		fs = fakesys.NewFakeFileSystem()
		templatesRepo = fakebmtemp.NewFakeTemplatesRepo()
		deployment = fakebmdepl.NewFakeDeployment()
		deployment.Properties["fake-property-key"] = "fake-property-value"
		logger = boshlog.NewLogger(boshlog.LevelNone)
		templatesCompiler = NewTemplatesCompiler(
			jobRenderer,
			compressor,
			blobstore,
			templatesRepo,
			fs,
			logger,
		)
		var err error
		compileDir, err = fs.TempDir("bosh-micro-cli-tests")
		Expect(err).ToNot(HaveOccurred())
		fs.TempDirDir = compileDir
	})
	Context("with a job", func() {
		BeforeEach(func() {
			jobs = []bmrel.Job{
				bmrel.Job{
					Name:          "fake-job-1",
					ExtractedPath: "fake-extracted-path",
					Templates: map[string]string{
						"cpi.erb": "/bin/cpi",
					},
				},
			}
			blobstore.CreateBlobID = "fake-blob-id"
			blobstore.CreateFingerprint = "fake-sha1"
			record := TemplateRecord{
				BlobID:   "fake-blob-id",
				BlobSHA1: "fake-sha1",
			}
			templatesRepo.SetSaveBehavior(jobs[0], record, nil)
		})
		It("renders job templates", func() {
			fs.TempDirDir = "/fake-temp-dir"
			err := templatesCompiler.Compile(jobs, deployment)
			Expect(err).ToNot(HaveOccurred())
			Expect(jobRenderer.RenderInputs).To(ContainElement(
				fakebmtemp.RenderInput{
					SourcePath:      "fake-extracted-path",
					DestinationPath: "/fake-temp-dir",
					Job: bmrel.Job{
						Name:          "fake-job-1",
						Fingerprint:   "",
						SHA1:          "",
						ExtractedPath: "fake-extracted-path",
						Templates: map[string]string{
							"cpi.erb": "/bin/cpi",
						},
						PackageNames: nil,
						Packages:     nil,
						Properties:   nil,
					},
					Properties: map[string]interface{}{
						"fake-property-key": "fake-property-value",
					},
					DeploymentName: "fake-deployment-name",
				}),
			)
		})
		It("cleans the temp folder to hold the compile result for job", func() {
			err := templatesCompiler.Compile(jobs, deployment)
			Expect(err).ToNot(HaveOccurred())
			Expect(fs.FileExists(compileDir)).To(BeFalse())
		})
		It("generates templates archive", func() {
			err := templatesCompiler.Compile(jobs, deployment)
			Expect(err).ToNot(HaveOccurred())
			Expect(compressor.CompressFilesInDirDir).To(Equal(compileDir))
			Expect(compressor.CleanUpTarballPath).To(Equal("fake-tarball-path"))
		})
		It("saves archive in blobstore", func() {
			err := templatesCompiler.Compile(jobs, deployment)
			Expect(err).ToNot(HaveOccurred())
			Expect(blobstore.CreateFileName).To(Equal("fake-tarball-path"))
		})
		It("stores the compiled package blobID and fingerprint into the compile package repo", func() {
			err := templatesCompiler.Compile(jobs, deployment)
			Expect(err).ToNot(HaveOccurred())
			record := TemplateRecord{
				BlobID:   "fake-blob-id",
				BlobSHA1: "fake-sha1",
			}
			Expect(templatesRepo.SaveInputs).To(ContainElement(
				fakebmtemp.SaveInput{Job: jobs[0], Record: record},
			))
		})
		Context("when creating compilation directory fails", func() {
			BeforeEach(func() {
				fs.TempDirError = errors.New("fake-tempdir-error")
			})
			It("returns an error", func() {
				err := templatesCompiler.Compile(jobs, deployment)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("fake-tempdir-error"))
			})
		})
		Context("when rendering fails", func() {
			BeforeEach(func() {
				jobRenderer.SetRenderBehavior(
					"fake-extracted-path",
					errors.New("fake-render-error"),
				)
			})
			It("returns an error", func() {
				err := templatesCompiler.Compile(jobs, deployment)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("fake-render-error"))
			})
		})
		Context("when generating templates archive fails", func() {
			BeforeEach(func() {
				compressor.CompressFilesInDirErr = errors.New("fake-compress-error")
			})
			It("returns an error", func() {
				err := templatesCompiler.Compile(jobs, deployment)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("fake-compress-error"))
			})
		})
		Context("when saving to blobstore fails", func() {
			BeforeEach(func() {
				blobstore.CreateErr = errors.New("fake-blobstore-error")
			})
			It("returns an error", func() {
				err := templatesCompiler.Compile(jobs, deployment)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("fake-blobstore-error"))
			})
		})
		Context("when saving to templates repo fails", func() {
			BeforeEach(func() {
				record := TemplateRecord{
					BlobID:   "fake-blob-id",
					BlobSHA1: "fake-sha1",
				}
				err := errors.New("fake-template-error")
				templatesRepo.SetSaveBehavior(jobs[0], record, err)
			})
			It("returns an error", func() {
				err := templatesCompiler.Compile(jobs, deployment)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("fake-template-error"))
			})
		})
		Context("when one of the job fails to compile", func() {
			BeforeEach(func() {
				// Three jobs; rendering fails on the second, so compilation
				// must abort with that error.
				jobs = []bmrel.Job{
					bmrel.Job{
						Name:          "fake-job-1",
						ExtractedPath: "fake-extracted-path-1",
						Templates: map[string]string{
							"cpi.erb": "/bin/cpi",
						},
					},
					bmrel.Job{
						Name:          "fake-job-2",
						ExtractedPath: "fake-extracted-path-2",
						Templates: map[string]string{
							"cpi.erb": "/bin/cpi",
						},
					},
					bmrel.Job{
						Name:          "fake-job-3",
						ExtractedPath: "fake-extracted-path-3",
						Templates: map[string]string{
							"cpi.erb": "/bin/cpi",
						},
					},
				}
				jobRenderer.SetRenderBehavior(
					"fake-extracted-path-1",
					nil,
				)
				jobRenderer.SetRenderBehavior(
					"fake-extracted-path-2",
					errors.New("fake-render-2-error"),
				)
				record := TemplateRecord{
					BlobID:   "fake-blob-id",
					BlobSHA1: "fake-sha1",
				}
				templatesRepo.SetSaveBehavior(jobs[0], record, nil)
				templatesRepo.SetSaveBehavior(jobs[1], record, nil)
			})
			It("returns an error", func() {
				err := templatesCompiler.Compile(jobs, deployment)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("fake-render-2-error"))
			})
		})
	})
})
|
package web
import (
"ChangeInspector/logservice"
"encoding/json"
"net/http"
"github.com/gorilla/mux"
)
/*CommitsHandler serves commit details looked up in the LogService's git log.*/
type CommitsHandler struct {
	logService *logservice.LogService
}
// register wires the /commits/{hash} route into router: the commit
// matching the hash path variable is returned as JSON; an unknown hash
// yields 404 instead of silently encoding the map's zero value.
func (handler CommitsHandler) register(router *mux.Router) {
	router.HandleFunc("/commits/{hash}", func(w http.ResponseWriter, r *http.Request) {
		hash := mux.Vars(r)["hash"]
		commit, ok := handler.logService.GitLog.Commits[hash]
		if !ok {
			// BUG FIX: missing hashes previously returned a zero-value
			// commit with status 200.
			http.Error(w, "commit not found", http.StatusNotFound)
			return
		}
		w.Header().Set("Content-Type", "application/json")
		// Encode errors mean the response write itself failed; nothing
		// useful can be sent to the client at that point.
		json.NewEncoder(w).Encode(commit)
	})
}
|
/**
*
*
Given an array of positive integers arr, find a pattern of length m that is repeated k or more times.
A pattern is a subarray (consecutive sub-sequence) that consists of one or more values, repeated multiple times consecutively without overlapping. A pattern is defined by its length and the number of repetitions.
Return true if there exists a pattern of length m that is repeated k or more times, otherwise return false.
Example 1:
Input: arr = [1,2,4,4,4,4], m = 1, k = 3
Output: true
Explanation: The pattern (4) of length 1 is repeated 4 consecutive times. Notice that pattern can be repeated k or more times but not less.
Example 2:
Input: arr = [1,2,1,2,1,1,1,3], m = 2, k = 2
Output: true
Explanation: The pattern (1,2) of length 2 is repeated 2 consecutive times. Another valid pattern (2,1) is also repeated 2 times.
Example 3:
Input: arr = [1,2,1,2,1,3], m = 2, k = 3
Output: false
Explanation: The pattern (1,2) is of length 2 but is repeated only 2 times. There is no pattern of length 2 that is repeated 3 or more times.
Example 4:
Input: arr = [1,2,3,1,2], m = 2, k = 2
Output: false
Explanation: Notice that the pattern (1,2) exists twice but not consecutively, so it doesn't count.
Example 5:
Input: arr = [2,2,2,2], m = 2, k = 3
Output: false
Explanation: The only pattern of length 2 is (2,2) however it's repeated only twice. Notice that we do not count overlapping repetitions.
Constraints:
2 <= arr.length <= 100
1 <= arr[i] <= 100
1 <= m <= 100
2 <= k <= 100
*
*
*/
// Approach 1: brute force (see the JS version).
// Approach 2: count consecutive repeat positions — clever, but easy to get wrong.
/* *
 * Checking count == m * k directly does not work without extra logic,
 * and count cannot simply start at 1.
 * Edge case: [1,2,1,2,1,3] with m=2, k=3 (6 numbers) — the middle 1 would
 * be miscounted as a repeat; there are further boundary conditions too.
 * */
// containsPattern reports whether arr contains a consecutive pattern of
// length m repeated at least k times (repetitions must not overlap).
//
// It counts positions where arr[i] == arr[i+m]; a run of (k-1)*m such
// matches means the same m-length window repeats k times back to back.
func containsPattern(arr []int, m, k int) bool {
	// BUG FIX: the original had no bool return type (did not compile) and
	// returned false inside the loop, exiting after the first iteration.
	count := 0
	for i := 0; i < len(arr)-m; i++ {
		if arr[i] == arr[i+m] {
			count++
			if count == (k-1)*m {
				return true
			}
		} else {
			count = 0
		}
	}
	return false
}
package main
import (
"context"
"fmt"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_logrus "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus"
grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery"
grpc_ctxtags "github.com/grpc-ecosystem/go-grpc-middleware/tags"
grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"google.golang.org/grpc/reflection"
"net"
"net/http"
"os"
"path"
"runtime"
calculator "github.com/radutopala/grpc-calculator/api"
"github.com/radutopala/grpc-calculator/service"
jaeger_metrics "github.com/uber/jaeger-lib/metrics"
grpc_runtime "github.com/grpc-ecosystem/grpc-gateway/runtime"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/uber/jaeger-client-go/config"
"github.com/uber/jaeger-client-go/rpcmetrics"
prometheus_metrics "github.com/uber/jaeger-lib/metrics/prometheus"
log "github.com/sirupsen/logrus"
"github.com/urfave/cli"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// panicHandler converts a recovered panic into a gRPC Internal error.
// It logs a full stack trace of all goroutines first, so the crash site
// stays visible even though the RPC itself returns a clean status.
var panicHandler = grpc_recovery.RecoveryHandlerFunc(func(p interface{}) error {
	// 64 KiB is enough room for a multi-goroutine stack dump.
	buf := make([]byte, 1<<16)
	runtime.Stack(buf, true)
	log.Errorf("panic recovered: %+v", string(buf))
	return status.Errorf(codes.Internal, "%s", p)
})
// main configures the CLI application (name, usage, version, flags) and
// delegates all real work to start via app.Action.
func main() {
	app := cli.NewApp()
	app.Name = path.Base(os.Args[0])
	app.Usage = "Calculator Server"
	app.Version = "0.0.1"
	app.Flags = flags
	app.Action = start
	if err := app.Run(os.Args); err != nil {
		log.Fatal(err)
	}
}
// start boots the calculator service: it starts a gRPC server with a full
// interceptor chain (tags, Jaeger tracing, Prometheus metrics, logrus
// logging, panic recovery), serves Prometheus metrics on a side HTTP port,
// and finally fronts the gRPC API with a grpc-gateway JSON proxy that
// blocks on ListenAndServe.
func start(c *cli.Context) {
	lis, err := net.Listen("tcp", c.String("bind-grpc"))
	if err != nil {
		log.Fatalf("Failed to listen: %v", c.String("bind-grpc"))
	}
	// Logrus
	logger := log.NewEntry(log.New())
	grpc_logrus.ReplaceGrpcLogger(logger)
	log.SetLevel(log.InfoLevel)
	// Prometheus monitoring
	metrics := prometheus_metrics.New()
	// Jaeger tracing
	cfg := config.Configuration{
		Sampler: &config.SamplerConfig{
			Type:  "const",
			Param: c.Float64("jaeger-sampler"),
		},
		Reporter: &config.ReporterConfig{
			// NOTE(review): "jaeger-port" is declared as an IntFlag in
			// the flags table, so c.String returns "" here — confirm the
			// flag type (or use c.Int) so the agent address is valid.
			LocalAgentHostPort: c.String("jaeger-host") + ":" + c.String("jaeger-port"),
		},
	}
	tracer, closer, err := cfg.NewTracer(
		config.Logger(jaegerLoggerAdapter{logger}),
		config.Observer(rpcmetrics.NewObserver(metrics.Namespace(jaeger_metrics.NSOptions{Name: "calculator"}), rpcmetrics.DefaultNameNormalizer)),
	)
	if err != nil {
		logger.Fatalf("Cannot initialize Jaeger Tracer %s", err)
	}
	defer func() {
		_ = closer.Close()
	}()
	// Set GRPC Interceptors
	server := grpc.NewServer(
		grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
			grpc_ctxtags.StreamServerInterceptor(grpc_ctxtags.WithFieldExtractor(grpc_ctxtags.CodeGenRequestFieldExtractor)),
			grpc_opentracing.StreamServerInterceptor(grpc_opentracing.WithTracer(tracer)),
			grpc_prometheus.StreamServerInterceptor,
			grpc_logrus.StreamServerInterceptor(logger),
			grpc_recovery.StreamServerInterceptor(grpc_recovery.WithRecoveryHandler(panicHandler)),
		)),
		grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
			grpc_ctxtags.UnaryServerInterceptor(grpc_ctxtags.WithFieldExtractor(grpc_ctxtags.CodeGenRequestFieldExtractor)),
			grpc_opentracing.UnaryServerInterceptor(grpc_opentracing.WithTracer(tracer)),
			grpc_prometheus.UnaryServerInterceptor,
			grpc_logrus.UnaryServerInterceptor(logger),
			grpc_recovery.UnaryServerInterceptor(grpc_recovery.WithRecoveryHandler(panicHandler)),
		)),
	)
	// Register Calculator service, prometheus and HTTP service handler
	calculator.RegisterServiceServer(server, &service.Service{})
	reflection.Register(server)
	grpc_prometheus.Register(server)
	// Metrics endpoint runs on its own port so scraping never competes
	// with API traffic.
	go func() {
		mux := http.NewServeMux()
		mux.Handle("/metrics", promhttp.Handler())
		_ = http.ListenAndServe(c.String("bind-prometheus-http"), mux)
	}()
	log.Println("Starting Calculator service..")
	go func() {
		_ = server.Serve(lis)
	}()
	// The JSON gateway dials back into our own gRPC listener.
	conn, err := grpc.Dial(c.String("bind-grpc"), grpc.WithInsecure())
	if err != nil {
		panic("Couldn't contact grpc server")
	}
	mux := grpc_runtime.NewServeMux()
	err = calculator.RegisterServiceHandler(context.Background(), mux, conn)
	if err != nil {
		panic("Cannot serve http api")
	}
	_ = http.ListenAndServe(c.String("bind-http"), mux)
}
// jaegerLoggerAdapter bridges a logrus entry to the jaeger-client logger
// interface (Error + Infof).
type jaegerLoggerAdapter struct {
	logger *log.Entry
}

// Error implements the jaeger logger's Error by delegating to logrus.
func (l jaegerLoggerAdapter) Error(msg string) {
	l.logger.Error(msg)
}
// Infof implements the jaeger logger's Infof by delegating straight to the
// logrus entry's own formatted logger; pre-formatting with fmt.Sprintf and
// calling Info was redundant (logrus performs the identical Sprintf).
func (l jaegerLoggerAdapter) Infof(msg string, args ...interface{}) {
	l.logger.Infof(msg, args...)
}
// flags defines the CLI/environment configuration for the server.
//
// Fix: "jaeger-port" was declared as an IntFlag, but start() reads it with
// c.String("jaeger-port") when building the Jaeger agent address; for an
// IntFlag that accessor yields "", producing a broken "host:" address. It
// is now a StringFlag so the existing read site works as written.
var flags = []cli.Flag{
	cli.StringFlag{
		Name:   "bind-http",
		Usage:  "bind address for HTTP",
		EnvVar: "BIND_HTTP",
		Value:  ":8080",
	},
	cli.StringFlag{
		Name:   "bind-grpc",
		Usage:  "bind address for gRPC",
		EnvVar: "BIND_GRPC",
		Value:  ":2338",
	},
	cli.StringFlag{
		Name:   "bind-prometheus-http",
		Usage:  "bind prometheus address for HTTP",
		EnvVar: "BIND_PROMETHEUS_HTTP",
		Value:  ":8081",
	},
	cli.StringFlag{
		Name:   "jaeger-host",
		Usage:  "Jaeger hostname",
		EnvVar: "JAEGER_HOST",
		Value:  "127.0.0.1",
	},
	cli.StringFlag{
		Name:   "jaeger-port",
		Usage:  "Jaeger port",
		EnvVar: "JAEGER_PORT",
		Value:  "5775",
	},
	cli.Float64Flag{
		Name:   "jaeger-sampler",
		Usage:  "Jaeger sampler",
		EnvVar: "JAEGER_SAMPLER",
		Value:  0.05,
	},
	cli.StringFlag{
		Name:   "jaeger-tags",
		Usage:  "Jaeger tags",
		EnvVar: "JAEGER_TAGS",
		Value:  "calculator",
	},
}
|
package longestCommonPrefix
// longestCommonPrefix returns the longest string that is a prefix of every
// string in strs. An empty slice yields "".
func longestCommonPrefix(strs []string) string {
	switch len(strs) {
	case 0:
		return ""
	case 1:
		return strs[0]
	}
	prefix := strs[0]
	n := len(prefix)
	for _, candidate := range strs[1:] {
		// The common prefix can never be longer than either string.
		limit := n
		if len(candidate) < limit {
			limit = len(candidate)
		}
		matched := 0
		for matched < limit && candidate[matched] == prefix[matched] {
			matched++
		}
		n = matched
	}
	return prefix[:n]
}
|
/////////////////////////////////////////////////////////////////////
// arataca89@gmail.com
// 20210417
//
// func Count(s, substr string) int
//
// Retorna o número de ocorrências de substr em s.
// Se substr for "" retorna 1 + o número de Unicode code points em s.
//
// Fonte: https://golang.org/pkg/strings/
//
package main
import (
"fmt"
"strings"
)
// main demonstrates strings.Count: non-overlapping substring occurrences,
// and the special case of an empty substring (1 + number of runes).
func main() {
	fmt.Println(strings.Count("cheese", "e")) // 3
	fmt.Println(strings.Count("cheese", "z")) // 0
	fmt.Println(strings.Count("five", ""))    // 5
}
|
package duplicateobj
import (
"fmt"
"gopkg.in/oleiade/reflections.v1"
)
// getObjFieldString concatenates the named fields of object into one
// ">>"-separated key string used for duplicate detection.
func getObjFieldString(fieldsToExtract []string, object interface{}) string {
	key := ""
	for _, name := range fieldsToExtract {
		fieldValue, _ := reflections.GetField(object, name)
		key = fmt.Sprintf("%s>>%v", key, fieldValue)
	}
	return key
}
// FindDuplicateObj returns the indexes of objects whose extracted field
// values match those of an earlier object; the first occurrence of each
// distinct value set is not reported.
func FindDuplicateObj(fieldsToExtract []string, objects []interface{}) []int {
	seen := map[string]struct{}{}
	duplicates := []int{}
	for i, obj := range objects {
		key := getObjFieldString(fieldsToExtract, obj)
		if _, dup := seen[key]; dup {
			duplicates = append(duplicates, i)
		} else {
			seen[key] = struct{}{}
		}
	}
	return duplicates
}
|
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package redis
import (
"errors"
"github.com/garyburd/redigo/redis"
"github.com/goharbor/harbor/src/common/utils"
"time"
)
var (
	// ErrUnLock is returned by Free when the unlock script reports that
	// nothing was deleted (token mismatch or the lock already expired).
	ErrUnLock = errors.New("error to release the redis lock")
)

const (
	// unlockScript deletes the lock key only if its value still matches
	// our random token, so one client can never free another's lock.
	unlockScript = `
if redis.call("get",KEYS[1]) == ARGV[1] then
return redis.call("del",KEYS[1])
else
return 0
end
`
	// Retry cadence and lock TTL defaults used by DefaultOptions.
	defaultDelay    = 5 * time.Second
	defaultMaxRetry = 5
	defaultExpiry   = 600 * time.Second
)
// Mutex is a single-instance Redis distributed lock. key names the lock;
// value is a per-owner token that must match on release.
type Mutex struct {
	Conn  redis.Conn
	key   string
	value string
	opts  Options
}

// New builds a Mutex on conn for key with default options. If value is
// empty a random token is generated so Free can verify ownership.
func New(conn redis.Conn, key, value string) *Mutex {
	o := *DefaultOptions()
	if value == "" {
		value = utils.GenerateRandomString()
	}
	return &Mutex{conn, key, value, o}
}
// Require attempts to acquire the lock, retrying up to opts.maxRetry times
// with opts.retryDelay between attempts. It returns whether the lock was
// acquired and the error from the last attempt, if any.
//
// Cleanups over the previous version: the `err != nil || !isRequired`
// guard was always true at that point (success had already broken out of
// the loop), and the delay no longer runs after the final attempt, where
// no retry can follow.
func (rm *Mutex) Require() (bool, error) {
	var isRequired bool
	var err error
	for i := 0; i < rm.opts.maxRetry; i++ {
		isRequired, err = rm.require()
		if isRequired {
			break
		}
		if i < rm.opts.maxRetry-1 {
			time.Sleep(rm.opts.retryDelay)
		}
	}
	return isRequired, err
}
// require performs one lock attempt using SET key value NX PX (see
// https://redis.io/topics/distlock). It returns true only when Redis
// replies "OK", i.e. the key did not exist and is now ours, with the TTL
// taken from opts.expiry in milliseconds.
func (rm *Mutex) require() (bool, error) {
	reply, err := redis.String(rm.Conn.Do("SET", rm.key, rm.value, "NX", "PX", int(rm.opts.expiry/time.Millisecond)))
	if err != nil {
		return false, err
	}
	return reply == "OK", nil
}
// Free releases the lock by running the compare-and-delete Lua script
// (see https://redis.io/topics/distlock). It returns ErrUnLock when the
// script deleted nothing, meaning we no longer own the lock.
func (rm *Mutex) Free() (bool, error) {
	script := redis.NewScript(1, unlockScript)
	resp, err := redis.Int(script.Do(rm.Conn, rm.key, rm.value))
	if err != nil {
		return false, err
	}
	if resp == 0 {
		return false, ErrUnLock
	}
	return true, nil
}
// Options holds lock tuning parameters: the delay between acquisition
// retries, the lock's TTL, and the maximum number of attempts.
type Options struct {
	retryDelay time.Duration
	expiry     time.Duration
	maxRetry   int
}

// DefaultOptions returns the package defaults (5s retry delay, 600s TTL,
// 5 attempts).
func DefaultOptions() *Options {
	opt := &Options{
		retryDelay: defaultDelay,
		expiry:     defaultExpiry,
		maxRetry:   defaultMaxRetry,
	}
	return opt
}
|
package main
import "fmt"
// Invoker is anything that can be invoked with a single opaque argument.
type Invoker interface {
	// Call invokes the implementation with one argument.
	Call(interface{})
}

// FuncCaller adapts a plain function to the Invoker interface.
type FuncCaller func(interface{})

// Call implements Invoker by invoking the underlying function itself.
func (f FuncCaller) Call(p interface{}) {
	f(p)
}

// invoker demonstrates storing a function type behind an interface.
var invoker Invoker

func main() {
	invoker = FuncCaller(func(v interface{}) {
		fmt.Println("from function", v)
	})
	invoker.Call("Hello")
}
|
package config
import(
"encoding/json"
"io/ioutil"
"util"
"runtime"
"path/filepath"
"fmt"
//"os"
)
type ServiceAPI struct {
Key string `json: "key"`
Method string `json: "method"`
Uri string `json: "uri"`
Data string `json: "data"`
}
type ServiceItem struct {
Id string `json: "id"`
Host string `json: "host"`
Apis [] ServiceAPI `json: "apis"`
}
type ServiceConfig struct {
Services [] ServiceItem `json: "services"`
}
type ServiceConfigManager struct {
config ServiceConfig
}
// Parse reads filename and unmarshals its JSON into m.config. It panics
// when the file cannot be read; unmarshal errors go through
// util.CheckError. NOTE(review): the two error paths are inconsistent
// (panic vs util.CheckError) — confirm which behavior is intended.
func (m *ServiceConfigManager) Parse(filename string) {
	//dir, err := os.Getwd()
	//if err != nil {
	// fmt.Println(err)
	//}
	//fmt.Println("dir:", dir)
	chunks, err := ioutil.ReadFile(filename)
	//util.CheckError(err)
	if err != nil {
		//fmt.Println("Cannot read file:", filename, err)
		panic(err)
	}
	err = json.Unmarshal(chunks, &m.config)
	util.CheckError(err)
}
// GetService returns the configured service with the given id and whether
// it was found; a zero ServiceItem is returned when id is unknown.
func (m *ServiceConfigManager) GetService(id string) (ServiceItem, bool) {
	for _, svc := range m.config.Services {
		if svc.Id == id {
			return svc, true
		}
	}
	return ServiceItem{}, false
}
// GetApi looks up the API entry with the given key on service id and
// returns it with Uri prefixed by the service host. A zero ServiceAPI is
// returned when the service or key is unknown; if the key appears more
// than once, the last match wins (as before).
func (m *ServiceConfigManager) GetApi(id, key string) ServiceAPI {
	var api ServiceAPI
	item, found := m.GetService(id)
	if !found {
		return api
	}
	for _, candidate := range item.Apis {
		if candidate.Key == key {
			api = candidate
			api.Uri = item.Host + api.Uri
		}
	}
	return api
}
// NewServiceConfigManager locates serviceconfig.json next to this source
// file (via runtime.Caller) and returns a manager populated from it.
//
// Fixes: the old guard `pc < 0 || line < 0 || !ok` could only ever trip on
// !ok — pc is a uintptr (never negative) and line is non-negative on
// success — so the dead comparisons are gone; the path is now built with
// filepath.Join instead of string concatenation.
func NewServiceConfigManager() *ServiceConfigManager {
	_, filename, _, ok := runtime.Caller(0)
	if !ok {
		fmt.Println("Cannot read the serviceconfig.json")
		util.NewLog().Error("Cannot read the file serverconfig.json")
	}
	m := new(ServiceConfigManager)
	m.Parse(filepath.Join(filepath.Dir(filename), "serviceconfig.json"))
	return m
}
|
/*
A simple traceroute program written in Go.
*/
package main
import (
"flag"
"fmt"
"net"
"os"
"syscall"
"time"
)
// Probe parameters: probes go out toward SEND_PORT (the classic
// traceroute UDP port range); RECV_PORT 0 lets the kernel choose for the
// raw ICMP listener; TIMEOUT is the per-probe receive timeout in ms.
const (
	HOST      = "0.0.0.0"
	SEND_PORT = 33333
	RECV_PORT = 0
	TIMEOUT   = 5000
)

// ReturnArgs is the per-probe result produced by traceOne.
type ReturnArgs struct {
	ok      bool    // a usable ICMP reply was received and parsed
	done    bool    // destination reached (ICMP type 3, code 3 seen)
	addr    string  // reverse-DNS name; falls back to ip on lookup failure
	ip      string  // replying hop's IPv4 address
	elapsed float64 // round-trip time in milliseconds
}
// main parses the target host and -m (max hops) flag, resolves the host
// to its first address, and runs the traceroute.
func main() {
	flag.Usage = func() {
		fmt.Printf("Usage:\n  sudo ./traceroute [options] <domain/ip>\nOptions:\n")
		flag.PrintDefaults()
	}
	maxTTL := flag.Int("m", 30, "Set the max number of hops (max TTL to be reached). Default is 30")
	flag.Parse()
	if flag.NArg() != 1 {
		flag.Usage()
		return
	}
	host := flag.Args()[0]
	addrs, err := net.LookupHost(host)
	if err != nil {
		exitWithError(err)
	}
	// Use the first resolved address only.
	addr := addrs[0]
	fmt.Printf("traceroute to %v (%v), %v hops max\n", host, addr, *maxTTL)
	traceroute(addr, *maxTTL)
}
// traceroute sends three probes per TTL from 1..maxTTL, printing one line
// per hop ("*" for unanswered probes), and stops after the first TTL at
// which any probe reports the destination was reached.
func traceroute(ip string, maxTTL int) {
	addr := toAddr(ip, SEND_PORT)
	done := false
	for ttl := 1; ttl <= maxTTL; ttl++ {
		info := fmt.Sprintf("%v  ", ttl)
		for i := 0; i < 3; i++ {
			rr := traceOne(addr, ttl)
			if rr.done {
				done = true // use break TAG?
			}
			if rr.ok {
				info += fmt.Sprintf("%v(%v) %vms", rr.addr, rr.ip, rr.elapsed)
			} else {
				info += "*"
			}
			// Separate the three probe results with spaces.
			if i != 2 {
				info += "  "
			}
		}
		fmt.Println(info)
		if done {
			break
		}
	}
}
// traceOne sends a single probe with the given TTL and waits for the ICMP
// response on a raw socket. It returns a zero ReturnArgs (ok=false) on
// send/receive failure or an unexpected ICMP type; otherwise it fills in
// the round-trip time, the replying hop's address, and whether the
// destination itself answered.
func traceOne(addr *syscall.SockaddrInet4, ttl int) *ReturnArgs {
	// cli sends the probe as UDP; srv listens for the ICMP error reply.
	cli, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_UDP)
	if err != nil {
		exitWithError(err)
	}
	srv, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_RAW, syscall.IPPROTO_ICMP)
	if err != nil {
		exitWithError(err)
	}
	defer syscall.Close(cli)
	defer syscall.Close(srv)
	// set ttl, stolen from somewhere else...
	// https://github.com/aeden/traceroute/blob/master/traceroute.go#L195
	if err := syscall.SetsockoptInt(cli, syscall.SOL_IP, syscall.IP_TTL, ttl); err != nil {
		exitWithError(err)
	}
	// set timeout, stolen from somewhere else...
	// https://github.com/aeden/traceroute/blob/master/traceroute.go#L197
	tv := syscall.NsecToTimeval(1e6 * TIMEOUT)
	if err := syscall.SetsockoptTimeval(srv, syscall.SOL_SOCKET, syscall.SO_RCVTIMEO, &tv); err != nil {
		exitWithError(err)
	}
	if err := syscall.Bind(srv, toAddr(HOST, RECV_PORT)); err != nil {
		exitWithError(err)
	}
	rr := &ReturnArgs{}
	start := time.Now()
	if err := syscall.Sendto(cli, makeICMP(), 0, addr); err != nil {
		return rr
	}
	buf := make([]byte, 512)
	_, from, err := syscall.Recvfrom(srv, buf, 0)
	if err != nil {
		// Timeout or read error: report this probe as unanswered.
		return rr
	}
	// Convert nanoseconds to milliseconds.
	rr.elapsed = float64(time.Since(start).Nanoseconds()) / 1e6
	t, c := parseICMP(buf)
	if t == 3 && c == 3 { // Destination port unreachable, type==3 && code==3
		rr.done = true
	} else if t != 11 { // Time Exceeded, type==11 && code in (0,1)
		return rr
	}
	rr.ok = true
	rr.ip = toStr(from)
	// Best-effort reverse DNS; fall back to the raw IP.
	addrs, err := net.LookupAddr(rr.ip)
	if err != nil {
		rr.addr = rr.ip
	} else {
		rr.addr = addrs[0]
	}
	return rr
}
// exitWithError prints err on stdout and terminates with exit code 1.
func exitWithError(err error) {
	fmt.Println(err)
	os.Exit(1)
}
func toStr(addr syscall.Sockaddr) string {
b := addr.(*syscall.SockaddrInet4).Addr
return fmt.Sprintf("%v.%v.%v.%v", b[0], b[1], b[2], b[3])
}
// toAddr builds an IPv4 sockaddr from a dotted-quad string and a port.
// Like the original, it panics if the string is not a valid IPv4 address
// (net.ParseIP(...).To4() returns nil and the indexing faults).
func toAddr(addr string, port int) *syscall.SockaddrInet4 {
	ip4 := net.ParseIP(addr).To4()
	sa := &syscall.SockaddrInet4{Port: port}
	sa.Addr = [4]byte{ip4[0], ip4[1], ip4[2], ip4[3]}
	return sa
}
// parseICMP extracts the ICMP type and code from a raw IPv4 packet. The
// ICMP header begins right after the fixed 20-byte IPv4 header.
func parseICMP(value []byte) (int, int) {
	icmpType := value[20]
	icmpCode := value[21]
	return int(icmpType), int(icmpCode)
}
// makeICMP builds a minimal 8-byte ICMP echo-request packet and fills in
// its checksum, storing the low byte first to match checkSum's byte order.
func makeICMP() []byte {
	icmp := []byte{
		8, 0, // echo request, 8bit: type=8, 8bit: code=0
		0, 0, // 16bit: check sum=0(init)
		0, 0, 0, 0, // 32bit: not used=0
	}
	cs := checkSum(icmp)
	icmp[2] = byte(cs)
	icmp[3] = byte(cs >> 8)
	return icmp
}
// checkSum computes the RFC 1071 Internet checksum over value: 16-bit
// one's-complement sum of 16-bit words (low byte first, matching how
// makeICMP stores the result), folded and complemented.
//
// Fixes: the old code computed uint32(value[i+1]<<8) — shifting the byte
// BEFORE widening, which always yields 0, so every word's high byte was
// dropped. It also indexed value[i+1] unconditionally and panicked on
// odd-length input; an odd trailing byte is now summed on its own.
func checkSum(value []byte) uint16 {
	sum := uint32(0)
	for i, n := 0, len(value); i < n; i += 2 {
		sum += uint32(value[i])
		if i+1 < n {
			sum += uint32(value[i+1]) << 8
		}
	}
	// Fold the carries back into the low 16 bits.
	sum = (sum >> 16) + (sum & 0xffff)
	sum += (sum >> 16)
	return uint16(^sum)
}
|
package main
import (
// "log"
"fmt"
"net/http"
)
func HandleShutdown(w http.ResponseWriter, r *http.Request) {
fmt.Fprint(w, "Handle shutdown")
}
|
package main
import (
"html/template"
)
// userInfoTemplate renders the authenticated-user page; the template's
// sole data value is dumped verbatim.
var userInfoTemplate = template.Must(template.New("").Parse(`
<html><body>
This app is now authenticated to access your Google user info. Your details are:<br />
{{.}}
</body></html>
`))

// ErrorPage carries the data shown by errorTemplate.
type ErrorPage struct {
	Code    int         // HTTP-style error code shown to the user
	Message interface{} // arbitrary error payload, rendered with {{.Message}}
}

// errorTemplate renders a crash page from an ErrorPage value.
var errorTemplate = template.Must(template.New("").Parse(`
<html><body>
<h2>This app is crashed with error:</h2>
<h2>Code: {{.Code}}<br>
Message: «{{.Message}}»
</h2>
<a href="/">return to main page</a>
</body></html>
`))

// LoginPage carries the data shown by loginTemplate.
type LoginPage struct {
	GoogleUrl string // OAuth login URL
	Admin     string // contact link for access requests
}

// loginTemplate renders the login page from a LoginPage value.
var loginTemplate = template.Must(template.New("").Parse(`
<!DOCTYPE html>
<html>
<head><title>Login page</title></head>
<body>
<div style="text-align: center; font-size: 80%; font-family: Arial, sans-serif">
<p><a href="{{.GoogleUrl}}">Log in</a> with your Google account</p>
<p style="margin-top: 3em">
For access, contact with <a href="{{.Admin}}">administrator</a>.
</p>
:D
</div>
</body>
</html>
`))
|
package golang
// Well-known service identifiers for the blog example's backend services.
const (
	AuthService = "examples.blog.service.auth"
	UserService = "examples.blog.service.user"
)

// Identifier for the public API gateway.
const (
	ApiGateway = "examples.blog.api.gateway"
)
package helpers
import (
"math"
"strconv"
)
// GetAvatar2 builds the avatar CSS class string for a level. Levels below
// 100 yield just "avatar2"; otherwise a " lvl_<hundreds>" class is added,
// plus " lvl_plus_<tens>" when the tens-within-hundred part is non-zero
// (e.g. 123 -> "avatar2 lvl_100 lvl_plus_20").
func GetAvatar2(level int) string {
	const base = "avatar2"
	hundreds := math.Floor(float64(level)/100) * 100
	if hundreds < 100 {
		return base
	}
	result := base + " lvl_" + strconv.FormatFloat(hundreds, 'f', 0, 64)
	tens := math.Floor(float64(level)/10) * 10
	tensStr := strconv.FormatFloat(tens, 'f', 0, 64)
	// Last two digits give the tens offset inside the current hundred.
	suffix := tensStr[len(tensStr)-2:]
	if suffix != "00" {
		result += " lvl_plus_" + suffix
	}
	return result
}
|
package main
import (
"crypto/ecdsa"
"crypto/rand"
"errors"
"fmt"
"math/big"
"testing"
)
// Fixed secp256k1 key pair (hex-encoded) used only by this test.
const (
	TEST_PRIVATE_KEY = "fe90f04022ee37dfb4ccae2c9d2610932a1c7bd8f92b0a2e05cf8c7031ad5b1c"
	TEST_PUBLIC_KEY  = "023f00e77837b341841f587385594951d65179364c2d435d44457df19797012975"
)
// TestSignatureCommand builds a SignatureCommand, signs its byte
// representation with the fixed test key, then round-trips it through
// Bytes / NewSignatureCommandFromBytes and checks nothing was lost.
func TestSignatureCommand(t *testing.T) {
	command := SignatureCommand{}
	command.Type = TYPE_REGISTER_SIGNATURE
	command.Username = "TestUser"
	command.ImageName = "TestImage"
	command.TagName = "TestTag"
	command.KeyId = 0
	command.Digests = append(command.Digests, "sha256:eed4da4937cb562e9005f3c66eb8c3abc14bb95ad497c03dc89d66bcd172fc7f")
	command.Digests = append(command.Digests, "sha256:b6ca02dfe5e62c58dacb1dec16eb42ed35761c15562485f9da9364bb7c90b9b3")
	command.Digests = append(command.Digests, "sha256:afbfb84fad8a4d9a9818efcb4f084bde4b14934c91a531ae344814a5191a2eb6")
	command.Digests = append(command.Digests, "sha256:41829a143bccc7bee3849353e1ce4358d4cf1c7481f236d8194ea36113bf73d9")
	command.Digests = append(command.Digests, "sha256:86726b2a83fc76e0eeabb1ae34b1a128e1bb41d1efc2242c0a39cd7b94ca2b98")
	command.Digests = append(command.Digests, "sha256:5b0e63da2fb4bc9917bb6d00046cc7db229c15649cabe61439a6b2c0f01c5bf9")
	command.Digests = append(command.Digests, "sha256:c418a51fc08117c4b436d1c4317dc4c567f1874abb9d7297b8d794ce3ac59407")
	command.Digests = append(command.Digests, "sha256:eae6e471b603f3df6028a2a932a211707ce4032ab223e352b82ef62821d30a4c")
	// Reconstruct the private key from its hex scalar on the secp256k1
	// curve defined elsewhere in this package.
	var setSuccess bool
	privateKey := new(ecdsa.PrivateKey)
	privateKey.D, setSuccess = new(big.Int).SetString(TEST_PRIVATE_KEY, 16)
	if !setSuccess {
		t.Error(errors.New("Private Key is invalid."))
	}
	privateKey.PublicKey.Curve = secp256k1Curve
	r, s, err := ecdsa.Sign(rand.Reader, privateKey, command.BytesForSigning())
	if err != nil {
		t.Error(err)
	}
	// Signature layout: r's bytes followed by s's bytes.
	signature := r.Bytes()
	signature = append(signature, s.Bytes()...)
	copy(command.Signature[:], signature)
	commandStr := fmt.Sprintf("%+v", command)
	t.Logf("SignatureCommand: %s\n", commandStr)
	packedCommand := command.Bytes()
	t.Logf("Packed SignatureCommand (%d): %x\n", len(packedCommand), packedCommand)
	command2 := *NewSignatureCommandFromBytes(packedCommand)
	command2Str := fmt.Sprintf("%+v", command2)
	t.Logf("SignatureCommand: %s\n", command2Str)
	// Compare the formatted dumps of the original and the round-tripped
	// command for full-field equality.
	if commandStr != command2Str {
		t.Error("Failed to pack and unpack while preserving data\n")
	}
}
|
package main
import (
"fmt"
)
// main reads a circular track size n and m target positions, then prints
// the number of forward-only steps needed to visit the targets in order,
// starting from position 1 and wrapping past n back to 1.
func main() {
	var n, m int
	fmt.Scanf("%d %d\n", &n, &m)
	v := make([]uint64, m)
	for i := 0; i < m; i++ {
		fmt.Scanf("%d", &(v[i]))
	}
	var steps, pos uint64
	pos = 1
	steps = 0
	for i := 0; i < m; i++ {
		if v[i] >= pos {
			steps += v[i] - pos
		} else {
			// Target is behind us: wrap around the full track of n cells.
			steps += uint64(n) - (pos - v[i])
		}
		pos = v[i]
		//fmt.Println("steps=", steps)
	}
	fmt.Println(steps)
}
|
package storage
import (
"github.com/biezhi/gorm-paginator/pagination"
md "github.com/ebikode/eLearning-core/model"
)
// DBJournalStorage encapsulates the shared DB connection model and
// provides journal persistence operations.
type DBJournalStorage struct {
	*MDatabase
}

// NewDBJournalStorage initializes a journal storage over db.
func NewDBJournalStorage(db *MDatabase) *DBJournalStorage {
	return &DBJournalStorage{db}
}
// Get fetches a single journal by primary key, with its Course and User
// associations preloaded. It returns nil when no row matched or the query
// failed.
func (jdb *DBJournalStorage) Get(id uint) *md.Journal {
	journal := md.Journal{}
	// Select resource from database
	err := jdb.db.
		Preload("Course").
		Preload("User").
		Where("journals.id=?", id).First(&journal).Error
	if journal.ID < 1 || err != nil {
		return nil
	}
	return &journal
}
// GetByUserID fetches the first journal belonging to the given user, with
// Course and User preloaded; nil when none exists or the query failed.
func (jdb *DBJournalStorage) GetByUserID(id string) *md.Journal {
	journal := md.Journal{}
	// Select resource from database
	err := jdb.db.
		Preload("Course").
		Preload("User").
		Where("user_id=?", id).First(&journal).Error
	if journal.ID < 1 || err != nil {
		return nil
	}
	return &journal
}
// GetAll fetches one page of journals (newest first) with associations
// preloaded, using the pagination helper.
func (jdb *DBJournalStorage) GetAll(page, limit int) []*md.Journal {
	var journals []*md.Journal
	pagination.Paging(&pagination.Param{
		DB: jdb.db.
			Preload("Course").
			Preload("User").
			Order("created_at desc").
			Find(&journals),
		Page:    page,
		Limit:   limit,
		OrderBy: []string{"created_at desc"},
	}, &journals)
	return journals
}
// GetByUser fetches one page of the given user's journals with
// associations preloaded, ordered newest first by the paginator.
func (jdb *DBJournalStorage) GetByUser(userID string, page, limit int) []*md.Journal {
	var journals []*md.Journal
	pagination.Paging(&pagination.Param{
		DB: jdb.db.
			Preload("Course").
			Preload("User").
			Where("user_id=?", userID).
			Find(&journals),
		Page:    page,
		Limit:   limit,
		OrderBy: []string{"created_at desc"},
	}, &journals)
	return journals
}
// GetByCourse fetches every journal for a course, newest first, with
// associations preloaded (no pagination).
func (jdb *DBJournalStorage) GetByCourse(courseID int) []*md.Journal {
	var journals []*md.Journal
	// Select resource from database
	jdb.db.
		Preload("Course").
		Preload("User").
		Where("course_id=?", courseID).Order("created_at desc").Find(&journals)
	return journals
}
// Store inserts a new journal and re-fetches it via Get so the returned
// value carries its preloaded associations.
func (jdb *DBJournalStorage) Store(p md.Journal) (*md.Journal, error) {
	journal := p
	err := jdb.db.Create(&journal).Error
	if err != nil {
		return nil, err
	}
	return jdb.Get(journal.ID), nil
}
// Update saves all fields of an existing journal.
// NOTE(review): journal is already a pointer, so Save(&journal) passes a
// **md.Journal to gorm — confirm this is intended versus Save(journal).
func (jdb *DBJournalStorage) Update(journal *md.Journal) (*md.Journal, error) {
	err := jdb.db.Save(&journal).Error
	if err != nil {
		return nil, err
	}
	return journal, nil
}
// Delete removes a journal. A permanent delete (Unscoped) bypasses gorm's
// soft-delete and removes the row outright; otherwise the row is only
// soft-deleted.
func (jdb *DBJournalStorage) Delete(c md.Journal, isPermarnant bool) (bool, error) {
	var err error
	if isPermarnant {
		err = jdb.db.Unscoped().Delete(c).Error
	} else {
		err = jdb.db.Delete(c).Error
	}
	if err != nil {
		return false, err
	}
	return true, nil
}
|
package main
import (
"context"
"log"
"net"
pb "github.com/morimolymoly/grpc-is-fun/helloworld/pb"
"google.golang.org/grpc"
"google.golang.org/grpc/reflection"
)
const (
	// port is the TCP listen address of the gRPC server.
	port = ":8100"
)

// HelloWorldServer ... implements pb.HelloWorldServiceServer
type HelloWorldServer struct {
}

// SayHello ... implements pb.HelloWorldServiceServer.SayHello by greeting
// the caller by the name carried in the request.
func (s *HelloWorldServer) SayHello(ctx context.Context, req *pb.SayHelloRequest) (*pb.HelloReply, error) {
	return &pb.HelloReply{Message: "Hello, " + req.Name}, nil
}
// main listens on port, registers the HelloWorld service plus server
// reflection, and serves until a fatal error occurs.
func main() {
	lis, err := net.Listen("tcp", port)
	if err != nil {
		log.Fatalf("failed to open server: %v", err)
	}
	server := grpc.NewServer()
	pb.RegisterHelloWorldServiceServer(server, &HelloWorldServer{})
	reflection.Register(server)
	if err := server.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
|
package g
import (
//"github.com/elves-project/agent/src/thrift/scheduler"
"sync"
"time"
)
// StatGInfo is the agent's global status snapshot (mode, asset, address,
// start/heartbeat timestamps, version, and the configured app map).
type StatGInfo struct {
	Mode   string
	Asset  string
	Ip     string
	Uptime string
	Hbtime string
	Ver    string
	Apps   map[string]string
}

// sIns is one recorded task execution (timestamp, identity, and outcome).
type sIns struct {
	Time     string
	ID       string
	Type     string
	Mode     string
	Proxy    string
	App      string
	Func     string
	Costtime int32
	Flag     string
}

// StatCInfo describes one registered cron entry and its last run time.
type StatCInfo struct {
	Id       string
	App      string
	Func     string
	Mode     string
	Rule     string
	Comment  string
	Lastexec string
}

// Shared state: the global snapshot, a 40-entry task ring, a 20-entry
// error ring, and the cron map — each guarded by its own RWMutex.
var (
	stat  = StatGInfo{}
	tstat = [40]sIns{}
	estat = [20]string{}
	cstat = map[string]StatCInfo{}
	slock = new(sync.RWMutex)
	tlock = new(sync.RWMutex)
	elock = new(sync.RWMutex)
	clock = new(sync.RWMutex)
)
// InitStat populates the global stat snapshot from configuration at
// startup, stamping the start time and version.
// NOTE(review): stat is assigned without taking slock — presumably safe
// only because InitStat runs before any concurrent readers; confirm.
func InitStat() {
	s := StatGInfo{}
	s.Mode = "PRODUCT"
	if Config().Devmode.Enabled == true {
		s.Mode = "DEVELOP"
	}
	s.Asset = Config().Asset
	s.Ip = Config().Ip
	tm := time.Unix(time.Now().Unix(), 0)
	s.Uptime = tm.Format("2006/01/02 15:04:05")
	s.Ver = VERSION
	stat = s
}
// UpdateHbTime stamps the current time into the heartbeat field under the
// global stat lock.
func UpdateHbTime() {
	slock.Lock()
	defer slock.Unlock()
	tm := time.Unix(time.Now().Unix(), 0)
	stat.Hbtime = tm.Format("2006/01/02 15:04:05")
}
// SaveTaskStat pushes a finished task execution onto the front of the
// 40-entry task ring. flag 0 marks failure, -1 error, anything else
// success; costtime is the execution duration.
func SaveTaskStat(ins SchedulerInstruct, flag int32, costtime int32) {
	tlock.Lock()
	defer tlock.Unlock()
	// Arrays copy by value, so tptask is a pre-mutation snapshot used to
	// shift every existing entry one slot down.
	tptask := tstat
	tm := time.Unix(time.Now().Unix(), 0)
	tstat[0].Time = tm.Format("2006/01/02 15:04:05")
	tstat[0].App = ins.App
	tstat[0].Func = ins.Func
	tstat[0].ID = ins.ID
	tstat[0].Mode = ins.Mode
	tstat[0].Type = ins.Type
	tstat[0].Proxy = ins.Proxy
	if flag == 0 {
		tstat[0].Flag = "failure"
	} else if flag == -1 {
		tstat[0].Flag = "error"
	} else {
		tstat[0].Flag = "success"
	}
	tstat[0].Costtime = costtime
	// Shift the previous entries down; the oldest (index 39) drops off.
	for i := 1; i < 40; i++ {
		tstat[i] = tptask[i-1]
	}
}
// SaveErrorStat pushes a timestamped error message onto the front of the
// 20-entry error ring, dropping the oldest entry.
func SaveErrorStat(err string) {
	elock.Lock()
	defer elock.Unlock()
	// Snapshot first (array value copy), then shift it down one slot.
	previous := estat
	stamp := time.Unix(time.Now().Unix(), 0).Format("2006/01/02 15:04:05")
	estat[0] = stamp + "|" + err
	copy(estat[1:], previous[:19])
}
// UpdateCronSata stamps the last-execution time of cron entry id with the
// current time. (Map values are structs, so the entry is read, modified,
// and written back.)
func UpdateCronSata(id string) {
	clock.Lock()
	defer clock.Unlock()
	tm := time.Unix(time.Now().Unix(), 0)
	timestr := tm.Format("2006/01/02 15:04:05")
	SCI := cstat[id]
	//SCI1 := SCI[id]
	SCI.Lastexec = timestr
	cstat[id] = SCI
}
// SaveCronSata registers a cron entry under id with a zeroed
// last-execution timestamp.
func SaveCronSata(id string, app string, funcs string, mode string, rule string, comment string) {
	clock.Lock()
	defer clock.Unlock()
	cstat[id] = StatCInfo{
		Id:       id,
		App:      app,
		Func:     funcs,
		Mode:     mode,
		Rule:     rule,
		Comment:  comment,
		Lastexec: "0000/00/00 00:00:00",
	}
}
// DelCronSata removes the cron entry with the given id, if present.
func DelCronSata(id string) {
	//cstat = map[string]string{}
	clock.Lock()
	defer clock.Unlock()
	delete(cstat, id)
}
// GetCStat returns a snapshot copy of the cron statistics map.
//
// Fix: the previous version returned the shared cstat map itself; maps are
// references, so once the lock was released callers could read it
// concurrently with Save/Update/DelCronSata writers — a data race. A copy
// taken under the lock is returned instead (same type, so callers are
// unaffected).
func GetCStat() map[string]StatCInfo {
	clock.Lock()
	defer clock.Unlock()
	snapshot := make(map[string]StatCInfo, len(cstat))
	for id, info := range cstat {
		snapshot[id] = info
	}
	return snapshot
}
// GetAStat returns the configured application map.
func GetAStat() map[string]string {
	return Config().Apps
}

// GetGStat refreshes stat.Apps from configuration and returns the struct
// by value. NOTE(review): the returned struct still shares the Apps map
// with config — confirm callers treat it as read-only.
func GetGStat() StatGInfo {
	slock.Lock()
	defer slock.Unlock()
	stat.Apps = Config().Apps
	return stat
}

// GetTStat returns the task ring; arrays copy by value, so the caller
// gets an independent snapshot.
func GetTStat() [40]sIns {
	tlock.Lock()
	defer tlock.Unlock()
	return tstat
}

// GetEStat returns a value copy of the recent-error ring.
func GetEStat() [20]string {
	elock.Lock()
	defer elock.Unlock()
	return estat
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package image
import (
"fmt"
"testing"
)
// result pairs an expected rewritten image URL with the expected error.
type result struct {
	result string
	err    error
}

// registryTests drives TestReplaceRegistryInImageURL: each input image URL
// maps to the URL expected after its registry prefix is swapped for the
// test registries configured there. The last case exercises the
// unknown-registry error path.
var registryTests = []struct {
	in  string
	out result
}{
	{
		"docker.io/library/test:123",
		result{
			result: "test.io/library/test:123",
			err:    nil,
		},
	},
	{
		"docker.io/library/test",
		result{
			result: "test.io/library/test",
			err:    nil,
		},
	},
	{
		"test",
		result{
			result: "test.io/library/test",
			err:    nil,
		},
	},
	{
		"gcr.io/kubernetes-e2e-test-images/test:123",
		result{
			result: "test.io/kubernetes-e2e-test-images/test:123",
			err:    nil,
		},
	},
	{
		"k8s.gcr.io/test:123",
		result{
			result: "test.io/test:123",
			err:    nil,
		},
	},
	{
		"gcr.io/k8s-authenticated-test/test:123",
		result{
			result: "test.io/k8s-authenticated-test/test:123",
			err:    nil,
		},
	},
	{
		"gcr.io/gke-release/test:latest",
		result{
			result: "test.io/gke-release/test:latest",
			err:    nil,
		},
	},
	{
		"gcr.io/google-samples/test:latest",
		result{
			result: "test.io/google-samples/test:latest",
			err:    nil,
		},
	},
	{
		"gcr.io/k8s-staging-csi/test:latest",
		result{
			result: "test.io/k8s-staging-csi/test:latest",
			err:    nil,
		},
	},
	{
		"unknwon.io/google-samples/test:latest",
		result{
			result: "",
			err:    fmt.Errorf("Registry: unknwon.io/google-samples is missing in test/utils/image/manifest.go, please add the registry, otherwise the test will fail on air-gapped clusters"),
		},
	},
}
// ToDo Add Benchmark

// TestReplaceRegistryInImageURL overrides the package's registry globals
// with test values, then checks ReplaceRegistryInImageURL against every
// case in registryTests, comparing both the rewritten URL and the error
// message.
func TestReplaceRegistryInImageURL(t *testing.T) {
	// Set custom registries
	dockerLibraryRegistry = "test.io/library"
	e2eRegistry = "test.io/kubernetes-e2e-test-images"
	gcRegistry = "test.io"
	gcrReleaseRegistry = "test.io/gke-release"
	PrivateRegistry = "test.io/k8s-authenticated-test"
	sampleRegistry = "test.io/google-samples"
	k8sCSI = "test.io/k8s-staging-csi"
	for _, tt := range registryTests {
		t.Run(tt.in, func(t *testing.T) {
			s, err := ReplaceRegistryInImageURL(tt.in)
			// Errors are compared by message; nil expected errors only
			// match when err is nil too.
			if err != nil && err.Error() != tt.out.err.Error() {
				t.Errorf("got %q, want %q", err, tt.out.err)
			}
			if s != tt.out.result {
				t.Errorf("got %q, want %q", s, tt.out.result)
			}
		})
	}
}
|
package segment
import (
"fmt"
"net/http"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/assert"
)
// Shared test fixtures: an httptest server backed by mux stands in for the
// Segment API, and client is pointed at it by setup.
var (
	mux    *http.ServeMux
	client *Client
	server *httptest.Server
)

// Fixed credentials used by every test.
const (
	testToken     = "test-token"
	testWorkspace = "test-workspace"
)

// setup starts a fresh test server and rebinds client's baseURL to it.
func setup() {
	mux = http.NewServeMux()
	server = httptest.NewServer(mux)
	client = NewClient(testToken, testWorkspace)
	client.baseURL = server.URL
}

// teardown stops the server started by setup.
func teardown() {
	server.Close()
}
// testClientDefaultBaseURL asserts c still points at the package default
// base URL (i.e. setup has not rebound it).
func testClientDefaultBaseURL(t *testing.T, c *Client) {
	if c.baseURL == "" || c.baseURL != defaultBaseURL {
		t.Errorf("NewClient BaseURL = %v, expected %v", c.baseURL, defaultBaseURL)
	}
}

// Test_NewClient checks a freshly built client uses the default base URL.
func Test_NewClient(t *testing.T) {
	c := NewClient(testToken, testWorkspace)
	testClientDefaultBaseURL(t, c)
}

// Test_doRequest checks the happy path: the response body is returned
// verbatim as bytes.
func Test_doRequest(t *testing.T) {
	setup()
	defer teardown()
	testData := `{"testing":"things"}`
	mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, testData)
	})
	actual, err := client.doRequest(http.MethodGet, "/", nil)
	assert.NoError(t, err)
	expected := []byte(testData)
	assert.Equal(t, expected, actual)
}

// A 404 must surface as an error.
func Test_doRequest_httpError_notFound(t *testing.T) {
	setup()
	defer teardown()
	mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "Not Found", 404)
	})
	_, err := client.doRequest(http.MethodGet, "/", nil)
	assert.Error(t, err)
}

// A structured 400 body is decoded into a SegmentApiError.
func Test_doRequest_httpError_badRequest(t *testing.T) {
	setup()
	defer teardown()
	expected := SegmentApiError{Code: 5, Message: "foo"}
	errorJson := `{ "error": "foo", "code": 5 }`
	mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, errorJson, 400)
	})
	_, err := client.doRequest(http.MethodGet, "/", nil)
	assert.EqualError(t, err, expected.Error())
}

// An unstructured 400 body falls back to the generic "unknown" error.
func Test_doRequest_httpError_badRequestUnstructured(t *testing.T) {
	setup()
	defer teardown()
	expected := "request error unknown: bad request\n"
	mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "bad request", 400)
	})
	_, err := client.doRequest(http.MethodGet, "/", nil)
	assert.EqualError(t, err, expected)
}

// A structured 500 body is decoded the same way as a 400.
func Test_doRequest_httpError_internalServerError(t *testing.T) {
	setup()
	defer teardown()
	expected := SegmentApiError{Code: 5, Message: "foo"}
	errorJson := `{ "error": "foo", "code": 5 }`
	mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, errorJson, 500)
	})
	_, err := client.doRequest(http.MethodGet, "/", nil)
	assert.EqualError(t, err, expected.Error())
}

// HTTP 429 maps to the dedicated rate-limit error.
func Test_doRequest_httpError_toomanyrequests(t *testing.T) {
	setup()
	defer teardown()
	expected := SegmentApiError{Code: 429, Message: "too many requests to API"}
	mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "Too many", 429)
	})
	_, err := client.doRequest(http.MethodGet, "/", nil)
	assert.EqualError(t, err, expected.Error())
}
|
package main
import (
"fmt"
"strconv"
"strings"
)
// main plays the "memory game" (Advent of Code 2020 day 15) from the
// hard-coded starting numbers and prints the 30,000,000th number spoken.
//
// Bookkeeping: lastSeen holds the most recent turn each number was spoken
// and lastSeen2 the turn before that, so a repeated number's "age" is
// lastSeen - lastSeen2. Turns are 1-based.
func main() {
	starting := strings.Split("17,1,3,16,19,0", ",")
	var spoken []int
	lastSeen := map[int]int{}  // number: index
	lastSeen2 := map[int]int{} // number: index
	for index, s := range starting {
		i, _ := strconv.Atoi(s)
		lastSeen[i] = index + 1
		spoken = append(spoken, i)
	}
	turn := len(lastSeen) + 1
	for turn <= 30000000 {
		last := spoken[len(spoken)-1]
		if _, ok := lastSeen2[last]; !ok {
			// last was spoken for the first time: say 0.
			spoken = append(spoken, 0)
			if ls, ok := lastSeen[0]; ok {
				lastSeen2[0] = ls
			}
			lastSeen[0] = turn
		} else {
			// last was a repeat: say the gap between its last two turns.
			age := lastSeen[last] - lastSeen2[last]
			spoken = append(spoken, age)
			if ls, ok := lastSeen[age]; ok {
				lastSeen2[age] = ls
			}
			lastSeen[age] = turn
		}
		turn++
	}
	fmt.Println(spoken[30000000-1])
}
|
//
// Copyright (c) 2018
// Mainflux
//
// SPDX-License-Identifier: Apache-2.0
//
package cli
import (
"fmt"
"github.com/davecgh/go-spew/spew"
"github.com/fatih/color"
)
var (
	// Limit query parameter
	Limit uint = 10
	// Offset query parameter
	Offset uint
)

// flush pretty-prints i (spew dump) in blue on stdout.
func flush(i interface{}) {
	fmt.Printf("%s", color.BlueString(spew.Sdump(i)))
}

// logUsage prints a yellow usage hint.
func logUsage(u string) {
	fmt.Printf(color.YellowString("Usage: %s\n"), u)
}

// logError prints err's message in red.
func logError(err error) {
	fmt.Printf("%s\n", color.RedString(err.Error()))
}

// logOK prints a green OK marker.
func logOK() {
	fmt.Printf("%s\n", color.GreenString("OK"))
}
|
/*
Copyright 2015 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package adminapi
import (
"database/sql"
"fmt"
"github.com/ant0ine/go-json-rest/rest"
"github.com/crunchydata/crunchy-postgresql-manager-openshift/admindb"
"github.com/crunchydata/crunchy-postgresql-manager-openshift/cpmcontainerapi"
"github.com/crunchydata/crunchy-postgresql-manager-openshift/cpmserverapi"
"github.com/crunchydata/crunchy-postgresql-manager-openshift/logit"
"github.com/crunchydata/crunchy-postgresql-manager-openshift/util"
"net/http"
"time"
)
// CONTAINER_NOT_FOUND is the status value reported when a container no longer exists on the Docker server.
const CONTAINER_NOT_FOUND = "CONTAINER NOT FOUND"
// GetNode returns a single container ("node") by ID, including its current
// database status obtained by pinging PostgreSQL inside the container.
// Responds 401 on bad token, 400 on lookup/ping failure, 404 when the ID
// does not exist.
func GetNode(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		// FIX: log label was "BackupNow", a copy-paste from another handler.
		logit.Error.Println("GetNode: error " + err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}
	defer dbConn.Close()
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-read")
	if err != nil {
		logit.Error.Println("GetNode: validate token error " + err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	ID := r.PathParam("ID")
	if ID == "" {
		logit.Error.Println("GetNode: error node ID required")
		rest.Error(w, "node ID required", http.StatusBadRequest)
		return
	}
	results, err2 := admindb.GetContainer(dbConn, ID)
	// BUG FIX: check the error before inspecting results; the original read
	// results.ID first, examining a value that is invalid when the query failed.
	if err2 != nil {
		logit.Error.Println("GetNode: " + err2.Error())
		rest.Error(w, err2.Error(), http.StatusBadRequest)
		return
	}
	if results.ID == "" {
		rest.NotFound(w, r)
		return
	}
	var currentStatus = "UNKNOWN"
	var domain string
	domain, err = admindb.GetDomain(dbConn)
	if err != nil {
		logit.Error.Println("GetNode: " + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Ping the database inside the container to determine its live status.
	var pinghost = results.Name
	logit.Info.Println("pinging db on " + pinghost + "." + domain)
	currentStatus, err = GetPGStatus2(dbConn, results.Name, pinghost)
	if err != nil {
		logit.Error.Println("GetNode:" + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	logit.Info.Println("pinging db finished")
	node := ClusterNode{results.ID, results.ClusterID, results.ServerID,
		results.Name, results.Role, results.Image, results.CreateDate, currentStatus, results.ProjectID, results.ProjectName, results.ServerName, results.ClusterName}
	w.WriteJson(node)
}
// GetAllNodesForProject returns every container belonging to the given
// project ID. Status is not populated here (listing is metadata-only).
func GetAllNodesForProject(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		// FIX: log label was "BackupNow", a copy-paste from another handler.
		logit.Error.Println("GetAllNodesForProject: error " + err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}
	defer dbConn.Close()
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-read")
	if err != nil {
		logit.Error.Println("GetAllNodesForProject: validate token error " + err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	ID := r.PathParam("ID")
	if ID == "" {
		logit.Error.Println("GetAllNodesForProject: error project ID required")
		rest.Error(w, "project ID required", http.StatusBadRequest)
		return
	}
	results, err := admindb.GetAllContainersForProject(dbConn, ID)
	if err != nil {
		logit.Error.Println("GetAllNodesForProject: " + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		// BUG FIX: the original fell through and wrote a JSON body on top of
		// the error response; return immediately instead.
		return
	}
	// Copy the DB rows into the wire representation.
	nodes := make([]ClusterNode, len(results))
	for i := range results {
		nodes[i].ID = results[i].ID
		nodes[i].Name = results[i].Name
		nodes[i].ClusterID = results[i].ClusterID
		nodes[i].ServerID = results[i].ServerID
		nodes[i].Role = results[i].Role
		nodes[i].Image = results[i].Image
		nodes[i].CreateDate = results[i].CreateDate
		nodes[i].ProjectID = results[i].ProjectID
		nodes[i].ProjectName = results[i].ProjectName
		nodes[i].ServerName = results[i].ServerName
		nodes[i].ClusterName = results[i].ClusterName
	}
	w.WriteJson(&nodes)
}
// GetAllNodes returns every container known to the admin database.
// Status is not populated here (listing is metadata-only).
func GetAllNodes(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		// FIX: log label was "BackupNow", a copy-paste from another handler.
		logit.Error.Println("GetAllNodes: error " + err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}
	defer dbConn.Close()
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-read")
	if err != nil {
		logit.Error.Println("GetAllNodes: validate token error " + err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	results, err := admindb.GetAllContainers(dbConn)
	if err != nil {
		logit.Error.Println("GetAllNodes: " + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		// BUG FIX: the original fell through and wrote a JSON body on top of
		// the error response; return immediately instead.
		return
	}
	// Copy the DB rows into the wire representation.
	nodes := make([]ClusterNode, len(results))
	for i := range results {
		nodes[i].ID = results[i].ID
		nodes[i].Name = results[i].Name
		nodes[i].ClusterID = results[i].ClusterID
		nodes[i].ServerID = results[i].ServerID
		nodes[i].Role = results[i].Role
		nodes[i].Image = results[i].Image
		nodes[i].CreateDate = results[i].CreateDate
		nodes[i].ProjectID = results[i].ProjectID
		nodes[i].ProjectName = results[i].ProjectName
		nodes[i].ServerName = results[i].ServerName
		nodes[i].ClusterName = results[i].ClusterName
	}
	w.WriteJson(&nodes)
}
// GetAllNodesNotInCluster returns all containers that are not currently
// assigned to any cluster. Status is not populated here.
func GetAllNodesNotInCluster(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		// FIX: log label was "BackupNow", a copy-paste from another handler.
		logit.Error.Println("GetAllNodesNotInCluster: error " + err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}
	defer dbConn.Close()
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-read")
	if err != nil {
		logit.Error.Println("GetAllNodesNotInCluster: validate token error " + err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	results, err := admindb.GetAllContainersNotInCluster(dbConn)
	if err != nil {
		logit.Error.Println("GetAllNodesNotInCluster: " + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		// BUG FIX: the original fell through and wrote a JSON body on top of
		// the error response; return immediately instead.
		return
	}
	// Copy the DB rows into the wire representation.
	nodes := make([]ClusterNode, len(results))
	for i := range results {
		nodes[i].ID = results[i].ID
		nodes[i].Name = results[i].Name
		nodes[i].ClusterID = results[i].ClusterID
		nodes[i].ServerID = results[i].ServerID
		nodes[i].Role = results[i].Role
		nodes[i].Image = results[i].Image
		nodes[i].CreateDate = results[i].CreateDate
		nodes[i].ProjectID = results[i].ProjectID
		nodes[i].ProjectName = results[i].ProjectName
		nodes[i].ServerName = results[i].ServerName
		nodes[i].ClusterName = results[i].ClusterName
	}
	w.WriteJson(&nodes)
}
// GetAllNodesForCluster returns all containers belonging to the given
// cluster ID. Status is not populated here.
func GetAllNodesForCluster(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		// FIX: log label was "BackupNow", a copy-paste from another handler.
		logit.Error.Println("GetAllNodesForCluster: error " + err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}
	defer dbConn.Close()
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-read")
	if err != nil {
		logit.Error.Println("GetAllNodesForCluster: validate token error " + err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	ClusterID := r.PathParam("ClusterID")
	if ClusterID == "" {
		logit.Error.Println("GetAllNodesForCluster: node ClusterID required")
		rest.Error(w, "node ClusterID required", http.StatusBadRequest)
		return
	}
	results, err := admindb.GetAllContainersForCluster(dbConn, ClusterID)
	if err != nil {
		logit.Error.Println("GetAllNodesForCluster:" + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		// BUG FIX: the original fell through and wrote a JSON body on top of
		// the error response; return immediately instead.
		return
	}
	// Copy the DB rows into the wire representation.
	nodes := make([]ClusterNode, len(results))
	for i := range results {
		nodes[i].ID = results[i].ID
		nodes[i].Name = results[i].Name
		nodes[i].ClusterID = results[i].ClusterID
		nodes[i].ServerID = results[i].ServerID
		nodes[i].Role = results[i].Role
		nodes[i].Image = results[i].Image
		nodes[i].CreateDate = results[i].CreateDate
		nodes[i].ProjectID = results[i].ProjectID
		nodes[i].ProjectName = results[i].ProjectName
		nodes[i].ServerName = results[i].ServerName
		nodes[i].ClusterName = results[i].ClusterName
	}
	w.WriteJson(&nodes)
}
/*
TODO refactor this to share code with DeleteCluster!!!!!
*/
// DeleteNode removes a container ("node") by ID: it deletes the admin DB
// record, then best-effort removes the Docker container and its data volume
// on the owning server. Responds 401 on bad token, 400 on DB failures, and
// OK even when the container/volume removal fails (see notes below).
func DeleteNode(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		// NOTE(review): log label "BackupNow" looks like a copy-paste from
		// another handler.
		logit.Error.Println("BackupNow: error " + err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}
	defer dbConn.Close()
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-container")
	if err != nil {
		logit.Error.Println("DeleteNode: validate token error " + err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	ID := r.PathParam("ID")
	if ID == "" {
		logit.Error.Println("DeleteNode: error node ID required")
		rest.Error(w, "node ID required", http.StatusBadRequest)
		return
	}
	//go get the node we intend to delete
	var dbNode admindb.Container
	dbNode, err = admindb.GetContainer(dbConn, ID)
	if err != nil {
		logit.Error.Println("DeleteNode: " + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	//go get the docker server IPAddress
	server := admindb.Server{}
	server, err = admindb.GetServer(dbConn, dbNode.ServerID)
	if err != nil {
		logit.Error.Println("DeleteNode: " + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	var url = "http://" + server.IPAddress + ":10001"
	// Delete the DB record first; the container/volume cleanup below is
	// deliberately best-effort and does not roll this back.
	err = admindb.DeleteContainer(dbConn, ID)
	if err != nil {
		logit.Error.Println("DeleteNode: " + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	logit.Info.Println("got server IP " + server.IPAddress)
	//it is possible that someone can remove a container
	//outside of us, so we let it pass that we can't remove
	//it
	request := &cpmserverapi.DockerRemoveRequest{}
	request.ContainerName = dbNode.Name
	_, err = cpmserverapi.DockerRemoveClient(url, request)
	if err != nil {
		logit.Error.Println("DeleteNode: error when trying to remove container " + err.Error())
	}
	//send the server a deletevolume command
	request2 := &cpmserverapi.DiskDeleteRequest{}
	request2.Path = server.PGDataPath + "/" + dbNode.Name
	_, err = cpmserverapi.DiskDeleteClient(url, request2)
	if err != nil {
		// Volume deletion failure is only logged, never surfaced to the caller.
		fmt.Println(err.Error())
	}
	//we should not have to delete the DNS entries because
	//of the dnsbridge, it should remove them when we remove
	//the containers via the docker api
	w.WriteHeader(http.StatusOK)
	status := SimpleStatus{}
	status.Status = "OK"
	w.WriteJson(&status)
}
// GetAllNodesForServer returns all containers on a server, with each node's
// live Status filled in by asking the server's Docker agent (port 10001) to
// inspect the container ("down"/"notfound"/agent-reported running state).
func GetAllNodesForServer(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		// FIX: log label was "BackupNow", a copy-paste from another handler.
		logit.Error.Println("GetAllNodesForServer: error " + err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}
	defer dbConn.Close()
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-read")
	if err != nil {
		logit.Error.Println("GetAllNodesForServer: validate token error " + err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	serverID := r.PathParam("ServerID")
	if serverID == "" {
		logit.Error.Println("GetAllNodesForServer: error serverID required")
		rest.Error(w, "serverID required", http.StatusBadRequest)
		return
	}
	results, err := admindb.GetAllContainersForServer(dbConn, serverID)
	if err != nil {
		logit.Error.Println("GetAllNodesForServer:" + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	server, err2 := admindb.GetServer(dbConn, serverID)
	if err2 != nil {
		logit.Error.Println("GetAllNodesForServer:" + err2.Error())
		rest.Error(w, err2.Error(), http.StatusBadRequest)
		return
	}
	url := "http://" + server.IPAddress + ":10001"
	nodes := make([]ClusterNode, len(results))
	// NOTE(review): unlike the other listing handlers, ClusterName is not
	// copied here — confirm whether that omission is intentional.
	for i := range results {
		nodes[i].ID = results[i].ID
		nodes[i].Name = results[i].Name
		nodes[i].ClusterID = results[i].ClusterID
		nodes[i].ServerID = results[i].ServerID
		nodes[i].Role = results[i].Role
		nodes[i].Image = results[i].Image
		nodes[i].CreateDate = results[i].CreateDate
		nodes[i].ProjectID = results[i].ProjectID
		nodes[i].ProjectName = results[i].ProjectName
		nodes[i].ServerName = results[i].ServerName
		nodes[i].Status = "down"
		request := &cpmserverapi.DockerInspectRequest{}
		request.ContainerName = results[i].Name
		response, e := cpmserverapi.DockerInspectClient(url, request)
		logit.Info.Println("GetAllNodesForServer:" + results[i].Name + " " + response.IPAddress + " " + response.RunningState)
		if e != nil {
			// Best-effort: a missing container is reported, not fatal.
			logit.Error.Println("GetAllNodesForServer:" + e.Error())
			nodes[i].Status = "notfound"
		} else {
			logit.Info.Println("GetAllNodesForServer: setting " + results[i].Name + " to " + response.RunningState)
			nodes[i].Status = response.RunningState
		}
	}
	w.WriteJson(&nodes)
}
// AdminStartNode starts the Docker container for the node with the given ID
// via the owning server's Docker agent on port 10001. A start failure is
// only logged; the handler still responds OK (best-effort semantics).
func AdminStartNode(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		// NOTE(review): log label "BackupNow" looks like a copy-paste from
		// another handler.
		logit.Error.Println("BackupNow: error " + err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}
	defer dbConn.Close()
	// NOTE(review): this mutating operation authorizes with "perm-read" —
	// confirm whether a stronger permission was intended.
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-read")
	if err != nil {
		logit.Error.Println("AdminStartNode: validate token error " + err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	ID := r.PathParam("ID")
	if ID == "" {
		logit.Error.Println("AdminStartNode: error ID required")
		rest.Error(w, "ID required", http.StatusBadRequest)
		return
	}
	node, err := admindb.GetContainer(dbConn, ID)
	if err != nil {
		logit.Error.Println("AdminStartNode:" + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Resolve the server that hosts this container to reach its agent.
	server := admindb.Server{}
	server, err = admindb.GetServer(dbConn, node.ServerID)
	if err != nil {
		logit.Error.Println("AdminStartNode:" + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	var url = "http://" + server.IPAddress + ":10001"
	var response cpmserverapi.DockerStartResponse
	request := &cpmserverapi.DockerStartRequest{}
	request.ContainerName = node.Name
	response, err = cpmserverapi.DockerStartClient(url, request)
	if err != nil {
		// Best-effort: failure to start is logged but not returned as an error.
		logit.Error.Println("AdminStartNode: error when trying to start container " + err.Error())
	}
	logit.Info.Println(response.Output)
	w.WriteHeader(http.StatusOK)
	status := SimpleStatus{}
	status.Status = "OK"
	w.WriteJson(&status)
}
// AdminStopNode stops the Docker container for the node with the given ID
// via the owning server's Docker agent on port 10001. A stop failure is
// only logged; the handler still responds OK (best-effort semantics).
func AdminStopNode(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		// NOTE(review): log label "BackupNow" looks like a copy-paste from
		// another handler.
		logit.Error.Println("BackupNow: error " + err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}
	defer dbConn.Close()
	// NOTE(review): this mutating operation authorizes with "perm-read" —
	// confirm whether a stronger permission was intended.
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-read")
	if err != nil {
		logit.Error.Println("AdminStopNode: validate token error " + err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	ID := r.PathParam("ID")
	if ID == "" {
		logit.Error.Println("AdminStopNode: error ID required")
		rest.Error(w, "ID required", http.StatusBadRequest)
		return
	}
	node, err := admindb.GetContainer(dbConn, ID)
	if err != nil {
		logit.Error.Println("AdminStopNode:" + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Resolve the server that hosts this container to reach its agent.
	server := admindb.Server{}
	server, err = admindb.GetServer(dbConn, node.ServerID)
	if err != nil {
		logit.Error.Println("AdminStopNode:" + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	request := &cpmserverapi.DockerStopRequest{}
	request.ContainerName = node.Name
	var url = "http://" + server.IPAddress + ":10001"
	_, err = cpmserverapi.DockerStopClient(url, request)
	if err != nil {
		// Best-effort: failure to stop is logged but not returned as an error.
		logit.Error.Println("AdminStopNode error when trying to stop container " + err.Error())
	}
	w.WriteHeader(http.StatusOK)
	status := SimpleStatus{}
	status.Status = "OK"
	w.WriteJson(&status)
}
// GetPGStatus2 pings the PostgreSQL instance inside a container and reports
// its status: "RUNNING" when a trivial query succeeds, "OFFLINE" when the
// query fails, or ("", err) when fetching credentials/settings or opening
// the monitoring connection fails.
func GetPGStatus2(dbConn *sql.DB, nodename string, hostname string) (string, error) {
	//fetch cpmtest user credentials
	nodeuser, err := admindb.GetContainerUser(dbConn, nodename, "cpmtest")
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}
	// SECURITY FIX: the original logged the cpmtest password at Info level;
	// credentials must never be written to logs.
	var pgport admindb.Setting
	pgport, err = admindb.GetSetting(dbConn, "PG-PORT")
	if err != nil {
		// BUG FIX: this error was previously ignored, letting a bad/missing
		// setting flow into the connection attempt.
		logit.Error.Println(err.Error())
		return "", err
	}
	dbConn2, err := util.GetMonitoringConnection(hostname, "cpmtest", pgport.Value, "cpmtest", nodeuser.Passwd)
	if err != nil {
		logit.Error.Println(err.Error())
		return "", err
	}
	// BUG FIX: defer Close only after the error check so we never close a
	// potentially-nil connection.
	defer dbConn2.Close()
	var value string
	// A constant query string — no Sprintf needed.
	err = dbConn2.QueryRow("select now()::text").Scan(&value)
	switch {
	case err == sql.ErrNoRows:
		logit.Info.Println("getpgstatus 2 no rows returned")
		return "OFFLINE", nil
	case err != nil:
		logit.Info.Println("getpgstatus2 error " + err.Error())
		return "OFFLINE", nil
	default:
		logit.Info.Println("getpgstatus2 returned " + value)
	}
	return "RUNNING", nil
}
// AdminStartServerContainers starts every container registered on the given
// server, best-effort: individual start failures are logged, not fatal,
// since containers can be removed outside of CPM's control.
func AdminStartServerContainers(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		// FIX: log label was "BackupNow", a copy-paste from another handler.
		logit.Error.Println("AdminStartServerContainers: error " + err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}
	defer dbConn.Close()
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-read")
	if err != nil {
		logit.Error.Println("AdminStartServerContainers: validate token error " + err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	//serverID
	serverid := r.PathParam("ID")
	if serverid == "" {
		logit.Error.Println("AdminStartServerContainers: error ID required")
		rest.Error(w, "ID required", http.StatusBadRequest)
		return
	}
	containers, err := admindb.GetAllContainersForServer(dbConn, serverid)
	if err != nil {
		logit.Error.Println("AdminStartServerContainers:" + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	//for each, get server, start container
	//use a 'best effort' approach here since containers
	//can be removed outside of CPM's control
	var url string
	for i := range containers {
		//fetch the server
		server := admindb.Server{}
		server, err = admindb.GetServer(dbConn, containers[i].ServerID)
		if err != nil {
			logit.Error.Println("AdminStartServerContainers:" + err.Error())
			rest.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		//start the container
		// FIX: removed the inner `var err error`, which shadowed the outer
		// err for no benefit and tripped vet/shadow analysis.
		var response cpmserverapi.DockerStartResponse
		request := &cpmserverapi.DockerStartRequest{}
		request.ContainerName = containers[i].Name
		url = "http://" + server.IPAddress + ":10001"
		response, err = cpmserverapi.DockerStartClient(url, request)
		if err != nil {
			logit.Error.Println("AdminStartServerContainers: error when trying to start container " + err.Error())
		}
		logit.Info.Println(response.Output)
	}
	w.WriteHeader(http.StatusOK)
	status := SimpleStatus{}
	status.Status = "OK"
	w.WriteJson(&status)
}
// AdminStopServerContainers stops every container on the given server: it
// first sends an in-container PostgreSQL/pgpool stop command, waits briefly,
// then stops the Docker container (best-effort) via the server agent.
func AdminStopServerContainers(w rest.ResponseWriter, r *rest.Request) {
	dbConn, err := util.GetConnection(CLUSTERADMIN_DB)
	if err != nil {
		// FIX: log label was "BackupNow", a copy-paste from another handler.
		logit.Error.Println("AdminStopServerContainers: error " + err.Error())
		rest.Error(w, err.Error(), 400)
		return
	}
	defer dbConn.Close()
	err = secimpl.Authorize(dbConn, r.PathParam("Token"), "perm-read")
	if err != nil {
		logit.Error.Println("AdminStopServerContainers: validate token error " + err.Error())
		rest.Error(w, err.Error(), http.StatusUnauthorized)
		return
	}
	//serverID
	serverid := r.PathParam("ID")
	if serverid == "" {
		// FIX: log said "AdminStopoServerContainers" (typo).
		logit.Error.Println("AdminStopServerContainers: error ID required")
		rest.Error(w, "ID required", http.StatusBadRequest)
		return
	}
	//fetch the server
	containers, err := admindb.GetAllContainersForServer(dbConn, serverid)
	if err != nil {
		logit.Error.Println("AdminStopServerContainers:" + err.Error())
		rest.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	var url string
	//for each, get server, stop container
	for i := range containers {
		server := admindb.Server{}
		server, err = admindb.GetServer(dbConn, containers[i].ServerID)
		if err != nil {
			logit.Error.Println("AdminStopServerContainers:" + err.Error())
			rest.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		//send stop command before stopping container
		if containers[i].Role == "pgpool" {
			var stoppoolResp cpmcontainerapi.StopPgpoolResponse
			stoppoolResp, err = cpmcontainerapi.StopPgpoolClient(containers[i].Name)
			logit.Info.Println("AdminStoppg:" + stoppoolResp.Output)
		} else {
			var stopResp cpmcontainerapi.StopPGResponse
			stopResp, err = cpmcontainerapi.StopPGClient(containers[i].Name)
			logit.Info.Println("AdminStoppg:" + stopResp.Output)
		}
		if err != nil {
			logit.Error.Println("AdminStopServerContainers:" + err.Error())
			rest.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		// Give the database a moment to shut down before killing the container.
		time.Sleep(2000 * time.Millisecond)
		//stop container
		request := &cpmserverapi.DockerStopRequest{}
		request.ContainerName = containers[i].Name
		url = "http://" + server.IPAddress + ":10001"
		_, err = cpmserverapi.DockerStopClient(url, request)
		if err != nil {
			// FIX: message said "trying to start" in the stop path.
			logit.Error.Println("AdminStopServerContainers: error when trying to stop container " + err.Error())
		}
	}
	w.WriteHeader(http.StatusOK)
	status := SimpleStatus{}
	status.Status = "OK"
	w.WriteJson(&status)
}
|
package main
import (
"log"
"os"
"os/signal"
"syscall"
"github.com/dmitry-vovk/csv-chg-go/api"
"github.com/dmitry-vovk/csv-chg-go/config"
"github.com/dmitry-vovk/csv-chg-go/source"
"github.com/dmitry-vovk/csv-chg-go/worker"
)
// main wires together configuration, the API client, and the worker pool,
// then runs the worker until it finishes or an OS signal requests shutdown.
func main() {
	conf := config.MustLoad()

	// Assemble the worker with its API client and runtime options.
	wrk := worker.New(api.New(conf.APIURL)).
		WithWorkersCount(conf.Workers).
		WithInterval(conf.Interval)

	// Feed the input UUIDs into the worker before it starts.
	if err := source.ReadAny(conf.CSVFile, wrk.ReadUUIDs); err != nil {
		log.Fatalf("Error reading source file: %s", err)
	}

	// Trigger a graceful shutdown on SIGINT/SIGTERM.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		sig := <-sigCh
		log.Printf("Got %s, initiating shutdown...", sig)
		wrk.Shutdown()
	}()

	log.Printf("Worker started")
	wrk.Run()
	log.Printf("Worker exited")
}
|
package resources
import (
"os"
"path"
"testing"
)
// TestConf configures the package logger with a file sink in the working
// directory and emits two sample records at different levels.
func TestConf(t *testing.T) {
	cwd, _ := os.Getwd()
	logPath := path.Join(cwd, "test.log")
	Conf(logPath)
	Logger.Debugf("hello debug.")
	Logger.Infof("hello info.")
}
|
package client
import (
exchange "github.com/preichenberger/go-coinbase-exchange"
)
// GDAXClient is a thin wrapper around the go-coinbase-exchange client.
type GDAXClient struct {
	// Client is the underlying exchange API client.
	Client *exchange.Client
}
// NewGDAXClient wraps an existing exchange client in a GDAXClient.
func NewGDAXClient(client *exchange.Client) GDAXClient {
	return GDAXClient{Client: client}
}
|
package main
import "fmt"
/**
Object orientation in Go:
1. Encapsulation: enforced through identifier visibility (exported vs. unexported names).
2. Inheritance: achieved via composition — embedding structs inside structs.
3. Polymorphism: achieved via interfaces — loosely coupled, implicitly satisfied ("duck typing").
*/
// Seller is anything that can sell and describe its goods.
type Seller interface {
	sell() string
}

// BusinessMan embeds Seller and additionally knows how to purchase stock.
type BusinessMan interface {
	Seller
	purchase() string
}
// FruitSeller sells a single kind of fruit.
type FruitSeller struct {
	fruitType string
}

// ShoeSeller sells a single kind of shoe.
type ShoeSeller struct {
	shoeType string
}
// main demonstrates interface polymorphism: FruitSeller satisfies Seller
// with a value receiver, ShoeSeller with a pointer receiver (hence &).
func main() {
	sellers := []Seller{FruitSeller{"apple"}, &ShoeSeller{"nike"}}
	for i := range sellers {
		fmt.Println(sellers[i].sell())
	}
}
// sell returns the fruit this seller offers (value receiver).
func (fruit FruitSeller) sell() string {
	return fruit.fruitType
}
// sell returns the shoe this seller offers (pointer receiver, so only
// *ShoeSeller satisfies Seller).
func (shoe *ShoeSeller) sell() string {
	return shoe.shoeType
}
|
package main
import (
"github.com/hyperledger/fabric/core/chaincode/shim"
"github.com/hyperledger/fabric/protos/peer"
"fmt"
)
// Simpleasset implements a simple chaincode to manage an asset
// (key/value pairs stored on the ledger).
type Simpleasset struct{}
// Init is called during chaincode instantiation. It expects exactly two
// arguments (key, value) and stores them as the initial ledger state.
func (t *Simpleasset) Init(stub shim.ChaincodeStubInterface) peer.Response {
	args := stub.GetStringArgs()
	if len(args) != 2 {
		return shim.Error("Invalid expecting key value")
	}
	err := stub.PutState(args[0], []byte(args[1]))
	if err != nil {
		// FIX: error message previously read "failes to createasset".
		return shim.Error(fmt.Sprintf("failed to create asset: %s", args[0]))
	}
	return shim.Success(nil)
}
// Invoke dispatches "set" to the set handler; any other function name is
// treated as a get.
func (t *Simpleasset) Invoke(stub shim.ChaincodeStubInterface) peer.Response {
	fn, args := stub.GetFunctionAndParameters()

	var result string
	var err error
	switch fn {
	case "set":
		result, err = set(stub, args)
	default:
		result, err = get(stub, args)
	}
	if err != nil {
		return shim.Error(err.Error())
	}
	return shim.Success([]byte(result))
}
// Set stores the asset (both key and value) on the ledger. If the key exists,
// it will override the value with the new one
func set(stub shim.ChaincodeStubInterface, args []string) (string, error) {
	if len(args) != 2 {
		return "", fmt.Errorf("Insufficient Arguments. Expecting a key value pair")
	}
	if err := stub.PutState(args[0], []byte(args[1])); err != nil {
		return "", fmt.Errorf("Failed to set asset: %s", args[0])
	}
	return args[1], nil
}
// Get returns the value of the specified asset key, or an error when the
// lookup fails or the key does not exist.
func get(stub shim.ChaincodeStubInterface, args []string) (string, error) {
	if len(args) != 1 {
		return "", fmt.Errorf("Insufficient Arguments. Expecting a key value pair")
	}
	value, err := stub.GetState(args[0])
	if err != nil {
		// FIX: this is a read, so report a failed get (was "Failed to set asset").
		return "", fmt.Errorf("Failed to get asset: %s with error: %s", args[0], err)
	}
	// BUG FIX: the condition was inverted (`value != nil` treated as "not
	// found"), and the success path returned args[1], which is out of range
	// because len(args) == 1. Return the stored value instead.
	if value == nil {
		return "", fmt.Errorf("Asset: not found: %s", args[0])
	}
	return string(value), nil
}
// main function starts up the chaincode in the container during instantiate
func main() {
	err := shim.Start(new(Simpleasset))
	if err != nil {
		fmt.Printf("Error starting SimpleAsset chaincode: %s", err)
	}
}
|
package main
import (
"container/list"
"flag"
"fmt"
"io/ioutil"
"strings"
)
// main reads the input file named by the first CLI argument and prints the
// social-network size of each test word against the dictionary.
func main() {
	flag.Parse()
	// Panics if no argument is supplied — input file is required.
	testWords, dictionary := parseInputFile(flag.Args()[0])
	for _, word := range testWords {
		fmt.Println(sizeOfSocialNetwork(word, dictionary))
	}
}
// file parsing

// parseInputFile splits the file into (test words, dictionary): non-blank
// lines before the "END OF INPUT" marker are test words, lines after it are
// the dictionary. Panics when the file cannot be read.
func parseInputFile(filename string) ([]string, []string) {
	raw, err := ioutil.ReadFile(filename)
	if err != nil {
		panic(err)
	}
	lines := removeBlankLines(strings.Split(string(raw), "\n"))
	split := getIndexOfEOI(lines)
	return lines[:split], lines[split+1:]
}
// removeBlankLines returns the input lines with all empty strings removed.
// Improvement: the original built a container/list and converted it back to
// a slice; a single pre-sized slice append does the same work directly.
func removeBlankLines(in []string) []string {
	out := make([]string, 0, len(in))
	for _, line := range in {
		if line != "" {
			out = append(out, line)
		}
	}
	return out
}
func listToStringSlice(l *list.List) []string {
result := make([]string, l.Len())
el := l.Front()
for i := range result {
result[i] = el.Value.(string)
el = el.Next()
}
return result
}
// getIndexOfEOI returns the index of the "END OF INPUT" marker line, or -1
// when it is absent.
func getIndexOfEOI(lines []string) int {
	for i, line := range lines {
		if line == "END OF INPUT" {
			return i
		}
	}
	return -1
}
// levenshtein distance calculation

// hasLevenshteinEq1 reports whether the two words are exactly one edit
// (insertion, deletion, or substitution) apart.
func hasLevenshteinEq1(word1, word2 string) bool {
	switch {
	case has1Addition(word1, word2):
		return true
	case has1Subtraction(word1, word2):
		return true
	default:
		return has1Substitution(word1, word2)
	}
}
// has1Addition reports whether word2 can be formed from word1 by inserting
// exactly one character (i.e. deleting some character of word2 yields word1).
func has1Addition(word1, word2 string) bool {
	if len(word2)-len(word1) != 1 {
		return false
	}
	for i := 0; i < len(word2); i++ {
		// Drop the character at position i and compare.
		candidate := word2[:i] + word2[i+1:]
		if candidate == word1 {
			return true
		}
	}
	return false
}
// has1Subtraction reports whether word2 is word1 with exactly one character
// removed. Removing a character from word1 is the mirror image of adding one
// to word2, so delegate with the arguments swapped.
func has1Subtraction(word1, word2 string) bool {
	return has1Addition(word2, word1)
}
// has1Substitution reports whether the two equal-length words differ at
// exactly one byte position.
func has1Substitution(word1, word2 string) bool {
	if len(word1) != len(word2) {
		return false
	}
	diffs := 0
	for i := 0; i < len(word1); i++ {
		if word1[i] != word2[i] {
			diffs++
			if diffs > 1 {
				return false
			}
		}
	}
	return diffs == 1
}
// social network
func sizeOfSocialNetwork(word string, dict []string) int {
l := list.New()
el := l.PushFront(word)
for el != nil {
for _, w := range dict {
if listContains(l, w) {
continue
}
if hasLevenshteinEq1(el.Value.(string), w) {
l.PushBack(w)
}
}
el = el.Next()
}
return l.Len()
}
func listContains(l *list.List, word string) bool {
el := l.Front()
for el != nil {
if el.Value.(string) == word {
return true
}
el = el.Next()
}
return false
}
|
package rpc
import (
"context"
v1 "github.com/tinkerbell/pbnj/api/v1"
"github.com/tinkerbell/pbnj/pkg/logging"
"github.com/tinkerbell/pbnj/pkg/task"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// TaskService for retrieving task details.
type TaskService struct {
	// Log produces context-aware loggers for each request.
	Log logging.Logger
	// TaskRunner is queried for task records by ID.
	TaskRunner task.Task
	v1.UnimplementedTaskServer
}
// Status returns a task record.
//
// When the task has a recorded error, the response is returned TOGETHER
// with a non-nil gRPC status error carrying the record's code (or Unknown
// when the record's code is OK). On success the error is status OK, which
// gRPC treats as nil.
func (t *TaskService) Status(ctx context.Context, in *v1.StatusRequest) (*v1.StatusResponse, error) {
	l := t.Log.GetContextLogger(ctx)
	l.Info("start Status request", "taskID", in.TaskId)
	record, err := t.TaskRunner.Status(ctx, in.TaskId)
	if err != nil {
		// Unknown task ID (or lookup failure) maps to NotFound.
		return nil, status.Error(codes.NotFound, err.Error())
	}
	// Derive the response code from the record's embedded error, if any.
	c := codes.OK
	if record.Error.Message != "" {
		if codes.Code(record.Error.Code) != codes.OK {
			c = codes.Code(record.Error.Code)
		} else {
			// An error message with an OK code is contradictory; report Unknown.
			c = codes.Unknown
		}
	}
	// NOTE(review): Error is deliberately nil here — the record's error is
	// conveyed via the returned status instead; confirm clients expect this.
	return &v1.StatusResponse{
		Id:          record.ID,
		Description: record.Description,
		Error:       nil,
		State:       record.State,
		Result:      record.Result,
		Complete:    record.Complete,
		Messages:    record.Messages,
	}, status.Error(c, record.Error.Message)
}
|
package main
import (
"fmt"
"time"
)
/**
* created: 2019/5/8 10:24
* By Will Fan
*/
// main launches three goroutines that each send a value into a shared
// channel, then prints the first value received (nondeterministic: whichever
// goroutine runs first).
func main() {
	// BUG FIX: the channel was declared but never made (`var ch chan int`),
	// so every send and the receive blocked on a nil channel and the program
	// deadlocked ("all goroutines are asleep"). A buffer of 3 also lets the
	// two unread senders complete instead of blocking forever.
	ch := make(chan int, 3)
	for i := 0; i < 3; i++ {
		go func(idx int) {
			ch <- (idx + 1) * 2
		}(i)
	}
	//
	fmt.Println("result:", <-ch)
	// Give the remaining goroutines time to finish before main exits.
	time.Sleep(2 * time.Second)
}
|
package controller
import "github.com/therecipe/qt/core"
// Controller is the package-level singleton, assigned by init() when Qt
// constructs the ThemeController.
var Controller *ThemeController

// ThemeController exposes theme colors to QML. The blank-identifier fields
// are therecipe/qt code-generation directives (properties, signals, and the
// constructor hook), not real struct fields.
type ThemeController struct {
	core.QObject
	_ func() `constructor:"init"`
	// "name" is the active theme name; changing it re-emits every color property.
	_ string `property:"name,auto,changed"`
	_ string `property:"accent,auto,get"`
	_ string `property:"nextAccent,auto,get"`
	_ string `property:"background,auto,get"`
	_ string `property:"darkBackground,auto,get"`
	_ string `property:"walletTableHeader,auto,get"`
	_ string `property:"walletTableAlternate,auto,get"`
	_ string `property:"walletTableHighlight,auto,get"`
	_ string `property:"inputFieldBackground,auto,get"`
	_ string `property:"font,auto,get"`
	_ string `property:"fontHighlight,auto,get"`
	_ func() `signal:"change,auto"`
	_ func() `signal:"hide"`
}
// init is invoked by Qt as the constructor hook: it publishes the singleton,
// selects the default theme, and prepares the color picker dialog.
func (c *ThemeController) init() {
	Controller = c
	c.SetName("dark")
	initColorDialog()
}
// nameChanged re-emits every color property's changed signal so QML
// bindings refresh when the theme name switches.
func (c *ThemeController) nameChanged(string) {
	c.AccentChanged(c.Accent())
	c.NextAccentChanged(c.NextAccent())
	c.BackgroundChanged(c.Background())
	c.DarkBackgroundChanged(c.DarkBackground())
	c.WalletTableHeaderChanged(c.WalletTableHeader())
	c.WalletTableAlternateChanged(c.WalletTableAlternate())
	c.WalletTableHighlightChanged(c.WalletTableHighlight())
	c.InputFieldBackgroundChanged(c.InputFieldBackground())
	c.FontChanged(c.Font())
	c.FontHighlightChanged(c.FontHighlight())
}
// accent returns the explicit accent override when set, otherwise the
// theme's accent color.
func (c *ThemeController) accent() string {
	if override := c.AccentDefault(); override != "" {
		return override
	}
	// Both the "dark" theme and the fallback currently share this color.
	return "#00CA9F"
}
// nextAccent returns the explicit override when set, otherwise the theme's
// secondary accent color.
func (c *ThemeController) nextAccent() string {
	if override := c.NextAccentDefault(); override != "" {
		return override
	}
	// Both the "dark" theme and the fallback currently share this color.
	return "#00CA9F"
}
// background returns the explicit override when set, otherwise the theme's
// main background color.
func (c *ThemeController) background() string {
	if override := c.BackgroundDefault(); override != "" {
		return override
	}
	if c.Name() == "dark" {
		return "#343C58"
	}
	return "#FFFFFF"
}
// darkBackground returns the explicit override when set, otherwise the
// theme's darker background shade.
func (c *ThemeController) darkBackground() string {
	if override := c.DarkBackgroundDefault(); override != "" {
		return override
	}
	if c.Name() == "dark" {
		return "#272E49"
	}
	return "#E6E9EE"
}
// walletTableHeader returns the explicit override when set, otherwise the
// theme's wallet-table header color.
func (c *ThemeController) walletTableHeader() string {
	if override := c.WalletTableHeaderDefault(); override != "" {
		return override
	}
	if c.Name() == "dark" {
		return "#4C547A"
	}
	return "#18CCA1"
}
// walletTableAlternate returns the explicit override when set, otherwise the
// theme's alternating wallet-table row color.
func (c *ThemeController) walletTableAlternate() string {
	if override := c.WalletTableAlternateDefault(); override != "" {
		return override
	}
	if c.Name() == "dark" {
		return "#2F3655"
	}
	return "#3CD6B4"
}
// walletTableHighlight returns the explicit override when set, otherwise the
// theme's wallet-table highlight color.
func (c *ThemeController) walletTableHighlight() string {
	if override := c.WalletTableHighlightDefault(); override != "" {
		return override
	}
	if c.Name() == "dark" {
		return "#FDB06F"
	}
	return "white"
}
// inputFieldBackground returns the explicit override when set, otherwise the
// theme's input-field background color.
func (c *ThemeController) inputFieldBackground() string {
	if override := c.InputFieldBackgroundDefault(); override != "" {
		return override
	}
	if c.Name() == "dark" {
		return "#232942"
	}
	return "#747579"
}
// font returns the explicit override when set, otherwise the theme's
// standard text color.
func (c *ThemeController) font() string {
	if override := c.FontDefault(); override != "" {
		return override
	}
	if c.Name() == "dark" {
		return "#A9AEBE"
	}
	return "#B1B2B3"
}
// fontHighlight returns the explicit override when set, otherwise the
// theme's highlighted text color.
func (c *ThemeController) fontHighlight() string {
	if override := c.FontHighlightDefault(); override != "" {
		return override
	}
	if c.Name() == "dark" {
		return "white"
	}
	return "black"
}
// change handles the "change" signal: it currently opens the color picker
// dialog instead of toggling themes.
func (c *ThemeController) change() {
	colorDialog.Show()
	//TODO: there is no light theme yet
	/*
		if c.Name() == "dark" {
			c.SetName("light")
		} else {
			c.SetName("dark")
		}
	*/
}
|
package examples
import (
"fmt"
"io/ioutil"
"os"
)
// StartFile demonstrates basic file operations: create+write, open+read a
// fixed-size buffer, and whole-file read via ioutil.
func StartFile() {
	fmt.Println("\nРабота с файлами")
	create()
	openAndRead()
	readFile()
}
func create() {
f, err := os.Create("exp-1")
defer f.Close()
if err != nil {
panic(err)
}
count, err := f.WriteString("Hello world")
if err != nil {
fmt.Println(count)
panic(err)
}
}
func openAndRead() {
f, err := os.Open("exp-1")
defer f.Close()
if err != nil {
panic(err)
}
buffer := make([]byte, 100)
count, err := f.Read(buffer)
if err != nil {
panic(err)
}
text := buffer[:count]
fmt.Printf("Файл считан %s\n", text)
}
func readFile() {
bytes, err := ioutil.ReadFile("exp-1")
if err != nil {
panic(err)
}
text := string(bytes)
fmt.Printf("Функция ReadFile пакет ioutil %s\n", text)
}
|
package rest
import (
jwtmiddleware "github.com/jinmukeji/jiujiantang-services/pkg/rest/jwt"
analysispb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/analysis/v1"
corepb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/core/v1"
subscriptionpb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/subscription/v1"
jinmuidpb "github.com/jinmukeji/proto/v3/gen/micro/idl/partner/xima/user/v1"
"github.com/micro/go-micro/v2/client"
)
// v2Handler bundles the go-micro RPC clients and the JWT middleware used
// by the v2 REST API endpoints.
type v2Handler struct {
	rpcSvc corepb.XimaAPIService // core business API client
	jwtMiddleware *jwtmiddleware.Middleware // JWT authentication middleware
	rpcSubscriptionManagerSvc subscriptionpb.SubscriptionManagerAPIService // subscription management client
	rpcJinmuidSvc jinmuidpb.UserManagerAPIService // user management client
	rpcAnalysisSvc analysispb.AnalysisManagerAPIService // analysis service client
}
// Registered names of the backing go-micro services, resolved through
// service discovery when the clients are constructed.
const (
	rpcServiceName = "com.himalife.srv.svc-biz-core" // core business service
	rpcSubscriptionServiceName = "com.himalife.srv.svc-subscription" // subscription service
	rpcJinmuidServiceName = "com.himalife.srv.svc-jinmuid" // user/ID service
	rpcAnalysisServiceName = "com.himalife.srv.svc-analysis" // analysis service
)
// newV2Handler builds a v2Handler wired to the default go-micro client,
// creating one RPC client per backing service.
func newV2Handler(jwtMiddleware *jwtmiddleware.Middleware) *v2Handler {
	h := &v2Handler{jwtMiddleware: jwtMiddleware}
	h.rpcSvc = corepb.NewXimaAPIService(rpcServiceName, client.DefaultClient)
	h.rpcSubscriptionManagerSvc = subscriptionpb.NewSubscriptionManagerAPIService(rpcSubscriptionServiceName, client.DefaultClient)
	h.rpcJinmuidSvc = jinmuidpb.NewUserManagerAPIService(rpcJinmuidServiceName, client.DefaultClient)
	h.rpcAnalysisSvc = analysispb.NewAnalysisManagerAPIService(rpcAnalysisServiceName, client.DefaultClient)
	return h
}
|
package collectors
import (
"encoding/json"
"errors"
"fmt"
"time"
cclog "github.com/ClusterCockpit/cc-metric-collector/pkg/ccLogger"
lp "github.com/ClusterCockpit/cc-metric-collector/pkg/ccMetric"
"github.com/ClusterCockpit/go-rocm-smi/pkg/rocm_smi"
)
// RocmSmiCollectorConfig is the JSON configuration accepted by the
// ROCm SMI collector.
type RocmSmiCollectorConfig struct {
	ExcludeMetrics []string `json:"exclude_metrics,omitempty"` // metric names to skip when reading
	ExcludeDevices []string `json:"exclude_devices,omitempty"` // devices to skip, matched by index or PCI address
	AddPciInfoTag bool `json:"add_pci_info_tag,omitempty"` // add the PCI address as an extra tag
	UsePciInfoAsTypeId bool `json:"use_pci_info_as_type_id,omitempty"` // use the PCI address instead of the index as 'type-id'
	AddSerialMeta bool `json:"add_serial_meta,omitempty"` // add the GPU serial number to the meta information
}
// RocmSmiCollectorDevice holds the per-GPU state: the library handle plus
// the tags, meta information and exclusion set used when emitting metrics.
type RocmSmiCollectorDevice struct {
	device rocm_smi.DeviceHandle // handle from rocm_smi.DeviceGetHandleByIndex
	index int // monitor device index
	tags map[string]string // default tags
	meta map[string]string // default meta information
	excludeMetrics map[string]bool // copy of exclude metrics from config
}
// RocmSmiCollector reads GPU metrics via the ROCm SMI library for all
// monitored, non-excluded AMD GPUs.
type RocmSmiCollector struct {
	metricCollector
	config RocmSmiCollectorConfig // the configuration structure
	devices []RocmSmiCollectorDevice // per-GPU handles and metadata, filled in Init()
}
// Functions to implement MetricCollector interface
// Init(...), Read(...), Close()
// See: metricCollector.go
// Init initializes the ROCm SMI collector.
// Called once by the collector manager. It parses the JSON configuration,
// initializes the ROCm SMI library and builds the per-device state (tags,
// meta information, exclusion set) for every non-excluded GPU.
func (m *RocmSmiCollector) Init(config json.RawMessage) error {
	// Always set the name early in Init() to use it in cclog.Component* functions
	m.name = "RocmSmiCollector"
	// This is for later use, also call it early
	m.setup()

	// Read in the JSON configuration
	if len(config) > 0 {
		if err := json.Unmarshal(config, &m.config); err != nil {
			cclog.ComponentError(m.name, "Error reading config:", err.Error())
			return err
		}
	}

	if ret := rocm_smi.Init(); ret != rocm_smi.STATUS_SUCCESS {
		err := errors.New("failed to initialize ROCm SMI library")
		cclog.ComponentError(m.name, err.Error())
		return err
	}

	numDevs, ret := rocm_smi.NumMonitorDevices()
	if ret != rocm_smi.STATUS_SUCCESS {
		err := errors.New("failed to get number of GPUs from ROCm SMI library")
		cclog.ComponentError(m.name, err.Error())
		return err
	}

	// exclDev reports whether a device, identified either by its index
	// string or its PCI address, is listed in ExcludeDevices.
	exclDev := func(s string) bool {
		for _, excl := range m.config.ExcludeDevices {
			if excl == s {
				return true
			}
		}
		return false
	}

	m.devices = make([]RocmSmiCollectorDevice, 0)
	for i := 0; i < numDevs; i++ {
		str_i := fmt.Sprintf("%d", i)
		if exclDev(str_i) {
			continue
		}
		device, ret := rocm_smi.DeviceGetHandleByIndex(i)
		if ret != rocm_smi.STATUS_SUCCESS {
			err := fmt.Errorf("failed to get handle for GPU %d", i)
			cclog.ComponentError(m.name, err.Error())
			return err
		}
		pciInfo, ret := rocm_smi.DeviceGetPciInfo(device)
		if ret != rocm_smi.STATUS_SUCCESS {
			err := fmt.Errorf("failed to get PCI information for GPU %d", i)
			cclog.ComponentError(m.name, err.Error())
			return err
		}
		// Devices may also be excluded by their PCI address.
		pciId := fmt.Sprintf(
			"%08X:%02X:%02X.%X",
			pciInfo.Domain,
			pciInfo.Bus,
			pciInfo.Device,
			pciInfo.Function)
		if exclDev(pciId) {
			continue
		}
		dev := RocmSmiCollectorDevice{
			device: device,
			tags: map[string]string{
				"type":    "accelerator",
				"type-id": str_i,
			},
			meta: map[string]string{
				"source": m.name,
				"group":  "AMD",
			},
		}
		if m.config.UsePciInfoAsTypeId {
			dev.tags["type-id"] = pciId
		} else if m.config.AddPciInfoTag {
			dev.tags["pci_identifier"] = pciId
		}
		if m.config.AddSerialMeta {
			serial, ret := rocm_smi.DeviceGetSerialNumber(device)
			if ret != rocm_smi.STATUS_SUCCESS {
				// Not fatal: the device is still usable without a serial.
				cclog.ComponentError(m.name, "Unable to get serial number for device at index", i, ":", rocm_smi.StatusStringNoError(ret))
			} else {
				dev.meta["serial"] = serial
			}
		}
		// Copy the excluded metrics into a set for O(1) lookups in Read().
		dev.excludeMetrics = map[string]bool{}
		for _, e := range m.config.ExcludeMetrics {
			dev.excludeMetrics[e] = true
		}
		dev.index = i
		m.devices = append(m.devices, dev)
	}

	// Set this flag only if everything is initialized properly
	m.init = true
	return nil
}
// Read collects all metrics belonging to the ROCm SMI collector
// and sends them through the output channel to the collector manager.
// All per-device metrics share one emit helper instead of the previous
// 17 copy-pasted blocks; exclusion, tagging and error handling are
// identical for every metric.
func (m *RocmSmiCollector) Read(interval time.Duration, output chan lp.CCMetric) {
	timestamp := time.Now()

	for _, dev := range m.devices {
		metrics, ret := rocm_smi.DeviceGetMetrics(dev.device)
		if ret != rocm_smi.STATUS_SUCCESS {
			cclog.ComponentError(m.name, "Unable to get metrics for device at index", dev.index, ":", rocm_smi.StatusStringNoError(ret))
			continue
		}

		// send emits one metric with the device's default tags and meta
		// information unless the metric is excluded by configuration.
		send := func(name string, value interface{}) {
			if dev.excludeMetrics[name] {
				return
			}
			y, err := lp.New(name, dev.tags, dev.meta, map[string]interface{}{"value": value}, timestamp)
			if err == nil {
				output <- y
			}
		}

		send("rocm_gfx_util", metrics.Average_gfx_activity)
		send("rocm_umc_util", metrics.Average_umc_activity)
		send("rocm_mm_util", metrics.Average_mm_activity)
		send("rocm_avg_power", metrics.Average_socket_power)
		send("rocm_temp_mem", metrics.Temperature_mem)
		send("rocm_temp_hotspot", metrics.Temperature_hotspot)
		send("rocm_temp_edge", metrics.Temperature_edge)
		send("rocm_temp_vrgfx", metrics.Temperature_vrgfx)
		send("rocm_temp_vrsoc", metrics.Temperature_vrsoc)
		send("rocm_temp_vrmem", metrics.Temperature_vrmem)
		send("rocm_gfx_clock", metrics.Average_gfxclk_frequency)
		send("rocm_soc_clock", metrics.Average_socclk_frequency)
		send("rocm_u_clock", metrics.Average_uclk_frequency)
		send("rocm_v0_clock", metrics.Average_vclk0_frequency)
		send("rocm_v1_clock", metrics.Average_vclk1_frequency)
		send("rocm_d0_clock", metrics.Average_dclk0_frequency)
		send("rocm_d1_clock", metrics.Average_dclk1_frequency)

		// HBM temperatures are reported per instance and carry additional
		// stype/stype-id tags identifying the HBM stack.
		if !dev.excludeMetrics["rocm_temp_hbm"] {
			for i := 0; i < rocm_smi.NUM_HBM_INSTANCES; i++ {
				y, err := lp.New("rocm_temp_hbm", dev.tags, dev.meta, map[string]interface{}{"value": metrics.Temperature_hbm[i]}, timestamp)
				if err == nil {
					y.AddTag("stype", "device")
					y.AddTag("stype-id", fmt.Sprintf("%d", i))
					output <- y
				}
			}
		}
	}
}
// Close shuts down the ROCm SMI library and marks the collector as
// uninitialized. Called once by the collector manager.
func (m *RocmSmiCollector) Close() {
	m.init = false
	if ret := rocm_smi.Shutdown(); ret != rocm_smi.STATUS_SUCCESS {
		cclog.ComponentError(m.name, "Failed to shutdown ROCm SMI library")
	}
}
|
package session
import (
"go/internal/pkg/api/app/request"
"go/internal/pkg/response"
"net/http"
"github.com/gin-gonic/gin"
)
// CreateHandler registers a new session for the authenticated user.
// The user's name and email come from the JWT claims placed in the context
// by the auth middleware; they are assigned AFTER binding the JSON body so
// a client cannot spoof them in the request payload (previously the bind
// ran last and could overwrite the claim values).
func (h *sessionHandler) CreateHandler(ctx *gin.Context) {
	var req request.SessionRequest
	if err := ctx.ShouldBindJSON(&req); err != nil {
		response.NewErrorResponse(ctx, http.StatusBadRequest, err, "failed to register session data")
		return
	}

	// Claim values always win over whatever the body contained.
	req.Name = ctx.GetString("userName")
	req.Email = ctx.GetString("userEmail")

	resp, err := h.uc.CreateUseCase(req)
	if err != nil {
		response.NewErrorResponse(ctx, http.StatusUnprocessableEntity, err, "failed to register session data")
		return
	}
	response.NewSuccessResponse(ctx, http.StatusOK, resp)
}
|
package connectors
import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"time"

	log "github.com/sirupsen/logrus"
)
var (
	// define base URL for google API (Place Details endpoint)
	baseApiURL = "https://maps.googleapis.com/maps/api/place/details/json"
	// Sentinel errors returned by this package; callers compare them
	// with errors.Is / equality rather than parsing messages.
	ErrInvalidAPIResponse = errors.New("Received invalid API response")
	ErrBusinessNotFound = errors.New("Cannot find API business entry")
	ErrUnauthorized = errors.New("Received unauthorized response from API")
	ErrInvalidJSONResponse = errors.New("Received invalid JSON response from google API")
	ErrRequestLimitReached = errors.New("Reached request limit on API")
)
// GenerateQueryString builds the query string for a Google Place Details
// request. Every value is URL-escaped so place IDs or API keys containing
// reserved characters cannot corrupt the request URL.
func GenerateQueryString(apiKey, placeId string) string {
	fields := "formatted_address,name,permanently_closed,url,place_id,website,business_status,formatted_phone_number"
	return fmt.Sprintf("place_id=%s&fields=%s&key=%s",
		url.QueryEscape(placeId), url.QueryEscape(fields), url.QueryEscape(apiKey))
}
// ParseGoogleResponse decodes a Google Place Details response body and
// returns the embedded "result" object. The result is validated with the
// package-level validator; the validation error (nil on success) is
// returned alongside the parsed struct.
func ParseGoogleResponse(data io.ReadCloser) (GoogleAPIResponse, error) {
	var response struct {
		Result GoogleAPIResponse `json:"result"`
		Status string `json:"status"`
	}
	// NOTE(review): the concrete decode error is discarded and replaced by
	// ErrInvalidJSONResponse, and the decoded "status" field is not checked.
	if err := json.NewDecoder(data).Decode(&response); err != nil {
		return response.Result, ErrInvalidJSONResponse
	}
	log.Debug(fmt.Sprintf("successfully extracted google response %+v", response))
	return response.Result, validate.Struct(response.Result)
}
// GetGoogleBusinessInfo fetches business details for placeId from the
// Google Place Details API and maps the HTTP status code onto the
// package's sentinel errors.
func GetGoogleBusinessInfo(placeId string, apiKey string) (GoogleAPIResponse, error) {
	log.Debug(fmt.Sprintf("making new request to Google API for ID '%s'", placeId))
	requestURL := fmt.Sprintf("%s?%s", baseApiURL, GenerateQueryString(apiKey, placeId))

	req, err := http.NewRequest("GET", requestURL, nil)
	if err != nil {
		log.Error(fmt.Errorf("unable to generate new HTTP Request: %+v", err))
		return GoogleAPIResponse{}, err
	}
	// set JSON as content type
	req.Header.Set("Content-Type", "application/json")

	// Use a client with an explicit timeout so a hung connection cannot
	// block the caller indefinitely (the zero-value client never times out).
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		log.Error(fmt.Errorf("unable to execute HTTP request: %+v", err))
		return GoogleAPIResponse{}, err
	}
	defer resp.Body.Close()

	// handle response based on status code
	switch resp.StatusCode {
	case http.StatusOK:
		log.Debug(fmt.Sprintf("successfully retrieved business data for asset '%s'", placeId))
		// parse response body and convert into struct
		results, err := ParseGoogleResponse(resp.Body)
		if err != nil {
			log.Error(fmt.Sprintf("unable to parse JSON response: %+v", err))
			return results, ErrInvalidAPIResponse
		}
		return results, nil
	case http.StatusUnauthorized:
		log.Error("received unauthorized response from google API")
		return GoogleAPIResponse{}, ErrUnauthorized
	case http.StatusNotFound:
		log.Error(fmt.Sprintf("cannot find API results for business ID %s", placeId))
		return GoogleAPIResponse{}, ErrBusinessNotFound
	case http.StatusTooManyRequests:
		log.Error("reached request limit on API")
		return GoogleAPIResponse{}, ErrRequestLimitReached
	default:
		log.Error(fmt.Errorf("received invalid response from google API with code %d", resp.StatusCode))
		return GoogleAPIResponse{}, ErrInvalidAPIResponse
	}
}
|
package blog
// Engagement represents social network engagement of the object.
type Engagement struct {
	// ShareCount is the total number of times the object was shared.
	// bson:"-" excludes it from MongoDB persistence; it is only exposed
	// via JSON and GraphQL serialization.
	ShareCount int `bson:"-" json:"shareCount" graphql:"shareCount"`
}
|
/*
* Copyright (c) 2020. Ant Group. All rights reserved.
*
* SPDX-License-Identifier: Apache-2.0
*/
package nydussdk
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"time"
"github.com/pkg/errors"
"github.com/dragonflyoss/image-service/contrib/nydus-snapshotter/pkg/nydussdk/model"
"github.com/dragonflyoss/image-service/contrib/nydus-snapshotter/pkg/utils/retry"
)
// nydusd HTTP API endpoints (served over a unix domain socket) and
// client defaults.
const (
	infoEndpoint = "/api/v1/daemon" // daemon status endpoint
	mountEndpoint = "/api/v1/mount" // mount/umount endpoint
	metricEndpoint = "/api/v1/metrics" // filesystem metrics endpoint
	defaultHttpClientTimeout = 30 * time.Second // overall per-request timeout
	contentType = "application/json"
)
// Interface is the client API for talking to a nydusd daemon over its
// unix-socket HTTP endpoint.
type Interface interface {
	CheckStatus() (model.DaemonInfo, error) // query daemon state
	SharedMount(sharedMountPoint, bootstrap, daemonConfig string) error // mount a bootstrap into a shared daemon
	Umount(sharedMountPoint string) error // unmount a previously mounted filesystem
	GetFsMetric(sharedDaemon bool, sid string) (*model.FsMetric, error) // fetch filesystem metrics
}
// NydusClient is the default Interface implementation; it sends HTTP
// requests to nydusd through a unix-domain-socket transport.
type NydusClient struct {
	httpClient *http.Client
}
// NewNydusClient creates a nydusd API client that talks HTTP over the
// given unix domain socket, failing if the socket does not become ready.
func NewNydusClient(sock string) (Interface, error) {
	transport, err := buildTransport(sock)
	if err != nil {
		return nil, err
	}
	httpClient := &http.Client{
		Timeout:   defaultHttpClientTimeout,
		Transport: transport,
	}
	return &NydusClient{httpClient: httpClient}, nil
}
// CheckStatus queries the daemon info endpoint and returns the parsed
// daemon state.
func (c *NydusClient) CheckStatus() (model.DaemonInfo, error) {
	var info model.DaemonInfo
	resp, err := c.httpClient.Get(fmt.Sprintf("http://unix%s", infoEndpoint))
	if err != nil {
		return info, err
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return info, err
	}
	if err := json.Unmarshal(body, &info); err != nil {
		return model.DaemonInfo{}, err
	}
	return info, nil
}
// Umount asks the daemon to detach the filesystem mounted at
// sharedMountPoint. Any non-204 response is turned into an error built
// from the daemon's error payload.
func (c *NydusClient) Umount(sharedMountPoint string) error {
	target := fmt.Sprintf("http://unix%s?mountpoint=%s", mountEndpoint, sharedMountPoint)
	req, err := http.NewRequest(http.MethodDelete, target, nil)
	if err != nil {
		return err
	}
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		return handleMountError(resp.Body)
	}
	return nil
}
// GetFsMetric fetches filesystem metrics from the daemon. For a shared
// daemon the per-filesystem id is passed as a query parameter; otherwise
// the daemon's single-filesystem metrics endpoint is used.
func (c *NydusClient) GetFsMetric(sharedDaemon bool, sid string) (*model.FsMetric, error) {
	var getStatURL string
	if sharedDaemon {
		getStatURL = fmt.Sprintf("http://unix%s?id=/%s/fs", metricEndpoint, sid)
	} else {
		getStatURL = fmt.Sprintf("http://unix%s", metricEndpoint)
	}
	req, err := http.NewRequest(http.MethodGet, getStatURL, nil)
	if err != nil {
		return nil, err
	}
	resp, err := c.httpClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNoContent {
		// Previously this returned (nil, nil), handing callers a nil
		// *FsMetric with no error to check — a nil-dereference trap.
		// Report the absence of metrics explicitly instead.
		return nil, errors.Errorf("no filesystem metrics available for id %q", sid)
	}
	var m model.FsMetric
	if err = json.NewDecoder(resp.Body).Decode(&m); err != nil {
		return nil, err
	}
	return &m, nil
}
// SharedMount asks the daemon to mount the given bootstrap at
// sharedMountPoint, sending the daemon configuration file's contents as
// part of the mount request.
func (c *NydusClient) SharedMount(sharedMountPoint, bootstrap, daemonConfig string) error {
	target := fmt.Sprintf("http://unix%s?mountpoint=%s", mountEndpoint, sharedMountPoint)
	content, err := ioutil.ReadFile(daemonConfig)
	if err != nil {
		return errors.Wrapf(err, "failed to get content of daemon config %s", daemonConfig)
	}
	payload, err := json.Marshal(model.NewMountRequest(bootstrap, string(content)))
	if err != nil {
		return errors.Wrap(err, "failed to create mount request")
	}
	resp, err := c.httpClient.Post(target, contentType, bytes.NewBuffer(payload))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		return handleMountError(resp.Body)
	}
	return nil
}
// waitUntilSocketReady polls for the unix socket file to exist, retrying
// up to 3 times with a 100ms delay and reporting only the last error.
func waitUntilSocketReady(sock string) error {
	probe := func() error {
		_, err := os.Stat(sock)
		return err
	}
	return retry.Do(probe,
		retry.Attempts(3),
		retry.LastErrorOnly(true),
		retry.Delay(100*time.Millisecond))
}
// buildTransport returns an http.RoundTripper that dials the given unix
// socket regardless of the requested host, after waiting for the socket
// file to appear.
func buildTransport(sock string) (http.RoundTripper, error) {
	if err := waitUntilSocketReady(sock); err != nil {
		return nil, err
	}
	// net.Dialer is safe for concurrent use, so one instance can serve
	// every connection made through this transport.
	dialer := &net.Dialer{
		Timeout:   5 * time.Second,
		KeepAlive: 5 * time.Second,
	}
	return &http.Transport{
		MaxIdleConns:          10,
		IdleConnTimeout:       10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return dialer.DialContext(ctx, "unix", sock)
		},
	}, nil
}
// handleMountError decodes the daemon's JSON error payload and surfaces
// its message as a plain error.
func handleMountError(r io.Reader) error {
	body, err := ioutil.ReadAll(r)
	if err != nil {
		return err
	}
	var msg model.ErrorMessage
	if err := json.Unmarshal(body, &msg); err != nil {
		return err
	}
	return errors.New(msg.Message)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.