text
stringlengths 11
4.05M
|
|---|
package inspect
import (
"go/ast"
"fmt"
"github.com/sky0621/go-testcode-autogen/inspect/result"
)
// CommentInspector inspects *ast.Comment nodes and dumps their contents.
type CommentInspector struct{}
// IsTarget reports whether node is an *ast.Comment, the only node type
// this inspector handles.
func (i *CommentInspector) IsTarget(node ast.Node) bool {
	_, isComment := node.(*ast.Comment)
	return isComment
}
// Inspect handles an *ast.Comment node. It returns an error when node is
// not an *ast.Comment; otherwise it currently only dumps the comment.
func (i *CommentInspector) Inspect(node ast.Node, aggregater *result.Aggregater) error {
	cmt, ok := node.(*ast.Comment)
	if !ok {
		// Error strings are lowercase per Go convention (was "Not target Node").
		return fmt.Errorf("not target node: %#v", node)
	}
	// FIXME: placeholder output until real aggregation into aggregater is implemented.
	fmt.Println("===== CommentInspector ===================================================================================")
	fmt.Printf("Comment: %#v\n", cmt)
	return nil
}
|
package job
import (
"bufio"
"bytes"
"compress/gzip"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"os/exec"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"time"
"common"
ds "poseidon/datastruct"
sj "github.com/bitly/go-simplejson"
"github.com/donnie4w/go-logger/logger"
"github.com/golang/protobuf/proto"
)
// Reference-time layouts for the minute- and hour-resolution timestamps
// embedded in log file names.
const TimeFormatMinute = "2006-01-02-15-04"
const TimeFormatHour = "2006-01-02-15"
// StrSlice is a list of strings (local log paths or source urls).
type StrSlice []string
// LogInfo accumulates the log files buffered for one (minute, idc) bucket
// until they are merged and uploaded to HDFS.
type LogInfo struct {
	logs      StrSlice  // local file paths collected so far
	urls      StrSlice  // source item ids corresponding to logs
	hourTime  string    // hour string derived from the first item id
	totalSize int64     // accumulated on-disk size of logs, in bytes
	startTime time.Time // when this bucket was created (used to avoid name clashes)
}
// DocIdInfo is the JSON response of the docid id-generator service
// (see writeMetaFile).
type DocIdInfo struct {
	Count      int    `json:"count"`       // number of ids actually allocated
	Errmsg     string `json:"errmsg"`      // error description when Errno != 0
	Errno      int    `json:"errno"`       // 0 on success
	StartIndex int    `json:"start_index"` // first id of the allocated contiguous block
	Time       int    `json:"time"`
}
// SLMap maps an idc name to the log bucket collected for it.
type SLMap map[string] /*idc*/ LogInfo
// LogtoHdfsCollector buffers local log files per (time bucket, idc),
// merges each bucket into a single gzip file and uploads it to HDFS,
// optionally producing poseidon docid meta files as well.
type LogtoHdfsCollector struct {
	sub                             string // logging subsystem tag (from runtime.sub)
	id                              int    // collector instance id, embedded in file names
	shortHostname                   string // first label of os.Hostname(), embedded in file names
	flushMinute                     int    // if now - t > flushMinute; then flush
	gatherMinute                    int    // gather logs, combined every 5 minutes
	tickInterval                    int    // not every tick flushes; a flush happens only every tickInterval ticks
	currentTick                     int    // tick counter, used together with tickInterval
	lastFlushTime                   time.Time // time of the last flush; prevents too many size-triggered flushes from starving time-based flushing
	maxMergeFileByte                int64     // size threshold that triggers an early upload of a bucket
	writeDirs                       []string  // local staging directories (load-balanced by random pick)
	hadoopRemoteDir                 string
	hadoopRemoteTimeDirs            []string // Go time layouts for the remote directory levels
	hadoopRemoteTimeDirTruncMinutes []int    // per-level truncation interval in minutes (0/1 = none)
	hadoopRemoteFilePrefix          string
	hadoopRemoteFileSuffix          string
	hadoopCmd                       string // path of the hadoop CLI launcher
	needZip                         bool   // gzip plaintext inputs before merging
	needUnzip                       bool   // gunzip compressed inputs before merging
	loc                             *time.Location // Asia/Shanghai; used when parsing file-name timestamps
	logMap                          map[time.Time]SLMap // truncated minute -> idc -> buffered bucket
	poseidonMode                    bool // poseidon mode: only gz input is supported, not plaintext
	docLines                        int  // lines batched per gzip member in poseidon mode
	readBufSizeByte                 int  // bufio read buffer size for input files
	docIdDomain                     string // host:port of the docid id-generator service
	docIdBusiness                   string // business name passed to the id-generator
}
// Init configures the collector from the "Hdfs" section of ctx and
// validates the configuration: it converts the PHP-style remote time-dir
// formats, creates the local write directories, checks the hadoop binary,
// and prepares the in-memory log map. Any invalid setting yields an error.
func (p *LogtoHdfsCollector) Init(ctx *sj.Json, id int) error {
	p.sub = ctx.Get("runtime").Get("sub").MustString("NIL")
	p.id = id
	var err error
	p.loc, err = time.LoadLocation("Asia/Shanghai")
	if err != nil {
		return err
	}
	p.writeDirs = ctx.Get("Hdfs").Get("write_dirs").MustStringArray([]string{"/data1/ck/logto_hdfs"})
	p.hadoopRemoteDir = ctx.Get("Hdfs").Get("hadoop_remote_dir").MustString("")
	p.hadoopRemoteFilePrefix = ctx.Get("Hdfs").Get("hadoop_remote_file_prefix").MustString("")
	p.hadoopRemoteFileSuffix = ctx.Get("Hdfs").Get("hadoop_remote_file_suffix").MustString("")
	p.hadoopRemoteTimeDirs = ctx.Get("Hdfs").Get("hadoop_remote_time_dirs").MustStringArray()
	p.hadoopRemoteTimeDirTruncMinutes = make([]int, len(p.hadoopRemoteTimeDirs))
	for i, timeDir := range p.hadoopRemoteTimeDirs {
		if len(timeDir) <= 0 {
			return errors.New("hadoop_remote_time_dir must not be empty!")
		}
		// Convert PHP-style formats (e.g. "Y-m-d" or "Hi:15") into a Go
		// layout plus an optional truncation interval in minutes.
		p.hadoopRemoteTimeDirs[i], p.hadoopRemoteTimeDirTruncMinutes[i], err = p.transferTimeFormat(timeDir)
		if err != nil {
			return err
		}
	}
	if len(p.hadoopRemoteTimeDirs) <= 0 {
		return errors.New("there is no hadoop_remote_time_dirs!")
	}
	p.hadoopCmd = ctx.Get("Hdfs").Get("hadoop_cmd").MustString("/usr/bin/hadoop/software/hadoop/bin/hadoop")
	p.flushMinute = ctx.Get("Hdfs").Get("flush_minute").MustInt(10)
	p.gatherMinute = ctx.Get("Hdfs").Get("gather_minute").MustInt(5)
	p.tickInterval = ctx.Get("Hdfs").Get("tick_interval").MustInt(10)
	p.lastFlushTime = time.Now()
	p.maxMergeFileByte = ctx.Get("Hdfs").Get("max_merge_file_size").MustInt64(268435456) // 256M, the block size of HDFS currently
	for _, v := range p.writeDirs {
		if err := os.MkdirAll(v, 0777); err != nil {
			logger.ErrorSubf(p.sub, "writeDir err: %v, dir: %s", err, v)
			return errors.New("mkdir failed " + v)
		}
	}
	if _, err := os.Stat(p.hadoopCmd); err != nil && !os.IsExist(err) {
		logger.ErrorSub(p.sub, "hadoopCmd err:", err)
		return err
	}
	// gatherMinute must evenly divide an hour: between 1~59, like 2, 4, 5, 10, 15, 20, 30
	if p.gatherMinute <= 0 || p.gatherMinute >= 60 || 60%p.gatherMinute != 0 {
		return errors.New("gatherMinute invalid")
	}
	if p.flushMinute < p.gatherMinute {
		return errors.New("flushMinute invalid")
	}
	if p.maxMergeFileByte < 1048576 || p.maxMergeFileByte > 10737418240 {
		// BUG FIX: the message previously reported 1073741824 (1G) as the
		// upper bound, but the check above allows up to 10737418240 (10G).
		return errors.New("maxMergeFileByte should be 1048576-10737418240") // 1M~10G
	}
	p.logMap = make(map[time.Time]SLMap)
	host, err := os.Hostname()
	if err != nil {
		return err
	}
	strArr := strings.Split(host, ".")
	p.shortHostname = strArr[0]
	p.needZip = ctx.Get("Hdfs").Get("need_zip").MustBool(false)
	p.needUnzip = ctx.Get("Hdfs").Get("need_unzip").MustBool(false)
	if p.needZip && p.needUnzip { // both must not be true; disable both if so
		p.needZip = false
		p.needUnzip = false
	}
	p.docLines = ctx.Get("Hdfs").Get("doc_lines").MustInt(128)
	p.poseidonMode = ctx.Get("Hdfs").Get("poseidon_mode").MustBool(false) // poseidon mode supports only gz, not plaintext
	p.readBufSizeByte = ctx.Get("Hdfs").Get("readbuf_size_byte").MustInt(1024 * 1024)
	p.docIdDomain = ctx.Get("Hdfs").Get("docid_domain").MustString("127.0.0.1:39360")
	p.docIdBusiness = ctx.Get("Hdfs").Get("docid_business").MustString("temp")
	return nil
}
// Collect buffers one incoming log file (item) into the per-minute, per-idc
// log map, and triggers an early upload to HDFS once a bucket's accumulated
// size reaches maxMergeFileByte. Empty files are skipped silently.
// The goto labels implement a single error/finish exit path.
func (p *LogtoHdfsCollector) Collect(item Item) error {
	// logger.DebugSub(p.sub, "collector get: ", item)
	id := item.Id
	var minute time.Time
	var fileInfo os.FileInfo
	var fileSize int64
	var tmpLogInfo LogInfo
	// Derive the hour string from the item id (used for bucket bookkeeping);
	// a malformed id degrades to an empty hour rather than failing.
	hour, err := common.GetHourStrFromId(id)
	if err != nil {
		logger.ErrorSub(p.sub, "id "+id+"not contain valid timestr", err)
		hour = ""
	}
	var path string
	idc := item.Content
	minutePtr, err := p.getMinute(item.RawMsg)
	if err != nil {
		goto ERROR
	}
	// zip or unzip
	// Pre-processing: poseidon mode requires compressed input, so the file
	// can be (de)compressed up front here.
	// If neither zip nor unzip is needed this step does nothing.
	item.RawMsg, err = p.HandleZip(item.RawMsg)
	if err != nil {
		goto ERROR
	}
	path = item.RawMsg
	minute = *minutePtr
	fileInfo, err = os.Stat(path)
	if err != nil {
		goto ERROR
	}
	fileSize = fileInfo.Size()
	if fileSize <= 0 {
		goto FINISH
	}
	if _, ok := p.logMap[minute]; !ok {
		p.logMap[minute] = make(SLMap)
	}
	if _, ok := p.logMap[minute][idc]; !ok {
		p.logMap[minute][idc] = LogInfo{startTime: time.Now(), hourTime: hour}
	}
	// LogInfo is stored by value, so mutate a copy and write it back.
	tmpLogInfo = p.logMap[minute][idc]
	tmpLogInfo.logs = append(tmpLogInfo.logs, path)
	tmpLogInfo.urls = append(tmpLogInfo.urls, id)
	tmpLogInfo.totalSize += fileSize
	p.logMap[minute][idc] = tmpLogInfo
	if tmpLogInfo.totalSize >= p.maxMergeFileByte {
		curTime := time.Now()
		if curTime.Sub(tmpLogInfo.startTime) < time.Duration(1)*time.Second {
			// sleep for 1 sec, in case file name conflict.
			// Merged file names are second-resolution, so two merge files
			// must not be generated within the same second.
			time.Sleep(time.Duration(1) * time.Second)
		}
		if err := p.copyLogToHdfs(minute, idc, tmpLogInfo.logs, tmpLogInfo.urls); err != nil {
			logger.ErrorSub(p.sub, p.id, "copyLogToHdfs fail with reason [", err, ", [", tmpLogInfo, "]")
		}
		logger.DebugSub(p.sub, p.id, "copyLogToHdfs due to size")
		// Start a fresh bucket whether or not the upload succeeded.
		p.logMap[minute][idc] = LogInfo{startTime: curTime, hourTime: hour}
	}
FINISH:
	logger.DebugSub(p.sub, "collect finish", p.id, item)
	return nil
ERROR:
	logger.ErrorSub(p.sub, "collect fail", p.id, item, err)
	return err
}
// Destory flushes every buffered bucket before the collector shuts down.
// NOTE(review): the name is a typo for "Destroy" but is kept unchanged
// because the job framework invokes it by this exact name.
func (p *LogtoHdfsCollector) Destory() error {
	logger.DebugSub(p.sub, p.id, "collector destroy")
	return p.flush(true)
}
// Tick is invoked periodically by the job framework. Every tickInterval
// ticks it flushes time-expired buckets; on non-flush ticks it still forces
// a flush when more than 10 minutes have passed since lastFlushTime.
func (p *LogtoHdfsCollector) Tick() error {
	p.currentTick++
	if p.currentTick > p.tickInterval {
		p.currentTick = 0
		logger.DebugSubf(p.sub, "LogtoHdfsCollector.Tick %d this is a flush tick", p.id)
		// NOTE(review): lastFlushTime is not refreshed on this path, only in
		// the fallback branch below — confirm whether that is intentional.
		return p.flush(false)
	}
	logger.DebugSubf(p.sub, "LogtoHdfsCollector.Tick id: %d no flush tick: %d", p.id, p.currentTick)
	now := time.Now()
	if now.Sub(p.lastFlushTime).Minutes() > 10 {
		logger.InfoSubf(p.sub, "LogtoHdfsCollector.Tick id: %d no flush tick: %d, but too long ago last flush: %v",
			p.id, p.currentTick, p.lastFlushTime)
		p.lastFlushTime = now
		return p.flush(false)
	}
	return nil
}
// getMinute extracts the timestamp embedded in a log file path and returns
// it truncated to the gatherMinute boundary, so that files from the same
// gather interval land in the same bucket.
// Two name formats are supported, minute and hour resolution:
//   info.2016-01-08-13-01.gz and info.2016-01-08-13.gz
// NOTE(review): only paths containing the *current* year are recognized,
// so files carried over a year boundary would be rejected.
func (p *LogtoHdfsCollector) getMinute(path string) (*time.Time, error) {
	var t time.Time
	curYear := strconv.Itoa(time.Now().Year())
	idx := strings.Index(path, curYear+"-")
	if idx < 0 {
		return nil, errors.New("path not contain yearStr")
	}
	subStr := path[idx:] // 2016-01-08-13-01.gz or 2016-01-08-13.gz
	// Try the minute-resolution layout first.
	if len(subStr) >= len(TimeFormatMinute) {
		minute, err := time.ParseInLocation(TimeFormatMinute, subStr[0:len(TimeFormatMinute)], p.loc) // get "2015-12-14-17-00"
		if err == nil {
			t = minute.Truncate(time.Duration(p.gatherMinute) * time.Minute)
			return &t, nil
		}
	}
	// Fall back to the hour-resolution layout.
	if len(subStr) >= len(TimeFormatHour) {
		minute, err := time.ParseInLocation(TimeFormatHour, subStr[0:len(TimeFormatHour)], p.loc) // get "2015-12-14-17"
		if err == nil {
			t = minute.Truncate(time.Duration(p.gatherMinute) * time.Minute)
			return &t, nil
		}
	}
	return nil, errors.New("path timeformat error")
}
// HandleZip pre-processes a log file so it matches the collector's
// compression requirement: with needUnzip set it gunzips .gz inputs, with
// needZip set it gzips plaintext inputs; otherwise the path is returned
// unchanged. It returns the resulting file path and any /bin/gzip error.
func (p *LogtoHdfsCollector) HandleZip(path string) (string, error) {
	var newPath string
	var args []string
	isGz := strings.HasSuffix(path, ".gz")
	switch {
	case p.needUnzip && isGz:
		args = append(args, "-d")
		newPath = strings.TrimSuffix(path, ".gz")
	case p.needZip && !isGz:
		newPath = path + ".gz"
	default:
		// Nothing to do for this file.
		return path, nil
	}
	args = append(args, path)
	// Note: exec.Command never returns nil, so the original nil check was dead code.
	err := exec.Command("/bin/gzip", args...).Run()
	return newPath, err
}
// TimeSlice implements sort.Interface over time.Time values, ordering
// them ascending (earliest first).
type TimeSlice []time.Time

func (p TimeSlice) Len() int           { return len(p) }
func (p TimeSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p TimeSlice) Less(i, j int) bool { return p[i].Before(p[j]) }
// flush uploads buffered buckets to HDFS, oldest first.
// With flushAll false only buckets older than flushMinute are uploaded;
// with flushAll true (shutdown) age is ignored. Upload failures are logged
// but the bucket is removed from the map either way; flush always
// returns nil.
func (p *LogtoHdfsCollector) flush(flushAll bool) error {
	logger.DebugSubf(p.sub, "LogtoHdfsCollector.flush %d, begin flush flushAll: %v", p.id, flushAll)
	timeBegin := time.Now()
	defer func() {
		timeCost := (time.Since(timeBegin).Nanoseconds() / 1000000) // milliseconds
		logger.DebugSubf(p.sub, "LogtoHdfsCollector.flush %d, flushAll: %v, cost: %d", p.id, flushAll, timeCost)
	}()
	timeThreshold := time.Duration(p.flushMinute) * time.Minute
	// Process the oldest buckets first.
	times := []time.Time{}
	for t, _ := range p.logMap {
		times = append(times, t)
	}
	sort.Sort(TimeSlice(times))
	for _, t := range times {
		slMap := p.logMap[t]
		logger.DebugSubf(p.sub, "LogtoHdfsCollector.flush %d len(logMap): %d", p.id, len(p.logMap))
		if len(slMap) <= 0 {
			// Drop empty time buckets.
			delete(p.logMap, t)
			continue
		}
		curTime := time.Now()
		if !flushAll && curTime.Sub(t) < timeThreshold {
			continue
		}
		for idc, logInfo := range slMap {
			vec := logInfo.logs
			if len(vec) <= 0 {
				delete(p.logMap[t], idc)
				continue
			}
			if time.Since(logInfo.startTime) < time.Duration(1)*time.Second {
				// sleep for 1 sec, in case of a file name conflict
				// (merged file names are second-resolution)
				time.Sleep(time.Duration(1) * time.Second)
			}
			if err := p.copyLogToHdfs(t, idc, vec, logInfo.urls); err != nil {
				logger.ErrorSub(p.sub, p.id, "copyLogToHdfs fail with reason [", err, "], [", logInfo, "]")
			}
			delete(p.logMap[t], idc)
		}
	}
	if flushAll {
		logger.InfoSub(p.sub, p.id, "flush_when_exit: [", p.logMap, "]")
	}
	logger.DebugSubf(p.sub, "LogtoHdfsCollector.flush %d, flush end flushAll: %v", p.id, flushAll)
	return nil
}
// copyLogToHdfs assembles the given files into one local gz file (plus a
// docid meta file in poseidon mode) and uploads the result to HDFS.
// t is a logMap key, i.e. a time already truncated to the gather interval
// (e.g. 2016-06-06-12-15); all files belonging to one t end up in a single
// hadoop file.
func (p *LogtoHdfsCollector) copyLogToHdfs(t time.Time, idc string, files []string, urls []string) error {
	timeBegin := time.Now()
	defer func() {
		timeCost := (time.Since(timeBegin).Nanoseconds() / 1000000) // milliseconds
		logger.DebugSubf(p.sub, "LogtoHdfsCollector.copyLogToHdfs %d, len(files): %d, cost: %d", p.id, len(files), timeCost)
	}()
	if len(files) <= 0 {
		return nil
	}
	var err error
	var localGzFile string
	var localDocidGzFile string
	var localGzFileSize int64
	timeStr := t.Format(TimeFormatMinute)
	subPath, compressRemotePath := p.getRemoteSubPath(t, timeStr)
	logger.DebugSubf(p.sub, "LogtoHdfsCollector.copyLogToHdfs %d begin, len(files): %d, subPath: %s", p.id, len(files), subPath)
	if p.poseidonMode {
		localGzFile, localDocidGzFile, err = p.generateNewGzAndMetaFile(timeStr, idc, files, compressRemotePath)
		if err != nil {
			logger.ErrorSub(p.sub, p.id, "generate fail,", localGzFile, localDocidGzFile)
			return err
		}
	} else {
		localGzFile, err = p.generateNewGzFile(timeStr, idc, files)
		if err != nil {
			logger.ErrorSub(p.sub, p.id, "generate fail,", localGzFile)
			return err
		}
	}
	if err != nil {
		return err
	}
	// Handle the zero-size case: move the empty file aside and skip upload.
	localGzFileSize, err = p.getSize(localGzFile)
	if err != nil {
		return err
	}
	if localGzFileSize <= 0 {
		bakFileName := filepath.Dir(localGzFile) + "/bak." + filepath.Base(localGzFile)
		if err := os.Rename(localGzFile, bakFileName); err != nil {
			return err
		}
		logger.InfoSub(p.sub, bakFileName, "size = 0, ignore")
		return nil
	}
	if p.poseidonMode {
		// Upload the docid meta file first, under <remote>/docid/.
		if err = p.copySingleFileToHdfs(localDocidGzFile, p.hadoopRemoteDir+"/docid/"+subPath); err != nil {
			logger.ErrorSub(p.sub, p.id, "copy", localDocidGzFile, "=>", p.hadoopRemoteDir+"/docid/"+subPath, "fail")
			return err
		}
	}
	// Perform the data file upload.
	err = p.copySingleFileToHdfs(localGzFile, p.hadoopRemoteDir+"/"+subPath)
	if err != nil {
		logger.ErrorSub(p.sub, p.id, "copy", localGzFile, "=>", p.hadoopRemoteDir+"/"+subPath, "fail")
		return err
	}
	logger.InfoSubf(p.sub, "%dth copy localGzFile[%s] (contains files{%v}) (contains urls{%v}) to hdfs[%s] success\n", p.id, localGzFile, files, urls, subPath)
	return nil
}
// getSize returns the size in bytes of the file at path.
func (p *LogtoHdfsCollector) getSize(path string) (int64, error) {
	info, err := os.Stat(path)
	if err != nil {
		return 0, err
	}
	return info.Size(), nil
}
// getRemoteSubPath builds the remote sub path (time directories + file
// name) for the upload, and the compress path used as the Path of poseidon
// DocGzMeta entries. timeStr must be t formatted with TimeFormatMinute.
func (p *LogtoHdfsCollector) getRemoteSubPath(t time.Time, timeStr string) (string, string) {
	dir := ""
	for i := 0; i < len(p.hadoopRemoteTimeDirs); i++ {
		timeDir := p.hadoopRemoteTimeDirs[i]
		if p.hadoopRemoteTimeDirTruncMinutes[i] > 1 {
			// Supports configs like "Hi:15": the sub directory is hour+minute
			// truncated to 15-minute buckets (1200/1215/1230/1245), while the
			// files inside keep minute resolution.
			tmpT := t.Truncate(time.Duration(p.hadoopRemoteTimeDirTruncMinutes[i]) * time.Minute)
			dir += tmpT.Format(timeDir) + "/"
		} else {
			dir += t.Format(timeDir) + "/"
		}
	}
	curTimeStr := time.Now().Format("0102150405")
	strId := strconv.Itoa(p.id)
	subPath := dir + p.hadoopRemoteFilePrefix + strId + "_" + p.shortHostname + "_" + curTimeStr + "_" + timeStr + p.hadoopRemoteFileSuffix
	// timeStr[11:13] and timeStr[14:] are the HH and MM of "YYYY-mm-dd-HH-MM".
	compressPath := strId + "_" + p.shortHostname + "_" + curTimeStr + timeStr[11:13] + timeStr[14:]
	return subPath, compressPath
}
// generateNewGzFile concatenates the given files into a single .gz file
// under one of the local write dirs and returns its path.
// timeStr is like "2015-12-15-12-25"; its YYYY-mm-dd and HH parts become
// the local directory layout, and idc is embedded in the file name.
func (p *LogtoHdfsCollector) generateNewGzFile(timeStr string, idc string, files []string) (string, error) {
	// Balance load across the configured write dirs.
	writeDir := p.writeDirs[rand.Intn(len(p.writeDirs))]
	dir := writeDir + "/" + timeStr[0:10] + "/" + timeStr[11:13] // get YYYY-mm-dd and HH
	if err := os.MkdirAll(dir, 0777); err != nil {             // like mkdir -p
		return "", err
	}
	curTimeStr := time.Now().Format("20060102150405")
	strId := strconv.Itoa(p.id)
	newGzFileName := dir + "/" + strId + "_" + idc + "_" + curTimeStr + "_access.log." + timeStr + ".gz"
	outfile, err := os.Create(newGzFileName)
	if err != nil {
		return "", err
	}
	defer outfile.Close()
	// Concatenated gzip members form a valid gzip stream, so /bin/cat suffices.
	cmd := exec.Command("/bin/cat", files...)
	cmd.Stdout = outfile // redirect the output to the file
	if err := cmd.Start(); err != nil {
		return "", err
	}
	// BUG FIX: the original assigned cmd.Wait()'s error but returned nil
	// unconditionally, silently accepting truncated output.
	if err := cmd.Wait(); err != nil {
		return "", err
	}
	return newGzFileName, nil
}
// generateNewGzAndMetaFile builds a poseidon-format gz file plus its docid
// meta file from inputFiles, returning both local paths.
// Input lines are batched docLines at a time; each batch is written as an
// independent gzip member whose (offset, length) is recorded in a
// DocGzMeta pointing at compressRemotePath. Unreadable input files are
// logged and skipped rather than failing the batch; write errors abort.
// NOTE(review): inf.Close/gz.Close are deferred inside the loop, so every
// input file stays open until the function returns — a large batch can
// exhaust file descriptors.
func (p *LogtoHdfsCollector) generateNewGzAndMetaFile(timeStr string, idc string, inputFiles []string, compressRemotePath string) (newGzFileName string, newMetaFileName string, err error) {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	timeBegin := time.Now()
	defer func() {
		// Log size and throughput of the generated gz file.
		costTime := time.Since(timeBegin)
		var fileSize int64
		var speed int
		if fileInfo, err := os.Stat(newGzFileName); err == nil {
			if fileSize = fileInfo.Size(); fileSize > 0 {
				speed = int((float64(fileSize) / costTime.Minutes()) / 1024 / 1024)
			}
		}
		logger.DebugSubf(p.sub, "LogtoHdfsCollector.generateNewGzAndMetaFile %d, len(inputFiles): %d, cost: %v, gz: %s, docid: %s, size: %d M, speed: %d M/minute",
			p.id, len(inputFiles), costTime, newGzFileName, newMetaFileName, fileSize/1024/1024, speed)
	}()
	// Balance load across the multiple write disks.
	writeDir := p.writeDirs[rand.Intn(len(p.writeDirs))]
	dir := writeDir + "/" + timeStr[0:10] + "/" + timeStr[11:13] // timeStr is like 2015-12-15-12-25; get YYYY-mm-dd and HH
	err = os.MkdirAll(dir, 0777) // like mkdir -p
	if err != nil {
		return "", "", err
	}
	curTimeStr := time.Now().Format("20060102-150405")
	strId := strconv.Itoa(p.id)
	newGzFileName = dir + "/" + strId + "_" + idc + "_" + curTimeStr + "_access.log." + timeStr + ".gz"
	newMetaFileName = dir + "/" + strId + "_" + idc + "_" + curTimeStr + "_access.docid." + timeStr + ".gz"
	f, err := os.OpenFile(newGzFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return "", "", err
	}
	defer f.Close()
	var docStr string      // lines accumulated for the current batch
	var docLineCount int   // number of lines in docStr
	var docSlice []ds.DocGzMeta // one meta entry per written gzip member
	for _, inputFile := range inputFiles {
		inf, err := os.Open(inputFile)
		if err != nil {
			// return "", "", errors.New("open gz file fail: " + inputFile)
			logger.ErrorSubf(p.sub, "LogtoHdfsCollector.generateNewGzAndMetaFile open gz file %s fail: %v",
				inputFile, err)
			continue
		}
		defer inf.Close()
		reader := bufio.NewReaderSize(inf, p.readBufSizeByte)
		if strings.HasSuffix(inputFile, ".gz") {
			// Read .gz inputs through a gzip decompressor.
			gz, err := gzip.NewReader(inf)
			if err != nil {
				// return "", "", errors.New("init gzip fail: " + inputFile)
				logger.ErrorSubf(p.sub, "LogtoHdfsCollector.generateNewGzAndMetaFile init gzip fail: %v file: %s",
					err, inputFile)
				continue
			}
			defer gz.Close()
			reader = bufio.NewReaderSize(gz, p.readBufSizeByte)
		}
		var line string
		var read_err error
		for true {
			line, read_err = reader.ReadString('\n')
			if read_err != nil && read_err != io.EOF {
				// Skip the rest of this file on a hard read error.
				// return "", "", read_err
				logger.ErrorSubf(p.sub, "LogtoHdfsCollector.generateNewGzAndMetaFile read gz fail: %v file: %s",
					read_err, inputFile)
				break
			}
			if len(line) <= 0 {
				goto CHECK_STATUS
			}
			docLineCount++
			docStr += line
			if docLineCount >= p.docLines {
				// Batch is full: write it as one gzip member and record its meta.
				offset, size, err := p.writeGzFile(f, docStr)
				if err != nil {
					logger.ErrorSubf(p.sub, "LogtoHdfsCollector.generateNewGzAndMetaFile write gz fail: %v file: %s",
						err, inputFile)
					return "", "", err
				}
				docSlice = append(docSlice, ds.DocGzMeta{
					Path:   compressRemotePath,
					Offset: uint32(offset),
					Length: uint32(size),
				})
				docStr = ""
				docLineCount = 0
			}
		CHECK_STATUS:
			if read_err == io.EOF {
				break
			}
		}
	}
	// Flush the final partial batch, if any.
	if docLineCount > 0 {
		offset, size, err := p.writeGzFile(f, docStr)
		if err != nil {
			logger.ErrorSubf(p.sub, "LogtoHdfsCollector.generateNewGzAndMetaFile write gz fail: %v", err)
			return "", "", err
		}
		docSlice = append(docSlice, ds.DocGzMeta{
			Path:   compressRemotePath,
			Offset: uint32(offset),
			Length: uint32(size),
		})
	}
	if err = p.writeMetaFile(docSlice, timeStr, newMetaFileName); err != nil {
		logger.ErrorSubf(p.sub, "LogtoHdfsCollector.generateNewGzAndMetaFile write meta file fail: %v", err)
		return "", "", err
	}
	return newGzFileName, newMetaFileName, nil
}
// writeMetaFile writes the poseidon docid meta file for docSlice.
// It requests a contiguous block of document ids from the id-generator
// service, pairs each id with the protobuf+base64 encoding of the matching
// DocGzMeta, and writes the result gzip-compressed to newMetaFileName.
// A nil return with no file written means docSlice was empty.
func (p *LogtoHdfsCollector) writeMetaFile(docSlice []ds.DocGzMeta, timeStr string, newMetaFileName string) error {
	count := len(docSlice)
	if count <= 0 {
		return nil
	}
	// timeStr is "YYYY-mm-dd-HH-ii"; the day portion goes into the request.
	timeSlice := strings.Split(timeStr, "-")
	if len(timeSlice) != 5 || len(timeSlice[0]) != 4 || len(timeSlice[1]) != 2 || len(timeSlice[2]) != 2 {
		return errors.New("timeSlice error")
	}
	url := fmt.Sprintf("http://%s/service/idgenerator?count=%d&business_name=%s&day=%s", p.docIdDomain, count, p.docIdBusiness, timeSlice[0]+timeSlice[1]+timeSlice[2])
	logger.DebugSubf(p.sub, "LogtoHdfsCollector.writeMetaFile url: %v", url)
	resp, err := http.Get(url)
	if err != nil {
		logger.ErrorSubf(p.sub, "LogtoHdfsCollector.writeMetaFile http.Get fail url: %s", url)
		return err
	}
	// BUG FIX: the response body was never closed, leaking the connection.
	defer resp.Body.Close()
	var docIdInfo DocIdInfo
	ret, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// BUG FIX: the read error was previously discarded.
		logger.ErrorSubf(p.sub, "LogtoHdfsCollector.writeMetaFile read body err: %v, url: %v", err, url)
		return err
	}
	if err = json.Unmarshal(ret, &docIdInfo); err != nil {
		logger.ErrorSubf(p.sub, "LogtoHdfsCollector.writeMetaFile json.Unmarshal err ret: %v, url: %v",
			string(ret), url)
		return err
	}
	if docIdInfo.Errno != 0 {
		logger.ErrorSubf(p.sub, "LogtoHdfsCollector.writeMetaFile bad docIdInfo.Errno: %v", docIdInfo.Errno)
		return errors.New(docIdInfo.Errmsg)
	}
	if docIdInfo.Count != count {
		logger.ErrorSubf(p.sub, "LogtoHdfsCollector.writeMetaFile bad docIdInfo.Count: %d", docIdInfo.Count)
		return errors.New("cannot get enough docid")
	}
	startId := docIdInfo.StartIndex
	// One line per doc: "<docid>\t<base64(protobuf(DocGzMeta))>\n".
	var content strings.Builder
	for i := 0; i < count; i++ {
		data, err := proto.Marshal(&docSlice[i])
		if err != nil {
			logger.ErrorSubf(p.sub, "LogtoHdfsCollector.writeMetaFile proto.Marshal err: %v", docSlice[i])
			return err
		}
		content.WriteString(strconv.Itoa(startId + i))
		content.WriteByte('\t')
		content.WriteString(base64.StdEncoding.EncodeToString(data))
		content.WriteByte('\n')
	}
	f, err := os.OpenFile(newMetaFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		logger.ErrorSubf(p.sub, "LogtoHdfsCollector.writeMetaFile os.OpenFile err, filename: %s", newMetaFileName)
		return err
	}
	defer f.Close()
	if _, _, err = p.writeGzFile(f, content.String()); err != nil {
		logger.ErrorSubf(p.sub, "LogtoHdfsCollector.writeMetaFile p.writeGzFile err: %v", err)
		return err
	}
	return nil
}
// writeGzFile gzip-compresses s in memory and appends it to f as an
// independent gzip member. It returns the byte offset within f at which
// the member starts, the number of compressed bytes written, and any error.
func (p *LogtoHdfsCollector) writeGzFile(f *os.File, s string) (int, int, error) {
	// Record where this gzip member will start (io.SeekCurrent, was magic "1").
	offset, err := f.Seek(0, io.SeekCurrent)
	if err != nil {
		return 0, 0, err
	}
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf) // never nil, so the original nil check was dead code
	if _, err = w.Write([]byte(s)); err != nil {
		return 0, 0, err
	}
	// BUG FIX: Close flushes the remaining compressed data into buf; its
	// error was previously ignored, risking truncated members.
	if err = w.Close(); err != nil {
		return 0, 0, err
	}
	n, err := f.Write(buf.Bytes())
	if err != nil {
		return 0, 0, err
	}
	return int(offset), n, nil
}
// copySingleFileToHdfs uploads localPath to remotePath via the hadoop CLI.
// It retries forever on failure, so a broken hadoop client blocks progress
// rather than losing data. On success the local file is renamed to
// "bak.<name>" and nil is returned.
// NOTE(review): the commands run as `bash <hadoopCmd> ...`; this only works
// if the hadoop launcher is itself a shell script — confirm.
// NOTE(review): the trailing "bad.<name>" handling is unreachable — both
// loops can only be left via break/return on success.
func (p *LogtoHdfsCollector) copySingleFileToHdfs(localPath string, remotePath string) error {
	retry := 0
	var err error
	// hadoop fs -mkdir -p <remoteDir>, retried until it succeeds
	for true {
		remoteDir := filepath.Dir(remotePath)
		cmdArr := []string{p.hadoopCmd, "fs", "-mkdir", "-p", remoteDir}
		c := exec.Command("bash", cmdArr...)
		if c == nil {
			return errors.New("new cmd fail")
		}
		// logger.DebugSubf(p.sub, "LogtoHdfsCollector.copySingleFileToHdfs mkdir, remoteDir: %s, cmd: %v", remoteDir, c)
		err = c.Run()
		if err == nil {
			logger.InfoSub(p.sub, p.id, "mkdir", remoteDir, "success")
			break
		}
		logger.ErrorSubf(p.sub, "LogtoHdfsCollector.copySingleFileToHdfs mkdir err, remoteDir: %s, remotePath: %v, retry: %d, err: %v",
			remoteDir, remotePath, retry, err)
		retry++
		time.Sleep(time.Second * time.Duration(3))
	}
	// hadoop fs -copyFromLocal, also retried until it succeeds
	for true {
		t1 := time.Now()
		cmdArr := []string{p.hadoopCmd, "fs", "-copyFromLocal", localPath, remotePath}
		c := exec.Command("bash", cmdArr...)
		if c == nil {
			return errors.New("new cmd fail")
		}
		err = c.Run()
		//fmt.Printf("cmd: [%+v]\n", c)
		if err == nil {
			// Keep the uploaded file locally as bak.<name> instead of deleting it.
			bakFileName := filepath.Dir(localPath) + "/bak." + filepath.Base(localPath)
			if err := os.Rename(localPath, bakFileName); err != nil {
				return err
			}
			t2 := time.Since(t1)
			logger.InfoSub(p.sub, p.id, "copy", bakFileName, "to", remotePath, "cost:", t2, ", retry:", retry)
			return nil
		}
		logger.ErrorSubf(p.sub, "LogtoHdfsCollector.copySingleFileToHdfs err, localPath: %v, remotePath: %v, retry: %d, err: %v",
			localPath, remotePath, retry, err)
		retry++
		time.Sleep(time.Second * time.Duration(3))
	}
	// Unreachable: kept for historical reasons (see NOTE above).
	badFileName := filepath.Dir(localPath) + "/bad." + filepath.Base(localPath)
	os.Rename(localPath, badFileName)
	logger.ErrorSubf(p.sub, "LogtoHdfsCollector.copySingleFileToHdfs failed, err: %v, badFileName: %s", err, badFileName)
	return err
}
// transferTimeFormat converts a PHP-style date format (characters Y m d
// H i s, optionally suffixed with ":<minutes>") into a Go reference-time
// layout plus the truncation interval in minutes (0 when absent).
func (p *LogtoHdfsCollector) transferTimeFormat(phpStyleFormat string) (string, int, error) {
	parts := strings.Split(phpStyleFormat, ":")
	truncMinute := 0
	if len(parts) >= 2 {
		m, err := strconv.Atoi(parts[1])
		if err != nil {
			return "", 0, err
		}
		truncMinute = m
	}
	// Single-pass replacement is equivalent to the sequential form because
	// no replacement output contains another pattern letter.
	replacer := strings.NewReplacer(
		"Y", "2006",
		"m", "01",
		"d", "02",
		"H", "15",
		"i", "04",
		"s", "05",
	)
	return replacer.Replace(parts[0]), truncMinute, nil
}
|
// Copyright 2017 Walter Schulze
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package intern
import (
"fmt"
"io"
"github.com/katydid/katydid/parser"
"github.com/katydid/katydid/relapse/ast"
)
// Interpret reports whether the parser's input is valid for the grammar g.
// It recursively derives and simplifies the grammar against the input;
// the nullability of the final derived pattern is the result.
// Immediate recursion is not handled here; see the HasRecursion function.
func Interpret(g *ast.Grammar, record bool, parser parser.Interface) (bool, error) {
	c := NewConstructor()
	if record {
		c = NewConstructorOptimizedForRecords()
	}
	start, err := c.AddGrammar(g)
	if err != nil {
		return false, err
	}
	residual, err := deriv(c, []*Pattern{start}, parser)
	if err != nil {
		return false, err
	}
	return residual[0].nullable, nil
}
// escapable reports whether any pattern is something other than ZAny or
// NotZAny. Once every pattern is one of those two, further input cannot
// change the outcome, so derivation may stop early.
func escapable(patterns []*Pattern) bool {
	for _, pat := range patterns {
		if !isZAny(pat) && !isNotZAny(pat) {
			return true
		}
	}
	return false
}
// deriv derives the patterns through the parse tree, one sibling at a time.
// For each node it evaluates the if-expressions from DeriveCalls to obtain
// child patterns, recursively derives those through the node's children,
// and folds the children's nullability back into the parent patterns via
// DeriveReturns. Derivation stops early once no pattern is escapable.
func deriv(c Construct, patterns []*Pattern, tree parser.Interface) ([]*Pattern, error) {
	var resPatterns []*Pattern = patterns
	for {
		if !escapable(resPatterns) {
			return resPatterns, nil
		}
		if err := tree.Next(); err != nil {
			if err == io.EOF {
				break
			} else {
				return nil, err
			}
		}
		ifs := DeriveCalls(c, resPatterns)
		childPatterns := make([]*Pattern, len(ifs))
		for i, ifExpr := range ifs {
			// Note: this c shadows the Construct parameter inside the loop body.
			c, err := ifExpr.eval(tree)
			if err != nil {
				return nil, err
			}
			childPatterns[i] = c
		}
		var err error
		if tree.IsLeaf() {
			//do nothing: a leaf has no children to descend into
		} else {
			// Descend: zip the child patterns (dedup), derive them through
			// the subtree, then unzip back into position.
			tree.Down()
			z := Zip(childPatterns)
			z.Patterns, err = deriv(c, z.Patterns, tree)
			if err != nil {
				return nil, err
			}
			childPatterns = z.Unzip()
			tree.Up()
		}
		nulls := nullables(childPatterns)
		resPatterns, err = DeriveReturns(c, resPatterns, nulls)
		if err != nil {
			return nil, err
		}
	}
	return resPatterns, nil
}
// DeriveCalls collects the if-expressions needed to derive each of the
// given patterns, concatenated in pattern order.
func DeriveCalls(construct Construct, patterns []*Pattern) []*IfExpr {
	res := []*IfExpr{}
	for i := range patterns {
		res = append(res, derivCall(construct, patterns[i])...)
	}
	return res
}
// derivCall returns the if-expressions whose evaluation drives the
// derivative of p with respect to the next child node. The number of
// expressions equals the number of booleans derivReturn later consumes
// for p, so the two functions must stay in sync.
func derivCall(c Construct, p *Pattern) []*IfExpr {
	switch p.Type {
	case Empty, ZAny:
		// Neither pattern descends into children.
		return []*IfExpr{}
	case Node:
		return []*IfExpr{{p.Func, p.Patterns[0], c.NewNotZAny()}}
	case Concat:
		res := []*IfExpr{}
		for i := range p.Patterns {
			res = append(res, derivCall(c, p.Patterns[i])...)
			// Later concat elements only participate while every earlier
			// element is nullable.
			if !p.Patterns[i].Nullable() {
				break
			}
		}
		return res
	case Or, And, Interleave:
		return derivCallAll(c, p.Patterns)
	case ZeroOrMore, Not, Contains, Optional:
		return derivCall(c, p.Patterns[0])
	case Reference:
		return derivCall(c, c.Deref(p.Ref))
	}
	// BUG FIX: panic message previously read "typ".
	panic(fmt.Sprintf("unknown pattern type %d", p.Type))
}
// derivCallAll concatenates the derivCall results of every pattern in ps.
func derivCallAll(c Construct, ps []*Pattern) []*IfExpr {
	res := []*IfExpr{}
	for _, p := range ps {
		res = append(res, derivCall(c, p)...)
	}
	return res
}
// DeriveReturns maps each original pattern to its derivative, consuming
// the evaluated nullability booleans in the order derivCall produced the
// corresponding if-expressions.
func DeriveReturns(c Construct, originals []*Pattern, evaluated []bool) ([]*Pattern, error) {
	derived := make([]*Pattern, len(originals))
	remaining := evaluated
	for i := range originals {
		var err error
		derived[i], remaining, err = derivReturn(c, originals[i], remaining)
		if err != nil {
			return nil, err
		}
	}
	return derived, nil
}
// derivReturn computes the derivative of p given the nullability results
// of its child derivatives, consuming entries from nulls in the same order
// derivCall produced the matching if-expressions. It returns the derived
// pattern and the unconsumed tail of nulls.
func derivReturn(c Construct, p *Pattern, nulls []bool) (*Pattern, []bool, error) {
	switch p.Type {
	case Empty:
		// Empty cannot match a child: derivative is the empty set.
		return c.NewNotZAny(), nulls, nil
	case ZAny:
		return c.NewZAny(), nulls, nil
	case Node:
		// Exactly one boolean was produced for the node's child pattern.
		if nulls[0] {
			return c.NewEmpty(), nulls[1:], nil
		}
		return c.NewNotZAny(), nulls[1:], nil
	case Concat:
		// Or over deriving each position whose prefix is nullable,
		// mirroring the break condition in derivCall.
		rest := nulls
		orPatterns := make([]*Pattern, 0, len(p.Patterns))
		var err error
		for i := range p.Patterns {
			var ret *Pattern
			ret, rest, err = derivReturn(c, p.Patterns[i], rest)
			if err != nil {
				return nil, nil, err
			}
			concatPat, err := c.NewConcat(append([]*Pattern{ret}, p.Patterns[i+1:]...))
			if err != nil {
				return nil, nil, err
			}
			orPatterns = append(orPatterns, concatPat)
			if !p.Patterns[i].nullable {
				break
			}
		}
		o, err := c.NewOr(orPatterns)
		return o, rest, err
	case Or:
		rest := nulls
		orPatterns := make([]*Pattern, len(p.Patterns))
		var err error
		for i := range p.Patterns {
			orPatterns[i], rest, err = derivReturn(c, p.Patterns[i], rest)
			if err != nil {
				return nil, nil, err
			}
		}
		o, err := c.NewOr(orPatterns)
		return o, rest, err
	case And:
		rest := nulls
		andPatterns := make([]*Pattern, len(p.Patterns))
		var err error
		for i := range p.Patterns {
			andPatterns[i], rest, err = derivReturn(c, p.Patterns[i], rest)
			if err != nil {
				return nil, nil, err
			}
		}
		a, err := c.NewAnd(andPatterns)
		return a, rest, err
	case Interleave:
		// Or over deriving exactly one interleaved component at a time,
		// keeping the others unchanged.
		rest := nulls
		orPatterns := make([]*Pattern, len(p.Patterns))
		var err error
		for i := range p.Patterns {
			interleaves := make([]*Pattern, len(p.Patterns))
			copy(interleaves, p.Patterns)
			interleaves[i], rest, err = derivReturn(c, p.Patterns[i], rest)
			if err != nil {
				return nil, nil, err
			}
			orPatterns[i], err = c.NewInterleave(interleaves)
			if err != nil {
				return nil, nil, err
			}
		}
		o, err := c.NewOr(orPatterns)
		return o, rest, err
	case ZeroOrMore:
		// deriv(p*) = deriv(p) . p*
		pp, rest, err := derivReturn(c, p.Patterns[0], nulls)
		if err != nil {
			return nil, nil, err
		}
		ppp, err := c.NewConcat([]*Pattern{pp, p})
		return ppp, rest, err
	case Reference:
		return derivReturn(c, c.Deref(p.Ref), nulls)
	case Not:
		pp, rest, err := derivReturn(c, p.Patterns[0], nulls)
		if err != nil {
			return nil, nil, err
		}
		ppp, err := c.NewNot(pp)
		return ppp, rest, err
	case Contains:
		// contains(p) derives to: contains(p) | deriv(p).ZAny
		// plus ZAny when p itself is nullable.
		orPatterns := make([]*Pattern, 0, 3)
		orPatterns = append(orPatterns, p)
		ret, rest, err := derivReturn(c, p.Patterns[0], nulls)
		if err != nil {
			return nil, nil, err
		}
		cp, err := c.NewConcat([]*Pattern{ret, c.NewZAny()})
		if err != nil {
			return nil, nil, err
		}
		orPatterns = append(orPatterns, cp)
		if p.Patterns[0].nullable {
			orPatterns = append(orPatterns, c.NewZAny())
		}
		o, err := c.NewOr(orPatterns)
		return o, rest, err
	case Optional:
		return derivReturn(c, p.Patterns[0], nulls)
	}
	panic(fmt.Sprintf("unknown pattern typ %d", p.Type))
}
|
package assigns
var (
	// assignments maps an invocation id (invkID) to its pending assignment.
	assignments = make(map[string]assign)
)
// assign is a pending assignment waiting for its result to be delivered.
type assign struct {
	id     string           // ID of the assignment
	holder chan interface{} // channel to pass result
}
|
package gpsd
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"io"
"net"
"sync"
"time"
)
// NMEAMode enumerates the GPS fix modes reported by the daemon.
type NMEAMode int

const (
	ModeUnknown NMEAMode = iota // mode information not yet available
	ModeNoFix                   // no position fix
	Mode2D                      // two-dimensional fix
	Mode3D                      // three-dimensional fix
)

// ErrUnsupportedProtocolVersion is returned by Dial when the daemon speaks
// a protocol older than major version 3.
var ErrUnsupportedProtocolVersion = errors.New("unsupported protocol version")
// Positioner implementations provide geographic positioning data.
//
// This is particularly useful for testing if an object returned by Next
// can be used to determine the device position.
type Positioner interface {
	Position() Position
	HasFix() bool
}

// Position holds geographic positioning data.
type Position struct {
	Lat, Lon float64   // Latitude/longitude in degrees. +/- signifies north/south.
	Alt      float64   // Altitude in meters.
	Track    float64   // Course over ground, degrees from true north.
	Speed    float64   // Speed over ground, meters per second.
	Time     time.Time // Time as reported by the device.
}
// Conn represents a socket connection to a GPSd daemon.
type Conn struct {
	Version Version // version object received from the daemon on connect

	mu           sync.Mutex    // serializes Watch and Next
	tcpConn      net.Conn
	rd           *bufio.Reader // buffered reader over tcpConn
	watchEnabled bool          // whether watcher mode is currently enabled
	closed       bool          // set by Close; makes Watch a no-op
	// NOTE(review): closed is written by Close without holding mu while
	// Watch reads it under mu — confirm the intended locking discipline.
}
// Dial establishes a socket connection to the GPSd daemon at addr
// (host:port), reads the version object the daemon sends on connect, and
// verifies that it speaks protocol major version 3 or newer.
// NOTE(review): the JSON decode error is discarded and replaced by a
// generic message, which hides the underlying cause.
func Dial(addr string) (*Conn, error) {
	tcpConn, err := net.DialTimeout("tcp", addr, 30*time.Second)
	if err != nil {
		return nil, err
	}
	c := &Conn{
		tcpConn: tcpConn,
		rd:      bufio.NewReader(tcpConn),
	}
	// The first object from the daemon is expected to be its version banner.
	err = json.NewDecoder(c.rd).Decode(&c.Version)
	if err != nil || c.Version.Release == "" {
		tcpConn.Close()
		return nil, errors.New("unexpected server response")
	}
	if c.Version.ProtoMajor < 3 {
		tcpConn.Close()
		return nil, ErrUnsupportedProtocolVersion
	}
	return c, nil
}
// Watch enables or disables the watcher mode.
//
// In watcher mode, GPS reports are dumped as TPV and SKY objects. These
// objects are available through the Next method.
// It returns the resulting watch state; false is also returned when the
// connection has been closed or a read fails.
func (c *Conn) Watch(enable bool) bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closed {
		return false
	}
	if enable == c.watchEnabled {
		return enable
	}
	// Bound the whole handshake to 30s; the deadline is cleared on return.
	c.tcpConn.SetDeadline(time.Now().Add(30 * time.Second))
	defer c.tcpConn.SetDeadline(time.Time{})
	param, _ := json.Marshal(
		map[string]interface{}{
			"class":  "WATCH",
			"enable": enable,
			"json":   true,
		})
	// NOTE(review): any error from send is ignored here; a send failure
	// surfaces as a read error in the loop below.
	c.send("?WATCH=%s", param)
	for {
		obj, err := c.next()
		if err != nil {
			return false
		}
		// Keep reading until the daemon acknowledges with a WATCH object.
		if watch, ok := obj.(watch); ok {
			c.watchEnabled = watch.Enable
			break
		}
	}
	return c.watchEnabled
}
// Close disables watcher mode and closes the GPSd daemon connection.
// After Close, Watch and Devices report the connection as closed.
func (c *Conn) Close() error {
	c.Watch(false)
	// take the mutex before mutating closed: Watch and Devices read the
	// flag under the same lock, so the previous bare write was racy
	c.mu.Lock()
	defer c.mu.Unlock()
	c.closed = true
	return c.tcpConn.Close()
}
// Next returns the next object sent from the daemon, or an error.
//
// The empty interface returned can be any of the following types:
// * Sky: A Sky object reports a sky view of the GPS satellite positions.
// * TPV: A TPV object is a time-position-velocity report.
//
// Objects of any other class are silently discarded, so this call blocks
// until a TPV/SKY report arrives or a read error occurs.
func (c *Conn) Next() (interface{}, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for {
		obj, err := c.next()
		if err != nil {
			return nil, err
		}
		switch obj.(type) {
		case TPV, Sky:
			return obj, nil
		default:
			// Ignore other objects for now.
		}
	}
}
// next reads one newline-terminated JSON object from the daemon and
// decodes it into its concrete report type via parseJSONObject.
// Callers must hold c.mu.
func (c *Conn) next() (interface{}, error) {
	line, err := c.rd.ReadBytes('\n')
	if err != nil {
		return nil, err
	}
	return parseJSONObject(line)
}
var (
	// ErrTimeout is returned by NextPosTimeout when no fix arrives in time.
	ErrTimeout = errors.New("timeout")
	// ErrWatchModeEnabled is returned by Devices while watcher mode is active.
	ErrWatchModeEnabled = errors.New("operation not available while in watch mode")
)
// NextPos returns the next reported position.
//
// It blocks indefinitely; use NextPosTimeout to bound the wait.
func (c *Conn) NextPos() (Position, error) {
	return c.NextPosTimeout(0)
}
// NextPosTimeout returns the next reported position, or an empty position on timeout.
//
// A timeout of 0 means wait forever. On timeout the error is ErrTimeout.
func (c *Conn) NextPosTimeout(timeout time.Duration) (Position, error) {
	var deadline time.Time
	if timeout > 0 {
		deadline = time.Now().Add(timeout)
		// the socket deadline makes a blocked read fail with a net timeout
		c.tcpConn.SetDeadline(deadline)
		defer c.tcpConn.SetDeadline(time.Time{})
	}
	for {
		obj, err := c.Next()
		// map a socket-deadline expiry onto the package's ErrTimeout
		var netErr net.Error
		if ok := errors.As(err, &netErr); ok && netErr.Timeout() {
			return Position{}, ErrTimeout
		} else if err != nil {
			return Position{}, err
		}
		// only objects that carry a usable fix qualify as a position
		if pos, ok := obj.(Positioner); ok && pos.HasFix() {
			return pos.Position(), nil
		}
		// reports kept arriving but none had a fix before the deadline
		if !deadline.IsZero() && time.Now().After(deadline) {
			return Position{}, ErrTimeout
		}
	}
}
// Devices returns a list of all devices GPSd is aware of.
//
// ErrWatchModeEnabled will be returned if the connection is in watch mode.
// A nil-slice will be returned if the connection has been closed.
func (c *Conn) Devices() ([]Device, error) {
	// take the lock before inspecting closed/watchEnabled: both flags are
	// mutated by Watch (and Close) under the same mutex, so the previous
	// unlocked reads were racy
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closed {
		return nil, nil
	}
	if c.watchEnabled {
		return nil, ErrWatchModeEnabled
	}
	// surface a failed command write instead of silently waiting for a
	// response that will never come
	if err := c.send("?DEVICES;"); err != nil {
		return nil, err
	}
	for {
		obj, err := c.next()
		if err != nil {
			return nil, errUnexpected(err)
		}
		// skip interleaved reports until the DEVICES response arrives
		if devs, ok := obj.([]Device); ok {
			return devs, nil
		}
	}
}
// send writes a formatted command to the daemon socket. A write that
// fails with EOF is reported as io.ErrUnexpectedEOF via errUnexpected.
func (c *Conn) send(s string, params ...interface{}) error {
	_, err := fmt.Fprintf(c.tcpConn, s, params...)
	return errUnexpected(err)
}
func errUnexpected(err error) error {
if errors.Is(err, io.EOF) {
err = io.ErrUnexpectedEOF
}
return err
}
|
package spellbook
import (
"cloud.google.com/go/datastore"
"context"
"decodica.com/flamel"
"errors"
"fmt"
"github.com/jinzhu/gorm"
"google.golang.org/appengine/log"
"net/http"
"strconv"
"strings"
)
// ReadHandler retrieves a single resource identified by key.
type ReadHandler interface {
	HandleGet(context context.Context, key string, out *flamel.ResponseOutput) flamel.HttpResponse
}
// WriteHandler creates, updates and deletes resources.
type WriteHandler interface {
	HandlePost(context context.Context, out *flamel.ResponseOutput) flamel.HttpResponse
	HandlePut(context context.Context, key string, out *flamel.ResponseOutput) flamel.HttpResponse
	HandleDelete(context context.Context, key string, out *flamel.ResponseOutput) flamel.HttpResponse
}
// ListHandler serves paged resource listings and per-property value listings.
type ListHandler interface {
	HandleList(context context.Context, out *flamel.ResponseOutput) flamel.HttpResponse
	HandlePropertyValues(context context.Context, out *flamel.ResponseOutput, property string) flamel.HttpResponse
}
// RestHandler is the full CRUD-plus-list contract for a REST resource.
type RestHandler interface {
	ReadHandler
	WriteHandler
	ListHandler
}
// BaseRestHandler is a default RestHandler implementation that delegates
// all persistence concerns to its Manager.
type BaseRestHandler struct {
	Manager Manager
}
// Builds the paging options, ordering and standard inputs of a given request.
//
// Defaults: 20 results per page, page 0. The "page" and "results" inputs
// must be integers (positive values are applied); "order" is an optional
// field name, prefixed with '-' for descending; "filter" is an optional
// '^'-separated list of field=value pairs.
func (handler BaseRestHandler) buildOptions(ctx context.Context, out *flamel.ResponseOutput, opts *ListOptions) (*ListOptions, error) {
	// build paging
	opts.Size = 20
	opts.Page = 0
	ins := flamel.InputsFromContext(ctx)
	if pin, ok := ins["page"]; ok {
		if num, err := strconv.Atoi(pin.Value()); err == nil {
			if num > 0 {
				opts.Page = num
			}
		} else {
			msg := fmt.Sprintf("invalid page value : %v. page must be an integer", pin)
			return nil, errors.New(msg)
		}
	}
	if sin, ok := ins["results"]; ok {
		if num, err := strconv.Atoi(sin.Value()); err == nil {
			if num > 0 {
				opts.Size = num
			}
		} else {
			msg := fmt.Sprintf("invalid result size value : %v. results must be an integer", sin)
			return nil, errors.New(msg)
		}
	}
	// order is not mandatory
	if oin, ok := ins["order"]; ok {
		oins := oin.Value()
		// guard against an empty order value: oins[:1] would panic
		if oins != "" {
			// descending has the format "-fieldname"
			opts.Descending = oins[:1] == "-"
			if opts.Descending {
				opts.Order = oins[1:]
			} else {
				opts.Order = oins
			}
		}
	}
	// filter is not mandatory
	if fin, ok := ins["filter"]; ok {
		finv := fin.Value()
		filters := strings.Split(finv, "^")
		opts.Filters = make([]Filter, len(filters))
		for i, filter := range filters {
			// split on the first '=' only, so filter values may contain '='
			farray := strings.SplitN(filter, "=", 2)
			if len(farray) > 1 {
				opts.Filters[i] = Filter{farray[0], farray[1]}
			}
		}
	}
	return opts, nil
}
// Called on GET requests: loads the resource identified by key through
// the Manager and renders it as JSON; Manager errors are mapped to an
// HTTP status via ErrorToStatus.
func (handler BaseRestHandler) HandleGet(ctx context.Context, key string, out *flamel.ResponseOutput) flamel.HttpResponse {
	renderer := &flamel.JSONRenderer{}
	out.Renderer = renderer
	item, err := handler.Manager.FromId(ctx, key)
	if err != nil {
		return handler.ErrorToStatus(ctx, err, out)
	}
	renderer.Data = item
	return flamel.HttpResponse{Status: http.StatusOK}
}
// Called on GET requests.
// This handler is called when the available values of one property of a resource are requested
// Returns a list of the values that the requested property can assume
func (handler BaseRestHandler) HandlePropertyValues(ctx context.Context, out *flamel.ResponseOutput, prop string) flamel.HttpResponse {
	opts := &ListOptions{}
	opts.Property = prop
	opts, err := handler.buildOptions(ctx, out, opts)
	if err != nil {
		return flamel.HttpResponse{Status: http.StatusBadRequest}
	}
	results, err := handler.Manager.ListOfProperties(ctx, *opts)
	if err != nil {
		return handler.ErrorToStatus(ctx, err, out)
	}
	// output
	// l > opts.Size presumably means the manager fetched one extra row to
	// signal that more pages exist — TODO confirm against Manager implementations
	l := len(results)
	count := opts.Size
	if l < opts.Size {
		count = l
	}
	renderer := flamel.JSONRenderer{}
	renderer.Data = ListResponse{results[:count], l > opts.Size}
	out.Renderer = &renderer
	return flamel.HttpResponse{Status: http.StatusOK}
}
// Called on GET requests
// This handler is called when a list of resources is requested.
// Returns a paged result
//
// When the negotiated content type is text/csv the full result set is
// rendered as a CSV download; otherwise a JSON page of at most opts.Size
// items is returned, with a more-pages flag.
func (handler BaseRestHandler) HandleList(ctx context.Context, out *flamel.ResponseOutput) flamel.HttpResponse {
	opts := &ListOptions{}
	opts, err := handler.buildOptions(ctx, out, opts)
	if err != nil {
		return flamel.HttpResponse{Status: http.StatusBadRequest}
	}
	results, err := handler.Manager.ListOf(ctx, *opts)
	if err != nil {
		return handler.ErrorToStatus(ctx, err, out)
	}
	// output
	// l > opts.Size signals that more pages exist; only count items are rendered
	l := len(results)
	count := opts.Size
	if l < opts.Size {
		count = l
	}
	var renderer flamel.Renderer
	// retrieve the negotiated method
	ins := flamel.InputsFromContext(ctx)
	accept := ins[flamel.KeyNegotiatedContent].Value()
	if accept == "text/csv" {
		// NOTE(review): the CSV branch exports all fetched results and
		// drops the pagination flag
		r := &flamel.DownloadRenderer{}
		csv, err := Resources(results).ToCSV()
		if err != nil {
			return handler.ErrorToStatus(ctx, err, out)
		}
		r.Data = []byte(csv)
		renderer = r
	} else {
		jrenderer := flamel.JSONRenderer{}
		jrenderer.Data = ListResponse{results[:count], l > opts.Size}
		renderer = &jrenderer
	}
	out.Renderer = renderer
	return flamel.HttpResponse{Status: http.StatusOK}
}
// handles a POST request, ensuring the creation of the resource.
//
// The request body must be JSON: a missing body yields 400, malformed
// JSON yields 400 with an Errors payload, Manager failures are mapped via
// ErrorToStatus, and success returns 201 with the created resource.
func (handler BaseRestHandler) HandlePost(ctx context.Context, out *flamel.ResponseOutput) flamel.HttpResponse {
	renderer := flamel.JSONRenderer{}
	out.Renderer = &renderer
	resource, err := handler.Manager.NewResource(ctx)
	if err != nil {
		return handler.ErrorToStatus(ctx, err, out)
	}
	errs := Errors{}
	// get the content data
	ins := flamel.InputsFromContext(ctx)
	j, ok := ins[flamel.KeyRequestJSON]
	if !ok {
		return flamel.HttpResponse{Status: http.StatusBadRequest}
	}
	// populate the new resource from the JSON body before persisting it
	err = resource.FromRepresentation(RepresentationTypeJSON, []byte(j.Value()))
	if err != nil {
		msg := fmt.Sprintf("bad json: %s", err.Error())
		errs.AddError("", errors.New(msg))
		log.Errorf(ctx, msg)
		renderer.Data = errs
		return flamel.HttpResponse{Status: http.StatusBadRequest}
	}
	if err = handler.Manager.Create(ctx, resource, []byte(j.Value())); err != nil {
		return handler.ErrorToStatus(ctx, err, out)
	}
	renderer.Data = resource
	return flamel.HttpResponse{Status: http.StatusCreated}
}
// Handles PUT requests, ensuring the update of the requested resource.
// A missing JSON body yields 400; load/update failures are mapped through
// ErrorToStatus; success returns 200 with the updated resource as JSON.
func (handler BaseRestHandler) HandlePut(ctx context.Context, key string, out *flamel.ResponseOutput) flamel.HttpResponse {
	renderer := &flamel.JSONRenderer{}
	out.Renderer = renderer
	inputs := flamel.InputsFromContext(ctx)
	body, hasBody := inputs[flamel.KeyRequestJSON]
	if !hasBody {
		return flamel.HttpResponse{Status: http.StatusBadRequest}
	}
	item, err := handler.Manager.FromId(ctx, key)
	if err != nil {
		return handler.ErrorToStatus(ctx, err, out)
	}
	if err = handler.Manager.Update(ctx, item, []byte(body.Value())); err != nil {
		return handler.ErrorToStatus(ctx, err, out)
	}
	renderer.Data = item
	return flamel.HttpResponse{Status: http.StatusOK}
}
// Handles DELETE requests over a Resource type: the resource is loaded
// first (so a missing key maps to the proper status) and then removed.
func (handler BaseRestHandler) HandleDelete(ctx context.Context, key string, out *flamel.ResponseOutput) flamel.HttpResponse {
	renderer := &flamel.JSONRenderer{}
	out.Renderer = renderer
	item, err := handler.Manager.FromId(ctx, key)
	if err != nil {
		return handler.ErrorToStatus(ctx, err, out)
	}
	if err := handler.Manager.Delete(ctx, item); err != nil {
		return handler.ErrorToStatus(ctx, err, out)
	}
	return flamel.HttpResponse{Status: http.StatusOK}
}
// Converts an error to its equivalent HTTP representation.
//
// UnsupportedError -> 405; FieldError -> 400 with the offending field;
// PermissionError -> 403; datastore/gorm record-not-found -> 404;
// everything else -> 500. The error is always logged.
func (handler BaseRestHandler) ErrorToStatus(ctx context.Context, err error, out *flamel.ResponseOutput) flamel.HttpResponse {
	log.Errorf(ctx, "%s", err.Error())
	// bind the concrete value in the type switch instead of re-asserting
	// err inside each case
	switch e := err.(type) {
	case UnsupportedError:
		return flamel.HttpResponse{Status: http.StatusMethodNotAllowed}
	case FieldError:
		renderer := flamel.JSONRenderer{}
		renderer.Data = struct {
			Field string
			Error string
		}{
			e.field,
			e.error.Error(),
		}
		out.Renderer = &renderer
		return flamel.HttpResponse{Status: http.StatusBadRequest}
	case PermissionError:
		renderer := flamel.JSONRenderer{}
		renderer.Data = struct {
			Error string
		}{
			e.Error(),
		}
		out.Renderer = &renderer
		return flamel.HttpResponse{Status: http.StatusForbidden}
	default:
		// both persistence layers report "record not found" with their own
		// sentinel; either one maps to 404
		if err == datastore.ErrNoSuchEntity || err == gorm.ErrRecordNotFound {
			return flamel.HttpResponse{Status: http.StatusNotFound}
		}
		return flamel.HttpResponse{Status: http.StatusInternalServerError}
	}
}
|
package rbt
import (
"fmt"
)
// node is a single red-black tree node; children[left] and
// children[right] are addressed through the offset type.
type node struct {
	color color
	key interface{}
	value interface{}
	children [2]*node
}
// fakeBlackNode is the sentinel returned by splice when a black leaf is
// removed: it carries the "double black" debt up to the parent fix-up.
var fakeBlackNode = &node{color: black}
// newNode returns a fresh node for key/value; its color is the zero color
// value (presumably red, as insert runs red-red fix-up on it — TODO confirm).
func newNode(key, value interface{}) *node {
	return &node{key: key, value: value}
}
// insert adds key/value to the subtree rooted at n and returns the new
// subtree root after red-red violation fix-up.
func (n *node) insert(key, value interface{}, comparer Comparer) *node {
	// reached a nil leaf: place the new node here
	if n == nil {
		return newNode(key, value)
	}
	var offset offset
	switch comparer(n.key, key) {
	case IsGreater:
		offset = left
	case IsLesser:
		offset = right
	case AreEqual:
		// either replace the value (set/map semantics) or append to a
		// list (multiset/multimap semantics); currently the existing
		// node is kept unchanged
		return n
	}
	n.children[offset] = n.children[offset].insert(key, value, comparer)
	return n.fixDoubleRedViolation(offset)
}
// find looks key up in the subtree rooted at n, returning the stored
// value and true, or (nil, false) when the key is absent.
func (n *node) find(key interface{}, comparer Comparer) (interface{}, bool) {
	// iterative descent: follow the comparer's direction until the key is
	// located or a nil leaf proves it absent
	current := n
	for current != nil {
		var next offset
		switch comparer(current.key, key) {
		case IsGreater:
			next = left
		case IsLesser:
			next = right
		case AreEqual:
			return current.value, true
		}
		current = current.children[next]
	}
	return nil, false
}
// delete removes key from the subtree rooted at n and returns the new
// subtree root after double-black violation fix-up.
func (n *node) delete(key interface{}, comparer Comparer) *node {
	// key not present in this subtree: nothing to remove
	if n == nil {
		return nil
	}
	var offset offset
	switch comparer(n.key, key) {
	case IsGreater:
		offset = left
	case IsLesser:
		offset = right
	case AreEqual:
		// n itself holds the key: splice it out of the tree
		return n.splice(comparer)
	}
	n.children[offset] = n.children[offset].delete(key, comparer)
	return n.fixDoubleBlackViolation(offset)
}
// String renders the subtree as a parenthesised in-order listing of
// "key:color" entries; a nil subtree renders as the empty string.
func (n *node) String() string {
	if n == nil {
		return ""
	}
	inner := n.children[left].String() +
		fmt.Sprintf("%v:%s", n.key, n.color) +
		n.children[right].String()
	return "(" + inner + ")"
}
// splice removes node n itself (the node holding the deleted key).
// A red leaf vanishes outright; a black leaf is replaced by the
// fakeBlackNode sentinel so the lost black height propagates upward; a
// node with one child is replaced by that child recolored black; a node
// with two children swaps in its in-order predecessor and deletes that
// predecessor from the left subtree.
func (n *node) splice(comparer Comparer) *node {
	if n.children[left] == nil && n.children[right] == nil {
		if n.red() {
			return nil
		}
		return fakeBlackNode
	}
	if n.children[right] == nil {
		n.children[left].color = black
		return n.children[left]
	}
	if n.children[left] == nil {
		n.children[right].color = black
		return n.children[right]
	}
	// two children: copy the in-order predecessor into n, then remove it
	tempNode := n.children[left].findMax()
	n.key = tempNode.key
	n.value = tempNode.value
	n.children[left] = n.children[left].delete(tempNode.key, comparer)
	return n.fixDoubleBlackViolation(left)
}
// findMax returns the right-most (maximum-key) node of the subtree
// rooted at n. The receiver must be non-nil.
func (n *node) findMax() *node {
	current := n
	for current.children[right] != nil {
		current = current.children[right]
	}
	return current
}
// fixDoubleRedViolation restores the "no red node has a red child"
// invariant after an insertion into children[offset].
func (n *node) fixDoubleRedViolation(offset offset) *node {
	return n.
		case1(offset).
		case2(offset).
		case3(offset)
}
// fixDoubleBlackViolation restores black-height balance after a deletion
// from children[offset].
func (n *node) fixDoubleBlackViolation(offset offset) *node {
	return n.
		caseA(offset).
		caseB(offset).
		caseC(offset).
		caseD(offset)
}
// red reports whether n is a non-nil red node (nil counts as black).
func (n *node) red() bool {
	return n != nil && n.color == red
}
// black reports whether n is a non-nil black node.
func (n *node) black() bool {
	return n != nil && n.color == black
}
// blackToken reports whether children[offset] carries a double-black
// debt, either as the fakeBlackNode sentinel or an explicit doubleBlack color.
func (n *node) blackToken(offset offset) bool {
	return n.children[offset] != nil &&
		(n.children[offset] == fakeBlackNode ||
			n.children[offset].color == doubleBlack)
}
// rotate rotates n toward offset and returns the new subtree root (the
// former child on the opposite side); n becomes that child's offset-side child.
func (n *node) rotate(offset offset) *node {
	root := n.children[offset.other()]
	n.children[offset.other()] = root.children[offset]
	root.children[offset] = n
	return root
}
// case1: both children red — push the redness up by recoloring the
// parent red and both children black.
func (n *node) case1(offset offset) *node {
	if n.children[offset].red() && n.children[offset.other()].red() {
		// fmt.Println("Case 1 (Both children are red): recolour")
		// fmt.Println("- src", n)
		n.color = red
		n.children[offset].color = black
		n.children[offset.other()].color = black
		// fmt.Println("- dst", n)
	}
	return n
}
// case2: red child with an inner (opposite-side) red grandchild — rotate
// the child so the zig-zag becomes a straight line for case3.
func (n *node) case2(offset offset) *node {
	if n.children[offset].red() &&
		n.children[offset].children[offset.other()].red() {
		// fmt.Println("Case 2 (The parent has an other child and are red): rotate", offset, "around child")
		// fmt.Println("- src", n)
		n.children[offset] = n.children[offset].rotate(offset)
		// fmt.Println("- dst", n)
	}
	return n
}
// case3: red child with a same-side red grandchild — rotate n the other
// way and swap colors with the new subtree root.
func (n *node) case3(offset offset) *node {
	if n.children[offset].red() &&
		n.children[offset].children[offset].red() {
		// fmt.Println("Case 3 (The parent has a same child and are red): rotate", offset.other(), "around parent")
		// fmt.Println("- src", n)
		n = n.rotate(offset.other())
		// fmt.Println("- dst", n)
		// fmt.Println("and recolour", n)
		// fmt.Println("- src", n)
		n.color = black
		n.children[offset.other()].color = red
		// fmt.Println("- dst", n)
	}
	return n
}
// caseA: the sibling is black with an outer red child — rotate toward the
// deficient side, restore the root color, and absorb the double-black
// token (replacing the fakeBlackNode sentinel with nil).
func (n *node) caseA(offset offset) *node {
	if n.blackToken(offset) &&
		n.children[offset.other()].black() &&
		n.children[offset.other()].children[offset.other()].red() {
		// fmt.Println("Case A (The sibling is black, but hi has other red child): rotate", offset, "around root")
		// fmt.Println("- src", n)
		rootColor := n.color
		n = n.rotate(offset)
		// fmt.Println("- dst", n)
		// fmt.Println("and recolour")
		// fmt.Println("- src", n)
		n.color = rootColor
		n.children[offset].color, n.children[offset.other()].color = black, black
		if n.children[offset].children[offset] == fakeBlackNode {
			n.children[offset].children[offset] = nil
		} else {
			n.children[offset].children[offset].color = black
		}
		// fmt.Println("- dst", n)
	}
	return n
}
// caseB: the sibling is black with an inner red child — rotate the
// sibling to produce an outer red child, then fall through to caseA.
func (n *node) caseB(offset offset) *node {
	if n.blackToken(offset) &&
		n.children[offset.other()].black() &&
		n.children[offset.other()].children[offset].red() {
		// fmt.Println("Case B (The sibling is black, but he has same red children): rotate", offset, "around sibling")
		// fmt.Println("- src", n)
		n.children[offset.other()] = n.children[offset.other()].rotate(offset)
		// fmt.Println("- dst", n)
		// fmt.Println("and recolour", n)
		// fmt.Println("- src", n)
		n.children[offset.other()].color = black
		n.children[offset.other()].children[offset.other()].color = red
		// fmt.Println("- dst", n)
		return n.caseA(offset)
	}
	return n
}
// caseC: the sibling is black (no red child handled above) — recolor,
// pushing the black debt up to n via color.increment().
func (n *node) caseC(offset offset) *node {
	if n.blackToken(offset) &&
		n.children[offset.other()].black() {
		// fmt.Println("Case C (The sibling is black): recolour")
		// fmt.Println("- src", n)
		n.color.increment()
		n.children[offset.other()].color = red
		if n.children[offset] == fakeBlackNode {
			n.children[offset] = nil
		} else {
			n.children[offset].color = black
		}
		// fmt.Println("- dst", n)
	}
	return n
}
// caseD: the sibling is red — rotate to give the deficient side a black
// sibling, then recurse into the moved subtree.
func (n *node) caseD(offset offset) *node {
	if n.blackToken(offset) &&
		n.children[offset.other()].red() {
		// fmt.Println("Case D (The sibling is red): rotate", offset, "around root")
		// fmt.Println("- src", n)
		n = n.rotate(offset)
		// fmt.Println("- dst", n)
		// fmt.Println("and recolour")
		// fmt.Println("- src", n)
		n.color = black
		n.children[offset].color = red
		// fmt.Println("- dst", n)
		n.children[offset] = n.children[offset].fixDoubleBlackViolation(offset)
	}
	return n
}
// property1 - every node has a color that is either red or black.
// Panics on the first violating node found during an in-order walk.
func (n *node) property1() {
	if n == nil {
		return
	}
	n.children[left].property1()
	if !(n.color == red || n.color == black) {
		panic("Invalid property 1")
	}
	n.children[right].property1()
}
// property2 - Every leaf is black.
//
// nil leaves are black by definition (red() is false for a nil receiver),
// so the check below can never fire; it is kept for symmetry with the
// other property checks. Previously the recursion descended via
// property1, which re-validated the wrong property — it now recurses
// through property2 itself.
func (n *node) property2() {
	if n == nil {
		if n.red() {
			panic("Invalid property 2")
		}
		return
	}
	n.children[left].property2()
	n.children[right].property2()
}
// property3 - A red node has no red children.
// (The previous comment repeated property2's text, and the recursion
// descended via property1, leaving deeper red-red violations unchecked —
// it now recurses through property3 itself.)
func (n *node) property3() {
	if n == nil {
		return
	}
	n.children[left].property3()
	n.children[right].property3()
	if n.red() && (n.children[left].red() || n.children[right].red()) {
		panic("Invalid property 3")
	}
}
// property4 - Every path from a given node down to any descendant leaf contains the same number of black nodes.
// blackHeight returns 0 exactly when the subtree's black heights disagree.
func (n *node) property4() {
	if n.blackHeight() == 0 {
		panic("Invalid property 4")
	}
}
// property5 - The root of the tree is black
func (n *node) property5() {
	if n.red() {
		panic("Invalid property 5")
	}
}
// blackHeight returns the number of black nodes on any root-to-leaf path
// of the subtree (nil leaves count as one black node), or 0 when the left
// and right subtrees disagree — i.e. property 4 is violated.
func (n *node) blackHeight() int {
	if n == nil {
		return 1
	}
	lh := n.children[left].blackHeight()
	if lh == 0 {
		return 0
	}
	rh := n.children[right].blackHeight()
	if rh == 0 {
		return 0
	}
	if lh != rh {
		return 0
	}
	if n.black() {
		return lh + 1
	}
	return lh
}
|
package parser
import (
"strings"
)
// TupleMap pairs a single word with the URL of the page it came from.
type TupleMap struct{
	Word string
	Url string
}
// ParserContent consumes page content, emitting word/URL tuples; Close
// releases whatever output resource the implementation holds.
type ParserContent interface {
	Parse(content string,url string) error
	Close()
}
// whileSpaceSeparatorParser splits content on spaces and streams every
// word, paired with its source URL, into the producer channel.
// NOTE(review): the name is presumably a typo for "whiteSpaceSeparator".
type whileSpaceSeparatorParser struct{
	producer chan TupleMap
}
// we could create a separate parser based on anchor tags
// NewWhileSpaceSeparatorParser returns a parser that publishes parsed
// words to the supplied channel.
func NewWhileSpaceSeparatorParser(producer chan TupleMap) *whileSpaceSeparatorParser{
	return &whileSpaceSeparatorParser{
		producer:producer,
	}
}
// Parse splits content on whitespace and sends each word, paired with its
// source url, to the producer channel; it blocks until every word has
// been consumed. It always returns nil.
func (p *whileSpaceSeparatorParser) Parse(content string, url string) error {
	// strings.Fields splits on any run of whitespace (spaces, tabs,
	// newlines) and never yields empty strings — unlike the previous
	// strings.Split(content, " "), which emitted "" for consecutive
	// spaces and treated tabs/newlines as part of a word.
	for _, word := range strings.Fields(content) {
		p.producer <- TupleMap{word, url}
	}
	return nil
}
// Close closes the producer channel, signalling consumers that no more
// words will arrive; call exactly once, after all Parse calls finish.
func(p *whileSpaceSeparatorParser) Close(){
	close(p.producer)
}
|
/*
The challenge is to assign people to tasks randomly~.
From stdin you get 2 lines.
Line one is a comma-separated list of names.
Line 2 is a comma-separated list of jobs.
The output required is one line per task person combo.
The line should be formatted as Name:task.
If there are not enough people for jobs or vice versa then they will not be output.
~ Randomly is defined as each run with the same input has a reasonably different output.
Example Input:
Bob,John,Susan,Josie,Kim,Tom
Bins,Floor,Windows
Example Output:
Bob:Windows
Susan:Bins
Kim:Floor
*/
package main
import (
"fmt"
"math/rand"
"time"
)
// main seeds the global RNG (so each run differs, as the challenge
// requires) and assigns the sample people to the sample tasks.
func main() {
	rand.Seed(time.Now().UnixNano())
	assign(
		[]string{"Bob", "John", "Susan", "Josie", "Kim", "Tom"},
		[]string{"Bins", "Floor", "Windows"},
	)
}
// assign randomly pairs names with jobs, printing one "name: job" line
// per pairing, until either list runs out; leftovers are not printed.
// The input slices are consumed in place via swap-with-last removal.
func assign(names, jobs []string) {
	for len(names) > 0 && len(jobs) > 0 {
		ni := rand.Intn(len(names))
		ji := rand.Intn(len(jobs))
		fmt.Printf("%s: %s\n", names[ni], jobs[ji])
		// remove the chosen name by swapping in the last element
		last := len(names) - 1
		names[ni] = names[last]
		names = names[:last]
		// and likewise for the chosen job
		last = len(jobs) - 1
		jobs[ji] = jobs[last]
		jobs = jobs[:last]
	}
}
|
package main
import (
"fmt"
. "github.com/little-go/learn-go/basic"
)
// main exercises the dot-imported basic package: expression evaluation,
// binary conversion, file printing, higher-order functions, and
// multi-value returns.
func main() {
	fmt.Println(Eval(1, 2, "-"))
	Aa()
	fmt.Println(Convert2Bin(5)) // 101
	fmt.Println(Convert2Bin(13)) // 1101
	fmt.Println(Convert2Bin(2315)) // 100100001011
	fmt.Println(Convert2Bin(0)) // 0
	PrintFile("str.go")
	//fmt.Println(apply(div, 5, 6))
	fmt.Println(Apply(func(a, b int) int {
		return a + b
	}, 2, 3))
	q, r := Div(1, 2)
	fmt.Println(q, r)
	// the blank identifier acts as a placeholder for an unused result
	c, _ := Div(2, 3)
	fmt.Println(c)
}
|
////////////////////////////////////////////////////////////////////////////////
// //
// Copyright 2021 Broadcom. The term Broadcom refers to Broadcom Inc. and/or //
// its subsidiaries. //
// //
// Licensed under the Apache License, Version 2.0 (the "License"); //
// you may not use this file except in compliance with the License. //
// You may obtain a copy of the License at //
// //
// http://www.apache.org/licenses/LICENSE-2.0 //
// //
// Unless required by applicable law or agreed to in writing, software //
// distributed under the License is distributed on an "AS IS" BASIS, //
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. //
// See the License for the specific language governing permissions and //
// limitations under the License. //
// //
////////////////////////////////////////////////////////////////////////////////
package apis
import (
"reflect"
"sort"
"testing"
"github.com/Azure/sonic-mgmt-common/translib/db"
)
// TestDiff drives EntryCompare through entry create/delete, field
// add/update/delete, and the two special cases visible in the expectations:
// "@"-suffixed (array) fields are reported without the suffix, and the
// "NULL":"NULL" placeholder field is ignored in comparisons.
func TestDiff(t *testing.T) {
	var testCases = []diffTestCase{
		{
			Name: "empty",
			Old: map[string]string{},
			New: map[string]string{},
			Diff: EntryDiff{},
		}, {
			Name: "equal",
			Old: map[string]string{"one": "1111", "two": "2222"},
			New: map[string]string{"one": "1111", "two": "2222"},
			Diff: EntryDiff{},
		}, {
			Name: "equal_arr",
			Old: map[string]string{"one": "1111", "two": "2222", "arr@": "1,2,3"},
			New: map[string]string{"one": "1111", "two": "2222", "arr@": "1,2,3"},
			Diff: EntryDiff{},
		}, {
			Name: "equal_null",
			Old: map[string]string{"NULL": "NULL"},
			New: map[string]string{"NULL": "NULL"},
			Diff: EntryDiff{},
		}, {
			Name: "create_entry",
			Old: map[string]string{},
			New: map[string]string{"one": "1111", "two": "2222", "arr@": "1,2,3"},
			Diff: EntryDiff{EntryCreated: true},
		}, {
			Name: "create_null_entry",
			Old: map[string]string{},
			New: map[string]string{"NULL": "NULL"},
			Diff: EntryDiff{EntryCreated: true},
		}, {
			Name: "delete_entry",
			Old: map[string]string{"one": "1111", "two": "2222", "arr@": "1,2,3"},
			New: map[string]string{},
			Diff: EntryDiff{EntryDeleted: true},
		}, {
			Name: "add_field",
			Old: map[string]string{"one": "1111"},
			New: map[string]string{"one": "1111", "two": "2222"},
			Diff: EntryDiff{CreatedFields: []string{"two"}},
		}, {
			Name: "add_fields",
			Old: map[string]string{"one": "1111"},
			New: map[string]string{"one": "1111", "two": "2222", "arr@": "1,2,3", "foo": "bar"},
			Diff: EntryDiff{CreatedFields: []string{"two", "arr", "foo"}},
		}, {
			Name: "add_null",
			Old: map[string]string{"one": "1111"},
			New: map[string]string{"one": "1111", "NULL": "NULL"},
			Diff: EntryDiff{},
		}, {
			Name: "del_fields",
			Old: map[string]string{"one": "1111", "two": "2222", "foo": "bar"},
			New: map[string]string{"one": "1111"},
			Diff: EntryDiff{DeletedFields: []string{"two", "foo"}},
		}, {
			Name: "del_arr_fields",
			Old: map[string]string{"one": "1111", "arr@": "1,2", "foo@": "bar"},
			New: map[string]string{"foo@": "bar"},
			Diff: EntryDiff{DeletedFields: []string{"one", "arr"}},
		}, {
			Name: "del_null",
			Old: map[string]string{"one": "1111", "NULL": "NULL"},
			New: map[string]string{"one": "1111"},
			Diff: EntryDiff{},
		}, {
			Name: "mod_fields",
			Old: map[string]string{"one": "1111", "two": "2222"},
			New: map[string]string{"one": "0001", "two": "2222"},
			Diff: EntryDiff{UpdatedFields: []string{"one"}},
		}, {
			Name: "mod_arr_fields",
			Old: map[string]string{"one": "1111", "foo@": "2222"},
			New: map[string]string{"one": "0001", "foo@": "1,2,3"},
			Diff: EntryDiff{UpdatedFields: []string{"one", "foo"}},
		}, {
			Name: "cru_fields",
			Old: map[string]string{"one": "1111", "foo@": "2222", "NULL": "NULL"},
			New: map[string]string{"one": "0001", "two": "2222"},
			Diff: EntryDiff{
				CreatedFields: []string{"two"},
				UpdatedFields: []string{"one"},
				DeletedFields: []string{"foo"},
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.Name, func(tt *testing.T) { tc.run(tt) })
	}
}
// diffTestCase describes one EntryCompare scenario: the Old and New field
// maps and the EntryDiff expected from comparing them.
type diffTestCase struct {
	Name string // sub-test name
	Old map[string]string
	New map[string]string
	Diff EntryDiff // expected comparison result
}
// run executes a single diff test case: it feeds Old and New through
// EntryCompare and checks every field of the resulting EntryDiff against
// the expectation, dumping both diffs on mismatch.
func (tc *diffTestCase) run(t *testing.T) {
	t.Helper()
	// avoid shadowing the builtin `new` (the original bound it as a local)
	oldVal, newVal, exp := tc.Old, tc.New, tc.Diff
	d := EntryCompare(db.Value{Field: oldVal}, db.Value{Field: newVal})
	ok := reflect.DeepEqual(d.OldValue.Field, oldVal) &&
		reflect.DeepEqual(d.NewValue.Field, newVal) &&
		d.EntryCreated == exp.EntryCreated &&
		d.EntryDeleted == exp.EntryDeleted &&
		arrayEquals(d.CreatedFields, exp.CreatedFields) &&
		arrayEquals(d.UpdatedFields, exp.UpdatedFields) &&
		arrayEquals(d.DeletedFields, exp.DeletedFields)
	if !ok {
		t.Errorf("Old values = %v", oldVal)
		t.Errorf("New values = %v", newVal)
		t.Errorf("Expect diff = %v", exp.String())
		t.Errorf("Actual diff = %v", d.String())
	}
}
// arrayEquals reports whether a1 and a2 contain the same strings,
// ignoring order. It sorts copies, so — unlike the previous version — the
// callers' slices are no longer reordered as a side effect. With the
// added length guard, a nil slice and an empty non-nil slice compare
// equal (both have no elements).
func arrayEquals(a1, a2 []string) bool {
	if len(a1) != len(a2) {
		return false
	}
	s1 := append([]string(nil), a1...)
	s2 := append([]string(nil), a2...)
	sort.Strings(s1)
	sort.Strings(s2)
	return reflect.DeepEqual(s1, s2)
}
|
package main
import(
"github.com/griddb/go_client"
"fmt"
"strconv"
"time"
"os"
)
// main connects to a GridDB cluster (args: host port cluster_name user
// password), fetches rows of the "point01" time series where active is
// false and voltage exceeds 50, and for each row prints the average
// voltage over a window from 10 minutes before to 10 minutes after the
// row's timestamp.
// NOTE(review): every error below is only printed and execution continues
// with the possibly-invalid handle; consider aborting instead.
func main() {
	factory := griddb_go.StoreFactoryGetInstance()
	update := false // both queries are read-only fetches
	// Get GridStore object
	port, err := strconv.Atoi(os.Args[2])
	if err != nil {
		fmt.Println(err)
		os.Exit(2)
	}
	gridstore := factory.GetStore(map[string]interface{} {
		"host" :os.Args[1],
		"port" :port,
		"cluster_name":os.Args[3],
		"username" :os.Args[4],
		"password" :os.Args[5],
	})
	// Get TimeSeries
	// Reuse TimeSeries and data from sample 2
	ts, err1 := gridstore.GetContainer("point01")
	if(err1 != nil) {
		fmt.Println("get container failed")
	}
	// Create normal query to get all row where active = FAlSE and voltage > 50
	query, err := ts.Query("select * from point01 where not active and voltage > 50")
	if(err != nil) {
		fmt.Println("create query failed")
	}
	rs, err := query.Fetch(update)
	if(err != nil) {
		fmt.Println("fetch failed")
	}
	// Get result
	for rs.HasNext(){
		rrow, err := rs.NextRow()
		if(err != nil) {
			fmt.Println("NextRow from rs failed")
		}
		// first row column is the timestamp; convert to epoch milliseconds
		mTime := rrow[0].(time.Time)
		timestamp := mTime.UnixNano() / 1000000
		// Perform aggregation query to get average value
		// during 10 minutes later and 10 minutes earlier from this point
		aggCommand := "select AVG(voltage) from point01 where timestamp > TIMESTAMPADD(MINUTE, TO_TIMESTAMP_MS(" + strconv.FormatInt(timestamp, 10) + "), -10) AND timestamp < TIMESTAMPADD(MINUTE, TO_TIMESTAMP_MS(" + strconv.FormatInt(timestamp, 10) + "), 10)"
		aggQuery, err := ts.Query(aggCommand)
		if(err != nil) {
			fmt.Println("create aggQuery failed")
		}
		aggRs, err := aggQuery.Fetch(update)
		if(err != nil) {
			fmt.Println("Fetch from aggQuery failed")
		}
		for aggRs.HasNext(){
			// Get aggregation result
			aggResult, err := aggRs.NextAggregation()
			if(err != nil) {
				fmt.Println("NextAggregation from aggRs failed")
			}
			// Convert result to double and print out
			avg, err := aggResult.Get(griddb_go.TYPE_DOUBLE)
			if(err == nil) {
				fmt.Println("[Timestamp=", timestamp, "] Average voltage=", avg)
			} else {
				fmt.Println("aggResult.Get err = ", err)
			}
		}
	}
}
|
package fs
import (
"os"
)
// osfile couples an *os.File with its current mmap'ed view of the file
// contents (nil when the file is empty or not mapped).
type osfile struct {
	*os.File
	data []byte
}
// osfs implements the package's file-system abstraction with the os package.
type osfs struct{}
// OS is a file system backed by the os package.
var OS = &osfs{}
// OpenFile opens the named file with the given flags and permissions and,
// when the file is non-empty, memory-maps its current contents.
func (fs *osfs) OpenFile(name string, flag int, perm os.FileMode) (MmapFile, error) {
	f, err := os.OpenFile(name, flag, perm)
	if err != nil {
		return nil, err
	}
	stat, err := f.Stat()
	if err != nil {
		// don't leak the descriptor on failure (previously left open)
		f.Close()
		return nil, err
	}
	mf := &osfile{f, nil}
	if stat.Size() > 0 {
		if err := mf.Mmap(stat.Size()); err != nil {
			f.Close()
			return nil, err
		}
	}
	return mf, nil
}
// CreateLockFile creates or opens the named lock file; the bool result
// presumably reports whether the lock already existed — TODO confirm
// against createLockFile's implementation.
func (fs *osfs) CreateLockFile(name string, perm os.FileMode) (LockFile, bool, error) {
	return createLockFile(name, perm)
}
// Stat returns file info for the named file via os.Stat.
func (fs *osfs) Stat(name string) (os.FileInfo, error) {
	return os.Stat(name)
}
// Remove deletes the named file via os.Remove.
func (fs *osfs) Remove(name string) error {
	return os.Remove(name)
}
// oslockfile is a lock file whose lock is released by deleting the
// file at path and closing the handle.
type oslockfile struct {
	File
	path string
}
// Unlock removes the lock file from disk, then closes the handle.
func (f *oslockfile) Unlock() error {
	if err := os.Remove(f.path); err != nil {
		return err
	}
	return f.Close()
}
// Slice returns the mmap'ed bytes in [start, end). The view aliases the
// mapping and is only valid until the next Mmap or Close.
func (f *osfile) Slice(start int64, end int64) []byte {
	return f.data[start:end]
}
// Close unmaps the file data (if mapped) and closes the underlying file.
// The previous version returned nil on an munmap failure — swallowing the
// error AND leaving the file descriptor open; the error is now reported.
func (f *osfile) Close() error {
	if f.data != nil {
		if err := munmap(f.data); err != nil {
			return err
		}
		f.data = nil
	}
	return f.File.Close()
}
// Mmap (re)maps the file at the given size, releasing any previous
// mapping first, and advises the kernel that access will be random.
func (f *osfile) Mmap(size int64) error {
	if f.data != nil {
		if err := munmap(f.data); err != nil {
			return err
		}
	}
	// TODO: align to the OS page size?
	data, err := mmap(f.File, size)
	if err != nil {
		return err
	}
	madviceRandom(data)
	f.data = data
	return nil
}
|
package ondemand
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
)
// OnDemand is a client for the Barchart OnDemand HTTP API.
type OnDemand struct {
	BaseURL string // endpoint root, including trailing slash
	APIKey  string // appended to every request as the apikey parameter
	Debug   bool   // when true, each request URL is printed before sending
}
// New builds an OnDemand client that talks to the default Barchart
// endpoint with the supplied API key; debug enables request logging.
func New(apiKey string, debug bool) (od *OnDemand) {
	return &OnDemand{
		BaseURL: "http://ondemand.websol.barchart.com/",
		APIKey:  apiKey,
		Debug:   debug,
	}
}
// Request calls the named OnDemand API with the given query parameters,
// unmarshals the JSON response into result, and returns the raw body.
func (od *OnDemand) Request(api string, params string, result interface{}) ([]byte, error) {
	ep := fmt.Sprintf("%v%v?apikey=%v&%v", od.BaseURL, api, od.APIKey, params)
	if od.Debug {
		fmt.Printf("Going After: %v\n", ep)
	}
	body, err := od.get(ep)
	if err != nil {
		// previously the transport error was discarded and Unmarshal ran
		// on an empty body, masking the real failure
		return body, err
	}
	err = json.Unmarshal(body, result)
	return body, err
}
// get performs a GET on url with a 30 second client timeout and returns
// the response body.
// NOTE(review): the HTTP status code is not checked, so a non-200
// response body is returned as if it were data — confirm callers cope.
func (od *OnDemand) get(url string) (body []byte, err error) {
	client := http.Client{
		Timeout: time.Duration(30 * time.Second),
	}
	resp, err := client.Get(url)
	if err != nil {
		return
	}
	defer resp.Body.Close()
	body, err = ioutil.ReadAll(resp.Body)
	return
}
|
package _3_Longest_Substring_Without_Repeating_Characters
// lengthOfLongestSubstring returns the length of the longest substring of
// s that contains no repeating byte.
//
// Single O(n) sliding-window pass replacing the previous O(n^2) scan:
// last[b] holds index+1 of the most recent occurrence of byte b (0 means
// unseen); when the current byte was last seen inside the window, the
// window start jumps just past that occurrence.
//
// Note: like the original, this operates on bytes, so multi-byte UTF-8
// runes are compared byte-wise.
func lengthOfLongestSubstring(s string) int {
	var last [256]int
	best, start := 0, 0
	for i := 0; i < len(s); i++ {
		if p := last[s[i]]; p > start {
			start = p
		}
		last[s[i]] = i + 1
		if w := i - start + 1; w > best {
			best = w
		}
	}
	return best
}
// IndexByte returns the index of the first occurrence of i in arr, or -1
// when arr does not contain i.
func IndexByte(arr []byte, i byte) int {
	for pos := 0; pos < len(arr); pos++ {
		if arr[pos] == i {
			return pos
		}
	}
	return -1
}
// lengthOfLongestSubstring2 solves the same problem with a per-start
// map-based duplicate check; deduplicating through a map is slower than
// the byte-slice scan variant.
func lengthOfLongestSubstring2(s string) int {
	switch len(s) {
	case 0:
		return 0
	case 1:
		return 1
	}
	data := []byte(s)
	best := 0
	for start := range data {
		// no window beginning here can beat the current best
		if best >= len(data)-start {
			break
		}
		seen := map[byte]bool{data[start]: true}
		for pos := start + 1; pos < len(data); pos++ {
			if seen[data[pos]] {
				if len(seen) >= best {
					best = len(seen)
				}
				break
			}
			seen[data[pos]] = true
			if len(seen) >= best {
				best = len(seen)
			}
		}
	}
	return best
}
|
// Copyright (c) 2014-2017 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package walletdbtest
import (
"bytes"
"fmt"
"reflect"
"sync"
"github.com/btcsuite/btcwallet/walletdb"
)
// errSubTestFail is used to signal that a sub test returned false.
var errSubTestFail = fmt.Errorf("sub test failure")
// testContext is used to store context information about a running test which
// is passed into helper functions.
type testContext struct {
	t Tester
	db walletdb.DB
	bucketDepth int // current nesting depth, capped by testNestedReadWriteBucket
	isWritable bool
}
// rollbackValues returns a copy of the provided map with every value set
// to the empty string. It is used to verify that values are properly
// rolled back; the input map is not modified.
func rollbackValues(values map[string]string) map[string]string {
	out := make(map[string]string, len(values))
	for key := range values {
		out[key] = ""
	}
	return out
}
// testGetValues checks that every provided key can be retrieved from the
// bucket and that the stored value matches. An empty expected string stands
// for "key absent" (Get returns nil for missing keys).
func testGetValues(tc *testContext, bucket walletdb.ReadBucket, values map[string]string) bool {
	for key, want := range values {
		var wantBytes []byte
		if want != "" {
			wantBytes = []byte(want)
		}
		got := bucket.Get([]byte(key))
		if reflect.DeepEqual(got, wantBytes) {
			continue
		}
		tc.t.Errorf("Get: unexpected value - got %s, want %s",
			got, wantBytes)
		return false
	}
	return true
}
// testPutValues stores every provided key/value pair in the bucket, mapping
// an empty value string to a nil byte slice, and reports any Put error.
func testPutValues(tc *testContext, bucket walletdb.ReadWriteBucket, values map[string]string) bool {
	for key, val := range values {
		var data []byte
		if val != "" {
			data = []byte(val)
		}
		err := bucket.Put([]byte(key), data)
		if err == nil {
			continue
		}
		tc.t.Errorf("Put: unexpected error: %v", err)
		return false
	}
	return true
}
// testDeleteValues removes every provided key from the bucket, failing on
// the first Delete error.
func testDeleteValues(tc *testContext, bucket walletdb.ReadWriteBucket, values map[string]string) bool {
	for key := range values {
		err := bucket.Delete([]byte(key))
		if err == nil {
			continue
		}
		tc.t.Errorf("Delete: unexpected error: %v", err)
		return false
	}
	return true
}
// testNestedReadWriteBucket reruns the full read-write bucket test suite
// against a nested bucket, using tc.bucketDepth to cap recursion at two
// levels deep.
func testNestedReadWriteBucket(tc *testContext, testBucket walletdb.ReadWriteBucket) bool {
	// Don't go more than 2 nested levels deep.
	if tc.bucketDepth > 1 {
		return true
	}
	tc.bucketDepth++
	defer func() {
		tc.bucketDepth--
	}()
	return testReadWriteBucketInterface(tc, testBucket)
}
// testSequence tests that the sequence related methods work as expected.
// It verifies that Sequence is a pure read (two calls return the same value),
// that NextSequence increments the counter by exactly one, and that
// SetSequence rebases the counter so the following NextSequence returns
// base+1. Failures are reported via tc.t.Errorf and signalled by returning
// false.
func testSequence(tc *testContext, testBucket walletdb.ReadWriteBucket) bool {
	// Obtaining the current sequence twice should give us the same value.
	seqNo1 := testBucket.Sequence()
	seqNo2 := testBucket.Sequence()
	if seqNo1 != seqNo2 {
		tc.t.Errorf("Sequence: seq has incremented")
		return false
	}
	// Incrementing to the next sequence should give us a value one larger
	// than the prior number.
	seqNo3, err := testBucket.NextSequence()
	if err != nil {
		tc.t.Errorf("Sequence: unexpected error: %v", err)
		return false
	}
	if seqNo3 != seqNo2+1 {
		tc.t.Errorf("Sequence: expected seq no of %v, instead got %v",
			seqNo2+1, seqNo3)
		return false
	}
	// We should be able to modify the sequence base number.
	newBase := uint64(100)
	if err := testBucket.SetSequence(newBase); err != nil {
		tc.t.Errorf("Sequence: unexpected error: %v", err)
		return false
	}
	// Any offset from this new sequence should now be properly reflected.
	seqNo4, err := testBucket.NextSequence()
	if err != nil {
		tc.t.Errorf("Sequence: unexpected error: %v", err)
		return false
	}
	if seqNo4 != newBase+1 {
		tc.t.Errorf("Sequence: expected seq no of %v, instead got %v",
			newBase+1, seqNo4)
		return false
	}
	return true
}
// testReadWriteBucketInterface ensures the bucket interface is working properly by
// exercising all of its functions.
// It covers Put/Get/Delete round-trips, ForEach iteration, the sequence
// methods, and creation/retrieval/deletion of nested buckets including the
// expected error cases (ErrBucketExists, ErrBucketNotFound). Failures are
// reported through tc.t.Errorf and the function returns false at the first
// one.
func testReadWriteBucketInterface(tc *testContext, bucket walletdb.ReadWriteBucket) bool {
	// keyValues holds the keys and values to use when putting
	// values into the bucket.
	var keyValues = map[string]string{
		"bucketkey1": "foo1",
		"bucketkey2": "foo2",
		"bucketkey3": "foo3",
	}
	if !testPutValues(tc, bucket, keyValues) {
		return false
	}
	if !testGetValues(tc, bucket, keyValues) {
		return false
	}
	// Iterate all of the keys using ForEach while making sure the
	// stored values are the expected values.
	keysFound := make(map[string]struct{}, len(keyValues))
	err := bucket.ForEach(func(k, v []byte) error {
		ks := string(k)
		wantV, ok := keyValues[ks]
		if !ok {
			return fmt.Errorf("ForEach: key '%s' should "+
				"exist", ks)
		}
		if !reflect.DeepEqual(v, []byte(wantV)) {
			return fmt.Errorf("ForEach: value for key '%s' "+
				"does not match - got %s, want %s",
				ks, v, wantV)
		}
		keysFound[ks] = struct{}{}
		return nil
	})
	if err != nil {
		tc.t.Errorf("%v", err)
		return false
	}
	// Ensure all keys were iterated.
	for k := range keyValues {
		if _, ok := keysFound[k]; !ok {
			tc.t.Errorf("ForEach: key '%s' was not iterated "+
				"when it should have been", k)
			return false
		}
	}
	// Delete the keys and ensure they were deleted.
	if !testDeleteValues(tc, bucket, keyValues) {
		return false
	}
	if !testGetValues(tc, bucket, rollbackValues(keyValues)) {
		return false
	}
	// Test that the sequence methods work as expected.
	if !testSequence(tc, bucket) {
		return false
	}
	// Ensure creating a new bucket works as expected.
	testBucketName := []byte("testbucket")
	testBucket, err := bucket.CreateBucket(testBucketName)
	if err != nil {
		tc.t.Errorf("CreateBucket: unexpected error: %v", err)
		return false
	}
	if !testNestedReadWriteBucket(tc, testBucket) {
		return false
	}
	// Ensure creating a bucket that already exists fails with the
	// expected error.
	wantErr := walletdb.ErrBucketExists
	if _, err := bucket.CreateBucket(testBucketName); err != wantErr {
		tc.t.Errorf("CreateBucket: unexpected error - got %v, "+
			"want %v", err, wantErr)
		return false
	}
	// Ensure CreateBucketIfNotExists returns an existing bucket.
	testBucket, err = bucket.CreateBucketIfNotExists(testBucketName)
	if err != nil {
		tc.t.Errorf("CreateBucketIfNotExists: unexpected "+
			"error: %v", err)
		return false
	}
	if !testNestedReadWriteBucket(tc, testBucket) {
		return false
	}
	// Ensure retrieving an existing bucket works as expected.
	testBucket = bucket.NestedReadWriteBucket(testBucketName)
	if !testNestedReadWriteBucket(tc, testBucket) {
		return false
	}
	// Ensure deleting a bucket works as intended.
	if err := bucket.DeleteNestedBucket(testBucketName); err != nil {
		tc.t.Errorf("DeleteNestedBucket: unexpected error: %v", err)
		return false
	}
	if b := bucket.NestedReadWriteBucket(testBucketName); b != nil {
		tc.t.Errorf("DeleteNestedBucket: bucket '%s' still exists",
			testBucketName)
		return false
	}
	// Ensure deleting a bucket that doesn't exist returns the
	// expected error.
	wantErr = walletdb.ErrBucketNotFound
	if err := bucket.DeleteNestedBucket(testBucketName); err != wantErr {
		tc.t.Errorf("DeleteNestedBucket: unexpected error - got %v, "+
			"want %v", err, wantErr)
		return false
	}
	// Ensure CreateBucketIfNotExists creates a new bucket when
	// it doesn't already exist.
	testBucket, err = bucket.CreateBucketIfNotExists(testBucketName)
	if err != nil {
		tc.t.Errorf("CreateBucketIfNotExists: unexpected "+
			"error: %v", err)
		return false
	}
	if !testNestedReadWriteBucket(tc, testBucket) {
		return false
	}
	// Delete the test bucket to avoid leaving it around for future
	// calls.
	if err := bucket.DeleteNestedBucket(testBucketName); err != nil {
		tc.t.Errorf("DeleteNestedBucket: unexpected error: %v", err)
		return false
	}
	if b := bucket.NestedReadWriteBucket(testBucketName); b != nil {
		tc.t.Errorf("DeleteNestedBucket: bucket '%s' still exists",
			testBucketName)
		return false
	}
	return true
}
// testManualTxInterface ensures that manual transactions work as expected.
// It exercises BeginReadTx/BeginReadWriteTx directly (as opposed to the
// managed View/Update wrappers) against the bucket identified by bucketKey,
// covering read-only access, rollback, and commit paths.
//
// BUG FIX: two failure messages previously said "Commit: unexpected error"
// where the failing call was actually Rollback(); they now name the right
// operation so test output is not misleading.
func testManualTxInterface(tc *testContext, bucketKey []byte) bool {
	db := tc.db
	// populateValues tests that populating values works as expected.
	//
	// When the writable flag is false, a read-only transaction is created,
	// standard bucket tests for read-only transactions are performed, and
	// the Commit function is checked to ensure it fails as expected.
	//
	// Otherwise, a read-write transaction is created, the values are
	// written, standard bucket tests for read-write transactions are
	// performed, and then the transaction is either committed or rolled
	// back depending on the flag.
	populateValues := func(writable, rollback bool, putValues map[string]string) bool {
		var dbtx walletdb.ReadTx
		var rootBucket walletdb.ReadBucket
		var err error
		if writable {
			dbtx, err = db.BeginReadWriteTx()
			if err != nil {
				tc.t.Errorf("BeginReadWriteTx: unexpected error %v", err)
				return false
			}
			rootBucket = dbtx.(walletdb.ReadWriteTx).ReadWriteBucket(bucketKey)
		} else {
			dbtx, err = db.BeginReadTx()
			if err != nil {
				tc.t.Errorf("BeginReadTx: unexpected error %v", err)
				return false
			}
			rootBucket = dbtx.ReadBucket(bucketKey)
		}
		if rootBucket == nil {
			tc.t.Errorf("ReadWriteBucket/ReadBucket: unexpected nil root bucket")
			_ = dbtx.Rollback()
			return false
		}
		if writable {
			tc.isWritable = writable
			if !testReadWriteBucketInterface(tc, rootBucket.(walletdb.ReadWriteBucket)) {
				_ = dbtx.Rollback()
				return false
			}
		}
		if !writable {
			// Rollback the transaction.
			if err := dbtx.Rollback(); err != nil {
				tc.t.Errorf("Rollback: unexpected error %v", err)
				return false
			}
		} else {
			rootBucket := rootBucket.(walletdb.ReadWriteBucket)
			if !testPutValues(tc, rootBucket, putValues) {
				return false
			}
			if rollback {
				// Rollback the transaction.
				if err := dbtx.Rollback(); err != nil {
					tc.t.Errorf("Rollback: unexpected "+
						"error %v", err)
					return false
				}
			} else {
				// The commit should succeed.
				if err := dbtx.(walletdb.ReadWriteTx).Commit(); err != nil {
					tc.t.Errorf("Commit: unexpected error "+
						"%v", err)
					return false
				}
			}
		}
		return true
	}
	// checkValues starts a read-only transaction and checks that all of
	// the key/value pairs specified in the expectedValues parameter match
	// what's in the database.
	checkValues := func(expectedValues map[string]string) bool {
		// Begin another read-only transaction to ensure...
		dbtx, err := db.BeginReadTx()
		if err != nil {
			tc.t.Errorf("BeginReadTx: unexpected error %v", err)
			return false
		}
		rootBucket := dbtx.ReadBucket(bucketKey)
		if rootBucket == nil {
			tc.t.Errorf("ReadBucket: unexpected nil root bucket")
			_ = dbtx.Rollback()
			return false
		}
		if !testGetValues(tc, rootBucket, expectedValues) {
			_ = dbtx.Rollback()
			return false
		}
		// Rollback the read-only transaction.
		if err := dbtx.Rollback(); err != nil {
			tc.t.Errorf("Rollback: unexpected error %v", err)
			return false
		}
		return true
	}
	// deleteValues starts a read-write transaction and deletes the keys
	// in the passed key/value pairs.
	deleteValues := func(values map[string]string) bool {
		dbtx, err := db.BeginReadWriteTx()
		if err != nil {
			tc.t.Errorf("BeginReadWriteTx: unexpected error %v", err)
			_ = dbtx.Rollback()
			return false
		}
		rootBucket := dbtx.ReadWriteBucket(bucketKey)
		if rootBucket == nil {
			tc.t.Errorf("RootBucket: unexpected nil root bucket")
			_ = dbtx.Rollback()
			return false
		}
		// Delete the keys and ensure they were deleted.
		if !testDeleteValues(tc, rootBucket, values) {
			_ = dbtx.Rollback()
			return false
		}
		if !testGetValues(tc, rootBucket, rollbackValues(values)) {
			_ = dbtx.Rollback()
			return false
		}
		// Commit the changes and ensure it was successful.
		if err := dbtx.Commit(); err != nil {
			tc.t.Errorf("Commit: unexpected error %v", err)
			return false
		}
		return true
	}
	// keyValues holds the keys and values to use when putting values
	// into a bucket.
	var keyValues = map[string]string{
		"umtxkey1": "foo1",
		"umtxkey2": "foo2",
		"umtxkey3": "foo3",
	}
	// Ensure that attempting populating the values using a read-only
	// transaction fails as expected.
	if !populateValues(false, true, keyValues) {
		return false
	}
	if !checkValues(rollbackValues(keyValues)) {
		return false
	}
	// Ensure that attempting populating the values using a read-write
	// transaction and then rolling it back yields the expected values.
	if !populateValues(true, true, keyValues) {
		return false
	}
	if !checkValues(rollbackValues(keyValues)) {
		return false
	}
	// Ensure that attempting populating the values using a read-write
	// transaction and then committing it stores the expected values.
	if !populateValues(true, false, keyValues) {
		return false
	}
	if !checkValues(keyValues) {
		return false
	}
	// Clean up the keys.
	if !deleteValues(keyValues) {
		return false
	}
	return true
}
// testNamespaceAndTxInterfaces creates a namespace using the provided key and
// tests all facets of it interface as well as transaction and bucket
// interfaces under it. The namespace (top-level bucket) is removed again via
// a deferred Update when the tests finish.
//
// BUG FIX: the error returned by the walletdb.View call that enumerates the
// top-level buckets was previously discarded, so the "if err != nil" check
// that follows always inspected the (nil) error of the preceding call. The
// result is now assigned to err so ForEachBucket failures are detected.
func testNamespaceAndTxInterfaces(tc *testContext, namespaceKey string) bool {
	namespaceKeyBytes := []byte(namespaceKey)
	err := walletdb.Update(tc.db, func(tx walletdb.ReadWriteTx) error {
		_, err := tx.CreateTopLevelBucket(namespaceKeyBytes)
		return err
	})
	if err != nil {
		tc.t.Errorf("CreateTopLevelBucket: unexpected error: %v", err)
		return false
	}
	defer func() {
		// Remove the namespace now that the tests are done for it.
		err := walletdb.Update(tc.db, func(tx walletdb.ReadWriteTx) error {
			return tx.DeleteTopLevelBucket(namespaceKeyBytes)
		})
		if err != nil {
			tc.t.Errorf("DeleteTopLevelBucket: unexpected error: %v", err)
			return
		}
	}()
	if !testManualTxInterface(tc, namespaceKeyBytes) {
		return false
	}
	// keyValues holds the keys and values to use when putting values
	// into a bucket.
	var keyValues = map[string]string{
		"mtxkey1": "foo1",
		"mtxkey2": "foo2",
		"mtxkey3": "foo3",
	}
	// Test the bucket interface via a managed read-only transaction.
	err = walletdb.View(tc.db, func(tx walletdb.ReadTx) error {
		rootBucket := tx.ReadBucket(namespaceKeyBytes)
		if rootBucket == nil {
			return fmt.Errorf("ReadBucket: unexpected nil root bucket")
		}
		return nil
	})
	if err != nil {
		if err != errSubTestFail {
			tc.t.Errorf("%v", err)
		}
		return false
	}
	// Test that we can read the top level buckets.
	var topLevelBuckets []string
	err = walletdb.View(tc.db, func(tx walletdb.ReadTx) error {
		return tx.ForEachBucket(func(key []byte) error {
			topLevelBuckets = append(topLevelBuckets, string(key))
			return nil
		})
	})
	if err != nil {
		if err != errSubTestFail {
			tc.t.Errorf("%v", err)
		}
		return false
	}
	if len(topLevelBuckets) != 1 {
		tc.t.Errorf("ForEachBucket: expected only one top level bucket")
		return false
	}
	if topLevelBuckets[0] != namespaceKey {
		tc.t.Errorf("ForEachBucket: expected %v, got %v", namespaceKey,
			topLevelBuckets[0])
		return false
	}
	// Test the bucket interface via a managed read-write transaction.
	// Also, put a series of values and force a rollback so the following
	// code can ensure the values were not stored.
	forceRollbackError := fmt.Errorf("force rollback")
	err = walletdb.Update(tc.db, func(tx walletdb.ReadWriteTx) error {
		rootBucket := tx.ReadWriteBucket(namespaceKeyBytes)
		if rootBucket == nil {
			return fmt.Errorf("ReadWriteBucket: unexpected nil root bucket")
		}
		tc.isWritable = true
		if !testReadWriteBucketInterface(tc, rootBucket) {
			return errSubTestFail
		}
		if !testPutValues(tc, rootBucket, keyValues) {
			return errSubTestFail
		}
		// Return an error to force a rollback.
		return forceRollbackError
	})
	if err != forceRollbackError {
		if err == errSubTestFail {
			return false
		}
		tc.t.Errorf("Update: inner function error not returned - got "+
			"%v, want %v", err, forceRollbackError)
		return false
	}
	// Ensure the values that should have not been stored due to the forced
	// rollback above were not actually stored.
	err = walletdb.View(tc.db, func(tx walletdb.ReadTx) error {
		rootBucket := tx.ReadBucket(namespaceKeyBytes)
		if rootBucket == nil {
			return fmt.Errorf("ReadBucket: unexpected nil root bucket")
		}
		if !testGetValues(tc, rootBucket, rollbackValues(keyValues)) {
			return errSubTestFail
		}
		return nil
	})
	if err != nil {
		if err != errSubTestFail {
			tc.t.Errorf("%v", err)
		}
		return false
	}
	// Store a series of values via a managed read-write transaction.
	err = walletdb.Update(tc.db, func(tx walletdb.ReadWriteTx) error {
		rootBucket := tx.ReadWriteBucket(namespaceKeyBytes)
		if rootBucket == nil {
			return fmt.Errorf("ReadWriteBucket: unexpected nil root bucket")
		}
		if !testPutValues(tc, rootBucket, keyValues) {
			return errSubTestFail
		}
		return nil
	})
	if err != nil {
		if err != errSubTestFail {
			tc.t.Errorf("%v", err)
		}
		return false
	}
	// Ensure the values stored above were committed as expected.
	err = walletdb.View(tc.db, func(tx walletdb.ReadTx) error {
		rootBucket := tx.ReadBucket(namespaceKeyBytes)
		if rootBucket == nil {
			return fmt.Errorf("ReadBucket: unexpected nil root bucket")
		}
		if !testGetValues(tc, rootBucket, keyValues) {
			return errSubTestFail
		}
		return nil
	})
	if err != nil {
		if err != errSubTestFail {
			tc.t.Errorf("%v", err)
		}
		return false
	}
	// Clean up the values stored above in a managed read-write transaction.
	err = walletdb.Update(tc.db, func(tx walletdb.ReadWriteTx) error {
		rootBucket := tx.ReadWriteBucket(namespaceKeyBytes)
		if rootBucket == nil {
			return fmt.Errorf("ReadWriteBucket: unexpected nil root bucket")
		}
		if !testDeleteValues(tc, rootBucket, keyValues) {
			return errSubTestFail
		}
		return nil
	})
	if err != nil {
		if err != errSubTestFail {
			tc.t.Errorf("%v", err)
		}
		return false
	}
	return true
}
// testAdditionalErrors performs some tests for error cases not covered
// elsewhere in the tests and therefore improves negative test coverage.
// It checks the sentinel errors for nil bucket keys on CreateBucket,
// DeleteNestedBucket and Put, and that Rollback/Commit on an already-closed
// transaction return walletdb.ErrTxClosed.
func testAdditionalErrors(tc *testContext) bool {
	ns3Key := []byte("ns3")
	err := walletdb.Update(tc.db, func(tx walletdb.ReadWriteTx) error {
		// Create a new namespace
		rootBucket, err := tx.CreateTopLevelBucket(ns3Key)
		if err != nil {
			return fmt.Errorf("CreateTopLevelBucket: unexpected error: %v", err)
		}
		// Ensure CreateBucket returns the expected error when no bucket
		// key is specified.
		wantErr := walletdb.ErrBucketNameRequired
		if _, err := rootBucket.CreateBucket(nil); err != wantErr {
			return fmt.Errorf("CreateBucket: unexpected error - "+
				"got %v, want %v", err, wantErr)
		}
		// Ensure DeleteNestedBucket returns the expected error when no bucket
		// key is specified.
		wantErr = walletdb.ErrIncompatibleValue
		if err := rootBucket.DeleteNestedBucket(nil); err != wantErr {
			return fmt.Errorf("DeleteNestedBucket: unexpected error - "+
				"got %v, want %v", err, wantErr)
		}
		// Ensure Put returns the expected error when no key is
		// specified.
		wantErr = walletdb.ErrKeyRequired
		if err := rootBucket.Put(nil, nil); err != wantErr {
			return fmt.Errorf("Put: unexpected error - got %v, "+
				"want %v", err, wantErr)
		}
		return nil
	})
	if err != nil {
		if err != errSubTestFail {
			tc.t.Errorf("%v", err)
		}
		return false
	}
	// Ensure that attempting to rollback or commit a transaction that is
	// already closed returns the expected error.
	tx, err := tc.db.BeginReadWriteTx()
	if err != nil {
		tc.t.Errorf("Begin: unexpected error: %v", err)
		return false
	}
	if err := tx.Rollback(); err != nil {
		tc.t.Errorf("Rollback: unexpected error: %v", err)
		return false
	}
	wantErr := walletdb.ErrTxClosed
	if err := tx.Rollback(); err != wantErr {
		tc.t.Errorf("Rollback: unexpected error - got %v, want %v", err,
			wantErr)
		return false
	}
	if err := tx.Commit(); err != wantErr {
		tc.t.Errorf("Commit: unexpected error - got %v, want %v", err,
			wantErr)
		return false
	}
	return true
}
// testBatchInterface tests that if the target database implements the batch
// method, then the method functions as expected.
// Five goroutines each write one distinct single-byte key/value pair through
// walletdb.Batch concurrently; a View afterwards verifies every pair landed.
func testBatchInterface(tc *testContext) bool {
	// If the database doesn't support the batch super-set of the
	// interface, then we're done here.
	batchDB, ok := tc.db.(walletdb.BatchDB)
	if !ok {
		return true
	}
	const numGoroutines = 5
	// errChan is buffered so no goroutine blocks on send.
	errChan := make(chan error, numGoroutines)
	var wg sync.WaitGroup
	for i := 0; i < numGoroutines; i++ {
		wg.Add(1)
		// The loop variable is passed as an argument to avoid capture
		// issues.
		go func(i int) {
			defer wg.Done()
			err := walletdb.Batch(batchDB, func(tx walletdb.ReadWriteTx) error {
				b, err := tx.CreateTopLevelBucket([]byte("test"))
				if err != nil {
					return err
				}
				byteI := []byte{byte(i)}
				return b.Put(byteI, byteI)
			})
			errChan <- err
		}(i)
	}
	wg.Wait()
	close(errChan)
	for err := range errChan {
		if err != nil {
			tc.t.Errorf("Batch: unexpected error: %v", err)
			return false
		}
	}
	// Every key written by the batched goroutines must now be visible.
	err := walletdb.View(batchDB, func(tx walletdb.ReadTx) error {
		b := tx.ReadBucket([]byte("test"))
		for i := 0; i < numGoroutines; i++ {
			byteI := []byte{byte(i)}
			if v := b.Get(byteI); v == nil {
				return fmt.Errorf("key %v not present", byteI)
			} else if !bytes.Equal(v, byteI) {
				return fmt.Errorf("key %v not equal to value: "+
					"%v", byteI, v)
			}
		}
		return nil
	})
	if err != nil {
		tc.t.Errorf("Batch: unexpected error: %v", err)
		return false
	}
	return true
}
// TestInterface performs all interfaces tests for this database driver.
// A fresh database is created via walletdb.Create with the given dbType and
// driver args. Each helper reports its own failures through t, so this
// function simply stops at the first failing stage.
func TestInterface(t Tester, dbType string, args ...interface{}) {
	db, err := walletdb.Create(dbType, args...)
	if err != nil {
		t.Errorf("Failed to create test database (%s) %v", dbType, err)
		return
	}
	defer db.Close()
	// Run all of the interface tests against the database.
	// Create a test context to pass around.
	context := testContext{t: t, db: db}
	// Create a namespace and test the interface for it.
	if !testNamespaceAndTxInterfaces(&context, "ns1") {
		return
	}
	// Create a second namespace and test the interface for it.
	if !testNamespaceAndTxInterfaces(&context, "ns2") {
		return
	}
	// Check a few more error conditions not covered elsewhere.
	if !testAdditionalErrors(&context) {
		return
	}
	// If applicable, also test the behavior of the Batch call.
	if !testBatchInterface(&context) {
		return
	}
}
|
/*
* @lc app=leetcode.cn id=1678 lang=golang
*
* [1678] 设计 Goal 解析器
*/
// @lc code=start
package main
// interpret decodes a Goal-parser command: "G" stays "G", "()" becomes "o",
// and "(al)" becomes "al" (the parentheses are dropped, everything else is
// copied through).
func interpret(command string) string {
	var out []byte
	for i, n := 0, len(command); i < n; i++ {
		switch command[i] {
		case ')':
			// "()" produces an 'o'; the ')' of "(al)" produces nothing.
			if command[i-1] == '(' {
				out = append(out, 'o')
			}
		case '(':
			// Opening parentheses are never emitted.
		default:
			out = append(out, command[i])
		}
	}
	return string(out)
}
//
// func main() {
// fmt.Println(interpret("G()(al)"))
// fmt.Println(interpret("G()()()()(al)"))
// fmt.Println(interpret("(al)G(al)()()G"))
// }
// @lc code=end
|
package main
import "fmt"
// ListNode is a node in a singly linked list.
type ListNode struct {
	Val  int       // payload value
	Next *ListNode // next node; nil at the tail
}
// main builds the list 4 -> 5 -> 6, removes the 2nd node from the end, and
// prints the resulting head node and its successor.
func main() {
	list := ListNode{4, &ListNode{5, &ListNode{6, nil}}}
	result := removeNthFromEnd(&list, 2)
	fmt.Println(result, result.Next)
}
// removeNthFromEnd deletes the n-th node counted from the end of the list
// and returns the (possibly new) head. A dummy node in front of head makes
// removing the first node uniform with every other position.
func removeNthFromEnd(head *ListNode, n int) *ListNode {
	dummy := &ListNode{Next: head}
	// Count the list length.
	count := 0
	for node := head; node != nil; node = node.Next {
		count++
	}
	// Walk to the node just before the one to delete.
	prev := dummy
	for skip := count - n; skip > 0; skip-- {
		prev = prev.Next
	}
	prev.Next = prev.Next.Next
	return dummy.Next
}
|
package skiplist
import (
"fmt"
"testing"
)
// TestInsertSearch inserts ten key/value pairs and verifies each key can be
// found afterwards.
//
// BUG FIX: the failure message previously said "exists after deletion",
// copied from TestDelete; this test performs no deletion and fails when a
// freshly inserted key is NOT found.
func TestInsertSearch(t *testing.T) {
	sl := NewSkipList(10)
	for i := 0; i < 10; i++ {
		if err := sl.Insert(fmt.Sprintf("hello%d", i), fmt.Sprintf("world%d", i)); err != nil {
			t.Fatal(err)
		}
	}
	for i := 0; i < 10; i++ {
		key := fmt.Sprintf("hello%d", i)
		if _, ok := sl.Search(key); !ok {
			t.Fatal(key, " not found after insertion")
		}
	}
}
// TestDelete verifies that deleting one key removes only that key and leaves
// every other inserted key intact.
//
// BUG FIX: the original check was inverted - it called t.Fatal when Search
// reported the deleted key as missing (!ok), i.e. it failed exactly when the
// deletion worked. The deleted key must NOT be found. The message for the
// remaining keys has also been corrected: they must still be present.
func TestDelete(t *testing.T) {
	sl := NewSkipList(10)
	for i := 0; i < 10; i++ {
		if err := sl.Insert(fmt.Sprintf("hello%d", i), fmt.Sprintf("world%d", i)); err != nil {
			t.Fatal(err)
		}
	}
	sl.Delete("hello0")
	if _, ok := sl.Search("hello0"); ok {
		t.Fatal("hello0 exists after deletion")
	}
	for i := 1; i < 10; i++ {
		key := fmt.Sprintf("hello%d", i)
		if _, ok := sl.Search(key); !ok {
			t.Fatal(key, " not found after deleting hello0")
		}
	}
}
|
package common
import (
"encoding/json"
"logger"
"net/http"
)
func GetBuffer(req *http.Request, buf []byte) {
for {
_, err := req.Body.Read(buf)
if err != nil {
break
}
}
}
// Unmarshal decodes buf as JSON into i and reports success. A decode failure
// is logged and reported as false rather than returned as an error.
func Unmarshal(buf []byte, i interface{}) bool {
	if err := json.Unmarshal(buf, i); err != nil {
		logger.PRINTLINE("Unmarshal error: ", err)
		return false
	}
	return true
}
|
package main
// nextPermutation rearranges nums in place into the lexicographically next
// permutation; when nums is already the last permutation (fully descending)
// it wraps around to the first (ascending) one.
func nextPermutation(nums []int) {
	// Locate the rightmost ascent: nums[pivot] < nums[successor].
	pivot, successor := 0, 0
	for i := len(nums) - 1; i > 0; i-- {
		if nums[i-1] < nums[i] {
			successor, pivot = i, i-1
			break
		}
	}
	// Swap the pivot with the rightmost element larger than it; the suffix
	// is descending, so the first match from the right is the smallest one.
	if successor != 0 {
		for i := len(nums) - 1; i >= successor; i-- {
			if nums[i] > nums[pivot] {
				nums[pivot], nums[i] = nums[i], nums[pivot]
				break
			}
		}
	}
	// Reverse the suffix (or the whole slice when no ascent was found).
	for l, r := successor, len(nums)-1; l < r; l, r = l+1, r-1 {
		nums[l], nums[r] = nums[r], nums[l]
	}
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
"strings"
)
// Copied from https://github.com/juliangruber/go-intersect
// Hash intersects two rune slices via a hash set: it returns, in order, the
// elements of b that also occur in a (duplicates in b are kept). Complexity
// is O(n * x) where x is a factor of hash function efficiency (between 1 and 2).
func Hash(a []rune, b []rune) []rune {
	seen := make(map[rune]bool, len(a))
	for _, r := range a {
		seen[r] = true
	}
	result := make([]rune, 0)
	for _, r := range b {
		if seen[r] {
			result = append(result, r)
		}
	}
	return result
}
func ReadInput(r io.Reader) ([]string, error) {
scanner := bufio.NewScanner(r)
scanner.Split(bufio.ScanLines)
var result []string
var inputElement string
for scanner.Scan() {
// Check if there is an empty line to indicate a new inputElement.
if text := scanner.Text(); len(text) > 0 {
inputElement += " " + text
} else {
result = append(result, inputElement)
inputElement = ""
}
}
result = append(result, inputElement) // append the last inputElement
return result, scanner.Err()
}
// ProcessGroup splits answers into whitespace-separated words (one per
// person) and folds them with Hash, leaving only the runes common to every
// word. The first word seeds the accumulator.
func ProcessGroup(answers string) ([]rune, error) {
	scanner := bufio.NewScanner(strings.NewReader(answers))
	scanner.Split(bufio.ScanWords)
	var common []rune
	for scanner.Scan() {
		person := []rune(scanner.Text())
		if common == nil {
			common = person
		} else {
			common = Hash(common, person)
		}
	}
	return common, scanner.Err()
}
// main sums, over all groups in 6/input.txt, the number of answers common to
// every member of the group and prints the total.
//
// Improvements: the errors from os.Open, ReadInput and ProcessGroup were
// silently discarded and the file was never closed; failures are now
// reported and abort the run, and the file handle is released.
func main() {
	file, err := os.Open("6/input.txt")
	if err != nil {
		fmt.Println("open input:", err)
		return
	}
	defer file.Close()
	input, err := ReadInput(file)
	if err != nil {
		fmt.Println("read input:", err)
		return
	}
	count := 0
	for _, element := range input {
		x, err := ProcessGroup(element)
		if err != nil {
			fmt.Println("process group:", err)
			return
		}
		count += len(x)
	}
	fmt.Println(count)
}
|
package matrix
import (
"reflect"
"github.com/seemenkina/go-ntskem/ff"
"github.com/seemenkina/go-ntskem/poly"
)
// MatrixFF is a dense nRows x nColumns matrix whose cells are finite-field
// elements stored as uint16 values.
type MatrixFF struct {
	nRows    uint32     // number of rows
	nColumns uint32     // number of columns
	m        [][]uint16 // row-major cell storage; len(m) == nRows after ZeroMatrix
}
// New sets the matrix dimensions to nr x nc and allocates zeroed storage.
func (mff *MatrixFF) New(nr, nc uint32) {
	mff.nRows, mff.nColumns = nr, nc
	mff.ZeroMatrix()
}
// ZeroMatrix (re)allocates the backing storage as an all-zero
// nRows x nColumns grid, discarding any previous contents.
func (mff *MatrixFF) ZeroMatrix() {
	rows := make([][]uint16, mff.nRows)
	for r := range rows {
		rows[r] = make([]uint16, mff.nColumns)
	}
	mff.m = rows
}
// Copy returns an independent deep copy of the matrix.
//
// BUG FIX: the original only copied the outer slice of row headers, so every
// row of the "copy" shared its backing array with the receiver and writing a
// cell in one matrix mutated the other. Each row is now cloned as well.
func (mff *MatrixFF) Copy() *MatrixFF {
	duplicate := make([][]uint16, len(mff.m))
	for i, row := range mff.m {
		duplicate[i] = make([]uint16, len(row))
		copy(duplicate[i], row)
	}
	return &MatrixFF{
		nRows:    mff.nRows,
		nColumns: mff.nColumns,
		m:        duplicate,
	}
}
// IsEqual reports whether mff and second have identical dimensions and cell
// contents. reflect.DeepEqual also inspects the unexported fields, which is
// valid here because both values belong to this package.
func (mff *MatrixFF) IsEqual(second *MatrixFF) bool {
	return reflect.DeepEqual(mff, second)
}
// ColumnSwap exchanges columns i and j across every row; it is a no-op when
// i == j.
func (mff *MatrixFF) ColumnSwap(i, j int) {
	if i == j {
		return
	}
	for r := uint32(0); r < mff.nRows; r++ {
		row := mff.m[r]
		row[i], row[j] = row[j], row[i]
	}
}
// ReduceRowEchelon reduces the matrix in place using Gauss-Jordan style
// elimination with all arithmetic delegated to ff2 (Inv/Mul/Add), and
// returns GetRank() of the result.
// NOTE(review): elimination adds f*row_r into every other row via
// ff2.Add/ff2.Mul, which only clears the lead column if addition is its own
// inverse (characteristic-2 field) — confirm ff.FF implements GF(2^m).
func (mff *MatrixFF) ReduceRowEchelon(ff2 *ff.FF) int {
	lead := uint32(0)
	for r := uint32(0); r < mff.nRows; r++ {
		// Ran out of pivot columns: stop early.
		if lead >= mff.nColumns {
			return mff.GetRank()
		}
		// Find a row at or below r with a nonzero entry in column lead,
		// advancing lead when an entire column is zero.
		i := r
		for mff.m[i][lead] == 0 {
			i++
			if mff.nRows == i {
				i = r
				lead++
				if mff.nColumns == lead {
					return mff.GetRank()
				}
			}
		}
		// Swap the pivot row into position r.
		mff.m[i], mff.m[r] = mff.m[r], mff.m[i]
		// Scale the pivot row so the pivot entry becomes 1.
		if mff.m[r][lead] != 0 {
			f := ff2.Inv(mff.m[r][lead])
			for j := range mff.m[r] {
				mff.m[r][j] = ff2.Mul(mff.m[r][j], f)
			}
		}
		// Eliminate the lead column from every other row.
		for i = 0; i < mff.nRows; i++ {
			if i != r {
				f := mff.m[i][lead]
				for j, e := range mff.m[r] {
					mff.m[i][j] = ff2.Add(mff.m[i][j], ff2.Mul(e, f))
				}
			}
		}
		lead++
	}
	return mff.GetRank()
}
// GetRank returns the rank of the matrix, computed as the 1-based index of
// the last row containing a nonzero entry; for a matrix in row-echelon form
// (as produced by ReduceRowEchelon) this equals the number of nonzero rows.
//
// BUG FIX: in the original, the inner break only exited the column scan and
// `rank = i + 1` then ran unconditionally on every iteration, so the
// function always returned nRows regardless of content. The zero-row test
// is now actually applied.
func (mff *MatrixFF) GetRank() int {
	rank := 0
	for i := 0; i < int(mff.nRows); i++ {
		nonZero := false
		for j := 0; j < int(mff.nColumns); j++ {
			if mff.m[i][j] != 0 {
				nonZero = true
				break
			}
		}
		if nonZero {
			rank = i + 1
		}
	}
	return rank
}
// CreateMatrixG builds the k x (n-k) generator-related matrix for the code
// defined by the Goppa polynomial pol and the permutation p, using field
// arithmetic from ff2 (n = 2^M, k = n - degree*M). On success the receiver
// is replaced by the resulting matrix Q and the permuted support/scalar
// vectors a and h are returned; it returns nil, nil when pol has no roots or
// a rank deficiency is detected after row reduction.
// NOTE(review): the bit-expansion step writing uint16(j) into H could not be
// cross-checked against a reference — verify against the NTS-KEM
// specification.
func (mff *MatrixFF) CreateMatrixG(pol *poly.Polynomial, p []uint16, ff2 *ff.FF, degree int) ([]uint16, []uint16) {
	n := 1 << ff2.M
	k := n - degree*ff2.M
	a := make([]uint16, n)
	h := make([]uint16, n)
	aPr := make([]uint16, n)
	hPr := make([]uint16, n)
	aPr[0] = 0
	// Expand every index i into the field element spanned by ff2.Basis.
	for i := 0; i < n; i++ {
		aPr[i] = 0
		for j := 0; j < ff2.M; j++ {
			aPr[i] ^= uint16((i&(1<<j))>>j) * ff2.Basis[j]
		}
	}
	// Apply the permutation p to form the support a.
	for i := 0; i < n; i++ {
		a[i] = aPr[p[i]]
	}
	hPr = ff2.Roots(pol)
	if hPr == nil {
		return nil, nil
	}
	for i := 0; i < n; i++ {
		h[i] = ff2.Sqr(ff2.Inv(hPr[p[i]]))
	}
	H := MatrixFF{}
	H.New(uint32(degree*ff2.M), uint32(n))
	for i := 0; i < n; i++ {
		hPr[i] = 1
	}
	for i := 0; i < degree; i++ {
		for j := 0; j < n; j++ {
			e := uint16(ff2.M) - 1
			for e > 0 {
				f := ff2.Mul(hPr[j], h[j])
				if f&(1<<e) != 0 {
					H.m[i*ff2.M+(ff2.M-int(e)-1)][j] = uint16(j)
				}
				e--
			}
			hPr[j] = ff2.Mul(hPr[j], a[j])
		}
	}
	rank := H.ReduceRowEchelon(ff2)
	if n-degree*ff2.M != n-rank {
		return nil, nil
	}
	// For each pivot row (bottom-up) find, scanning from the right, the
	// first column with a nonzero entry, and swap it into the identity
	// block position, mirroring the swap in p, a and h.
	for j, i := 0, int(H.nRows-1); i >= 0; i-- {
		// BUG FIX: the original inner loop only advanced j when the
		// scanned entry was zero and had no other exit, so it spun
		// forever on a nonzero entry. The zero test now lives in the
		// loop condition.
		for j < int(H.nColumns)-1 && H.m[i][int(H.nColumns)-j-1] == 0 {
			j++
		}
		H.ColumnSwap(k+i, int(H.nColumns)-j-1)
		p[k+i], p[int(H.nColumns)-j-1] = p[int(H.nColumns)-j-1], p[k+i]
		a[k+i], a[int(H.nColumns)-j-1] = a[int(H.nColumns)-j-1], a[k+i]
		h[k+i], h[int(H.nColumns)-j-1] = h[int(H.nColumns)-j-1], h[k+i]
	}
	// Q is the transpose of the first k columns of the reduced H.
	Q := MatrixFF{}
	Q.New(uint32(k), uint32(n-k))
	for i := 0; i < n-k; i++ {
		for j := 0; j < k; j++ {
			Q.m[j][i] = H.m[i][j]
		}
	}
	mff.nRows = Q.nRows
	mff.nColumns = Q.nColumns
	mff.m = Q.m
	return a, h
}
// PolyOnMatriceMult multiplies the row vector poly by the matrix, XOR-folding
// each row's contribution into a result vector of length nColumns.
// NOTE(review): each term uses integer `*` rather than a field
// multiplication, accumulated with `^` — confirm the operands here are
// restricted to {0,1} (or that this mixed arithmetic is intended).
// NOTE(review): assumes len(poly) >= len(mff.m) and a non-empty matrix
// (mff.m[0] is indexed unconditionally) — verify at the call sites.
func (mff *MatrixFF) PolyOnMatriceMult(poly []uint16) []uint16 {
	result := make([]uint16, len(mff.m[0]))
	for i := 0; i < len(mff.m); i++ {
		for j := 0; j < len(mff.m[i]); j++ {
			result[j] ^= poly[i] * mff.m[i][j]
		}
	}
	return result
}
// CreateMatrixH fills the receiver with m[i][j] = Pow(a[i], j) * h[i], i.e.
// a parity-check style matrix built from the support vector a and per-row
// scalars h.
// NOTE(review): the scaling by h[i] uses integer `*` instead of a field
// multiplication (ff.Mul) — confirm whether that is intended.
// NOTE(review): the parameter named ff shadows the imported ff package
// inside this function; left unchanged here to keep this a comment-only
// edit, but renaming it would aid readability.
func (mff *MatrixFF) CreateMatrixH(a, h []uint16, ff ff.FF) {
	for i := uint32(0); i < mff.nRows; i++ {
		for j := uint32(0); j < mff.nColumns; j++ {
			mff.m[i][j] = ff.Pow(a[i], uint16(j)) * h[i]
		}
	}
}
|
package admin
import (
"github.com/labstack/echo"
coreTransaction "mix/test/api/admin/controller/core.transaction"
)
// SetRoutes registers every admin endpoint of the core transaction service
// under the "/core/transaction" group. Registration is table-driven so each
// route is a single (method, path, handler) row.
func SetRoutes(e *echo.Echo) {
	g := e.Group("/core/transaction")
	routes := []struct {
		method  string
		path    string
		handler echo.HandlerFunc
	}{
		{"GET", "/getAccount", coreTransaction.GetAccount},
		{"GET", "/getAccountList", coreTransaction.GetAccountList},
		{"DELETE", "/removeAccount", coreTransaction.RemoveAccount},
		{"PUT", "/updateAccount", coreTransaction.UpdateAccount},
		{"GET", "/getAccountByOpenId", coreTransaction.GetAccountByOpenId},
		{"PUT", "/updateAccountByOpenId", coreTransaction.UpdateAccountByOpenId},
		{"DELETE", "/removeAccountByOpenId", coreTransaction.RemoveAccountByOpenId},
		{"POST", "/createAddress", coreTransaction.CreateAddress},
		{"GET", "/getAddress", coreTransaction.GetAddress},
		{"GET", "/getAddressList", coreTransaction.GetAddressList},
		{"DELETE", "/removeAddress", coreTransaction.RemoveAddress},
		{"PUT", "/updateAddress", coreTransaction.UpdateAddress},
		{"GET", "/getAddressByAccountId", coreTransaction.GetAddressByAccountId},
		{"PUT", "/updateAddressByAccountId", coreTransaction.UpdateAddressByAccountId},
		{"DELETE", "/removeAddressByAccountId", coreTransaction.RemoveAddressByAccountId},
		{"GET", "/getAddressListByChain", coreTransaction.GetAddressListByChain},
		{"PUT", "/updateAddressListByChain", coreTransaction.UpdateAddressListByChain},
		{"DELETE", "/removeAddressListByChain", coreTransaction.RemoveAddressListByChain},
		{"POST", "/createAudit", coreTransaction.CreateAudit},
		{"GET", "/getAudit", coreTransaction.GetAudit},
		{"GET", "/getAuditList", coreTransaction.GetAuditList},
		{"DELETE", "/removeAudit", coreTransaction.RemoveAudit},
		{"PUT", "/updateAudit", coreTransaction.UpdateAudit},
		{"GET", "/getAuditByAddressId", coreTransaction.GetAuditByAddressId},
		{"PUT", "/updateAuditByAddressId", coreTransaction.UpdateAuditByAddressId},
		{"DELETE", "/removeAuditByAddressId", coreTransaction.RemoveAuditByAddressId},
		{"POST", "/createBalance", coreTransaction.CreateBalance},
		{"GET", "/getBalance", coreTransaction.GetBalance},
		{"GET", "/getBalanceList", coreTransaction.GetBalanceList},
		{"DELETE", "/removeBalance", coreTransaction.RemoveBalance},
		{"PUT", "/updateBalance", coreTransaction.UpdateBalance},
		{"GET", "/getBalanceByAddressId", coreTransaction.GetBalanceByAddressId},
		{"PUT", "/updateBalanceByAddressId", coreTransaction.UpdateBalanceByAddressId},
		{"DELETE", "/removeBalanceByAddressId", coreTransaction.RemoveBalanceByAddressId},
		{"POST", "/createCallback", coreTransaction.CreateCallback},
		{"GET", "/getCallback", coreTransaction.GetCallback},
		{"GET", "/getCallbackList", coreTransaction.GetCallbackList},
		{"DELETE", "/removeCallback", coreTransaction.RemoveCallback},
		{"PUT", "/updateCallback", coreTransaction.UpdateCallback},
		{"GET", "/getCallbackListByRequestId", coreTransaction.GetCallbackListByRequestId},
		{"PUT", "/updateCallbackListByRequestId", coreTransaction.UpdateCallbackListByRequestId},
		{"DELETE", "/removeCallbackListByRequestId", coreTransaction.RemoveCallbackListByRequestId},
		{"POST", "/createFromAddress", coreTransaction.CreateFromAddress},
		{"GET", "/getFromAddress", coreTransaction.GetFromAddress},
		{"GET", "/getFromAddressList", coreTransaction.GetFromAddressList},
		{"DELETE", "/removeFromAddress", coreTransaction.RemoveFromAddress},
		{"PUT", "/updateFromAddress", coreTransaction.UpdateFromAddress},
		{"POST", "/createHotAccount", coreTransaction.CreateHotAccount},
		{"GET", "/getHotAccount", coreTransaction.GetHotAccount},
		{"GET", "/getHotAccountList", coreTransaction.GetHotAccountList},
		{"DELETE", "/removeHotAccount", coreTransaction.RemoveHotAccount},
		{"PUT", "/updateHotAccount", coreTransaction.UpdateHotAccount},
		{"POST", "/createHotWithdraw", coreTransaction.CreateHotWithdraw},
		{"GET", "/getHotWithdraw", coreTransaction.GetHotWithdraw},
		{"GET", "/getHotWithdrawList", coreTransaction.GetHotWithdrawList},
		{"DELETE", "/removeHotWithdraw", coreTransaction.RemoveHotWithdraw},
		{"PUT", "/updateHotWithdraw", coreTransaction.UpdateHotWithdraw},
		{"GET", "/getHotWithdrawByMerchantId", coreTransaction.GetHotWithdrawByMerchantId},
		{"PUT", "/updateHotWithdrawByMerchantId", coreTransaction.UpdateHotWithdrawByMerchantId},
		{"DELETE", "/removeHotWithdrawByMerchantId", coreTransaction.RemoveHotWithdrawByMerchantId},
		{"POST", "/createMember", coreTransaction.CreateMember},
		{"GET", "/getMember", coreTransaction.GetMember},
		{"GET", "/getMemberList", coreTransaction.GetMemberList},
		{"DELETE", "/removeMember", coreTransaction.RemoveMember},
		{"PUT", "/updateMember", coreTransaction.UpdateMember},
		{"POST", "/createMerchant", coreTransaction.CreateMerchant},
		{"GET", "/getMerchant", coreTransaction.GetMerchant},
		{"GET", "/getMerchantList", coreTransaction.GetMerchantList},
		{"DELETE", "/removeMerchant", coreTransaction.RemoveMerchant},
		{"PUT", "/updateMerchant", coreTransaction.UpdateMerchant},
		{"GET", "/getMerchantByOpenId", coreTransaction.GetMerchantByOpenId},
		{"PUT", "/updateMerchantByOpenId", coreTransaction.UpdateMerchantByOpenId},
		{"DELETE", "/removeMerchantByOpenId", coreTransaction.RemoveMerchantByOpenId},
		{"GET", "/getMerchantByClientId", coreTransaction.GetMerchantByClientId},
		{"PUT", "/updateMerchantByClientId", coreTransaction.UpdateMerchantByClientId},
		{"DELETE", "/removeMerchantByClientId", coreTransaction.RemoveMerchantByClientId},
		{"POST", "/createMerchantToken", coreTransaction.CreateMerchantToken},
		{"GET", "/getMerchantToken", coreTransaction.GetMerchantToken},
		{"GET", "/getMerchantTokenList", coreTransaction.GetMerchantTokenList},
		{"DELETE", "/removeMerchantToken", coreTransaction.RemoveMerchantToken},
		{"PUT", "/updateMerchantToken", coreTransaction.UpdateMerchantToken},
		{"GET", "/getMerchantTokenByMerchantId", coreTransaction.GetMerchantTokenByMerchantId},
		{"PUT", "/updateMerchantTokenByMerchantId", coreTransaction.UpdateMerchantTokenByMerchantId},
		{"DELETE", "/removeMerchantTokenByMerchantId", coreTransaction.RemoveMerchantTokenByMerchantId},
		{"POST", "/createNetworkFee", coreTransaction.CreateNetworkFee},
		{"GET", "/getNetworkFee", coreTransaction.GetNetworkFee},
		{"GET", "/getNetworkFeeList", coreTransaction.GetNetworkFeeList},
		{"DELETE", "/removeNetworkFee", coreTransaction.RemoveNetworkFee},
		{"PUT", "/updateNetworkFee", coreTransaction.UpdateNetworkFee},
		{"GET", "/getNetworkFeeByTransactionId", coreTransaction.GetNetworkFeeByTransactionId},
		{"PUT", "/updateNetworkFeeByTransactionId", coreTransaction.UpdateNetworkFeeByTransactionId},
		{"DELETE", "/removeNetworkFeeByTransactionId", coreTransaction.RemoveNetworkFeeByTransactionId},
		{"POST", "/createTicket", coreTransaction.CreateTicket},
		{"GET", "/getTicket", coreTransaction.GetTicket},
		{"GET", "/getTicketList", coreTransaction.GetTicketList},
		{"DELETE", "/deleteTicket", coreTransaction.DeleteTicket},
		{"DELETE", "/removeTicket", coreTransaction.RemoveTicket},
		{"PUT", "/updateTicket", coreTransaction.UpdateTicket},
		{"POST", "/createToken", coreTransaction.CreateToken},
		{"GET", "/getToken", coreTransaction.GetToken},
		{"GET", "/getTokenList", coreTransaction.GetTokenList},
		{"DELETE", "/removeToken", coreTransaction.RemoveToken},
		{"PUT", "/updateToken", coreTransaction.UpdateToken},
		{"GET", "/getTokenBySymbol", coreTransaction.GetTokenBySymbol},
		{"PUT", "/updateTokenBySymbol", coreTransaction.UpdateTokenBySymbol},
		{"DELETE", "/removeTokenBySymbol", coreTransaction.RemoveTokenBySymbol},
		{"POST", "/createTransaction", coreTransaction.CreateTransaction},
		{"GET", "/getTransaction", coreTransaction.GetTransaction},
		{"GET", "/getTransactionList", coreTransaction.GetTransactionList},
		{"DELETE", "/removeTransaction", coreTransaction.RemoveTransaction},
		{"PUT", "/updateTransaction", coreTransaction.UpdateTransaction},
		{"GET", "/getTransactionByChainTxidUnique", coreTransaction.GetTransactionByChainTxidUnique},
		{"PUT", "/updateTransactionByChainTxidUnique", coreTransaction.UpdateTransactionByChainTxidUnique},
		{"DELETE", "/removeTransactionByChainTxidUnique", coreTransaction.RemoveTransactionByChainTxidUnique},
	}
	for _, r := range routes {
		g.Add(r.method, r.path, r.handler)
	}
}
|
package middlewares
import (
"strconv"
"strings"
"time"
"github.com/valyala/fasthttp"
"github.com/authelia/authelia/v4/internal/metrics"
)
// NewMetricsRequest returns a middleware if provided with a metrics.Recorder, otherwise it returns nil.
// The middleware records the response status code, request method and elapsed
// time of every request after the wrapped handler has run.
func NewMetricsRequest(metrics metrics.Recorder) (middleware Basic) {
	if metrics == nil {
		return nil
	}
	return func(next fasthttp.RequestHandler) (handler fasthttp.RequestHandler) {
		return func(ctx *fasthttp.RequestCtx) {
			startedAt := time.Now()
			next(ctx)
			metrics.RecordRequest(
				strconv.Itoa(ctx.Response.StatusCode()),
				string(ctx.Method()),
				time.Since(startedAt),
			)
		}
	}
}
// NewMetricsRequestOpenIDConnect returns a middleware if provided with a metrics.Recorder, otherwise it returns nil.
// The middleware records the status code and elapsed time of every request,
// labelled with the given endpoint name.
func NewMetricsRequestOpenIDConnect(metrics metrics.Recorder, endpoint string) (middleware Basic) {
	if metrics == nil {
		return nil
	}
	// Metric labels use underscores rather than dashes.
	endpoint = strings.ReplaceAll(endpoint, "-", "_")
	return func(next fasthttp.RequestHandler) (handler fasthttp.RequestHandler) {
		return func(ctx *fasthttp.RequestCtx) {
			startedAt := time.Now()
			next(ctx)
			metrics.RecordRequestOpenIDConnect(endpoint, strconv.Itoa(ctx.Response.StatusCode()), time.Since(startedAt))
		}
	}
}
// NewMetricsAuthzRequest returns a middleware if provided with a metrics.Recorder, otherwise it returns nil.
// The middleware records only the response status code of each authz request.
func NewMetricsAuthzRequest(metrics metrics.Recorder) (middleware Basic) {
	if metrics == nil {
		return nil
	}
	return func(next fasthttp.RequestHandler) (handler fasthttp.RequestHandler) {
		return func(ctx *fasthttp.RequestCtx) {
			next(ctx)
			metrics.RecordAuthz(strconv.Itoa(ctx.Response.StatusCode()))
		}
	}
}
|
package appdynamics
import (
"github.com/HarryEMartland/terraform-provider-appdynamics/appdynamics/client"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"strconv"
)
// resourceHealthRule declares the Terraform schema and CRUD bindings for an
// AppDynamics health rule. The allowed values for each enumerated attribute
// are named locally so the schema map stays readable.
func resourceHealthRule() *schema.Resource {
	affectedEntityTypes := []string{
		"OVERALL_APPLICATION_PERFORMANCE",
		"BUSINESS_TRANSACTION_PERFORMANCE",
		"TIER_NODE_TRANSACTION_PERFORMANCE",
		"TIER_NODE_HARDWARE",
		"SERVERS_IN_APPLICATION",
		"BACKENDS",
		"ERRORS",
		"SERVICE_ENDPOINTS",
		"INFORMATION_POINTS",
		"CUSTOM",
		"DATABASES",
		"SERVERS",
	}
	businessTransactionScopes := []string{
		"ALL_BUSINESS_TRANSACTIONS",
		"SPECIFIC_BUSINESS_TRANSACTIONS",
		"BUSINESS_TRANSACTIONS_IN_SPECIFIC_TIERS",
		"BUSINESS_TRANSACTIONS_MATCHING_PATTERN",
	}
	metricEvalDetailTypes := []string{
		"SINGLE_METRIC",
		"METRIC_EXPRESSION",
		"BASELINE_TYPE",
		"SPECIFIC_TYPE",
	}
	baselineConditions := []string{
		"WITHIN_BASELINE",
		"NOT_WITHIN_BASELINE",
		"GREATER_THAN_BASELINE",
		"LESS_THAN_BASELINE",
	}
	baselineUnits := []string{
		"STANDARD_DEVIATIONS",
		"PERCENTAGE",
	}
	compareConditions := []string{
		"GREATER_THAN_SPECIFIC_VALUE",
		"LESS_THAN_SPECIFIC_VALUE",
	}
	return &schema.Resource{
		Create: resourceHealthRuleCreate,
		Read:   resourceHealthRuleRead,
		Update: resourceHealthRuleUpdate,
		Delete: resourceHealthRuleDelete,
		Schema: map[string]*schema.Schema{
			"application_id": {
				Type:     schema.TypeInt,
				Required: true,
			},
			"name": {
				Type:     schema.TypeString,
				Required: true,
			},
			"evaluation_minutes": {
				Type:     schema.TypeInt,
				Default:  30,
				Optional: true,
			},
			"violation_length_minutes": {
				Type:     schema.TypeInt,
				Default:  5,
				Optional: true,
			},
			"affected_entity_type": {
				Type:         schema.TypeString,
				Required:     true,
				ValidateFunc: validateList(affectedEntityTypes),
			},
			"business_transaction_scope": {
				Type:         schema.TypeString,
				Required:     true,
				ValidateFunc: validateList(businessTransactionScopes),
			},
			"evaluate_to_true_on_no_data": {
				Type:     schema.TypeBool,
				Optional: true,
				Default:  false,
			},
			"warn_compare_value": {
				Type:     schema.TypeFloat,
				Required: true,
			},
			"critical_compare_value": {
				Type:     schema.TypeFloat,
				Required: true,
			},
			"eval_detail_type": {
				Type:     schema.TypeString,
				Required: true,
			},
			"metric_aggregation_function": {
				Type:     schema.TypeString,
				Required: true,
			},
			"metric_path": {
				Type:     schema.TypeString,
				Required: true,
			},
			"metric_eval_detail_type": {
				Type:         schema.TypeString,
				Required:     true,
				ValidateFunc: validateList(metricEvalDetailTypes),
			},
			"baseline_condition": {
				Type:         schema.TypeString,
				Optional:     true,
				ValidateFunc: validateList(baselineConditions),
			},
			"baseline_name": {
				Type:     schema.TypeString,
				Optional: true,
			},
			"baseline_unit": {
				Type:         schema.TypeString,
				Optional:     true,
				ValidateFunc: validateList(baselineUnits),
			},
			"compare_condition": {
				Type:         schema.TypeString,
				Optional:     true,
				ValidateFunc: validateList(compareConditions),
			},
			"business_transactions": {
				Type:     schema.TypeSet,
				Optional: true,
				Elem: &schema.Schema{
					Type: schema.TypeString,
				},
			},
			"specific_tiers": {
				Type:     schema.TypeSet,
				Optional: true,
				Elem: &schema.Schema{
					Type: schema.TypeString,
				},
			},
		},
	}
}
// resourceHealthRuleCreate creates the health rule via the AppDynamics API,
// stores the new rule's ID in the Terraform state and reads the rule back.
func resourceHealthRuleCreate(d *schema.ResourceData, m interface{}) error {
	appdClient := m.(*client.AppDClient)
	appID := d.Get("application_id").(int)
	rule := createHealthRule(d)
	created, err := appdClient.CreateHealthRule(&rule, appID)
	if err != nil {
		return err
	}
	d.SetId(strconv.Itoa(created.ID))
	return resourceHealthRuleRead(d, m)
}
// GetOrNilS returns a pointer to the string stored under key, or nil when the
// key is unset (per GetOk semantics, a zero value also counts as unset).
func GetOrNilS(d *schema.ResourceData, key string) *string {
	if value, ok := d.GetOk(key); ok {
		s := value.(string)
		return &s
	}
	return nil
}
// GetOrNilL returns a pointer to the list form of the set stored under key,
// or nil when the key is unset (per GetOk semantics).
func GetOrNilL(d *schema.ResourceData, key string) *[]interface{} {
	if value, ok := d.GetOk(key); ok {
		items := value.(*schema.Set).List()
		return &items
	}
	return nil
}
// buildCompareCriteria builds one single-condition criteria block from the
// resource data, using compareValue as its threshold. The critical and
// warning criterias are identical except for this threshold, so both are
// produced by this helper (removing the verbatim duplication of the
// original implementation).
func buildCompareCriteria(d *schema.ResourceData, compareValue float64) *client.Criteria {
	name := d.Get("name").(string)
	return &client.Criteria{
		ConditionAggregationType: "ALL",
		Conditions: []*client.Condition{{
			Name:                   name,
			ShortName:              "A",
			EvaluateToTrueOnNoData: d.Get("evaluate_to_true_on_no_data").(bool),
			EvalDetail: &client.EvalDetail{
				EvalDetailType:          d.Get("eval_detail_type").(string),
				MetricAggregateFunction: d.Get("metric_aggregation_function").(string),
				MetricPath:              d.Get("metric_path").(string),
				MetricEvalDetail: &client.MetricEvalDetail{
					MetricEvalDetailType: d.Get("metric_eval_detail_type").(string),
					BaselineCondition:    GetOrNilS(d, "baseline_condition"),
					BaselineName:         GetOrNilS(d, "baseline_name"),
					BaselineUnit:         GetOrNilS(d, "baseline_unit"),
					CompareValue:         compareValue,
					CompareCondition:     GetOrNilS(d, "compare_condition"),
				},
			},
		}},
	}
}

// createHealthRule assembles a client.HealthRule from the Terraform resource
// data. The rule is always created enabled; its critical and warning
// conditions differ only in their compare values.
func createHealthRule(d *schema.ResourceData) client.HealthRule {
	return client.HealthRule{
		Name:                    d.Get("name").(string),
		Enabled:                 true,
		UseDataFromLastNMinutes: d.Get("evaluation_minutes").(int),
		WaitTimeAfterViolation:  d.Get("violation_length_minutes").(int),
		Affects: &client.Affects{
			AffectedEntityType: d.Get("affected_entity_type").(string),
			AffectedBusinessTransactions: &client.Transaction{
				BusinessTransactionScope: d.Get("business_transaction_scope").(string),
				BusinessTransactions:     GetOrNilL(d, "business_transactions"),
				SpecificTiers:            GetOrNilL(d, "specific_tiers"),
			},
		},
		Criterias: &client.Criterias{
			Critical: buildCompareCriteria(d, d.Get("critical_compare_value").(float64)),
			Warning:  buildCompareCriteria(d, d.Get("warn_compare_value").(float64)),
		},
	}
}
// resourceHealthRuleRead fetches the health rule identified by the resource
// ID from the AppDynamics API and copies it back into the Terraform state.
func resourceHealthRuleRead(d *schema.ResourceData, m interface{}) error {
	appdClient := m.(*client.AppDClient)
	appID := d.Get("application_id").(int)
	ruleID, err := strconv.Atoi(d.Id())
	if err != nil {
		return err
	}
	rule, err := appdClient.GetHealthRule(ruleID, appID) //read back into d
	if err != nil {
		return err
	}
	updateHealthRule(d, rule)
	return nil
}
// updateHealthRule copies a health rule returned by the API into the
// Terraform state. The schema attributes are sourced from the first critical
// condition; only the warn compare value comes from the warning criteria.
func updateHealthRule(d *schema.ResourceData, healthRule *client.HealthRule) {
	d.Set("name", healthRule.Name)
	d.Set("evaluation_minutes", healthRule.UseDataFromLastNMinutes)
	d.Set("violation_length_minutes", healthRule.WaitTimeAfterViolation)
	d.Set("affected_entity_type", healthRule.Affects.AffectedEntityType)
	d.Set("business_transaction_scope", healthRule.Affects.AffectedBusinessTransactions.BusinessTransactionScope)
	critical := healthRule.Criterias.Critical.Conditions[0]
	detail := critical.EvalDetail
	metricDetail := detail.MetricEvalDetail
	d.Set("evaluate_to_true_on_no_data", critical.EvaluateToTrueOnNoData)
	d.Set("eval_detail_type", detail.EvalDetailType)
	d.Set("metric_aggregation_function", detail.MetricAggregateFunction)
	d.Set("metric_path", detail.MetricPath)
	d.Set("critical_compare_value", metricDetail.CompareValue)
	d.Set("warn_compare_value", healthRule.Criterias.Warning.Conditions[0].EvalDetail.MetricEvalDetail.CompareValue)
	d.Set("metric_eval_detail_type", metricDetail.MetricEvalDetailType)
	d.Set("baseline_condition", metricDetail.BaselineCondition)
	d.Set("baseline_name", metricDetail.BaselineName)
	d.Set("baseline_unit", metricDetail.BaselineUnit)
}
// resourceHealthRuleUpdate rebuilds the rule from the Terraform state,
// pushes it to the API under the existing ID and reads the result back.
func resourceHealthRuleUpdate(d *schema.ResourceData, m interface{}) error {
	appdClient := m.(*client.AppDClient)
	appID := d.Get("application_id").(int)
	ruleID, err := strconv.Atoi(d.Id())
	if err != nil {
		return err
	}
	rule := createHealthRule(d)
	rule.ID = ruleID
	if _, err = appdClient.UpdateHealthRule(&rule, appID); err != nil {
		return err
	}
	return resourceHealthRuleRead(d, m)
}
// resourceHealthRuleDelete removes the health rule identified by the
// resource ID from the AppDynamics application.
func resourceHealthRuleDelete(d *schema.ResourceData, m interface{}) error {
	appdClient := m.(*client.AppDClient)
	appID := d.Get("application_id").(int)
	ruleID, err := strconv.Atoi(d.Id())
	if err != nil {
		return err
	}
	return appdClient.DeleteHealthRule(appID, ruleID)
}
// contains reports whether e is an element of s.
func contains(s []string, e string) bool {
	found := false
	for i := 0; i < len(s) && !found; i++ {
		found = s[i] == e
	}
	return found
}
|
package main
import (
"time"
)
// main starts two goroutines that each print 100 lines ("Hello" and "World"
// respectively). Typically you will see a burst of Hellos and a burst of
// Worlds, but the Go scheduler gives no ordering guarantee between them.
func main() {
	// anonymous function
	go func() {
		for i := 0; i < 100; i++ {
			println("Hello")
		}
	}()
	// anonymous function
	go func() {
		for i := 0; i < 100; i++ {
			println("World")
		}
	}()
	// Without a pause, main would return before the goroutines finish.
	// Bug fix: use the time.Second constant instead of ParseDuration("1s"),
	// whose error the original silently discarded. (A sync.WaitGroup would
	// be the deterministic way to wait, but this demo illustrates the sleep.)
	time.Sleep(time.Second)
}
|
package handlers
import (
"net/http"
"github.com/root-gg/plik/server/common"
"github.com/root-gg/plik/server/context"
)
// GetUpload writes the metadata of the current upload as a JSON response.
func GetUpload(ctx *context.Context, resp http.ResponseWriter, req *http.Request) {
	conf := ctx.GetConfig()
	// An earlier middleware must have attached the upload to the context.
	upload := ctx.GetUpload()
	if upload == nil {
		panic("missing upload from context")
	}
	// Attach the upload's file list before responding.
	uploadFiles, err := ctx.GetMetadataBackend().GetFiles(upload.ID)
	if err != nil {
		ctx.InternalServerError("unable to get upload files", err)
		return
	}
	upload.Files = uploadFiles
	// Hide private information (IP, data backend details, User ID, Login/Password, ...)
	upload.Sanitize(conf)
	common.WriteJSONResponse(resp, upload)
}
|
/*
Given a string s and a string t, check if s is subsequence of t.
You may assume that there is only lower case English letters in both s and t. t is potentially a very long (length ~= 500,000) string, and s is a short string (<=100).
A subsequence of a string is a new string which is formed from the original string by deleting some (can be none) of the characters without disturbing the relative positions of the remaining characters. (ie, "ace" is a subsequence of "abcde" while "aec" is not).
Follow up:
If there are lots of incoming S, say S1, S2, ... , Sk where k >= 1B, and you want to check one by one to see if T has its subsequence. In this scenario, how would you change your code?
*/
package main
import (
"fmt"
"unicode/utf8"
)
// main exercises both subsequence implementations on a fixed set of cases.
func main() {
	cases := []struct{ s, t string }{
		{"ace", "abcde"},
		{"aec", "abcde"},
		{"abc", "ahbgdc"},
		{"axc", "ahbgdc"},
		{"ahbgd", "ahbgdc"},
		{"ahbgdc", "ahbgdc"},
	}
	for _, c := range cases {
		test(c.s, c.t)
	}
}
// test prints the result of the two-pointer check next to the indexed check
// so their agreement can be eyeballed.
func test(s, t string) {
	var index csq
	index.build(t)
	fmt.Println(subseq(s, t), index.subseq(s))
}
// subseq reports whether s is a subsequence of t, walking both strings once
// rune by rune and consuming a rune of s whenever t produces a match.
func subseq(s, t string) bool {
	rest := s
	for _, r := range t {
		if rest == "" {
			break
		}
		want, size := utf8.DecodeRuneInString(rest)
		if want == r {
			rest = rest[size:]
		}
	}
	return rest == ""
}
// csq indexes a text for repeated subsequence queries: m maps each rune of
// the text to the increasing list of byte offsets at which it occurs.
type csq struct {
	m map[rune][]int
}
// build indexes t, recording the byte offset of every rune occurrence in
// increasing order.
func (c *csq) build(t string) {
	index := make(map[rune][]int)
	for offset, r := range t {
		index[r] = append(index[r], offset)
	}
	c.m = index
}
// find returns the smallest byte offset >= i at which rune r occurs in the
// indexed text, or -1 if r does not occur at or after position i.
func (c *csq) find(i int, r rune) int {
	l := c.m[r] // occurrence offsets of r, sorted increasing
	if len(l) == 0 {
		return -1
	}
	lo := 0
	hi := len(l) - 1
	// Fast paths: every occurrence qualifies, or none does.
	if i <= l[lo] {
		return l[lo]
	}
	if i > l[hi] {
		return -1
	}
	// Binary search maintaining the invariant l[lo] < i <= l[hi];
	// when the window narrows to adjacent indices, l[hi] is the answer.
	for hi-lo != 1 {
		mid := lo + (hi-lo)/2
		if l[mid] < i {
			lo = mid
		} else {
			hi = mid
		}
	}
	return l[hi]
}
// subseq reports whether s is a subsequence of the indexed text, greedily
// matching each rune of s at the earliest position still available.
func (c *csq) subseq(s string) bool {
	pos := 0
	for _, r := range s {
		next := c.find(pos, r)
		if next < 0 {
			return false
		}
		pos = next + 1
	}
	return true
}
|
package transportador
import (
"time"
"github.com/google/uuid"
)
// Voucher is a delivery voucher. The field names are Portuguese: a delivery
// number (UUID) and the estimated delivery time, serialized via the JSON tags.
type Voucher struct{
	NumeroEntrega uuid.UUID `json:"numeroEntrega"`
	PrevisaoParaEntrega time.Time `json:"previsaoParaEntrega"`
}
|
package cli
import (
"context"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strconv"
"github.com/btcsuite/btcutil/base58"
"github.com/koinos/koinos-cli/internal/cliutil"
"github.com/koinos/koinos-proto-golang/encoding/text"
"github.com/koinos/koinos-proto-golang/koinos"
"github.com/koinos/koinos-proto-golang/koinos/protocol"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/descriptorpb"
"google.golang.org/protobuf/types/dynamicpb"
)
// ----------------------------------------------------------------------------
// Register Command
// ----------------------------------------------------------------------------
// RegisterCommand is a command that registers a contract so its ABI methods
// become invocable CLI commands. (The previous comment, "closes an open
// wallet", was a copy-paste error.) ABIFilename is optional; when nil the
// ABI is fetched from the RPC server.
type RegisterCommand struct {
	Name string
	Address string
	ABIFilename *string
}
// NewRegisterCommand creates a new register command object from the parsed
// invocation. (The previous comment, "creates a new close object", was a
// copy-paste error.)
func NewRegisterCommand(inv *CommandParseResult) Command {
	return &RegisterCommand{Name: *inv.Args["name"], Address: *inv.Args["address"], ABIFilename: inv.Args["abi-filename"]}
}
// Execute registers a contract: it loads the contract's ABI (from a local
// file when one was given, otherwise from the RPC server), validates each
// method's argument and return message types, generates one CLI command per
// method and adds the contract to the registry.
func (c *RegisterCommand) Execute(ctx context.Context, ee *ExecutionEnvironment) (*ExecutionResult, error) {
	if ee.Contracts.Contains(c.Name) {
		return nil, fmt.Errorf("%w: contract %s already exists", cliutil.ErrContract, c.Name)
	}
	// Ensure that the name is a valid command name
	_, err := ee.Parser.parseCommandName([]byte(c.Name))
	if err != nil {
		return nil, fmt.Errorf("%w: invalid characters in contract name %s", cliutil.ErrContract, err)
	}
	// Get the ABI
	var abiBytes []byte
	if c.ABIFilename != nil { // If an ABI file was given, use it
		jsonFile, err := os.Open(*c.ABIFilename)
		if err != nil {
			return nil, fmt.Errorf("%w: %s", cliutil.ErrInvalidABI, err)
		}
		defer jsonFile.Close()
		abiBytes, err = ioutil.ReadAll(jsonFile)
		if err != nil {
			return nil, fmt.Errorf("%w: %s", cliutil.ErrInvalidABI, err)
		}
	} else { // Otherwise ask the RPC server for the ABI
		if !ee.IsOnline() {
			return nil, fmt.Errorf("%w: %s", cliutil.ErrOffline, "could not fetch contract ABI")
		}
		meta, err := ee.RPCClient.GetContractMeta(ctx, base58.Decode(c.Address))
		if err != nil {
			return nil, err
		}
		abiBytes = []byte(meta.GetAbi())
	}
	var abi ABI
	err = json.Unmarshal(abiBytes, &abi)
	if err != nil {
		return nil, fmt.Errorf("%w: %s", cliutil.ErrInvalidABI, err)
	}
	files, err := abi.GetFiles()
	if err != nil {
		return nil, fmt.Errorf("%w: %s", cliutil.ErrInvalidABI, err)
	}
	commands := []*CommandDeclaration{}
	// Iterate through the methods and construct the commands
	for name, method := range abi.Methods {
		d, err := files.FindDescriptorByName(protoreflect.FullName(method.Argument))
		if err != nil {
			return nil, fmt.Errorf("%w: could not find type %s", cliutil.ErrInvalidABI, method.Argument)
		}
		md, ok := d.(protoreflect.MessageDescriptor)
		if !ok {
			return nil, fmt.Errorf("%w: %s is not a message", cliutil.ErrInvalidABI, method.Argument)
		}
		params, err := ParseABIFields(md)
		if err != nil {
			return nil, fmt.Errorf("%w: %s", cliutil.ErrInvalidABI, err)
		}
		// Validate the return type as well.
		d, err = files.FindDescriptorByName(protoreflect.FullName(method.Return))
		if err != nil {
			// Bug fix: report the return type here, not the argument type.
			return nil, fmt.Errorf("%w: could not find type %s", cliutil.ErrInvalidABI, method.Return)
		}
		_, ok = d.(protoreflect.MessageDescriptor)
		if !ok {
			// Bug fix: report the return type here, not the argument type.
			return nil, fmt.Errorf("%w: %s is not a message", cliutil.ErrInvalidABI, method.Return)
		}
		commandName := fmt.Sprintf("%s.%s", c.Name, name)
		// Create the command
		var cmd *CommandDeclaration
		if method.ReadOnly {
			cmd = NewCommandDeclaration(commandName, method.Description, false, NewReadContractCommand, params...)
		} else {
			cmd = NewCommandDeclaration(commandName, method.Description, false, NewWriteContractCommand, params...)
		}
		commands = append(commands, cmd)
	}
	// Register the contract
	err = ee.Contracts.Add(c.Name, c.Address, &abi, files)
	if err != nil {
		return nil, err
	}
	for _, cmd := range commands {
		ee.Parser.Commands.AddCommand(cmd)
	}
	er := NewExecutionResult()
	er.AddMessage(fmt.Sprintf("Contract '%s' at address %s registered", c.Name, c.Address))
	return er, nil
}
// ----------------------------------------------------------------------------
// Read Contract Command
// ----------------------------------------------------------------------------
// ReadContractCommand is a backend for generated commands that read from a
// contract. ParseResult carries the generated command's name and arguments.
type ReadContractCommand struct {
	ParseResult *CommandParseResult
}
// NewReadContractCommand creates a new read contract command wrapping the
// parsed invocation.
func NewReadContractCommand(inv *CommandParseResult) Command {
	return &ReadContractCommand{ParseResult: inv}
}
// Execute executes the read contract command: it serializes the parsed
// arguments, performs a read-only contract call over RPC and pretty-prints
// the decoded result. Requires an online connection.
func (c *ReadContractCommand) Execute(ctx context.Context, ee *ExecutionEnvironment) (*ExecutionResult, error) {
	if !ee.IsOnline() {
		return nil, fmt.Errorf("%w: cannot execute method", cliutil.ErrOffline)
	}
	contract := ee.Contracts.GetFromMethodName(c.ParseResult.CommandName)
	// Entry points are stored as hex strings; [2:] skips the "0x" prefix.
	entryPoint, err := strconv.ParseUint(ee.Contracts.GetMethod(c.ParseResult.CommandName).EntryPoint[2:], 16, 32)
	if err != nil {
		return nil, err
	}
	// Form a protobuf message from the command input
	msg, err := ParseResultToMessage(c.ParseResult, ee.Contracts)
	if err != nil {
		return nil, fmt.Errorf("%w: %s", cliutil.ErrInvalidABI, err)
	}
	// Get the bytes of the message
	argBytes, err := proto.Marshal(msg)
	if err != nil {
		return nil, err
	}
	// Get the contractID
	contractID := base58.Decode(contract.Address)
	cResp, err := ee.RPCClient.ReadContract(ctx, argBytes, contractID, uint32(entryPoint))
	if err != nil {
		return nil, err
	}
	// Get return message descriptor
	md, err := ee.Contracts.GetMethodReturn(c.ParseResult.CommandName)
	if err != nil {
		return nil, err
	}
	// Unmarshal the raw RPC result into a dynamic message of the return type.
	dMsg := dynamicpb.NewMessage(md)
	err = proto.Unmarshal(cResp.GetResult(), dMsg)
	if err != nil {
		return nil, err
	}
	er := NewExecutionResult()
	// Convert encoded bytes fields (hex/base58/base64) to raw bytes in place.
	err = DecodeMessageBytes(dMsg, md)
	if err != nil {
		return nil, err
	}
	b, err := text.MarshalPretty(dMsg)
	if err != nil {
		return nil, err
	}
	er.AddMessage(string(b))
	return er, nil
}
// DecodeMessageBytes rewrites the bytes fields of dMsg in place: each bytes
// field's current content is treated as encoded text (hex, base58 or URL
// base64, selected by the field's koinos btype option; base64 when no option
// is present) and replaced with the decoded raw bytes. Non-bytes fields are
// left untouched.
//
// NOTE(review): for a repeated (list) field, value.Bytes() in the BytesKind
// branch would run before the empty-list check below — confirm repeated
// bytes fields can never reach this function populated.
func DecodeMessageBytes(dMsg *dynamicpb.Message, md protoreflect.MessageDescriptor) error {
	l := md.Fields().Len()
	for i := 0; i < l; i++ {
		modified := false
		fd := md.Fields().Get(i)
		value := dMsg.Get(fd)
		switch fd.Kind() {
		case protoreflect.BytesKind:
			var b []byte
			var err error
			opts := fd.Options()
			if opts != nil {
				// The btype extension selects the textual encoding used.
				fieldOpts := opts.(*descriptorpb.FieldOptions)
				ext := koinos.E_Btype.TypeDescriptor()
				enum := fieldOpts.ProtoReflect().Get(ext).Enum()
				switch koinos.BytesType(enum) {
				case koinos.BytesType_HEX, koinos.BytesType_BLOCK_ID, koinos.BytesType_TRANSACTION_ID:
					b, err = hex.DecodeString(string(value.Bytes()))
					if err != nil {
						return err
					}
				case koinos.BytesType_BASE58, koinos.BytesType_CONTRACT_ID, koinos.BytesType_ADDRESS:
					b = base58.Decode(string(value.Bytes()))
				case koinos.BytesType_BASE64:
					fallthrough
				default:
					b, err = base64.URLEncoding.DecodeString(string(value.Bytes()))
					if err != nil {
						return err
					}
				}
			} else {
				// No field options at all: assume URL-safe base64.
				b, err = base64.URLEncoding.DecodeString(string(value.Bytes()))
				if err != nil {
					return err
				}
			}
			value = protoreflect.ValueOfBytes(b)
			modified = true
		}
		// Setting an empty list is skipped entirely.
		if fd.IsList() && value.List().Len() == 0 {
			continue
		}
		// Set the value on the message
		if modified {
			dMsg.Set(fd, value)
		}
	}
	return nil
}
// ----------------------------------------------------------------------------
// Write Contract Command
// ----------------------------------------------------------------------------
// WriteContractCommand is a backend for generated commands that write to a
// contract. ParseResult carries the generated command's name and arguments.
type WriteContractCommand struct {
	ParseResult *CommandParseResult
}
// NewWriteContractCommand creates a new write contract command wrapping the
// parsed invocation.
func NewWriteContractCommand(inv *CommandParseResult) Command {
	return &WriteContractCommand{ParseResult: inv}
}
// Execute executes the write contract command: it builds a call-contract
// operation from the parsed arguments and either queues it on the current
// transaction session or, when that is not possible, submits it immediately.
// Requires an open wallet and either an online connection or a valid session.
func (c *WriteContractCommand) Execute(ctx context.Context, ee *ExecutionEnvironment) (*ExecutionResult, error) {
	if !ee.IsWalletOpen() {
		return nil, fmt.Errorf("%w: cannot execute method", cliutil.ErrWalletClosed)
	}
	if !ee.IsOnline() && !ee.Session.IsValid() {
		return nil, fmt.Errorf("%w: cannot execute method", cliutil.ErrOffline)
	}
	contract := ee.Contracts.GetFromMethodName(c.ParseResult.CommandName)
	// Entry points are stored as hex strings; [2:] skips the "0x" prefix.
	entryPoint, err := strconv.ParseUint(ee.Contracts.GetMethod(c.ParseResult.CommandName).EntryPoint[2:], 16, 32)
	if err != nil {
		return nil, err
	}
	// Build the call arguments from the parsed command input.
	msg, err := ParseResultToMessage(c.ParseResult, ee.Contracts)
	if err != nil {
		return nil, fmt.Errorf("%w: %s", cliutil.ErrInvalidABI, err)
	}
	args, err := proto.Marshal(msg)
	if err != nil {
		return nil, err
	}
	op := &protocol.Operation{
		Op: &protocol.Operation_CallContract{
			CallContract: &protocol.CallContractOperation{
				ContractId: base58.Decode(contract.Address),
				EntryPoint: uint32(entryPoint),
				Args:       args,
			},
		},
	}
	textMsg, _ := text.MarshalPretty(msg)
	result := NewExecutionResult()
	result.AddMessage(fmt.Sprintf("Calling %s with arguments '%s'", c.ParseResult.CommandName, textMsg))
	logMessage := fmt.Sprintf("Call %s with arguments '%s'", c.ParseResult.CommandName, textMsg)
	// Prefer queueing the operation on the transaction session; when that
	// fails, fall back to submitting the transaction immediately.
	if addErr := ee.Session.AddOperation(op, logMessage); addErr != nil {
		if submitErr := ee.SubmitTransaction(ctx, result, op); submitErr != nil {
			return result, fmt.Errorf("cannot make call, %w", submitErr)
		}
	} else {
		result.AddMessage("Adding operation to transaction session")
	}
	return result, nil
}
|
package models
import (
"fmt"
"time"
"github.com/colinrs/ffly-plus/internal/config"
"github.com/colinrs/pkgx/logger"
"gorm.io/driver/mysql"
"gorm.io/gorm"
glogger "gorm.io/gorm/logger"
)
// DB is the package-wide gorm handle, populated by Database().
var DB *gorm.DB
// Database opens the MySQL connection described by mysqlConfig, configures
// the connection pool, stores the handle in the package-level DB and runs
// schema migration. Returns any connection/setup error.
func Database(mysqlConfig config.MySQLConfig) error {
	dsn := fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True&loc=Local",
		mysqlConfig.UserName,
		mysqlConfig.Password,
		mysqlConfig.Addr,
		mysqlConfig.DB)
	// Log only non-secret fields; the original logged the full config struct
	// and the complete DSN, leaking the password into the logs.
	logger.Info("connect to mysql addr=%s db=%s", mysqlConfig.Addr, mysqlConfig.DB)
	db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{
		Logger: glogger.Default.LogMode(glogger.Silent),
	})
	if err != nil {
		logger.Error("conect db err:%v", err)
		return err
	}
	// Connection pool settings.
	sqlDB, err := db.DB()
	if err != nil {
		logger.Error("get sqlDB err:%v", err)
		return err
	}
	sqlDB.SetMaxIdleConns(mysqlConfig.MaxIdleConn) // idle connections
	sqlDB.SetMaxOpenConns(mysqlConfig.MaxOpenConn) // open connections
	sqlDB.SetConnMaxLifetime(time.Second * time.Duration(mysqlConfig.ConnMaxLifeTime)) // max lifetime
	DB = db
	migration()
	return nil
}
|
package controllor
import (
"encoding/json"
"os"
"strconv"
"strings"
"time"
"xiaodaimeng/models"
"xiaodaimeng/public"
)
// Lucky is a single fortune lot loaded from the JSON data files.
type Lucky struct {
	Key string `json:"key"` // index of the lot within its deck (numeric string, see UnDraw)
	Number string `json:"number"` // display number/title of the lot
	Content []string `json:"content"` // interpretation text, one chunk per entry
}
// LuckyData groups the two decks of lots.
type LuckyData struct {
	GuanYin []Lucky `json:"guan_yin"`
	YueLao []Lucky `json:"yue_lao"`
}
// LuckyDataList holds all lot data, populated once in init.
var LuckyDataList = new(LuckyData)
//抽签
func init() {
//获取灵签数据
//观音
guanYinLingQianPtr, _ := os.Open("data/guanyinlingqian.json")
defer guanYinLingQianPtr.Close()
decoder := json.NewDecoder(guanYinLingQianPtr)
err := decoder.Decode(&LuckyDataList.GuanYin)
if err != nil {
public.Error("观音签数据解码失败,", err.Error())
}
//月老
yueLaoLingQianPtr, _ := os.Open("data/yuelaolingqian.json")
defer yueLaoLingQianPtr.Close()
decoder = json.NewDecoder(yueLaoLingQianPtr)
err = decoder.Decode(&LuckyDataList.YueLao)
if err != nil {
public.Error("月老签数据解码失败,", err.Error())
}
}
// Draw performs today's lot drawing for the sender: it picks a random deck
// (Guan Yin or Yue Lao) and a random lot, replies with the lot number, and
// records the draw so it can be interpreted later and not repeated today.
func Draw(msg Msg) {
	public.Debug("Draw")
	// Has the sender already drawn today?
	work := models.Work{
		WxId: msg.Sender,
		Type: "draw",
		Msg:  time.Now().Format("2006-01-02"),
	}
	err := models.SelectWork(&work)
	if err != nil {
		public.Error(err)
		SendMsg(GetReceiver(msg), FailText, TXT_MSG)
		return
	}
	if work.Wid > 0 {
		// Already drew today.
		SendMsg(GetReceiver(msg), HasDrawText, TXT_MSG)
		return
	}
	// Pick the deck at random (0 = Guan Yin, 1 = Yue Lao).
	key := public.GenerateRangeNum(0, 1)
	linQianList := LuckyDataList.GuanYin
	lenLinQian := len(LuckyDataList.GuanYin)
	linQianType := "GuanYin"
	if key == 1 {
		linQianList = LuckyDataList.YueLao
		lenLinQian = len(LuckyDataList.YueLao)
		linQianType = "YueLao"
	}
	// GenerateRangeNum is inclusive of both bounds (the 0/1 deck pick above
	// relies on that), so the upper bound must be lenLinQian-1; the original
	// passed lenLinQian and could index one past the end of the deck.
	key = public.GenerateRangeNum(0, lenLinQian-1)
	linQian := linQianList[key]
	SendMsg(GetReceiver(msg), linQian.Number+"\n回复8解签", TXT_MSG)
	// Record the draw (deck/lot-index) for UnDraw.
	work.Other = linQianType + "/" + linQian.Key
	go models.InsertWork(&work)
}
// UnDraw interprets the lot the user drew earlier today and sends its
// content, chunked so that no single message exceeds 2000 bytes.
// NOTE(review): the 2000 limit counts bytes (len on a string), not runes —
// confirm this matches the messaging platform's limit.
func UnDraw(msg Msg) {
	public.Debug("UnDraw")
	// A lot must have been drawn today.
	work := models.Work{
		WxId: msg.Sender,
		Type: "draw",
		Msg:  time.Now().Format("2006-01-02"),
	}
	err := models.SelectWork(&work)
	if err != nil {
		public.Error(err)
		SendMsg(GetReceiver(msg), FailText, TXT_MSG)
		return
	}
	if work.Wid < 1 {
		// No draw recorded today.
		SendMsg(GetReceiver(msg), NotDrawText, TXT_MSG)
		return
	}
	linQianInfo := strings.Split(work.Other, "/")
	if len(linQianInfo) < 2 {
		SendMsg(GetReceiver(msg), FailText, TXT_MSG)
		return
	}
	linQianType := linQianInfo[0]              // deck type
	key, err1 := strconv.Atoi(linQianInfo[1]) // lot index within the deck
	if err1 != nil {
		public.Error(err1)
		SendMsg(GetReceiver(msg), FailText, TXT_MSG)
		return
	}
	linQianList := LuckyDataList.GuanYin
	if linQianType == "YueLao" {
		linQianList = LuckyDataList.YueLao
	}
	// Guard against a stale or corrupt index from the database; the
	// original indexed blindly and could panic.
	if key < 0 || key >= len(linQianList) {
		SendMsg(GetReceiver(msg), FailText, TXT_MSG)
		return
	}
	InWork[msg.Sender] = true // mark sender as busy while we stream chunks
	linQian := linQianList[key]
	lcStr := ""
	for i, lq := range linQian.Content {
		isSend := false
		if len(lcStr+lq) > 2000 {
			// Flush the current chunk before it grows past the limit.
			SendMsg(GetReceiver(msg), lcStr, TXT_MSG)
			lcStr = ""
			isSend = true
		}
		lcStr += lq + "\n"
		if i == len(linQian.Content)-1 {
			if !isSend {
				SendMsg(GetReceiver(msg), lcStr, TXT_MSG)
			}
			InWork[msg.Sender] = false // done streaming
		}
	}
}
|
package qnamegen
import "math/rand"
// tldList is the package-wide weighted pool of TLDs, built once at load time.
var tldList WeightedList
// init expands the default TLD weights into the sampling pool.
func init() {
	tldList = DefaultTLDList.ToWeightedList()
}
// TLDList maps a top-level domain to its relative weight.
type TLDList map[string]uint

// WeightedList is a weight-expanded, flattened list of TLDs; a TLD with
// weight n appears n times, so uniform sampling respects the weights.
type WeightedList []string

// Shuffle performs `rounds` random swaps on w in place. It is a no-op for
// an empty list, and swap positions wrap around so rounds may exceed
// len(w) without going out of range.
func (w WeightedList) Shuffle(rounds int) {
	if len(w) == 0 {
		return // rand.Intn(0) would panic
	}
	for i := 0; i < rounds; i++ {
		pos := rand.Intn(len(w))
		// The original swapped w[i], which panics once rounds > len(w).
		j := i % len(w)
		w[j], w[pos] = w[pos], w[j]
	}
}

// ToWeightedList expands t into a shuffled WeightedList.
func (t TLDList) ToWeightedList() WeightedList {
	s := make(WeightedList, 0)
	for tld, count := range t {
		for i := uint(0); i < count; i++ {
			s = append(s, tld)
		}
	}
	s.Shuffle(25)
	return s
}
// DefaultTLDList holds the default relative weights used for random TLD
// selection; TLDs with higher weights are drawn proportionally more often.
var DefaultTLDList = TLDList{
	"com": 10,
	"co": 1,
	"de": 10,
	"eu": 1,
	"net": 1,
	"org": 1,
	"xyz": 3,
	"me": 5,
	"io": 5,
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/gorilla/mux"
"github.com/jinzhu/gorm"
"net/http"
"strconv"
)
// Customer mirrors one row of the "customer" table (see TableName).
type Customer struct {
	Id int `json:"id"`
	FirstName string `json:"firstName"`
	MiddleName string `json:"middleName"`
	LastName string `json:"lastName"`
	DateOfBirth string `json:"dateOfBirth"`
	Address string `json:"address"`
	Email string `json:"email"`
	Phone string `json:"phone"`
}
// Response is the uniform JSON envelope returned by every handler.
type Response struct {
	Code int `json:"code"` // 0 = success, 1 = failure
	Message string `json:"message"`
	Data []Customer `json:"data"`
}
// Postgres connection settings.
// NOTE(review): credentials are hard-coded ("********" looks like a scrubbed
// placeholder); these should come from configuration or the environment.
const (
	host = "localhost"
	port = 5432
	user = "postgres"
	password = "********"
	dbname = "customer_db"
)
// connStr is the libpq-style connection string built from the constants above.
var connStr = fmt.Sprintf("host=%s port=%d user=%s "+
	"password=%s dbname=%s sslmode=disable", host, port, user, password, dbname)
// TableName tells gorm which table Customer maps to.
func (Customer) TableName() string {
	return "customer"
}
// createCustomer decodes a Customer from the request body and inserts it.
// Responds with the standard Response envelope.
func createCustomer(w http.ResponseWriter, r *http.Request) {
	var response Response
	w.Header().Set("Content-Type", "application/json")
	db, err := gorm.Open("postgres", connStr)
	if err != nil {
		// The original kept executing with a nil db and panicked; report
		// the failure and stop.
		response.Code = 1
		response.Message = "Database connection failed!"
		json.NewEncoder(w).Encode(response)
		return
	}
	defer db.Close()
	var customer Customer
	if err := json.NewDecoder(r.Body).Decode(&customer); err != nil {
		// Previously decode errors were silently ignored.
		response.Code = 1
		response.Message = "Invalid request body!"
		json.NewEncoder(w).Encode(response)
		return
	}
	db.Create(&customer)
	response.Code = 0
	response.Message = "Customer created!"
	json.NewEncoder(w).Encode(response)
}
// updateCustomer decodes a Customer from the request body and saves it
// (gorm Save inserts or updates by primary key).
func updateCustomer(w http.ResponseWriter, r *http.Request) {
	var response Response
	w.Header().Set("Content-Type", "application/json")
	db, err := gorm.Open("postgres", connStr)
	if err != nil {
		// The original kept executing with a nil db and panicked.
		response.Code = 1
		response.Message = "Database connection failed!"
		json.NewEncoder(w).Encode(response)
		return
	}
	defer db.Close()
	var customer Customer
	if err := json.NewDecoder(r.Body).Decode(&customer); err != nil {
		response.Code = 1
		response.Message = "Invalid request body!"
		json.NewEncoder(w).Encode(response)
		return
	}
	db.Save(&customer)
	response.Code = 0
	response.Message = "Customer updated!"
	json.NewEncoder(w).Encode(response)
}
// getAllCustomer returns up to 5 customers.
func getAllCustomer(w http.ResponseWriter, r *http.Request) {
	var response Response
	w.Header().Set("Content-Type", "application/json")
	db, err := gorm.Open("postgres", connStr)
	if err != nil {
		// The original kept executing with a nil db and panicked.
		response.Code = 1
		response.Message = "Database connection failed!"
		json.NewEncoder(w).Encode(response)
		return
	}
	defer db.Close()
	var list []Customer
	db.Limit(5).Find(&list)
	response.Code = 0
	response.Message = "Success!"
	response.Data = list
	json.NewEncoder(w).Encode(response)
}
// getCustomerById returns up to 5 customers whose id matches the {id}
// path variable.
func getCustomerById(w http.ResponseWriter, r *http.Request) {
	var response Response
	w.Header().Set("Content-Type", "application/json")
	vars := mux.Vars(r)
	id := vars["id"]
	db, err := gorm.Open("postgres", connStr)
	if err != nil {
		// The original kept executing with a nil db and panicked.
		response.Code = 1
		response.Message = "Database connection failed!"
		json.NewEncoder(w).Encode(response)
		return
	}
	defer db.Close()
	var customers []Customer
	db.Where("id=?", id).Limit(5).Find(&customers)
	response.Code = 0
	response.Message = "Success!"
	response.Data = customers
	json.NewEncoder(w).Encode(response)
}
// deleteCustomer deletes the customer identified by the {id} path variable.
func deleteCustomer(w http.ResponseWriter, r *http.Request) {
	var response Response
	w.Header().Set("Content-Type", "application/json")
	vars := mux.Vars(r)
	id, err := strconv.ParseInt(vars["id"], 10, 64)
	if err != nil {
		// Previously a malformed id was silently treated as 0.
		response.Code = 1
		response.Message = "Invalid customer id!"
		json.NewEncoder(w).Encode(response)
		return
	}
	db, err := gorm.Open("postgres", connStr)
	if err != nil {
		// The original kept executing with a nil db and panicked.
		response.Code = 1
		response.Message = "Failed to connect to databases"
		json.NewEncoder(w).Encode(response)
		return
	}
	defer db.Close()
	var customer Customer
	customer.Id = int(id)
	db.Where("id=?", id).Delete(&customer)
	response.Code = 0
	response.Message = "Customer deleted!"
	json.NewEncoder(w).Encode(response)
}
|
package commands
import (
"encoding/json"
"os"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"github.com/argoproj/pkg/cli"
kubecli "github.com/argoproj/pkg/kube/cli"
"github.com/argoproj/argo"
"github.com/argoproj/argo/util"
"github.com/argoproj/argo/util/cmd"
"github.com/argoproj/argo/workflow/common"
"github.com/argoproj/argo/workflow/executor"
"github.com/argoproj/argo/workflow/executor/docker"
"github.com/argoproj/argo/workflow/executor/k8sapi"
"github.com/argoproj/argo/workflow/executor/kubelet"
"github.com/argoproj/argo/workflow/executor/pns"
)
const (
	// CLIName is the name of the CLI
	CLIName = "argoexec"
)
// Global flag values shared by all argoexec subcommands.
var (
	clientConfig clientcmd.ClientConfig
	logLevel string // --loglevel
	glogLevel int // --gloglevel
	podAnnotationsPath string // --pod-annotations
)
// init registers initConfig with Cobra so logging flags are applied after
// flag parsing but before any command runs.
func init() {
	cobra.OnInitialize(initConfig)
}
// initConfig configures the global logger format and applies the log levels
// provided via --loglevel / --gloglevel.
func initConfig() {
	log.SetFormatter(&log.TextFormatter{
		TimestampFormat: "2006-01-02T15:04:05.000Z",
		FullTimestamp: true,
	})
	cli.SetLogLevel(logLevel)
	cli.SetGLogLevel(glogLevel)
}
// NewRootCommand builds the argoexec root command with its subcommands
// (init, resource, wait, version), kubectl flags, and persistent
// logging/annotation flags. Running the root with no subcommand prints help.
func NewRootCommand() *cobra.Command {
	var command = cobra.Command{
		Use: CLIName,
		Short: "argoexec is the executor sidecar to workflow containers",
		Run: func(cmd *cobra.Command, args []string) {
			cmd.HelpFunc()(cmd, args)
		},
	}
	command.AddCommand(NewInitCommand())
	command.AddCommand(NewResourceCommand())
	command.AddCommand(NewWaitCommand())
	command.AddCommand(cmd.NewVersionCmd(CLIName))
	clientConfig = kubecli.AddKubectlFlagsToCmd(&command)
	command.PersistentFlags().StringVar(&podAnnotationsPath, "pod-annotations", common.PodMetadataAnnotationsPath, "Pod annotations file from k8s downward API")
	command.PersistentFlags().StringVar(&logLevel, "loglevel", "info", "Set the logging level. One of: debug|info|warn|error")
	command.PersistentFlags().IntVar(&glogLevel, "gloglevel", 0, "Set the glog logging level")
	return &command
}
// initExecutor assembles a WorkflowExecutor from the environment: the kube
// client config, the pod's identity (from env), the workflow template (from
// the downward-API annotations file) and the configured container runtime
// executor. Any setup error terminates the process via checkErr.
func initExecutor() *executor.WorkflowExecutor {
	log.WithField("version", argo.GetVersion().Version).Info("Starting Workflow Executor")
	config, err := clientConfig.ClientConfig()
	checkErr(err)
	namespace, _, err := clientConfig.Namespace()
	checkErr(err)
	clientset, err := kubernetes.NewForConfig(config)
	checkErr(err)
	podName, ok := os.LookupEnv(common.EnvVarPodName)
	if !ok {
		log.Fatalf("Unable to determine pod name from environment variable %s", common.EnvVarPodName)
	}
	tmpl, err := executor.LoadTemplate(podAnnotationsPath)
	checkErr(err)
	// Pick the container runtime executor; docker is the default.
	var cre executor.ContainerRuntimeExecutor
	switch os.Getenv(common.EnvVarContainerRuntimeExecutor) {
	case common.ContainerRuntimeExecutorK8sAPI:
		cre, err = k8sapi.NewK8sAPIExecutor(clientset, config, podName, namespace)
	case common.ContainerRuntimeExecutorKubelet:
		cre, err = kubelet.NewKubeletExecutor()
	case common.ContainerRuntimeExecutorPNS:
		cre, err = pns.NewPNSExecutor(clientset, podName, namespace, tmpl.Outputs.HasOutputs())
	default:
		cre, err = docker.NewDockerExecutor()
	}
	checkErr(err)
	wfExecutor := executor.NewExecutor(clientset, podName, namespace, podAnnotationsPath, cre, *tmpl)
	// Despite the name, this is JSON; it is only used for the startup log line.
	yamlBytes, _ := json.Marshal(&wfExecutor.Template)
	vers := argo.GetVersion()
	log.Infof("Executor (version: %s, build_date: %s) initialized (pod: %s/%s) with template:\n%s", vers.Version, vers.BuildDate, namespace, podName, string(yamlBytes))
	return &wfExecutor
}
// checkErr is a convenience function to panic upon error.
// It first records the error as the pod's termination message so the
// controller can surface it.
// NOTE(review): "WriteTeriminateMessage" is misspelled in the upstream util
// package; renaming the call here would break the build.
func checkErr(err error) {
	if err != nil {
		util.WriteTeriminateMessage(err.Error())
		panic(err.Error())
	}
}
|
package main
import (
"net/http"
"log"
"encoding/json"
"fmt"
)
// Codes is one station suggestion returned by the rzd.ru suggester API.
type Codes struct {
	Name string `json:"n"` // station name
	Code int `json:"c"` // station code
}
// main looks up station codes for Moscow as a demo.
// NOTE(review): the query is uppercase — presumably the suggester matches
// case-sensitively; confirm before changing.
func main() {
	searchCity("МОСКВА")
}
// searchCity queries the rzd.ru suggester for city and logs the suggestions
// whose name starts with the query string.
// The original sliced each name to len(city) runes unconditionally, which
// panics on suggestions shorter than the query; those are now skipped.
func searchCity(city string) {
	suggestions := new([]Codes)
	url := fmt.Sprintf("http://www.rzd.ru/suggester?compactMode=y&stationNamePart=%s&lang=ru", city)
	getCode(url, suggestions)
	prefixLen := len([]rune(city))
	matches := make([]Codes, 0)
	for _, code := range *suggestions {
		runes := []rune(code.Name)
		if len(runes) < prefixLen {
			continue // name shorter than the query cannot be a prefix match
		}
		safeSubstring := string(runes[0:prefixLen])
		if safeSubstring == city {
			matches = append(matches, code)
			log.Println(safeSubstring, code.Code, code.Name)
		}
	}
	for _, i := range matches {
		log.Println(" >>>", i.Name)
	}
}
func getCode(url string, target interface{}) (error) {
var myClient = &http.Client{}
log.Println("--API-CITY---> URL request", url)
r, err := myClient.Get(url)
if err != nil {
log.Println(err)
}
defer r.Body.Close()
return json.NewDecoder(r.Body).Decode(target)
}
|
package db
import (
"time"
"universe/data"
"github.com/andy-zhangtao/golog"
)
// GetLanguage 返回需要查询的语言列表
// GetLanguage returns the map of search languages keyed by row ID.
// Rows that fail to scan are logged and skipped.
func GetLanguage() (map[int]data.DLanguage, error) {
	rows, err := db.Query("SELECT ID, Name, `Keys`, Date FROM universe.language ")
	if err != nil {
		golog.Error(err.Error())
		return nil, err
	}
	defer rows.Close()
	lan := make(map[int]data.DLanguage)
	for rows.Next() {
		var id int
		var lg data.DLanguage
		if err = rows.Scan(&id, &lg.Name, &lg.Keys, &lg.Date); err != nil {
			golog.Error(err.Error())
			continue
		}
		lan[id] = lg
	}
	// The original never checked rows.Err(), silently returning a partial
	// result when iteration aborted (e.g. lost connection).
	if err = rows.Err(); err != nil {
		golog.Error(err.Error())
		return nil, err
	}
	return lan, nil
}
// AddLanguage 增加检索语言
// AddLanguage inserts one search-language row, defaulting Date to "now"
// when the caller left it empty.
func AddLanguage(lan data.DLanguage) error {
	if lan.Date == "" {
		lan.Date = time.Now().Format("2006-01-02 15:04:05")
	}
	ins, err := db.Prepare("INSERT INTO universe.language(Name, `Keys`, Date) values (?,?,?) ")
	if err != nil {
		golog.Error(err.Error())
		return err
	}
	defer ins.Close()
	if _, err = ins.Exec(lan.Name, lan.Keys, lan.Date); err != nil {
		golog.Error(err.Error())
		return err
	}
	return nil
}
|
package admin
// IndexController serves the admin index routes; session/auth helpers come
// from the embedded BaseController.
type IndexController struct {
	BaseController
}
// Index checks the admin login state and forwards to the dashboard.
func (c *IndexController) Index() {
	c.Islogin()
	c.Redirect("/admin/dashboard", 302)
}
// Login marks the session as an authenticated admin and redirects to the
// dashboard.
// NOTE(review): no credential check is visible here — presumably performed
// elsewhere; confirm this endpoint is not reachable unauthenticated.
func (c *IndexController) Login(){
	c.SetSession("admin_login", int(1))
	c.Redirect("/admin/dashboard", 302)
}
|
package drivestream
import (
"github.com/scjalliance/drivestream/fileversion"
"github.com/scjalliance/drivestream/fileview"
"github.com/scjalliance/drivestream/resource"
)
// FileReference is a reference to a drivestream file.
type FileReference interface {
	// FileID returns the resource ID of the file.
	FileID() resource.ID
	// Exists returns true if the file exists.
	Exists() (bool, error)
	// Versions returns the version map for the file.
	Versions() fileversion.Map
	// Version returns a file version reference. Equivalent to Versions().Ref(s).
	Version(v resource.Version) fileversion.Reference
	// AddVersion adds a version to the file.
	// (Intentionally left out of the interface for now.)
	//AddVersion(v resource.Version, data resource.FileData) error
	// Views returns the view map for the file.
	Views() fileview.Map
	// View returns a view of the file for a particular drive.
	View(driveID resource.ID) fileview.Reference
	// Tree returns the file tree for the file.
	// (Intentionally left out of the interface for now.)
	//Tree() filetree.Tree
}
|
package metrics
import (
"fmt"
"io/ioutil"
"plugins"
"strings"
)
// createPayload reads /proc/uptime and appends the "uptime" and
// "uptime_idle" metrics to r.
func (u *UptimeStats) createPayload(r *plugins.Result) error {
	content, err := ioutil.ReadFile("/proc/uptime")
	if nil != err {
		return err
	}
	fields := strings.Split(strings.Trim(string(content), " \n"), " ")
	// Guard against a malformed file; the original indexed blindly and
	// could panic on unexpected content.
	if len(fields) < 2 {
		return fmt.Errorf("unexpected /proc/uptime content: %q", content)
	}
	r.Add(fmt.Sprintf("uptime %s", fields[0]))
	r.Add(fmt.Sprintf("uptime_idle %s", fields[1]))
	return nil
}
|
package broker
import "net"
import "fmt"
import "log"
import . "../packet"
import . "../message"
// Server is the broker: it accepts TCP connections, tracks connection
// handlers by id and by registered sender/receiver client id, and owns the
// topic manager.
type Server struct {
	//TODO checar esse socket aqui
	//MyServerSocket net.Conn
	Listener net.Listener
	NextHandlerId int // next id handed out by getNextInt
	Handlers map[int]*ConnectionHandler // all live handlers by id
	Senders map[string]*ConnectionHandler // handlers registered as senders, by client id
	Receivers map[string]*ConnectionHandler // handlers registered as receivers, by client id
	MyTopicManager TopicManager
	//MyAdminManager AdminManager
}
// CreateServer initializes the handler/sender/receiver maps, the topic
// manager and the TCP listener on the given port (e.g. ":8080").
func (server *Server) CreateServer(port string) {
	server.Handlers = make(map[int]*ConnectionHandler)
	server.Senders = make(map[string]*ConnectionHandler)
	server.Receivers = make(map[string]*ConnectionHandler)
	tmanager := TopicManager{}
	tmanager.CreateTopicManager(server)
	server.MyTopicManager = tmanager
	server.NextHandlerId = 0
	// The original discarded this error, leaving a nil listener that
	// panicked later in Init.
	ln, err := net.Listen("tcp", port)
	if err != nil {
		log.Fatal("cannot listen on ", port, ": ", err)
	}
	server.Listener = ln
	println("==> Server created!")
}
// Init accepts connections forever, spawning a ConnectionHandler goroutine
// per client.
func (server *Server) Init() {
	for {
		println("Waiting for a new Connection...")
		conn, err := server.Listener.Accept()
		if err != nil {
			// The original ignored this error and handed a nil conn to
			// the handler; log and keep accepting instead.
			log.Print("accept error: ", err)
			continue
		}
		log.Print("new connetion from ", conn.RemoteAddr())
		id := server.getNextInt()
		connHandler := ConnectionHandler{}
		connHandler.NewCH(id, conn, *server)
		server.Handlers[id] = &connHandler
		go connHandler.Execute()
		//fmt.Println(" ®®®®® HANDLERS ",server.Handlers)
	}
}
// getNextInt hands out the next handler id and advances the counter.
func (server *Server) getNextInt() int {
	id := server.NextHandlerId
	server.NextHandlerId = id + 1
	return id
}
// HandleRegisterSender records handler id as the sender for the packet's
// client id.
func (server *Server) HandleRegisterSender(pkt Packet, id int){
	println("*** Server handle[SENDER]")
	server.Senders[pkt.GetClientID()] = server.Handlers[id]
}
// HandleRegisterReceiver records handler id as the receiver for the
// packet's client id.
func (server *Server) HandleRegisterReceiver(pkt Packet, id int){
	println("*** Server handle[RECEIVER]")
	println("*** Server receiver added : ",pkt.GetClientID())
	server.Receivers[pkt.GetClientID()] = server.Handlers[id]
}
// HandleSubscribe subscribes the packet's client to the topic in Params[1].
func (server *Server) HandleSubscribe(pkt Packet){
	println("*** Server handle[SUBSCRIBE]")
	server.MyTopicManager.Subscribe(pkt.Params[1], pkt.GetClientID())
}
// HandleUnsubscribe removes the packet's client from the topic in Params[1].
func (server *Server) HandleUnsubscribe(pkt Packet){
	println("*** Server handle[UNSUBSCRIBE]")
	server.MyTopicManager.Unsubscribe(pkt.Params[1], pkt.GetClientID())
}
// HandleCreateTopic creates the topic named in Params[1].
func (server *Server) HandleCreateTopic(pkt Packet){
	println("*** Server handle[CREATETOPIC]")
	println("*** Server ", pkt.GetMessage().Destination)
	server.MyTopicManager.CreateTopic(pkt.Params[1])
}
// HandleMessage appends the packet's message to its destination topic and
// sends an ACK back to the originating client's receiver handler.
func (server *Server) HandleMessage(pkt Packet) {
	println("*** Server handle[MESSAGE]")
	topic := pkt.GetMessage().Destination
	fmt.Println(topic)
	err := server.MyTopicManager.AddMessageToTopic(topic, pkt)
	if err != nil {
		panic(err)
	}
	// Build the ACK packet addressed to the sending client.
	pkt_ := Packet{}
	params := []string{pkt.GetClientID(), pkt.MessageID}
	pkt_.CreatePacket(ACK.Ordinal(), 0, params, Message{})
	// Debug: list currently registered receivers.
	for key, _ := range server.Receivers {
		fmt.Println("Key:", key)
	}
	handler := server.Receivers[pkt.GetClientID()]
	if handler == nil {
		// The original logged here and then dereferenced the nil handler
		// anyway, crashing the broker; drop the ACK instead.
		fmt.Println("*** Server: no receiver registered for", pkt.GetClientID())
		return
	}
	handler.ToSend <- pkt_
}
|
package main
import (
"fmt"
)
/*
Unlike C++ or Java, Go has no explicit inheritance; "inheritance" is
modeled through composition (struct embedding).
Person is the base type: a name, an age, and a SayHi method
(Init plays the role of a constructor).
*/
type Person struct {
	name string
	age int
}
/*
When a function must update a variable, or an argument is too large to
copy cheaply, pass a pointer to it — hence the pointer receiver here.
*/
// Init sets the person's name and age, acting as a constructor.
func (p *Person) Init(name string, age int) {
	*p = Person{name: name, age: age}
}
// SayHi prints a one-line self-introduction.
func (p Person) SayHi() {
	fmt.Printf("Hi, I am %s, %d years old.\n", p.name, p.age)
}
// Employee "inherits" Person through composition (embedding).
// Besides the embedded Person it carries its own field, company.
type Employee struct {
	Person
	company string
}
// Init initializes the embedded Person and the employee's company.
func (e *Employee) Init(name string, age int, company string) {
	e.company = company
	e.Person.Init(name, age)
}
// An employee is a Person who also works, so Employee adds a Work method.
func (e Employee) Work() {
	fmt.Printf("I'm working at %s.\n", e.company)
}
// main demonstrates the base type and the composed type side by side.
func main() {
	var p Person
	p.Init("Tom", 22)
	p.SayHi()

	var e Employee
	e.Init("Jerry", 30, "ABC")
	e.SayHi()
	e.Work()
}
|
package targetselector
import (
	"sort"
	"strings"

	"github.com/devspace-cloud/devspace/pkg/devspace/config/versions/latest"
	"github.com/devspace-cloud/devspace/pkg/devspace/kubectl"
)
// SelectorParameter holds the information from the config and the command overrides
type SelectorParameter struct {
	ConfigParameter ConfigParameter
	CmdParameter CmdParameter
}
// CmdParameter holds the parameter we receive from the command
type CmdParameter struct {
	LabelSelector string
	Namespace string
	ContainerName string
	PodName string
	Pick *bool
	Interactive bool
}
// ConfigParameter holds the parameter we receive from the config
type ConfigParameter struct {
	LabelSelector map[string]string
	Namespace string
	ContainerName string
}
// GetNamespace retrieves the target namespace.
// Precedence: command-line override, then config, then the kube client's
// current namespace. The config argument is currently unused but kept for
// interface compatibility.
func (t *SelectorParameter) GetNamespace(config *latest.Config, kubeClient kubectl.Client) (string, error) {
	if t.CmdParameter.Namespace != "" {
		return t.CmdParameter.Namespace, nil
	}
	if t.ConfigParameter.Namespace != "" {
		return t.ConfigParameter.Namespace, nil
	}
	return kubeClient.Namespace(), nil
}
// GetLabelSelector retrieves the label selector of the target.
// A non-empty command-line selector wins over the config map; an empty
// string means "no selector". The config argument is currently unused.
func (t *SelectorParameter) GetLabelSelector(config *latest.Config) (string, error) {
	if t.CmdParameter.LabelSelector != "" {
		return t.CmdParameter.LabelSelector, nil
	}
	if t.ConfigParameter.LabelSelector != nil {
		labelSelector := labelSelectorMapToString(t.ConfigParameter.LabelSelector)
		return labelSelector, nil
	}
	return "", nil
}
// labelSelectorMapToString renders m as a "k=v,k=v" label selector.
// Keys are sorted so the output is deterministic; the original relied on
// map iteration order, which is random in Go, producing an unstable
// selector string across calls.
func labelSelectorMapToString(m map[string]string) string {
	keys := make([]string, 0, len(m))
	for key := range m {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	labels := make([]string, 0, len(keys))
	for _, key := range keys {
		labels = append(labels, key+"="+m[key])
	}
	return strings.Join(labels, ",")
}
// GetPodName retrieves the pod name from the parameters; only the
// command-line value is consulted, an empty string means "not set".
func (t *SelectorParameter) GetPodName() string {
	name := t.CmdParameter.PodName
	return name
}
// GetContainerName retrieves the container name from the parameters,
// preferring the command-line value over the config value.
func (t *SelectorParameter) GetContainerName() string {
	for _, name := range []string{t.CmdParameter.ContainerName, t.ConfigParameter.ContainerName} {
		if name != "" {
			return name
		}
	}
	return ""
}
|
package notice
import (
"io/ioutil"
"net/http"
"net/url"
)
// SmsNoticer sends SMS messages through the HTTP gateway described by config.
type SmsNoticer struct {
	config *SmsConfig
}
// SendSms posts an SMS request for num with the given content and returns
// the gateway's raw response body.
// (Receiver renamed from the non-idiomatic `this` — Go convention is a
// short type-derived name.)
func (sn *SmsNoticer) SendSms(num string, content string) (string, error) {
	payload := sn.newPayload(num, content)
	resp, err := http.PostForm(sn.config.Addr, payload)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}
// newPayload assembles the form values expected by the SMS gateway.
// NOTE(review): "task" carries the user and "key" the password — this looks
// provider-specific; confirm against the gateway docs.
// (Receiver renamed from the non-idiomatic `this`.)
func (sn *SmsNoticer) newPayload(num string, content string) url.Values {
	v := url.Values{}
	v.Add("task", sn.config.User)
	v.Add("key", sn.config.Password)
	v.Add("num", num)
	v.Add("content", content)
	return v
}
// NewSmsNoticer builds an SmsNoticer, falling back to the default config
// when config is nil.
func NewSmsNoticer(config *SmsConfig) *SmsNoticer {
	cfg := config
	if cfg == nil {
		cfg = NewSmsConfig()
	}
	return &SmsNoticer{config: cfg}
}
|
package middlewares
import (
"ginDemo/utils"
"github.com/gin-gonic/gin"
"net/http"
)
// JWTAuth returns a middleware that validates the Authorization header and
// stores the authenticated user id in the request context under "userid".
func JWTAuth() gin.HandlerFunc {
	return func(c *gin.Context) {
		authHeader := c.GetHeader("Authorization")
		// The original discarded the parse error; treat any parse failure
		// the same as missing/invalid credentials.
		claims, err := utils.ParseToken(authHeader)
		if err != nil || claims == nil {
			c.AbortWithStatus(http.StatusUnauthorized)
			return
		}
		c.Set("userid", claims.UserID)
	}
}
|
package main
import (
"fmt"
"time"
)
// main demonstrates breaking out of a for/select loop with a labeled break:
// inside a select, a plain break only exits the select statement, not the
// enclosing loop.
func main() {
DONE:
	for {
		fmt.Println("for")
		select {
		case <-time.After(5 * time.Second):
			//goto DONE
			break DONE // a bare break here would only leave the select
		}
	}
	fmt.Println("DONE")
}
|
package main
import (
"fmt"
)
// main demonstrates basic slice expressions.
func main() {
	nums := []int{2, 4, 6, 8, 10}
	fmt.Println(nums)      // whole slice
	fmt.Println(nums[1:4]) // elements 1..3
	fmt.Println(nums[:3])  // first three elements
	fmt.Println(nums[4:])  // from index 4 to the end
}
|
/*
Chef has just started watching Game of Thrones, and he wants to first calculate the exact time (in minutes) that it'll take him to complete the series.
The series has S seasons, and the ith season has Ei episodes, each of which are Li,1,Li,2,…,Li,Ei minutes long.
Note that these Li,j include the duration of the beginning intro song in each episode. The streaming service that he uses, allows Chef to skip the intro song.
The intro song changes slightly each season, and so he wants to watch the intro song in the first episode of each season, but he'll skip it in all other episodes of that season (yes, we know, a sacrilege!).
You know that the intro song lasts for Qi minutes in the ith season.
Find the total time in minutes, that he has to watch.
Input:
First line will contain a single integer, T, denoting the number of testcases. Then the testcases follow.
The first line of each testcase will contain a single integer S, denoting the total number of seasons.
The second line contains S space separated integers, Q1,Q2,…,QS, where Qi denotes the duration of the intro song in the ith season.
The ith of the next S lines contains Ei+1 space separated integers, where the first integer is Ei, denoting the number of episodes in the ith season. That is followed by the duration of each of the Ei episodes, Li,1,Li,2,…,Li,Ei.
Output:
For each testcase, output the answer in a single line.
Constraints
1≤T≤5
1≤S≤10^5
2≤Li,j≤10^5
1≤Ei
Sum of all Ei in a single testcase is at most 10^5
1≤Qi<Li,j, for all valid j.
*/
package main
// main exercises minutes against the three sample cases from the problem
// statement; assert panics on the first mismatch.
func main() {
	cases := []struct {
		q    []int
		e    [][]int
		want int
	}{
		{[]int{1, 2}, [][]int{{2}, {3, 4}}, 7},
		{[]int{10}, [][]int{{11, 11, 11, 11, 11}}, 15},
		{[]int{10, 10, 10, 10, 10}, [][]int{{11}, {11}, {11}, {11}, {11}}, 55},
	}
	for _, c := range cases {
		assert(minutes(c.q, c.e) == c.want)
	}
}
// assert panics when x is false; a minimal test harness for main.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
// minutes returns the total watch time: every episode in full, minus the
// intro duration q[i] for each episode after the first of season i (the
// first episode's intro is watched, the rest are skipped).
func minutes(q []int, e [][]int) int {
	total := 0
	for season := range q {
		for idx, length := range e[season] {
			total += length
			if idx > 0 {
				total -= q[season]
			}
		}
	}
	return total
}
|
package main
import (
"fmt"
"github.com/garyburd/redigo/redis"
"log"
"time"
)
// main is a scratch driver for the redigo demos below; uncomment the one
// you want to run.
func main() {
	//normal()
	//args()
	//fbool()
	//sortset()
	//sortset1()
	expireTime()
}
// normal demonstrates a basic SET/GET round trip against a local Redis.
func normal() {
	conn, err := redis.Dial("tcp", "127.0.0.1:6379")
	if err != nil {
		log.Fatalln(err)
	}
	defer conn.Close()
	resp, err := conn.Do("SET", "myKey", "abcd")
	if err != nil {
		fmt.Println(err)
	}
	fmt.Println(resp)
	resp, err = conn.Do("GET", "myKey")
	if err != nil {
		fmt.Println(err)
	}
	// Convert the raw reply to a Go string.
	value, _ := redis.String(resp, err)
	fmt.Println(value)
}
// args demonstrates HMSET/HGETALL round trips using redis.Args helpers for
// both a tagged struct (AddFlat + ScanStruct) and a plain map.
func args() {
	conn, err := redis.Dial("tcp", "127.0.0.1:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	var p1, p2 struct {
		Title string `redis:"title"`
		Author string `redis:"author"`
		Body string `redis:"body"`
	}
	p1.Title = "Example"
	p1.Author = "Gary"
	p1.Body = "Hello"
	// Struct fields flattened into field/value pairs for HMSET.
	if _, err := conn.Do("HMSET", redis.Args{}.Add("id1").AddFlat(&p1)...); err != nil {
		fmt.Println(err)
		return
	}
	m := map[string]string{
		"title": "Example2",
		"author": "Steve",
		"body": "Map",
	}
	// A map works the same way.
	if _, err := conn.Do("HMSET", redis.Args{}.Add("id2").AddFlat(m)...); err != nil {
		fmt.Println(err)
		return
	}
	for _, id := range []string{"id1", "id2"} {
		v, err := redis.Values(conn.Do("HGETALL", id))
		if err != nil {
			fmt.Println(err)
			return
		}
		// Scan the field/value reply back into the tagged struct.
		if err := redis.ScanStruct(v, &p2); err != nil {
			fmt.Println(err)
			return
		}
		fmt.Printf("%+v\n", p2)
	}
}
// fbool demonstrates converting an EXISTS reply to bool with redis.Bool.
func fbool() {
	c, err := redis.Dial("tcp", "127.0.0.1:6379")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer c.Close()
	c.Do("SET", "foo", 1)
	exists, _ := redis.Bool(c.Do("EXISTS", "foo"))
	fmt.Printf("%#v\n", exists)
}
// sortset demonstrates sorted sets: ZADD members with scores, then ZCARD
// (total members) and ZCOUNT (members with score in [1,1]).
func sortset() {
	c, err := redis.Dial("tcp", "127.0.0.1:6379")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer c.Close()
	for i, member := range []string{"red", "blue", "green"} {
		c.Do("ZADD", "zset", i, member)
	}
	c.Do("ZADD", "zset", 1, "uio")
	resp, err := c.Do("ZCARD", "zset")
	if err != nil {
		log.Fatal(err)
	}
	count, err := redis.Int(resp, err)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(count)
	// Any error from Do flows into redis.Int via the err argument.
	resp, err = c.Do("ZCOUNT", "zset", 1, 1)
	count, err = redis.Int(resp, err)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(count)
}
// sortset1 is a parked experiment: everything except the connection is
// commented out (ZADD of a flattened struct plus ZRANGE/ScanStruct readback).
func sortset1() {
	c, err := redis.Dial("tcp", "127.0.0.1:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()
	//c.Do("DEL", "myset")
	// temp := book {
	// Title: "uio",
	// Author: "uuu",
	// Body: "qqqqqqqqqqqqq",
	// }
	// if _, err := c.Do("ZADD", "myset", 0, redis.Args{}.AddFlat(&temp)); err != nil {
	// log.Fatal(err)
	// }
	// // resp, err := c.Do("ZCARD", "myset")
	// // count, err := redis.Int(resp, err)
	// // if err != nil {
	// // log.Fatal(err)
	// // }
	// // fmt.Println(count)
	// resp, err := c.Do("ZRANGE", "myset", 0, -1)
	// values, err := redis.Values(resp, err)
	// if err != nil {
	// log.Fatal(err)
	// }
	// var bs []book
	// if err := redis.ScanStruct(values, &bs); err != nil {
	// log.Fatal(err)
	// }
	// fmt.Println(bs)
}
// book is a sample record used by the (commented-out) sortset1 experiment.
type book struct {
	Title string
	Author string
	Body string
}
// only SET supports an expire time via command arguments
// expireTime demonstrates key expiry: a SET with EX expires after 3s,
// while hashes written with HMSET do not (trailing "EX"/3 args to HMSET
// are extra field/value pairs, not an expiry — see the printed args).
// NOTE(review): the server address and AUTH password are hard-coded below;
// these are secrets and should come from configuration, not source.
func expireTime() {
	conn, err := redis.Dial("tcp", "39.108.80.66:6379")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	conn.Do("AUTH", "launch*2018")
	var p1, p2 struct {
		Title string `redis:"title"`
		Author string `redis:"author"`
		Body string `redis:"body"`
	}
	p1.Title = "Example"
	p1.Author = "Gary"
	p1.Body = "Hello"
	// SET with EX 3: the key disappears after three seconds.
	_, err = conn.Do("SET", redis.Args{}.Add("go-key").Add("go-value").Add("EX").Add(3)...)
	if err != nil {
		fmt.Println(err)
		return
	}
	if _, err := conn.Do("HMSET", redis.Args{}.Add("id10").AddFlat(&p1).Add("EX").Add(3)...); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(redis.Args{}.Add("id10").AddFlat(&p1).Add("EX").Add(3))
	m := map[string]string{
		"title": "Example2",
		"author": "Steve",
		"body": "Map",
	}
	if _, err := conn.Do("HMSET", redis.Args{}.Add("id11").AddFlat(m)...); err != nil {
		fmt.Println(err)
		return
	}
	s, e := redis.String(conn.Do("GET", "go-key"))
	if e != nil {
		fmt.Println(e)
		return
	}
	fmt.Println("1111", s)
	// Wait past the 3s TTL; the second GET should find nothing.
	time.Sleep(4 * time.Second)
	fmt.Println("****************************")
	s, _ = redis.String(conn.Do("GET", "go-key"))
	fmt.Println("1111", s)
	for _, id := range []string{"id10", "id11"} {
		v, err := redis.Values(conn.Do("HGETALL", id))
		if err != nil {
			fmt.Println(err)
			return
		}
		if err := redis.ScanStruct(v, &p2); err != nil {
			fmt.Println(err)
			return
		}
		fmt.Printf("%+v\n", p2)
	}
}
|
package redis
import (
"context"
"github.com/go-redis/redis/v8"
"github.com/pkg/errors"
)
// RedisConfig carries the parameters used by Connect.
type RedisConfig struct {
	Ctx context.Context // context used for all subsequent commands
	ConnString string // host:port of the Redis server
}
// Connect builds a Redis wrapper talking to ConnString (no auth, DB 0).
func (c *RedisConfig) Connect() *Redis {
	return &Redis{
		ctx: c.Ctx,
		rdb: redis.NewClient(&redis.Options{
			Addr: c.ConnString,
			Password: "",
			DB: 0,
		}),
	}
}
// Redis wraps a go-redis client together with the context used for calls.
type Redis struct {
	ctx context.Context
	rdb *redis.Client
}
// SetValue stores value under key with no expiration.
func (r *Redis) SetValue(key string, value string) error {
	return r.rdb.Set(r.ctx, key, value, 0).Err()
}
// GetValue fetches key. A missing key is reported as *NotExistError rather
// than the driver-level redis.Nil sentinel.
func (r *Redis) GetValue(key string) (value string, err error) {
	value, err = r.rdb.Get(r.ctx, key).Result()
	// errors.Is also matches wrapped errors, unlike the original ==.
	if errors.Is(err, redis.Nil) {
		return "", &NotExistError{key: key}
	}
	return
}
// Close releases the underlying client's connections.
func (r *Redis) Close() error {
	return r.rdb.Close()
}
// KeySet is not implemented yet; pattern and pageSize are placeholders.
func (r *Redis) KeySet(pattern string, pageSize int) ([]string, error) {
	return nil, errors.New("Unimplemented")
}
// Remove is not implemented yet.
func (r *Redis) Remove(keys ...string) (uint64, error) {
	return 0, errors.New("Unimplemented")
}
// Size is not implemented yet.
func (r *Redis) Size() (uint64, error) {
	return 0, errors.New("Unimplemented")
}
// Clear is not implemented yet.
func (r *Redis) Clear() error {
	return errors.New("Unimplemented")
}
|
package config
import (
"gopkg.in/gcfg.v1"
"io/ioutil"
"log"
"os"
)
// Product holds the product-service endpoint settings.
type Product struct {
	Url string
	Timeout int //millisecond
}
// ProductList holds the product-list endpoint settings.
type ProductList struct {
	Url string
	Timeout int //millisecond
}
// ProductCache holds cache tuning for product data.
type ProductCache struct {
	CacheResetTimeOut int64 //second
	Timeout int64 //second
}
// Conf is the top-level configuration, one field per ini section.
type Conf struct {
	Product Product
	PrdCache ProductCache
	ProductList ProductList
	DB DBConfig
	GAuth GoogleCredentials
}
// GoogleCredentials holds the Google OAuth client settings.
type GoogleCredentials struct {
	Cid string
	Csecret string
	RedirectUrl string
}
// DBConfig holds the database connection string.
type DBConfig struct {
	DBStr string
}
// CF is the process-wide configuration, populated by init.
var CF *Conf
// init loads the config file from a GOPATH-relative location at startup and
// aborts the process when that fails.
// NOTE(review): hard-coding a GOPATH path breaks module builds, and
// log.Fatal in init makes the package hard to test — consider an explicit
// initialization function.
func init() {
	CF = &Conf{}
	GOPATH := os.Getenv("GOPATH")
	fname := GOPATH + "/src/github.com/itsmeadi/cart/files/config.ini"
	ok := ReadConfig(CF, fname)
	if !ok {
		log.Fatal("Failed to read config file")
	}
}
// ReadConfig loads the ini file at path into cfg, returning false (after
// logging) on any read or parse failure.
func ReadConfig(cfg *Conf, path string) bool {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		log.Println("config.go [ReadFile] function ReadConfig", err)
		return false
	}
	if err = gcfg.ReadStringInto(cfg, string(raw)); err != nil {
		log.Println("config.go [ReadStringInto] function ReadConfig", err)
		return false
	}
	return true
}
// GetConfig returns the process-wide configuration loaded in init.
func GetConfig() *Conf {
	return CF
}
|
package main
import (
"database/sql"
"fmt"
_ "github.com/go-sql-driver/mysql"
)
// main increments the "nice" counter of article 1 inside a transaction:
// read the current value, write value+1, commit.
func main() {
	dbUser := "docker"
	dbPassword := "docker"
	dbDatabase := "sampledb"
	dbConn := fmt.Sprintf("%s:%s@tcp(127.0.0.1:3306)/%s?parseTime=true", dbUser, dbPassword, dbDatabase)
	db, err := sql.Open("mysql", dbConn)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer db.Close()
	tx, err := db.Begin()
	if err != nil {
		fmt.Println(err)
		return
	}
	article_id := 1
	const sqlGetNice = `
		select nice
		from articles
		where article_id = ?;
	`
	row := tx.QueryRow(sqlGetNice, article_id)
	if err := row.Err(); err != nil {
		fmt.Println(err)
		tx.Rollback()
		return
	}
	var nicenum int
	err = row.Scan(&nicenum)
	if err != nil {
		fmt.Println(err)
		tx.Rollback()
		return
	}
	const sqlUpdateNice = `
		update articles
		set nice = ?
		where article_id = ?
	`
	_, err = tx.Exec(sqlUpdateNice, nicenum+1, article_id)
	if err != nil {
		fmt.Println(err)
		tx.Rollback()
		return
	}
	// The original ignored Commit's error, so a failed commit silently
	// lost the update.
	if err := tx.Commit(); err != nil {
		fmt.Println(err)
	}
}
|
package fileshandler
import(
"path/filepath"
"log"
"fmt"
"io/ioutil"
"os"
"strings"
)
// GetPath resolves an HTML directory to an absolute path.
// With no arguments it resolves the default "./demos/" directory;
// with arguments it resolves each one and returns the last (matching
// the historical behavior of this function). Resolution failures
// abort the process.
func GetPath(htmlPath ...string) string {
	resolve := func(p string) string {
		abs, err := filepath.Abs(p)
		if err != nil {
			log.Fatal(err)
		}
		return abs
	}
	if len(htmlPath) == 0 {
		return resolve("./demos/")
	}
	var last string
	for _, candidate := range htmlPath {
		last = resolve(candidate)
	}
	return last
}
// GetHTMLFiles lists the directory entries under path, aborting the
// process on a read error. The name notwithstanding, it returns ALL
// entries; filtering to .html files happens in FileNames.
func GetHTMLFiles(path string) []os.FileInfo{
	files, err := ioutil.ReadDir(path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("I found ", len(files), " files inside", path)
	return files
}
// FileNames builds file:// URLs for the .html files among files,
// rooted at the absolute directory path.
//
// BUG fix: the previous check used strings.Contains(name, ".html"),
// which also matched names such as "page.html.bak"; require the
// ".html" suffix instead.
func FileNames(files []os.FileInfo, path string) []string {
	var filePaths []string
	for _, f := range files {
		if strings.HasSuffix(f.Name(), ".html") {
			// file:/// + absolute path loads the file from the local filesystem.
			thisPath := "file:///" + path + "/" + f.Name()
			filePaths = append(filePaths, thisPath)
		}
	}
	return filePaths
}
|
package chanqueue
import (
"sync"
"testing"
"github.com/textnode/gringo"
)
// BenchmarkChan measures send+receive throughput of a buffered
// channel used as a queue: one producer goroutine, this goroutine
// draining.
func BenchmarkChan(b *testing.B) {
	c := make(chan struct{}, 10)
	// ResetTimer excludes the setup above. The old code called
	// StartTimer, which is a no-op here (the timer is already
	// running) and left setup inside the measurement.
	b.ResetTimer()
	go func() {
		for i := 0; i < b.N; i++ {
			c <- struct{}{}
		}
		close(c)
	}()
	// Drain until the sender closes the channel; "for range" is the
	// idiomatic form of the old "for _ = range".
	for range c {
	}
}
// BenchmarkQueue measures Push/Pop throughput of Queue with one
// producer goroutine and this goroutine consuming b.N items.
func BenchmarkQueue(b *testing.B) {
	queue := NewQueue()
	done := make(chan struct{})
	b.StartTimer()
	go func() {
		defer close(done)
		for n := 0; n < b.N; n++ {
			queue.Push()
		}
	}()
	for n := 0; n < b.N; n++ {
		queue.Pop()
	}
	<-done
}
// BenchmarkCQueue measures Push/Pop throughput of CQueue with one
// producer goroutine and this goroutine consuming b.N items.
func BenchmarkCQueue(b *testing.B) {
	queue := NewCQueue()
	done := make(chan struct{})
	b.StartTimer()
	go func() {
		defer close(done)
		for n := 0; n < b.N; n++ {
			queue.Push()
		}
	}()
	for n := 0; n < b.N; n++ {
		queue.Pop()
	}
	<-done
}
// payload is the fixed message written on every iteration; allocated
// once so the benchmark measures the queue, not payload construction.
var payload = *gringo.NewPayload(1)

// BenchmarkGringo measures Write/Read throughput of a gringo ring
// buffer with one producer goroutine and this goroutine consuming.
//
// NOTE(review): StartTimer here is a no-op (the timer is already
// running); ResetTimer would exclude the setup above from the
// measurement, matching the other benchmarks' intent.
func BenchmarkGringo(b *testing.B) {
	q := gringo.NewGringo()
	var wg sync.WaitGroup
	wg.Add(1)
	b.StartTimer()
	go func() {
		defer wg.Done()
		for i := 0; i < b.N; i++ {
			q.Write(payload)
		}
	}()
	for i := 0; i < b.N; i++ {
		q.Read()
	}
	wg.Wait()
}
|
package hpke
import (
"net/url"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestEncryptURLValues round-trips url.Values through the v1 and v2
// envelope formats and checks that post-encryption tampering behaves
// as intended: unknown extra parameters survive decryption, while
// edits to parameters protected by the encrypted payload are ignored.
// The "compresses" case verifies large payloads shrink below 1 KiB.
func TestEncryptURLValues(t *testing.T) {
	t.Parallel()
	k1, err := GeneratePrivateKey()
	require.NoError(t, err)
	k2, err := GeneratePrivateKey()
	require.NoError(t, err)
	t.Run("v1", func(t *testing.T) {
		t.Parallel()
		encrypted, err := EncryptURLValuesV1(k1, k2.PublicKey(), url.Values{
			"a": {"b", "c"},
			"x": {"y", "z"},
		})
		assert.NoError(t, err)
		assert.True(t, encrypted.Has(paramSenderPublicKey))
		assert.True(t, encrypted.Has(paramQuery))
		assert.True(t, IsEncryptedURL(encrypted))
		// "extra" is new, so it should be kept after decryption;
		// "a" is protected by the encrypted payload, so the edit
		// should be discarded.
		encrypted.Set("extra", "value")
		encrypted.Set("a", "notb")
		senderPublicKey, decrypted, err := DecryptURLValues(k2, encrypted)
		assert.NoError(t, err)
		assert.Equal(t, url.Values{
			"a": {"b", "c"},
			"x": {"y", "z"},
			"extra": {"value"},
		}, decrypted)
		assert.Equal(t, k1.PublicKey().String(), senderPublicKey.String())
	})
	t.Run("v2", func(t *testing.T) {
		t.Parallel()
		encrypted, err := EncryptURLValuesV2(k1, k2.PublicKey(), url.Values{
			"a": {"b", "c"},
			"x": {"y", "z"},
		})
		assert.NoError(t, err)
		assert.True(t, encrypted.Has(paramSenderPublicKeyV2))
		assert.True(t, encrypted.Has(paramQueryV2))
		assert.True(t, IsEncryptedURL(encrypted))
		// Same tampering expectations as the v1 case above.
		encrypted.Set("extra", "value")
		encrypted.Set("a", "notb")
		senderPublicKey, decrypted, err := DecryptURLValues(k2, encrypted)
		assert.NoError(t, err)
		assert.Equal(t, url.Values{
			"a": {"b", "c"},
			"x": {"y", "z"},
			"extra": {"value"},
		}, decrypted)
		assert.Equal(t, k1.PublicKey().String(), senderPublicKey.String())
	})
	t.Run("compresses", func(t *testing.T) {
		t.Parallel()
		encrypted, err := EncryptURLValuesV2(k1, k2.PublicKey(), url.Values{
			"a": {strings.Repeat("b", 1024*128)},
		})
		assert.NoError(t, err)
		assert.Less(t, len(encrypted.Encode()), 1024)
	})
}
|
/**
*@Author: haoxiongxiao
*@Date: 2019/3/18
*@Description: CREATE GO FILE repositories
*/
package repositories
import (
"bysj/models"
"fmt"
"github.com/jinzhu/gorm"
"log"
)
// OrderRepositories provides order persistence on top of the shared
// gorm MySQL handle.
type OrderRepositories struct {
	db *gorm.DB
}

// NewOrderRepositories builds a repository bound to the global
// models.DB.Mysql connection.
func NewOrderRepositories() *OrderRepositories {
	return &OrderRepositories{db: models.DB.Mysql}
}
// List fills result with one page of orders matching the optional
// user-id / status / search filters carried in result, plus the total
// match count. qs builds the data query, qc the count query; they
// must receive identical filters so Total matches Data.
//
// Fixes: the user_id filter was previously applied to qs a second
// time near the end (duplicated condition, and inconsistent with qc),
// and a bare debug print of UserId was left in.
func (this *OrderRepositories) List(result *models.PageResult) {
	result.Message = "success"
	result.Code = 10000
	var orders []models.Order
	qs := this.db
	qc := this.db.Model(&models.Order{})
	if result.UserId != 0 {
		qs = qs.Where("user_id = ?", result.UserId)
		qc = qc.Where("user_id = ?", result.UserId)
	}
	if result.Status != 0 {
		qs = qs.Where("status = ?", result.Status)
		qc = qc.Where("status = ?", result.Status)
	}
	if result.Search != "" {
		s := "%" + result.Search + "%"
		qs = qs.Where("order_number like ? or room_info like ? or hotel_item like ?", s, s, s)
		qc = qc.Where("order_number like ? or room_info like ? or hotel_item like ?", s, s, s)
	}
	log.Printf("listing orders: user_id=%d page=%d per=%d", result.UserId, result.Page, result.Per)
	qs.Limit(result.Per).Preload("User").Offset((result.Page - 1) * result.Per).Find(&orders)
	qc.Count(&result.Total)
	result.Data = orders
}
// Insert creates the order for a bound user and attaches the loaded
// user to order.User on success.
//
// Returns a non-nil error when the user cannot be loaded, when the
// user is not bound (IsBind != 1), or when the insert fails.
func (this *OrderRepositories) Insert(order *models.Order) error {
	u := models.User{}
	if err := this.db.Where("id = ?", order.UserId).First(&u).Error; err != nil {
		return err
	}
	// BUG fix: the old code returned a nil error for an unbound user,
	// so callers believed the order was created when it had been
	// silently skipped. Report the condition explicitly.
	if u.IsBind != 1 {
		return fmt.Errorf("user %d is not bound, refusing to create order", order.UserId)
	}
	if err := this.db.Create(order).Error; err != nil {
		return err
	}
	// u was already loaded above; the old second query was redundant.
	order.User = &u
	return nil
}
// Update applies the column values in m to orders (gorm derives the
// target row from m["ID"] — the caller must always set it). When the
// updated order has reached status 3, a PayRecord is written for the
// owning user.
//
// NOTE(review): payRecord.Amount is order.Amount * 100 while the
// message text divides by 100 — presumably Amount is stored in cents
// somewhere along the chain; verify the unit convention with callers
// before changing either side.
func (this *OrderRepositories) Update(m map[string]interface{}) error {
	var order models.Order
	var payRecord models.PayRecord
	var user models.User
	if err := this.db.Model(&models.Order{}).Updates(m).Error; err != nil {
		return err
	}
	this.db.Where("id = ?", m["ID"]).First(&order)
	this.db.Where("id = ?", order.UserId).First(&user)
	if order.Status == 3 {
		payRecord.Amount = order.Amount * 100
		payRecord.Content = fmt.Sprintf("昵称为:%s的用户为订单号为:%s支付了:%d",
			user.NickName, order.OrderNumber, order.Amount/100)
		payRecord.UserId = user.ID
		payRecord.UserName = user.Username
		if err := this.db.Create(&payRecord).Error; err != nil {
			return err
		}
	}
	return nil
}
// Delete hard-deletes (Unscoped, bypassing gorm soft delete) the
// orders whose ids are listed under the "ids" key of the ids map.
func (this *OrderRepositories) Delete(ids map[string][]uint) error {
	return this.db.Where("id in (?)", ids["ids"]).Unscoped().Delete(&models.Order{}).Error
}
// NotPayCount returns how many orders of user_id are in status 2
// (presumably "awaiting payment" — confirm against the status enum).
func (this *OrderRepositories) NotPayCount(user_id int) (count int) {
	this.db.Model(&models.Order{}).Where("status = ? and user_id =?",
		2, user_id).Count(&count)
	return
}
|
/*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package models
import (
"encoding/json"
"github.com/trustbloc/sidetree-core-go/pkg/api/operation"
"github.com/trustbloc/sidetree-core-go/pkg/versions/0_1/model"
)
// ChunkFile defines chunk file schema.
// A nil Deltas slice marshals to JSON null, an empty one to [];
// CreateChunkFile preserves nil when there are no deltas.
type ChunkFile struct {
	// Deltas included in this chunk file, each delta is an encoded string
	Deltas []*model.DeltaModel `json:"deltas"`
}
// CreateChunkFile combines the deltas of all operations into a chunk
// file model, grouped by type in the fixed order create, recover,
// update. With no matching operations, Deltas stays nil.
func CreateChunkFile(ops []*model.Operation) *ChunkFile {
	var deltas []*model.DeltaModel
	for _, typ := range []operation.Type{operation.TypeCreate, operation.TypeRecover, operation.TypeUpdate} {
		deltas = append(deltas, getDeltas(typ, ops)...)
	}
	return &ChunkFile{Deltas: deltas}
}
// ParseChunkFile parses a chunk file model from raw content.
func ParseChunkFile(content []byte) (*ChunkFile, error) {
	// getChunkFile already returns (nil, err) on failure, so its
	// result can be passed straight through.
	return getChunkFile(content)
}
// getDeltas collects the deltas of the operations whose type matches
// filter, preserving input order.
func getDeltas(filter operation.Type, ops []*model.Operation) []*model.DeltaModel {
	var matched []*model.DeltaModel
	for _, candidate := range ops {
		if candidate.Type != filter {
			continue
		}
		matched = append(matched, candidate.Delta)
	}
	return matched
}
// get chunk file struct from bytes.
// Declared as a package-level var (rather than a func) so tests can
// stub out the parsing step.
var getChunkFile = func(bytes []byte) (*ChunkFile, error) {
	return unmarshalChunkFile(bytes)
}
// unmarshalChunkFile decodes raw JSON bytes into a chunk file model.
func unmarshalChunkFile(data []byte) (*ChunkFile, error) {
	file := &ChunkFile{}
	if err := json.Unmarshal(data, file); err != nil {
		return nil, err
	}
	return file, nil
}
|
package api
import "net/mail"
// Email contains the information about one outgoing email message.
type Email struct {
	From *mail.Address // sender address
	To *mail.Address // single recipient (no CC/BCC fields)
	Subject string // subject line
	Body string // message body
}

// EmailService renders templates into an Email and delivers it.
type EmailService interface {
	// UseTemplate fills e from data using the named template.
	UseTemplate(e *Email, data interface{}, template string) error
	// Send delivers e; the transport is implementation-defined.
	Send(e *Email) error
}
|
package kafka
import (
"context"
"time"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
"github.com/containers-ai/alameda/datahub/pkg/entities"
autoscalingv1alpha1 "github.com/containers-ai/alameda/operator/api/v1alpha1"
datahubpkg "github.com/containers-ai/alameda/pkg/datahub"
k8sutils "github.com/containers-ai/alameda/pkg/utils/kubernetes"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// SyncWithDatahub deletes Kafka consumer-group and topic records from
// Datahub whose owning AlamedaScaler no longer exists in the cluster.
// The two cleanups run concurrently; the first error (if any) is
// returned.
func SyncWithDatahub(k8sClient client.Client,
	datahubClient *datahubpkg.Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	clusterUID, err := k8sutils.GetClusterUID(k8sClient)
	if err != nil {
		return errors.Wrap(err, "get cluster uid failed")
	}
	// Index the AlamedaScalers currently in the cluster as
	// namespace -> set of names.
	empty := struct{}{}
	alamedaScalerSet := make(map[string]map[string]struct{})
	alamedaScalerList := autoscalingv1alpha1.AlamedaScalerList{}
	err = k8sClient.List(ctx, &alamedaScalerList)
	if err != nil {
		return errors.Wrap(err, "list AlamedaScaler failed")
	}
	for _, item := range alamedaScalerList.Items {
		namespace := item.Namespace // typo fix: was "namesapce"
		name := item.Name
		if _, exist := alamedaScalerSet[namespace]; !exist {
			alamedaScalerSet[namespace] = make(map[string]struct{})
		}
		alamedaScalerSet[namespace][name] = empty
	}
	// scalerExists reports whether the AlamedaScaler referenced by a
	// Datahub record is still present in the cluster. Extracted so
	// both cleanup goroutines share one copy of the lookup logic.
	scalerExists := func(namespace, name string) bool {
		nameSet, ok := alamedaScalerSet[namespace]
		if !ok {
			return false
		}
		_, ok = nameSet[name]
		return ok
	}
	wg := errgroup.Group{}
	wg.Go(func() error {
		consumerGroups := []entities.ApplicationKafkaConsumerGroup{}
		err := datahubClient.List(&consumerGroups, datahubpkg.Option{
			Entity: entities.ApplicationKafkaConsumerGroup{
				ClusterName: clusterUID,
			},
			Fields: []string{"ClusterName"},
		})
		if err != nil {
			return errors.Wrap(err, "list consumerGroups from Datahub failed")
		}
		consumerGroupsToDelete := make([]entities.ApplicationKafkaConsumerGroup, 0)
		for _, consumerGroup := range consumerGroups {
			if !scalerExists(consumerGroup.AlamedaScalerNamespace, consumerGroup.AlamedaScalerName) {
				consumerGroupsToDelete = append(consumerGroupsToDelete, consumerGroup)
			}
		}
		if err := datahubClient.Delete(&consumerGroupsToDelete); err != nil {
			return errors.Wrap(err, "delete consumerGroups from Datahub failed")
		}
		return nil
	})
	wg.Go(func() error {
		topics := []entities.ApplicationKafkaTopic{}
		err := datahubClient.List(&topics, datahubpkg.Option{
			Entity: entities.ApplicationKafkaTopic{
				ClusterName: clusterUID,
			},
			Fields: []string{"ClusterName"},
		})
		if err != nil {
			return errors.Wrap(err, "list topics from Datahub failed")
		}
		topicsToDelete := make([]entities.ApplicationKafkaTopic, 0)
		for _, topic := range topics {
			if !scalerExists(topic.AlamedaScalerNamespace, topic.AlamedaScalerName) {
				topicsToDelete = append(topicsToDelete, topic)
			}
		}
		if err := datahubClient.Delete(&topicsToDelete); err != nil {
			return errors.Wrap(err, "delete topics from Datahub failed")
		}
		return nil
	})
	return wg.Wait()
}
|
package slack
import (
"net/http"
"testing"
"github.com/stretchr/testify/assert"
)
// getTeamList is a stub handler for /auth.teams.list that answers
// with a canned two-team JSON payload plus a pagination cursor.
func getTeamList(rw http.ResponseWriter, r *http.Request) {
	rw.Header().Set("Content-Type", "application/json")
	rw.Write([]byte(`{
	"ok": true,
	"teams": [
		{
			"name": "Shinichi's workspace",
			"id": "T12345678"
		},
		{
			"name": "Migi's workspace",
			"id": "T12345679"
		}
	],
	"response_metadata": {
		"next_cursor": "dXNlcl9pZDo5MTQyOTI5Mzkz"
	}
}`))
}
// TestListTeams wires the stub getTeamList handler into the shared
// test server (started at most once via once/startServer) and checks
// that ListTeams decodes both teams and the pagination cursor.
func TestListTeams(t *testing.T) {
	http.HandleFunc("/auth.teams.list", getTeamList)
	once.Do(startServer)
	api := New("testing-token", OptionAPIURL("http://"+serverAddr+"/"))
	teams, cursor, err := api.ListTeams(ListTeamsParameters{})
	if err != nil {
		t.Errorf("Unexpected error: %s", err)
		return
	}
	assert.Len(t, teams, 2)
	assert.Equal(t, "T12345678", teams[0].ID)
	assert.Equal(t, "Shinichi's workspace", teams[0].Name)
	assert.Equal(t, "T12345679", teams[1].ID)
	assert.Equal(t, "Migi's workspace", teams[1].Name)
	assert.Equal(t, "dXNlcl9pZDo5MTQyOTI5Mzkz", cursor)
}
|
package main
import (
"fmt"
studentSMS "sms/str-sms"
)
// student is the record kept by the legacy map-based implementation
// below (getAllStudent/addStudent/rmStudentById).
type student struct {
	id int
	name string
}

// students maps student id -> record for the legacy implementation.
var students = make(map[int]*student)
// main runs the interactive student-management menu loop: show the
// menu, dispatch the chosen action (list / add / remove) to the
// studentSMS system, then wait for Enter before re-displaying.
//
// Cleanup: the previous body carried a ~58-line commented-out copy of
// the old map-based implementation; it has been removed (the map
// helpers below remain for reference).
func main() {
	var input int
	system := studentSMS.NewSystem()
	fmt.Println("欢迎使用学生管理系统:")
	for {
		fmt.Println("请输入数字选择:1.查看全部学生 2.添加学生(输入name 和 id)3.根据id 删除学生")
		_, err := fmt.Scanln(&input)
		if err != nil {
			fmt.Println("输入不合法,请重新输入")
			continue
		}
		switch input {
		case 1:
			system.ListStudents()
		case 2:
			system.AddStudent()
		case 3:
			system.RemoveStudentById()
		default:
			fmt.Println("无效数字")
		}
		fmt.Println("按回车返回主菜单")
		// Scanln fails on an empty line, which is exactly the
		// "press Enter" case; either way we loop back to the menu.
		if _, err := fmt.Scanln(&input); err != nil {
			continue
		}
	}
}
// getAllStudent prints every student in the package-level students
// map (legacy map-based implementation; map order is random).
func getAllStudent() {
	for _, stu := range students {
		fmt.Println("name:", stu.name, ",id:", stu.id)
	}
}
// addStudent registers a student in the students map, rejecting
// duplicate ids with an error.
func addStudent(name string, id int) error {
	if _, exists := students[id]; exists {
		return fmt.Errorf("添加失败:已存在id为%d的学生", id)
	}
	students[id] = &student{name: name, id: id}
	return nil
}
// rmStudentById removes the student with the given id, reporting an
// error when no such student exists.
func rmStudentById(id int) error {
	_, exists := students[id]
	if !exists {
		return fmt.Errorf("删除失败:不存在id为%d的学生", id)
	}
	delete(students, id)
	return nil
}
|
/*
# -*- coding: utf-8 -*-
# @Author : joker
# @Time : 2020-07-09 09:31
# @File : _110_Balanced_Binary_Tree.go
# @Description : Determine whether a tree is a balanced binary tree:
   1. the left and right subtree heights differ by at most 1,
      and both the left and the right subtree are themselves balanced.
# @Attention :
   Note: a nil tree counts as balanced, i.e. it yields true.
*/
package v0
// isBalanced reports whether root is height-balanced: at every node
// the left and right subtree heights differ by at most 1. A nil tree
// is balanced.
func isBalanced(root *TreeNode) bool {
	b, _ := balanced(root)
	return b
}
// balanced reports whether root is height-balanced and, when it is,
// the height of the tree (0 for nil). The returned height is
// meaningless (0) when the tree is unbalanced, since the caller only
// uses it on the balanced path.
func balanced(root *TreeNode) (bool, int) {
	if root == nil {
		return true, 0
	}
	leftOK, leftH := balanced(root.Left)
	rightOK, rightH := balanced(root.Right)
	diff := leftH - rightH
	if diff < 0 {
		diff = -diff
	}
	if !leftOK || !rightOK || diff > 1 {
		return false, 0
	}
	height := rightH
	if leftH > rightH {
		height = leftH
	}
	return true, height + 1
}
|
package account
import (
"github.com/stretchr/testify/assert"
application "go_gin_gonic/core/application/account"
infrastructue "go_gin_gonic/core/infrastructure/account"
"testing"
)
// TestCreateAccount exercises the create-account use case end to end
// against the in-memory repository: execute it, then read the stored
// account back and check its id, customer id and zero opening
// balance.
func TestCreateAccount(t *testing.T) {
	accountId := "123"
	customerId := "456"
	accountRepository := infrastructue.NewInMemoryAccountRepository()
	createAccount := application.NewCreateAccount(accountRepository)
	err := createAccount.Execute(accountId, customerId)
	newAccount, errFindById := accountRepository.FindById(accountId)
	assert.Nil(t, err)
	assert.Nil(t, errFindById)
	assert.NotNil(t, newAccount)
	assert.EqualValues(t, newAccount.Id(), accountId)
	assert.EqualValues(t, newAccount.CustomerId(), customerId)
	assert.EqualValues(t, newAccount.Balance(), 0)
}
|
package cmd
import (
"database/sql"
_ "embed"
"fmt"
_ "github.com/go-sql-driver/mysql"
"github.com/spf13/cobra"
"go.uber.org/zap"
"log"
"os"
"strings"
"text/template"
)
const (
	// schemaTable is the database queried for column metadata
	// (MySQL's information_schema), despite the "Table" in its name.
	schemaTable = "information_schema"
	// Output sub-folders for the generated artifacts.
	outputFolderModel = "models"
	outputFolderRepo = "repositories"
	outputFolderService = "services"
)
// genAPI is the cobra "api" sub-command: it builds a Generator from
// the package-level flag values (dbUser, dbPass, dbHost, ... — bound
// elsewhere in this package, presumably via cobra flags; confirm) and
// runs the code-generation pipeline for one table.
var genAPI = &cobra.Command{
	Use: "api",
	Short: "api",
	Long: ``,
	Run: func(cmd *cobra.Command, args []string) {
		zap.S().Info("Start command api")
		g := &Generator{
			DBUser: dbUser,
			DBPass: dbPass,
			DBHost: dbHost,
			DBPort: dbPort,
			DBName: dbName,
			DBTable: dbTable,
			OutputFolder: outputFolder,
		}
		g.processGenerate(args)
		zap.S().Info("Stop command api")
	},
}
// dbModel is the template context for one generated model file.
type dbModel struct {
	ModelName string // Go struct name (CamelCase of TableName)
	TableName string
	Attributes []*dbModelAttribute
	NeedImport bool // true when any import block must be emitted
	NeedImportTime bool // true when a field uses time.Time
	NeedImportGorm bool
}

// dbModelAttribute describes one column of the source table.
type dbModelAttribute struct {
	FieldName string // Go field name (CamelCase of ColumnName)
	FieldType string // Go type; see GetGoDataType
	ColumnName string
	IsPrimaryKey bool
	IsNullable bool
}

// Generator carries the CLI parameters for one generation run.
type Generator struct {
	DBUser string
	DBPass string
	DBHost string
	DBPort int32
	DBName string
	DBTable string
	OutputFolder string
}
// processGenerate reads the column metadata of g.DBTable from MySQL's
// information_schema and generates the model source file for it.
func (g *Generator) processGenerate(args []string) {
	// example: user:pass@tcp(host:port)/database?param=value
	dataSourceName := fmt.Sprintf("%s:%s@tcp(%s:%v)/%s", g.DBUser, g.DBPass, g.DBHost, g.DBPort, schemaTable)
	mysqlDB, err := sql.Open("mysql", dataSourceName)
	if err != nil {
		log.Fatal("Can not connect to mysql, detail: ", err)
	}
	defer func() {
		err = mysqlDB.Close()
		if err != nil {
			zap.S().Error(err)
		}
	}()
	stmt, err := mysqlDB.Prepare("SELECT COLUMN_NAME, DATA_TYPE, IS_NULLABLE, COLUMN_KEY FROM COLUMNS WHERE TABLE_SCHEMA = ? AND TABLE_NAME =?")
	if err != nil {
		log.Println("Error when prepare query, detail: ", err)
		return
	}
	// Fix: the prepared statement and result rows were never closed,
	// leaking server-side resources.
	defer stmt.Close()
	rows, err := stmt.Query(g.DBName, g.DBTable)
	if err != nil {
		log.Println("Error when exec query, detail: ", err)
		return
	}
	defer rows.Close()
	m := &dbModel{
		ModelName: GetCamelCase(g.DBTable),
		TableName: g.DBTable,
		Attributes: make([]*dbModelAttribute, 0),
	}
	for rows.Next() {
		var columnName, dataType, isNullable, columnKey string
		err = rows.Scan(&columnName, &dataType, &isNullable, &columnKey)
		if err != nil {
			log.Println("Error when scan rows, detail: ", err)
			return
		}
		attr := &dbModelAttribute{
			FieldName: GetCamelCase(columnName),
			FieldType: GetGoDataType(dataType, isNullable),
			ColumnName: columnName,
		}
		if columnKey == "PRI" {
			attr.IsPrimaryKey = true
		}
		if isNullable == "YES" {
			attr.IsNullable = true
		}
		if attr.FieldType == "time.Time" || attr.FieldType == "*time.Time" {
			m.NeedImport = true
			m.NeedImportTime = true
		}
		m.Attributes = append(m.Attributes, attr)
	}
	// Fix: a rows-iteration error previously went unnoticed, which
	// could silently truncate the generated model.
	if err = rows.Err(); err != nil {
		log.Println("Error when iterating rows, detail: ", err)
		return
	}
	err = g.generateModel(m)
	if err != nil {
		zap.S().Error("Error when generateModel, detail: ", err)
	}
}
// modelTemplateContent holds templates/model.tmpl, embedded at build
// time (the go:embed directive must stay immediately above the var).
//go:embed templates/model.tmpl
var modelTemplateContent string
// generateModel renders the model template for m into
// ./<OutputFolder>/models/<table>.go.
func (g *Generator) generateModel(m *dbModel) error {
	tmpl, err := template.New("test_model").Parse(modelTemplateContent)
	if err != nil {
		return err
	}
	// open output file
	fo, err := os.Create(fmt.Sprintf("./%v/%v/%v.go", g.OutputFolder, outputFolderModel, g.DBTable))
	if err != nil {
		return err
	}
	// close fo on exit and check for its returned error
	defer func() {
		if err := fo.Close(); err != nil {
			// Fix: the old message said "Error when exec query",
			// copy-pasted from the query path and misleading in logs.
			zap.S().Error("Error when closing output file, detail: ", err)
		}
	}()
	err = tmpl.Execute(fo, m)
	if err != nil {
		return err
	}
	return nil
}
// GetGoDataType maps a MySQL column type to the Go type used in the
// generated model.
//
// Nullability is only honoured for date/datetime (pointer types);
// other types ignore isNullable. "tinyint" is assumed to be a
// tinyint(1) boolean flag — TODO confirm against the schemas in use.
// Unknown types map to "" and must be handled by the caller.
func GetGoDataType(mysqlType, isNullable string) string {
	switch mysqlType {
	case "varchar", "longtext", "text":
		return "string"
	case "smallint", "int", "bigint", "timestamp":
		return "int64"
	case "tinyint":
		return "bool"
	case "decimal":
		// BUG fix: this previously returned "double", which is not a
		// Go type — generated code using it would not compile.
		return "float64"
	case "date", "datetime":
		if isNullable == "YES" {
			return "*time.Time"
		}
		return "time.Time"
	default:
		return ""
	}
}
// GetCamelCase converts a snake/kebab/space-separated identifier to
// CamelCase: separators ('_', ' ', '-') are dropped and capitalize
// the following lowercase letter; digits and uppercase letters pass
// through; any other rune is dropped. "id" maps to "ID" as a special
// case.
//
// Improvement: uses strings.Builder instead of quadratic string
// concatenation; output is unchanged.
func GetCamelCase(input string) string {
	if input == "id" {
		return "ID"
	}
	var b strings.Builder
	b.Grow(len(input))
	capNext := true
	for _, v := range input {
		switch {
		case v >= 'A' && v <= 'Z':
			b.WriteRune(v)
		case v >= '0' && v <= '9':
			b.WriteRune(v)
		case v >= 'a' && v <= 'z':
			if capNext {
				b.WriteString(strings.ToUpper(string(v)))
			} else {
				b.WriteRune(v)
			}
		}
		// Only a separator arms capitalization of the next letter.
		capNext = v == '_' || v == ' ' || v == '-'
	}
	return b.String()
}
|
package persistence_test
import (
"strconv"
"testing"
"github.com/alicebob/miniredis"
"github.com/rafael84/shortener/persistence"
)
// TestRedis drives Set/Increment/Get/Count through a table of
// scenarios against an in-process miniredis server. The scenarios
// are order-dependent: later cases rely on keys written by earlier
// ones (e.g. the final Set on "C" is expected to fail because "C"
// already exists — presumably Set refuses to overwrite; confirm in
// the persistence package).
func TestRedis(t *testing.T) {
	s, err := miniredis.Run()
	if err != nil {
		t.Fatal(err)
	}
	defer s.Close()
	redis := persistence.NewRedis(s.Addr(), "", 0)
	type KeyValue struct {
		Key string
		Value string
	}
	for _, tc := range []struct {
		Scenario string
		Set KeyValue
		Get KeyValue
		Increment bool
		Count int
		SetErr string
	}{
		{
			Scenario: "Set [A] 1 Get [A] 1",
			Set: KeyValue{"A", "1"},
			Get: KeyValue{"A", "1"},
			Increment: true,
			Count: 1,
		},
		{
			Scenario: "Set [ ] 2 Get [ ] 2",
			Set: KeyValue{" ", "2"},
			Get: KeyValue{" ", "2"},
			Increment: true,
			Count: 2,
		},
		{
			Scenario: "Set [C] 3 Get [D] ''",
			Set: KeyValue{"C", "3"},
			Get: KeyValue{"D", ""},
			Increment: true,
			Count: 3,
		},
		{
			Scenario: "Set [C] 4 Get [C] 4",
			Set: KeyValue{"C", "4"},
			SetErr: "could not set alias[C] url[4]",
		},
	} {
		t.Run(tc.Scenario, func(t *testing.T) {
			// set
			setErr := ""
			err := redis.Set(tc.Set.Key, tc.Set.Value)
			if err != nil {
				setErr = err.Error()
			}
			if setErr != tc.SetErr {
				t.Fatal(setErr)
			}
			// increment
			if tc.Increment {
				if err := redis.Increment(); err != nil {
					t.Fatal(err)
				}
			}
			// get
			url, _ := redis.Get(tc.Get.Key)
			if url != tc.Get.Value {
				t.Fatalf("unexpected value\nwant\t[%v]\ngot\t[%v]",
					tc.Get.Value, url)
			}
			// count
			if tc.Count > 0 {
				count := redis.Count()
				if count != tc.Count {
					t.Fatalf("unexpected count\nwant\t[%v]\ngot\t[%v]",
						tc.Count, count)
				}
			}
		})
	}
}
// benchmarkMiniRedisSet measures Set throughput against an in-process
// miniredis server, cycling through keyCount distinct aliases.
// Set errors are deliberately ignored: only throughput is measured.
func benchmarkMiniRedisSet(keyCount int, b *testing.B) {
	s, err := miniredis.Run()
	if err != nil {
		b.Fatal(err)
	}
	defer s.Close()
	redis := persistence.NewRedis(s.Addr(), "", 0)
	aliases := []string{}
	for n := 0; n < keyCount; n++ {
		aliases = append(aliases, strconv.Itoa(n))
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		redis.Set(aliases[n%keyCount], "url")
	}
}

// Fixed key-count variants, for benchstat comparison across sizes.
func BenchmarkMiniRedisSet1(b *testing.B) { benchmarkMiniRedisSet(1, b) }
func BenchmarkMiniRedisSet10(b *testing.B) { benchmarkMiniRedisSet(10, b) }
func BenchmarkMiniRedisSet100(b *testing.B) { benchmarkMiniRedisSet(100, b) }
func BenchmarkMiniRedisSet1000(b *testing.B) { benchmarkMiniRedisSet(1000, b) }
// benchmarkMiniRedisGet measures Get throughput against an in-process
// miniredis server pre-populated with keyCount aliases.
func benchmarkMiniRedisGet(keyCount int, b *testing.B) {
	s, err := miniredis.Run()
	if err != nil {
		b.Fatal(err)
	}
	defer s.Close()
	redis := persistence.NewRedis(s.Addr(), "", 0)
	for n := 0; n < keyCount; n++ {
		redis.Set(strconv.Itoa(n), "url")
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		redis.Get(strconv.Itoa(n % keyCount))
	}
}

// Fixed key-count variants, for benchstat comparison across sizes.
func BenchmarkMiniRedisGet1(b *testing.B) { benchmarkMiniRedisGet(1, b) }
func BenchmarkMiniRedisGet10(b *testing.B) { benchmarkMiniRedisGet(10, b) }
func BenchmarkMiniRedisGet100(b *testing.B) { benchmarkMiniRedisGet(100, b) }
func BenchmarkMiniRedisGet1000(b *testing.B) { benchmarkMiniRedisGet(1000, b) }
// benchmarkLocalRedisSet measures Set throughput against a REAL Redis
// expected at localhost:6379; it will error per-op (silently) when no
// server is running there.
func benchmarkLocalRedisSet(keyCount int, b *testing.B) {
	redis := persistence.NewRedis("localhost:6379", "", 0)
	aliases := []string{}
	for n := 0; n < keyCount; n++ {
		aliases = append(aliases, strconv.Itoa(n))
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		redis.Set(aliases[n%keyCount], "url")
	}
}

// Fixed key-count variants, for benchstat comparison across sizes.
func BenchmarkLocalRedisSet1(b *testing.B) { benchmarkLocalRedisSet(1, b) }
func BenchmarkLocalRedisSet10(b *testing.B) { benchmarkLocalRedisSet(10, b) }
func BenchmarkLocalRedisSet100(b *testing.B) { benchmarkLocalRedisSet(100, b) }
// benchmarkLocalRedisGet measures Get throughput against a REAL Redis
// expected at localhost:6379, pre-populated with keyCount aliases.
func benchmarkLocalRedisGet(keyCount int, b *testing.B) {
	redis := persistence.NewRedis("localhost:6379", "", 0)
	for n := 0; n < keyCount; n++ {
		redis.Set(strconv.Itoa(n), "url")
	}
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		redis.Get(strconv.Itoa(n % keyCount))
	}
}

// Fixed key-count variants, for benchstat comparison across sizes.
func BenchmarkLocalRedisGet1(b *testing.B) { benchmarkLocalRedisGet(1, b) }
func BenchmarkLocalRedisGet10(b *testing.B) { benchmarkLocalRedisGet(10, b) }
func BenchmarkLocalRedisGet100(b *testing.B) { benchmarkLocalRedisGet(100, b) }
|
package main
import (
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"os"
)
// getSignature returns the hex-encoded HMAC-SHA256 of query, keyed by
// the SECRET_KEY environment variable (read on every call; an unset
// variable yields an empty key).
func getSignature(query string) string {
	mac := hmac.New(sha256.New, []byte(os.Getenv("SECRET_KEY")))
	mac.Write([]byte(query))
	return hex.EncodeToString(mac.Sum(nil))
}
|
package intToRoman
import "testing"
// Test_intToRoman is a table-driven test of intToRoman over the
// classic examples, covering plain additive numerals and subtractive
// forms (IV, IX, CM, XC).
// NOTE(review): the case names have drifted from their inputs
// ("seven" tests 9, "eight" tests 58, "nine" tests 1994); consider
// renaming them after their inputs.
func Test_intToRoman(t *testing.T) {
	type args struct {
		num int
	}
	tests := []struct {
		name string
		args args
		want string
	}{
		// TODO: Add test cases.
		{
			name: "first",
			args: args{
				num: 1,
			},
			want: "I",
		},
		{
			name: "second",
			args: args{
				num: 2,
			},
			want: "II",
		},
		{
			name: "three",
			args: args{
				num: 3,
			},
			want: "III",
		},
		{
			name: "four",
			args: args{
				num: 4,
			},
			want: "IV",
		},
		{
			name: "five",
			args: args{
				num: 5,
			},
			want: "V",
		},
		{
			name: "six",
			args: args{
				num: 6,
			},
			want: "VI",
		},
		{
			name: "seven",
			args: args{
				num: 9,
			},
			want: "IX",
		},
		{
			name: "eight",
			args: args{
				num: 58,
			},
			want: "LVIII",
		},
		{
			name: "nine",
			args: args{
				num: 1994,
			},
			want: "MCMXCIV",
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := intToRoman(tt.args.num); got != tt.want {
				t.Errorf("intToRoman() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2021 The TiChi Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
The original file of the code is at:
https://github.com/kubernetes/test-infra/blob/master/prow/external-plugins/cherrypicker/server.go,
which we modified to add support for copying the labels.
*/
package cherrypicker
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"regexp"
"strings"
"sync"
"time"
"github.com/sirupsen/logrus"
tiexternalplugins "github.com/ti-community-infra/tichi/internal/pkg/externalplugins"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/test-infra/prow/config"
"k8s.io/test-infra/prow/git/v2"
"k8s.io/test-infra/prow/github"
"k8s.io/test-infra/prow/pluginhelp"
"k8s.io/test-infra/prow/pluginhelp/externalplugins"
"k8s.io/test-infra/prow/plugins"
"k8s.io/utils/exec"
)
// PluginName is the name under which this external plugin registers.
const PluginName = "ti-community-cherrypicker"

var (
	// cherryPickRe matches "/cherrypick <branch>" or
	// "/cherry-pick <branch>" commands, one per line, anywhere in a
	// comment body ((?m) makes ^/$ per-line).
	cherryPickRe = regexp.MustCompile(`(?m)^(?:/cherrypick|/cherry-pick)\s+(.+)$`)
	// cherryPickBranchFmt formats the working branch: PR number, target branch.
	cherryPickBranchFmt = "cherry-pick-%d-to-%s"
	// cherryPickTipFmt is the note placed in cherry-pick PR bodies.
	cherryPickTipFmt = "This is an automated cherry-pick of #%d"
)

// upstreamRemoteName is the git remote name used for the source repo.
const upstreamRemoteName = "upstream"
// githubClient is the subset of the GitHub API used by this plugin,
// declared locally so it can be faked in tests.
type githubClient interface {
	AddLabels(org, repo string, number int, labels ...string) error
	AssignIssue(org, repo string, number int, logins []string) error
	CreateComment(org, repo string, number int, comment string) error
	CreateFork(org, repo string) (string, error)
	CreatePullRequest(org, repo, title, body, head, base string, canModify bool) (int, error)
	CreateIssue(org, repo, title, body string, milestone int, labels, assignees []string) (int, error)
	EnsureFork(forkingUser, org, repo string) (string, error)
	GetPullRequest(org, repo string, number int) (*github.PullRequest, error)
	GetPullRequestPatch(org, repo string, number int) ([]byte, error)
	GetPullRequests(org, repo string) ([]github.PullRequest, error)
	GetRepo(owner, name string) (github.FullRepo, error)
	IsMember(org, user string) (bool, error)
	ListIssueComments(org, repo string, number int) ([]github.IssueComment, error)
	GetIssueLabels(org, repo string, number int) ([]github.Label, error)
	ListOrgMembers(org, role string) ([]github.TeamMember, error)
}
// HelpProvider constructs the PluginHelp for this plugin, rendering a
// per-repository summary of the cherrypicker configuration (label
// prefixes, who may cherry-pick, conflict handling) for the enabled
// repositories, plus a sample YAML snippet and the command help.
func HelpProvider(epa *tiexternalplugins.ConfigAgent) externalplugins.ExternalPluginHelpProvider {
	return func(enabledRepos []config.OrgRepo) (*pluginhelp.PluginHelp, error) {
		configInfo := map[string]string{}
		cfg := epa.Config()
		for _, repo := range enabledRepos {
			opts := cfg.CherrypickerFor(repo.Org, repo.Repo)
			var configInfoStrings []string
			configInfoStrings = append(configInfoStrings, "The plugin has these configurations:<ul>")
			if len(opts.LabelPrefix) != 0 {
				configInfoStrings = append(configInfoStrings, "<li>The current label prefix for cherrypicker is: "+
					opts.LabelPrefix+"</li>")
			}
			if len(opts.PickedLabelPrefix) != 0 {
				configInfoStrings = append(configInfoStrings, "<li>The current picked label prefix for cherrypicker is: "+
					opts.PickedLabelPrefix+"</li>")
			}
			if opts.AllowAll {
				configInfoStrings = append(configInfoStrings, "<li>For this repository, cherry-pick is available to all.</li>")
			} else {
				configInfoStrings = append(configInfoStrings, "<li>For this repository, "+
					"only organization members are allowed to do cherry-pick.</li>")
			}
			if opts.IssueOnConflict {
				configInfoStrings = append(configInfoStrings, "<li>When a cherry-pick PR conflicts, "+
					"an issue will be created to track it.</li>")
			} else {
				configInfoStrings = append(configInfoStrings, "<li>When a cherry-pick PR conflicts, "+
					"cherrypicker will create the PR with conflicts.</li>")
			}
			configInfoStrings = append(configInfoStrings, "</ul>")
			configInfo[repo.String()] = strings.Join(configInfoStrings, "\n")
		}
		// Example configuration rendered into the help page snippet.
		yamlSnippet, err := plugins.CommentMap.GenYaml(&tiexternalplugins.Configuration{
			TiCommunityCherrypicker: []tiexternalplugins.TiCommunityCherrypicker{
				{
					Repos: []string{"ti-community-infra/test-dev"},
					LabelPrefix: "needs-cherry-pick-",
					PickedLabelPrefix: "type/cherry-pick-for-",
					AllowAll: true,
					ExcludeLabels: []string{"status/can-merge"},
				},
			},
		})
		if err != nil {
			logrus.WithError(err).Warnf("cannot generate comments for %s plugin", PluginName)
		}
		pluginHelp := &pluginhelp.PluginHelp{
			Description: "The cherrypicker plugin is used for cherry-pick PRs across branches. " +
				"For every successful cherry-pick invocation a new PR is opened " +
				"against the target branch and assigned to the requestor. ",
			Config: configInfo,
			Snippet: yamlSnippet,
			Events: []string{tiexternalplugins.PullRequestEvent, tiexternalplugins.IssueCommentEvent},
		}
		pluginHelp.AddCommand(pluginhelp.Command{
			Usage: "/cherry-pick [branch]",
			Description: "Cherrypick a PR to a different branch. " +
				"This command works both in merged PRs (the cherry-pick PR is opened immediately) " +
				"and open PRs (the cherry-pick PR opens as soon as the original PR merges).",
			Featured: true,
			WhoCanUse: "Members of the trusted organization for the repo or anyone(depends on the AllowAll configuration).",
			Examples: []string{"/cherrypick release-3.9", "/cherry-pick release-1.15"},
		})
		return pluginHelp, nil
	}
}
// Server implements http.Handler. It validates incoming GitHub webhooks and
// then dispatches them to the appropriate plugins.
type Server struct {
	// TokenGenerator supplies the shared webhook secret used to
	// validate payload signatures.
	TokenGenerator func() []byte
	BotUser *github.UserData
	Email string
	GitClient git.ClientFactory
	// Used for unit testing
	Push func(forkName, newBranch string, force bool) error
	GitHubClient githubClient
	Log *logrus.Entry
	ConfigAgent *tiexternalplugins.ConfigAgent
	Bare *http.Client
	PatchURL string
	GitHubURL string
	// repoLock guards Repos.
	repoLock sync.Mutex
	Repos []github.Repo
	// mapLock guards lockMap.
	mapLock sync.Mutex
	lockMap map[cherryPickRequest]*sync.Mutex
}

// cherryPickRequest identifies one cherry-pick job — a PR in org/repo
// targeted at targetBranch — and serves as the per-job lock key.
type cherryPickRequest struct {
	org string
	repo string
	pr int
	targetBranch string
}
// ServeHTTP validates an incoming webhook and puts it into the event channel.
// ValidateWebhook writes the HTTP error response itself on failure;
// handler errors are only logged because the webhook has already been
// acknowledged by then.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	eventType, eventGUID, payload, ok, _ := github.ValidateWebhook(w, r, s.TokenGenerator)
	if !ok {
		return
	}
	fmt.Fprint(w, "Event received. Have a nice day.")
	if err := s.handleEvent(eventType, eventGUID, payload); err != nil {
		logrus.WithError(err).Error("Error parsing event.")
	}
}
// handleEvent demultiplexes a validated webhook payload by event
// type. Comment and pull-request events are processed in
// fire-and-forget goroutines whose errors are logged, not returned —
// a non-nil return here only means the payload failed to unmarshal.
func (s *Server) handleEvent(eventType, eventGUID string, payload []byte) error {
	l := logrus.WithFields(logrus.Fields{
		"event-type": eventType,
		github.EventGUID: eventGUID,
	})
	switch eventType {
	case "issue_comment":
		var ic github.IssueCommentEvent
		if err := json.Unmarshal(payload, &ic); err != nil {
			return err
		}
		go func() {
			if err := s.handleIssueComment(l, ic); err != nil {
				s.Log.WithError(err).WithFields(l.Data).Info("Cherry-pick failed.")
			}
		}()
	case "pull_request":
		var pr github.PullRequestEvent
		if err := json.Unmarshal(payload, &pr); err != nil {
			return err
		}
		go func() {
			if err := s.handlePullRequest(l, pr); err != nil {
				s.Log.WithError(err).WithFields(l.Data).Info("Cherry-pick failed.")
			}
		}()
	default:
		logrus.Debugf("Skipping event of type %q.", eventType)
	}
	return nil
}
// handleIssueComment reacts to new "/cherrypick <branch>" comments on PRs.
// On an open PR it only acknowledges the request (the actual cherry-pick
// happens when the PR merges); on a closed+merged PR it performs the
// cherry-pick for every distinct requested branch right away.
func (s *Server) handleIssueComment(l *logrus.Entry, ic github.IssueCommentEvent) error {
	// Only consider new comments in PRs.
	if !ic.Issue.IsPullRequest() || ic.Action != github.IssueCommentActionCreated {
		return nil
	}

	org := ic.Repo.Owner.Login
	repo := ic.Repo.Name
	num := ic.Issue.Number
	commentAuthor := ic.Comment.User.Login

	opts := s.ConfigAgent.Config().CherrypickerFor(org, repo)

	// Do not create a new logger, its fields are re-used by the caller in case of errors.
	*l = *l.WithFields(logrus.Fields{
		github.OrgLogField:  org,
		github.RepoLogField: repo,
		github.PrLogField:   num,
	})

	cherryPickMatches := cherryPickRe.FindAllStringSubmatch(ic.Comment.Body, -1)
	if len(cherryPickMatches) == 0 || len(cherryPickMatches[0]) != 2 {
		return nil
	}

	// Deduplicate: a comment may request the same branch more than once.
	targetBranchesSet := sets.NewString()
	for _, match := range cherryPickMatches {
		targetBranch := strings.TrimSpace(match[1])
		targetBranchesSet.Insert(targetBranch)
	}

	// PR still open: record intent only; the merge event triggers the pick.
	if ic.Issue.State != "closed" {
		if !opts.AllowAll {
			// Only members should be able to do cherry-picks.
			ok, err := s.GitHubClient.IsMember(org, commentAuthor)
			if err != nil {
				return err
			}
			if !ok {
				resp := fmt.Sprintf("only [%s](https://github.com/orgs/%s/people) org members may request cherry-picks. "+
					"You can still do the cherry-pick manually.", org, org)
				l.Info(resp)
				return s.GitHubClient.CreateComment(org, repo, num, tiexternalplugins.FormatICResponse(ic.Comment, resp))
			}
		}
		resp := fmt.Sprintf("once the present PR merges, "+
			"I will cherry-pick it on top of %s in the new PR and assign it to you.",
			strings.Join(targetBranchesSet.List(), "/"))
		l.Info(resp)
		return s.GitHubClient.CreateComment(org, repo, num, tiexternalplugins.FormatICResponse(ic.Comment, resp))
	}

	pr, err := s.GitHubClient.GetPullRequest(org, repo, num)
	if err != nil {
		return fmt.Errorf("failed to get pull request %s/%s#%d: %w", org, repo, num, err)
	}
	baseBranch := pr.Base.Ref

	// Cherry-pick only merged PRs.
	if !pr.Merged {
		resp := "cannot cherry-pick an unmerged PR."
		l.Info(resp)
		return s.GitHubClient.CreateComment(org, repo, num, tiexternalplugins.FormatICResponse(ic.Comment, resp))
	}

	if !opts.AllowAll {
		// Only org members should be able to do cherry-picks.
		ok, err := s.GitHubClient.IsMember(org, commentAuthor)
		if err != nil {
			return err
		}
		if !ok {
			resp := fmt.Sprintf("only [%s](https://github.com/orgs/%s/people) org members may request cherry picks. "+
				"You can still do the cherry-pick manually.", org, org)
			l.Info(resp)
			return s.GitHubClient.CreateComment(org, repo, num, tiexternalplugins.FormatICResponse(ic.Comment, resp))
		}
	}

	// One cherry-pick per distinct requested branch; failures are logged and
	// do not stop the remaining branches.
	for _, targetBranch := range targetBranchesSet.List() {
		if baseBranch == targetBranch {
			resp := fmt.Sprintf("base branch (%s) needs to differ from target branch (%s).", baseBranch, targetBranch)
			l.Info(resp)
			if err := s.GitHubClient.CreateComment(org, repo, num,
				tiexternalplugins.FormatICResponse(ic.Comment, resp)); err != nil {
				l.WithError(err).WithField("response", resp).Error("Failed to create comment.")
			}
			continue
		}
		*l = *l.WithFields(logrus.Fields{
			"requestor":     ic.Comment.User.Login,
			"target_branch": targetBranch,
		})
		l.Debug("Cherrypick request.")
		err := s.handle(l, ic.Comment.User.Login, &ic.Comment, org, repo, targetBranch, pr)
		if err != nil {
			l.WithError(err).Error("Cherrypick failed.")
		}
	}
	return nil
}
// handlePullRequest performs cherry-picks for a merged PR. On a close event
// it collects requests from earlier "/cherrypick" comments and from
// LabelPrefix labels; on a labeled event it handles only the newly added
// label. Requests from non-members are dropped unless AllowAll is set, and
// each target branch is processed at most once.
func (s *Server) handlePullRequest(log *logrus.Entry, pre github.PullRequestEvent) error {
	// Only consider merged PRs.
	pr := pre.PullRequest
	if !pr.Merged || pr.MergeSHA == nil {
		return nil
	}

	org := pr.Base.Repo.Owner.Login
	repo := pr.Base.Repo.Name
	baseBranch := pr.Base.Ref
	num := pr.Number

	opts := s.ConfigAgent.Config().CherrypickerFor(org, repo)

	// requestor -> target branch -> issue comment.
	requestorToComments := make(map[string]map[string]*github.IssueComment)

	// NOTICE: This will set the requestor to the author of the PR.
	if requestorToComments[pr.User.Login] == nil {
		requestorToComments[pr.User.Login] = make(map[string]*github.IssueComment)
	}

	switch pre.Action {
	// Considering close event.
	case github.PullRequestActionClosed:
		{
			comments, err := s.GitHubClient.ListIssueComments(org, repo, num)
			if err != nil {
				return fmt.Errorf("failed to list comments: %w", err)
			}

			// First look for our special comments.
			for i := range comments {
				c := comments[i]
				cherryPickMatches := cherryPickRe.FindAllStringSubmatch(c.Body, -1)
				for _, match := range cherryPickMatches {
					targetBranch := strings.TrimSpace(match[1])
					if requestorToComments[c.User.Login] == nil {
						requestorToComments[c.User.Login] = make(map[string]*github.IssueComment)
					}
					requestorToComments[c.User.Login][targetBranch] = &c
				}
			}

			// NOTE(review): the PR author's entry is pre-created above, so this
			// is true even when no comment matched — the label check below still
			// decides whether anything is actually cherry-picked.
			foundCherryPickComments := len(requestorToComments) != 0

			// Now look for our special labels.
			labels, err := s.GitHubClient.GetIssueLabels(org, repo, num)
			if err != nil {
				return fmt.Errorf("failed to get issue labels: %w", err)
			}

			foundCherryPickLabels := false
			for _, label := range labels {
				if strings.HasPrefix(label.Name, opts.LabelPrefix) {
					// leave this nil which indicates a label-initiated cherry-pick.
					requestorToComments[pr.User.Login][label.Name[len(opts.LabelPrefix):]] = nil
					foundCherryPickLabels = true
				}
			}

			// No need to cherry pick.
			if !foundCherryPickComments && !foundCherryPickLabels {
				return nil
			}
		}
	// Considering labeled event(Processes only the label that was added).
	case github.PullRequestActionLabeled:
		{
			if strings.HasPrefix(pre.Label.Name, opts.LabelPrefix) {
				// leave this nil which indicates a label-initiated cherry-pick.
				requestorToComments[pr.User.Login][pre.Label.Name[len(opts.LabelPrefix):]] = nil
			} else {
				return nil
			}
		}
	default:
		return nil
	}

	// Figure out membership.
	if !opts.AllowAll {
		members, err := s.GitHubClient.ListOrgMembers(org, "all")
		if err != nil {
			return err
		}
		for requestor := range requestorToComments {
			isMember := false
			for _, m := range members {
				if requestor == m.Login {
					isMember = true
					break
				}
			}
			if !isMember {
				delete(requestorToComments, requestor)
			}
		}
	}

	// Do not create a new logger, its fields are re-used by the caller in case of errors.
	*log = *log.WithFields(logrus.Fields{
		github.OrgLogField:  org,
		github.RepoLogField: repo,
		github.PrLogField:   num,
	})

	// Handle multiple comments serially. Make sure to filter out
	// comments targeting the same branch.
	handledBranches := make(map[string]bool)
	var errs []error
	for requestor, branches := range requestorToComments {
		for targetBranch, ic := range branches {
			if handledBranches[targetBranch] {
				// Branch already handled. Skip.
				continue
			}
			if targetBranch == baseBranch {
				resp := fmt.Sprintf("base branch (%s) needs to differ from target branch (%s).", baseBranch, targetBranch)
				log.Info(resp)
				if err := s.createComment(log, org, repo, num, ic, resp); err != nil {
					log.WithError(err).WithField("response", resp).Error("Failed to create comment.")
				}
				continue
			}
			handledBranches[targetBranch] = true
			l := log.WithFields(logrus.Fields{
				"requestor":     requestor,
				"target_branch": targetBranch,
			})
			l.Debug("Cherrypick request.")
			err := s.handle(l, requestor, ic, org, repo, targetBranch, &pr)
			if err != nil {
				errs = append(errs, fmt.Errorf("failed to create cherrypick: %w", err))
			}
		}
	}
	return utilerrors.NewAggregate(errs)
}
//nolint:gocyclo
// TODO: refactoring to reduce complexity.
// handle performs one cherry-pick end to end: it serializes per
// (org, repo, PR, target branch), clones the repo and checks out the target
// branch, applies the PR's patch (falling back to `git cherry-pick -m 1` of
// the merge commit on conflict when IssueOnConflict is off), pushes the
// result to the bot's fork, opens the new PR, copies labels, and assigns the
// requestor. Most failures are reported back as comments on the original PR.
func (s *Server) handle(logger *logrus.Entry, requestor string,
	comment *github.IssueComment, org, repo, targetBranch string, pr *github.PullRequest) error {
	num := pr.Number
	title := pr.Title
	body := pr.Body

	// Lazily create and then acquire the per-request mutex so identical
	// requests (e.g. duplicate webhook deliveries) run one at a time.
	var lock *sync.Mutex
	func() {
		s.mapLock.Lock()
		defer s.mapLock.Unlock()
		if _, ok := s.lockMap[cherryPickRequest{org, repo, num, targetBranch}]; !ok {
			if s.lockMap == nil {
				s.lockMap = map[cherryPickRequest]*sync.Mutex{}
			}
			s.lockMap[cherryPickRequest{org, repo, num, targetBranch}] = &sync.Mutex{}
		}
		lock = s.lockMap[cherryPickRequest{org, repo, num, targetBranch}]
	}()
	lock.Lock()
	defer lock.Unlock()

	opts := s.ConfigAgent.Config().CherrypickerFor(org, repo)

	forkName, err := s.ensureForkExists(org, repo)
	if err != nil {
		logger.WithError(err).Warn("Failed to ensure fork exists.")
		resp := fmt.Sprintf("cannot fork %s/%s: %v.", org, repo, err)
		return s.createComment(logger, org, repo, num, comment, resp)
	}

	// Clone the repo, checkout the target branch.
	startClone := time.Now()
	r, err := s.GitClient.ClientFor(org, repo)
	if err != nil {
		return fmt.Errorf("failed to get git client for %s/%s: %w", org, forkName, err)
	}
	defer func() {
		if err := r.Clean(); err != nil {
			logger.WithError(err).Error("Error cleaning up repo.")
		}
	}()
	if err := r.Checkout(targetBranch); err != nil {
		logger.WithError(err).Warn("Failed to checkout target branch.")
		resp := fmt.Sprintf("cannot checkout `%s`: %v", targetBranch, err)
		return s.createComment(logger, org, repo, num, comment, resp)
	}
	logger.WithField("duration", time.Since(startClone)).Info("Cloned and checked out target branch.")

	// Fetch the patch from GitHub
	localPath, err := s.getPatch(org, repo, targetBranch, num)
	if err != nil {
		return fmt.Errorf("failed to get patch: %w", err)
	}

	// Setup git name and email.
	if err := r.Config("user.name", s.BotUser.Login); err != nil {
		return fmt.Errorf("failed to configure git user: %w", err)
	}
	email := s.Email
	if email == "" {
		email = s.BotUser.Email
	}
	if err := r.Config("user.email", email); err != nil {
		return fmt.Errorf("failed to configure git Email: %w", err)
	}

	// New branch for the cherry-pick.
	newBranch := fmt.Sprintf(cherryPickBranchFmt, num, targetBranch)

	// Check if that branch already exists, which means there is already a PR for that cherry-pick.
	if r.BranchExists(newBranch) {
		// Find the PR and link to it.
		prs, err := s.GitHubClient.GetPullRequests(org, repo)
		if err != nil {
			return fmt.Errorf("failed to get pullrequests for %s/%s: %w", org, repo, err)
		}
		for _, pr := range prs {
			if pr.Head.Ref == fmt.Sprintf("%s:%s", s.BotUser.Login, newBranch) {
				logger.WithField("preexisting_cherrypick", pr.HTMLURL).Info("PR already has cherrypick.")
				resp := fmt.Sprintf("looks like #%d has already been cherry picked in %s.", num, pr.HTMLURL)
				return s.createComment(logger, org, repo, num, comment, resp)
			}
		}
	}

	// Create the branch for the cherry-pick.
	if err := r.CheckoutNewBranch(newBranch); err != nil {
		return fmt.Errorf("failed to checkout %s: %w", newBranch, err)
	}

	// Title for GitHub issue/PR.
	title = fmt.Sprintf("%s (#%d)", title, num)

	// Try git am --3way localPath.
	if err := r.Am(localPath); err != nil {
		var errs []error
		logger.WithError(err).Warnf("Failed to apply #%d on top of target branch %q.", num, targetBranch)
		if opts.IssueOnConflict {
			resp := fmt.Sprintf("manual cherrypick required.\n\nFailed to apply #%d on top of branch %q:\n```\n%v\n```",
				num, targetBranch, err)
			if err := s.createIssue(logger, org, repo, title, resp, num, comment, nil, []string{requestor}); err != nil {
				errs = append(errs, fmt.Errorf("failed to create issue: %w", err))
			} else {
				// Return after issue created.
				return nil
			}
		} else {
			// Try to fetch upstream.
			ex := exec.New()
			dir := r.Directory()

			// Add the upstream remote.
			upstreamURL := fmt.Sprintf("%s/%s", s.GitHubURL, pr.Base.Repo.FullName)
			addUpstreamRemote := ex.Command("git", "remote", "add", upstreamRemoteName, upstreamURL)
			addUpstreamRemote.SetDir(dir)
			out, err := addUpstreamRemote.CombinedOutput()
			if err != nil {
				logger.WithError(err).Warnf("Failed to git remote add %s and the output look like: %s.", upstreamURL, out)
				errs = append(errs, fmt.Errorf("failed to git remote add: %w", err))
			}

			// Fetch the upstream remote.
			fetchUpstreamRemote := ex.Command("git", "fetch", upstreamRemoteName)
			fetchUpstreamRemote.SetDir(dir)
			out, err = fetchUpstreamRemote.CombinedOutput()
			if err != nil {
				logger.WithError(err).Warnf("Failed to fetch %s remote and the output look like: %s.", upstreamRemoteName, out)
				errs = append(errs, fmt.Errorf("failed to git fetch upstream: %w", err))
			}

			// Try git cherry-pick.
			cherrypick := ex.Command("git", "cherry-pick", "-m", "1", *pr.MergeSHA)
			cherrypick.SetDir(dir)
			out, err = cherrypick.CombinedOutput()
			if err != nil {
				logger.WithError(err).Warnf("Failed to cherrypick and the output look like: %s.", out)

				// Conflicting hunks are committed as-is so the PR can still be
				// opened and resolved by hand.
				// Try git add *.
				add := ex.Command("git", "add", "*")
				add.SetDir(dir)
				out, err = add.CombinedOutput()
				if err != nil {
					logger.WithError(err).Warnf("Failed to git add conflicting files and the output look like: %s.", out)
					errs = append(errs, fmt.Errorf("failed to git add conflicting files: %w", err))
				}

				// Try commit with sign off.
				commit := ex.Command("git", "commit", "-s", "-m", fmt.Sprintf(cherryPickTipFmt, num))
				commit.SetDir(dir)
				out, err = commit.CombinedOutput()
				if err != nil {
					logger.WithError(err).Warnf("Failed to git commit and the output look like: %s", out)
					errs = append(errs, fmt.Errorf("failed to git commit: %w", err))
				}
			}
		}
		if utilerrors.NewAggregate(errs) != nil {
			resp := fmt.Sprintf("failed to apply #%d on top of branch %q:\n```\n%v\n```",
				num, targetBranch, utilerrors.NewAggregate(errs).Error())
			if err := s.createComment(logger, org, repo, num, comment, resp); err != nil {
				errs = append(errs, fmt.Errorf("failed to create comment: %w", err))
			}
			return utilerrors.NewAggregate(errs)
		}
	}

	push := r.PushToNamedFork
	if s.Push != nil {
		push = s.Push
	}

	// Push the new branch in the bot's fork.
	if err := push(forkName, newBranch, true); err != nil {
		logger.WithError(err).Warn("failed to Push chery-picked changes to GitHub")
		resp := fmt.Sprintf("failed to Push cherry-picked changes in GitHub: %v", err)
		return utilerrors.NewAggregate([]error{err, s.createComment(logger, org, repo, num, comment, resp)})
	}

	// Open a PR in GitHub.
	cherryPickBody := createCherrypickBody(num, body)
	head := fmt.Sprintf("%s:%s", s.BotUser.Login, newBranch)
	createdNum, err := s.GitHubClient.CreatePullRequest(org, repo, title, cherryPickBody, head, targetBranch, true)
	if err != nil {
		logger.WithError(err).Warn("failed to create new pull request")
		resp := fmt.Sprintf("new pull request could not be created: %v", err)
		return utilerrors.NewAggregate([]error{err, s.createComment(logger, org, repo, num, comment, resp)})
	}
	*logger = *logger.WithField("new_pull_request_number", createdNum)
	resp := fmt.Sprintf("new pull request created: #%d.", createdNum)
	logger.Info("new pull request created")
	if err := s.createComment(logger, org, repo, num, comment, resp); err != nil {
		return fmt.Errorf("failed to create comment: %w", err)
	}

	// Copying original pull request labels, except excluded ones and the
	// cherry-pick trigger labels themselves.
	excludeLabelsSet := sets.NewString(opts.ExcludeLabels...)
	labels := sets.NewString()
	for _, label := range pr.Labels {
		if !excludeLabelsSet.Has(label.Name) && !strings.HasPrefix(label.Name, opts.LabelPrefix) {
			labels.Insert(label.Name)
		}
	}

	// Add picked label.
	if len(opts.PickedLabelPrefix) > 0 {
		pickedLabel := opts.PickedLabelPrefix + targetBranch
		labels.Insert(pickedLabel)
	}

	if err := s.GitHubClient.AddLabels(org, repo, createdNum, labels.List()...); err != nil {
		logger.WithError(err).Warnf("Failed to add labels %v", labels.List())
	}

	// Assign pull request to requestor.
	if err := s.GitHubClient.AssignIssue(org, repo, createdNum, []string{requestor}); err != nil {
		logger.WithError(err).Warn("failed to assign to new PR")
		// Ignore returning errors on failure to assign as this is most likely
		// due to users not being members of the org so that they can't be assigned
		// in PRs.
		return nil
	}
	return nil
}
// createComment posts resp on the given PR/issue. When the triggering comment
// is known it is quoted via FormatICResponse; label-initiated cherry-picks
// have no comment (nil), so a generic prefix is used instead.
func (s *Server) createComment(l *logrus.Entry, org, repo string,
	num int, comment *github.IssueComment, resp string) error {
	if err := func() error {
		if comment != nil {
			return s.GitHubClient.CreateComment(org, repo, num, tiexternalplugins.FormatICResponse(*comment, resp))
		}

		return s.GitHubClient.CreateComment(org, repo, num, fmt.Sprintf("In response to a cherrypick label: %s", resp))
	}(); err != nil {
		l.WithError(err).Warn("failed to create comment")
		return err
	}
	logrus.Debug("Created comment")
	return nil
}
// createIssue creates an issue on GitHub.
// Whatever the outcome, the result (new issue number or the creation error)
// is reported back on the original PR as a comment.
func (s *Server) createIssue(l *logrus.Entry, org, repo, title, body string, num int,
	comment *github.IssueComment, labels, assignees []string) error {
	issueNum, err := s.GitHubClient.CreateIssue(org, repo, title, body, 0, labels, assignees)
	if err != nil {
		return s.createComment(l, org, repo, num,
			comment, fmt.Sprintf("new issue could not be created for failed cherrypick: %v", err))
	}

	return s.createComment(l, org, repo, num, comment,
		fmt.Sprintf("new issue created for failed cherrypick: #%d", issueNum))
}
// ensureForkExists ensures a fork of org/repo exists for the bot.
// It returns the name of the fork repository and records it in s.Repos.
func (s *Server) ensureForkExists(org, repo string) (string, error) {
	fork := s.BotUser.Login + "/" + repo

	// fork repo if it doesn't exist.
	// NOTE(review): `repo` is reassigned to the name returned by EnsureFork —
	// presumably the fork's actual name, which may differ from the upstream
	// name; confirm against the EnsureFork contract.
	repo, err := s.GitHubClient.EnsureFork(s.BotUser.Login, org, repo)
	if err != nil {
		return repo, err
	}

	s.repoLock.Lock()
	defer s.repoLock.Unlock()
	s.Repos = append(s.Repos, github.Repo{FullName: fork, Fork: true})

	return repo, nil
}
// getPatch gets the patch for the provided PR and creates a local
// copy of it. It returns its location in the filesystem and any
// encountered error.
func (s *Server) getPatch(org, repo, targetBranch string, num int) (string, error) {
	patch, err := s.GitHubClient.GetPullRequestPatch(org, repo, num)
	if err != nil {
		return "", err
	}
	localPath := fmt.Sprintf("/tmp/%s_%s_%d_%s.patch", org, repo, num, normalize(targetBranch))
	// os.WriteFile creates (or truncates) the file and reports write AND
	// close errors; the previous Create/Copy/deferred-Close pattern silently
	// dropped a failed Close, which can hide a short write.
	if err := os.WriteFile(localPath, patch, 0o666); err != nil {
		return "", err
	}
	return localPath, nil
}
// normalize makes a branch name safe for use inside a file name by turning
// every path separator "/" into "-".
func normalize(input string) string {
	return strings.Join(strings.Split(input, "/"), "-")
}
// createCherrypickBody builds the body text of a cherry-pick PR: the standard
// cherry-pick tip line for the original PR number, followed by the original
// PR's body (when present) separated by a blank line.
func createCherrypickBody(num int, note string) string {
	tip := fmt.Sprintf(cherryPickTipFmt, num)
	if note == "" {
		return tip
	}
	return tip + "\n\n" + note
}
|
package main
import (
"encoding/json"
"text/template"
"github.com/seletskiy/godiff"
"github.com/seletskiy/tplutil"
)
// updatedHeaderTpl renders the "Update at <date>" heading placed before the
// commit lists of a rescoped (force-pushed/updated) review.
var updatedHeaderTpl = template.Must(
	template.New(`updated`).Parse(tplutil.Strip(`
Update at [{{.Date}}]{{"\n"}}
==={{"\n\n"}}
`)))

// rescopedTpl renders one commit entry per changeset, prefixed with "+" or
// "-" (added/removed); the `last` helper suppresses the trailing separator.
var rescopedTpl = template.Must(
	template.New(`rescoped`).Funcs(tplutil.Last).Parse(tplutil.Strip(`
{{$prefix := .Prefix}}
{{range $i, $_ := .Data}}
{{$prefix}} {{.DisplayId}} | {{.Author.DisplayName}} | {{.AuthorTimestamp}}{{"\n"}}
---{{"\n"}}
{{.Message}}{{"\n"}}
---
{{if not (last $i $.Data)}}
{{"\n\n"}}
{{end}}
{{end}}
`)))

// openedTpl renders the note for a newly opened pull request.
var openedTpl = template.Must(
	template.New(`rescoped`).Parse(tplutil.Strip(`
Pull request opened by: {{.User.DisplayName}} <{{.User.EmailAddress}}>
`)))

// approvedTpl renders the note for an approval.
var approvedTpl = template.Must(
	template.New(`approved`).Parse(tplutil.Strip(`
Pull request approved by: {{.User.DisplayName}} <{{.User.EmailAddress}}>
`)))

// mergedTpl renders the note for a merge.
var mergedTpl = template.Must(
	template.New(`merged`).Parse(tplutil.Strip(`
Pull request merged by: {{.User.DisplayName}} <{{.User.EmailAddress}}>
`)))

// declinedTpl renders the note for a decline.
var declinedTpl = template.Must(
	template.New(`declined`).Parse(tplutil.Strip(`
Pull request declined by: {{.User.DisplayName}} <{{.User.EmailAddress}}>
`)))

// reopenedTpl renders the note for a reopen.
var reopenedTpl = template.Must(
	template.New(`reopened`).Parse(tplutil.Strip(`
Pull request reopened by: {{.User.DisplayName}} <{{.User.EmailAddress}}>
`)))

// commentOnFileTpl renders the header for a comment attached to a whole file
// rather than to a diff line.
var commentOnFileTpl = template.Must(
	template.New(`filecomment`).Parse(tplutil.Strip(`
{{.Comment.Author.DisplayName}} commented on file {{.CommentAnchor.Path}}:
`)))
// ReviewActivity aggregates the diffs extracted from a pull request's
// activity feed into a single changeset.
type ReviewActivity struct {
	godiff.Changeset
}

// reviewAction is one entry of the activity feed: it decodes itself from
// JSON and exposes the diff it contributes (which may be nil).
type reviewAction interface {
	json.Unmarshaler

	GetDiff() *godiff.Diff
}

// reviewActionBasic covers actions that only produce a one-line note
// (OPENED/APPROVED/MERGED/DECLINED/REOPENED).
type reviewActionBasic struct {
	diff *godiff.Diff
	// Action is the activity type name (e.g. "MERGED"); it selects the template.
	Action string
}

// reviewActionCommented covers comments on the review, on a file, or on a
// specific diff line.
type reviewActionCommented struct {
	diff *godiff.Diff
}

// reviewActionRescoped covers pushes that change the commit range under review.
type reviewActionRescoped struct {
	diff *godiff.Diff
}

// rescopedChangeset describes one commit added to or removed from a review
// by a rescope event.
type rescopedChangeset struct {
	AuthorTimestamp UnixTimestamp
	Id              string
	DisplayId       string
	Author          struct {
		Id           int
		Name         string
		EmailAddress string
		DisplayName  string
	}
	Message string
}
// UnmarshalJSON decodes an array of activity entries, dispatching each to a
// concrete reviewAction based on its "action" field, and collects every
// non-nil resulting diff into the activity's changeset.
func (activity *ReviewActivity) UnmarshalJSON(data []byte) error {
	values := []json.RawMessage{}
	err := json.Unmarshal(data, &values)
	if err != nil {
		return err
	}

	for _, rawActivity := range values {
		// Peek at the action name first to pick the concrete decoder type.
		head := struct{ Action string }{}
		err := json.Unmarshal(rawActivity, &head)
		if err != nil {
			return err
		}

		var diff *godiff.Diff
		var value reviewAction

		switch head.Action {
		case "COMMENTED":
			value = &reviewActionCommented{}
		case "RESCOPED":
			value = &reviewActionRescoped{}
		default:
			// Everything else (including unknown actions) goes through the
			// basic decoder, which ignores actions it has no template for.
			value = &reviewActionBasic{Action: head.Action}
		}

		err = json.Unmarshal(rawActivity, value)
		if err != nil {
			return err
		}

		diff = value.GetDiff()
		if diff != nil {
			activity.Changeset.Diffs = append(activity.Changeset.Diffs, diff)
		}
	}

	return nil
}
// UnmarshalJSON decodes a COMMENTED activity. Depending on the payload the
// comment is attached to the review as a whole, to a file, or to a specific
// diff line.
func (rc *reviewActionCommented) UnmarshalJSON(data []byte) error {
	value := struct {
		Comment       *godiff.Comment
		CommentAnchor *godiff.CommentAnchor
		Diff          *godiff.Diff
	}{}

	err := json.Unmarshal(data, &value)
	if err != nil {
		return err
	}

	// in case of comment to overall review or file, not to line
	if value.Diff == nil {
		rc.diff = &godiff.Diff{
			FileComments: godiff.CommentsTree{value.Comment},
		}

		// in case of comment to file
		// (an anchor with a path but no line type marks a file-level comment)
		if anchor := value.CommentAnchor; anchor != nil {
			if anchor.Path != "" && anchor.LineType == "" {
				rc.diff.Source.ToString = anchor.SrcPath
				rc.diff.Destination.ToString = anchor.Path
				rc.diff.Note, _ = tplutil.ExecuteToString(commentOnFileTpl,
					value)
			}
		}

		return nil
	}

	value.Diff.Source = value.Diff.Destination

	// in case of line comment or reply
	// Walk every line of the diff and attach the comment to the line the
	// anchor points at (segment type and line number must both match).
	value.Diff.ForEachLine(
		func(
			diff *godiff.Diff,
			_ *godiff.Hunk,
			s *godiff.Segment,
			l *godiff.Line,
		) error {
			if value.CommentAnchor.LineType != s.Type {
				return nil
			}

			if s.GetLineNum(l) != value.CommentAnchor.Line {
				return nil
			}

			l.Comments = append(l.Comments, value.Comment)
			value.Diff.LineComments = append(value.Diff.LineComments,
				value.Comment)

			return nil
		})

	rc.diff = value.Diff

	return nil
}

// GetDiff returns the diff produced while decoding, or nil if none.
func (rc *reviewActionCommented) GetDiff() *godiff.Diff {
	return rc.diff
}
// UnmarshalJSON decodes a RESCOPED activity into a diff whose note lists the
// commits added ("+" prefix) and removed ("-" prefix) by the push, preceded
// by an "Update at <date>" header.
func (rr *reviewActionRescoped) UnmarshalJSON(data []byte) error {
	value := struct {
		CreatedDate      UnixTimestamp
		FromHash         string
		PreviousFromHash string
		PreviousToHash   string
		ToHash           string
		Added            struct {
			Commits []rescopedChangeset
		}
		Removed struct {
			Commits []rescopedChangeset
		}
	}{}

	err := json.Unmarshal(data, &value)
	if err != nil {
		return err
	}

	components := []struct {
		Data   []rescopedChangeset
		Prefix string
	}{
		{value.Added.Commits, "+"},
		{value.Removed.Commits, "-"},
	}

	header, err := tplutil.ExecuteToString(updatedHeaderTpl, struct {
		Date UnixTimestamp
	}{
		value.CreatedDate,
	})
	if err != nil {
		return err
	}

	rr.diff = &godiff.Diff{}
	for _, val := range components {
		if len(val.Data) > 0 {
			// The `err` declared here with := is scoped to this if-block;
			// failures return immediately and never reach the final return.
			result, err := tplutil.ExecuteToString(rescopedTpl, val)
			if err != nil {
				return err
			}

			if rr.diff.Note != "" {
				rr.diff.Note += "\n\n"
			}

			rr.diff.Note += result
		}
	}

	rr.diff.Note = header + rr.diff.Note

	// The outer err is necessarily nil here (it was checked after the header
	// execution above), so this returns nil.
	return err
}

// GetDiff returns the diff produced while decoding.
func (rr *reviewActionRescoped) GetDiff() *godiff.Diff {
	return rr.diff
}
// UnmarshalJSON decodes a simple activity (MERGED/OPENED/APPROVED/DECLINED/
// REOPENED) into a diff holding a one-line note rendered from the matching
// template. Unknown actions are logged and ignored (diff stays nil).
func (rb *reviewActionBasic) UnmarshalJSON(data []byte) error {
	var tpl *template.Template

	switch rb.Action {
	case "MERGED":
		tpl = mergedTpl
	case "OPENED":
		tpl = openedTpl
	case "APPROVED":
		tpl = approvedTpl
	case "DECLINED":
		tpl = declinedTpl
	case "REOPENED":
		tpl = reopenedTpl
	default:
		logger.Warning("unknown activity action: '%s'", rb.Action)
		return nil
	}

	value := struct {
		CreatedDate int64
		User        struct {
			EmailAddress string
			DisplayName  string
		}
	}{}

	err := json.Unmarshal(data, &value)
	if err != nil {
		return err
	}

	// The diff is set even if template execution failed; the execution error
	// (if any) is propagated to the caller.
	result, err := tplutil.ExecuteToString(tpl, value)
	rb.diff = &godiff.Diff{
		Note: result,
	}

	return err
}

// GetDiff returns the diff produced while decoding, or nil for unknown actions.
func (rb *reviewActionBasic) GetDiff() *godiff.Diff {
	return rb.diff
}
|
// Copyright 2023 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package expression
import (
"regexp"
"github.com/pingcap/tidb/util/chunk"
)
// memorized regexp means the constant pattern.
// Sometimes user may input a constant pattern, and it's unnecessary to compile
// the regexp.Regexp each time.
type regexpMemorizedSig struct {
memorizedRegexp *regexp.Regexp
memorizedErr error
}
func (reg *regexpMemorizedSig) isMemorizedRegexpInitialized() bool {
return !(reg.memorizedRegexp == nil && reg.memorizedErr == nil)
}
func (reg *regexpMemorizedSig) memorize(compile func(string) (*regexp.Regexp, error), pattern string) {
re, err := compile(pattern)
reg.memorizedRegexp = re
reg.memorizedErr = err
}
// releaseBuffers returns every column buffer held by the given params back
// to the builtin function's buffer allocator; params without a column are
// skipped.
func releaseBuffers(bf *baseBuiltinFunc, params []*funcParam) {
	for idx := range params {
		if params[idx].getCol() == nil {
			continue
		}
		bf.bufAllocator.put(params[idx].getCol())
	}
}
// getBuffers collects the non-nil column buffers of the given params.
// The slice capacity is sized to len(params) (the true upper bound) instead
// of the previous arbitrary constant 6, avoiding reallocation for more than
// six params and over-allocation for fewer.
func getBuffers(params []*funcParam) []*chunk.Column {
	buffers := make([]*chunk.Column, 0, len(params))
	for _, pa := range params {
		if col := pa.getCol(); col != nil {
			buffers = append(buffers, col)
		}
	}
	return buffers
}
// isResultNull reports whether row i is NULL in any of the given columns.
func isResultNull(columns []*chunk.Column, i int) bool {
	for idx := range columns {
		if columns[idx].IsNull(i) {
			return true
		}
	}
	return false
}
// fillNullStringIntoResult reserves room for num string rows in result and
// fills all of them with NULL.
func fillNullStringIntoResult(result *chunk.Column, num int) {
	result.ReserveString(num)
	for appended := 0; appended < num; appended++ {
		result.AppendNull()
	}
}
// checkOutRangePos reports whether an already-out-of-range position argument
// is invalid. The single tolerated combination is the empty string with
// pos == 1; every other (strLen, pos) pair yields true.
func checkOutRangePos(strLen int, pos int64) bool {
	return !(strLen == 0 && pos == 1)
}
|
package leetcode
// climbStairs returns the number of distinct ways to climb n stairs taking
// one or two steps at a time (the Fibonacci-style recurrence with f(1)=1,
// f(2)=2; n=0 yields 1).
func climbStairs(n int) int {
	prev, curr := 0, 1
	for step := 0; step < n; step++ {
		prev, curr = curr, prev+curr
	}
	return curr
}
// climbStairs2 computes the stair-climbing count via memoized recursion,
// seeding the memo with the base cases 0, 1 and 2.
func climbStairs2(n int) int {
	memo := map[int]int{0: 0, 1: 1, 2: 2}
	return _climbStairs(n, memo)
}

// _climbStairs returns the memoized value for n, computing and caching it
// from the two preceding values when absent. Callers must pass n >= 0.
func _climbStairs(n int, h map[int]int) int {
	if cached, ok := h[n]; ok {
		return cached
	}
	result := _climbStairs(n-1, h) + _climbStairs(n-2, h)
	h[n] = result
	return result
}
// climbStairs3 returns the number of distinct ways to climb n stairs taking
// one or two steps at a time, computed iteratively. It replaces the original
// hard-coded 45-entry switch (whose per-case `break` statements were also
// redundant in Go). Behavior is preserved exactly: n outside [1, 45] still
// yields 0 — 45 was the largest entry in the original table, and its value
// (1836311903) is the largest in the sequence that fits a 32-bit int.
func climbStairs3(n int) int {
	if n < 1 || n > 45 {
		return 0
	}
	prev, curr := 0, 1
	for i := 0; i < n; i++ {
		prev, curr = curr, prev+curr
	}
	return curr
}
|
package main
import (
"fmt"
"hack-assember/code"
"hack-assember/parser"
"hack-assember/symbol"
"io"
"os"
"strconv"
"strings"
)
// main assembles the Hack .asm file named by the first CLI argument into
// 16-bit binary machine words printed to stdout. Two passes over the file:
// the first records the ROM address of every (LABEL) pseudo-command in the
// symbol table, the second emits one instruction per A- or C-command.
func main() {
	f, err := os.Open(os.Args[1])
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()
	p := parser.New(f)
	var line int
	var lineRom int64
	// First pass: A/C commands advance the ROM address; a label maps to the
	// address of the instruction that follows it.
	for p.HasMoreCommand() {
		err = p.Advance(&line)
		if err != nil {
			fmt.Println(err)
		}
		if p.CommandType() == parser.CCommand || p.CommandType() == parser.ACommand {
			lineRom++
		}
		if p.CommandType() == parser.LCommand {
			if _, b := symbol.Table[p.Symbol()]; !b {
				symbol.Table[p.Symbol()] = lineRom
			}
		}
	}

	// Rewind the file and re-parse for the second (code-emitting) pass.
	_, err = f.Seek(0, io.SeekStart)
	if err != nil {
		fmt.Println(err)
		return
	}
	p = parser.New(f)
	line = 0
	// Variable symbols are allocated RAM addresses from 16 upward, in order
	// of first use.
	var newAddress int64 = 16
	for p.HasMoreCommand() {
		err = p.Advance(&line)
		if err != nil {
			fmt.Println(err)
			return
		}
		if p.CommandType() == parser.LCommand {
			continue
		}
		if p.CommandType() == parser.ACommand {
			v := p.Symbol()
			var vInt int64
			vInt, err = strconv.ParseInt(v, 10, 64)
			if err != nil {
				// Not numeric: the symbol is a label or a variable.
				var b bool
				vInt, b = symbol.Table[p.Symbol()]
				if !b {
					vInt = newAddress
					symbol.Table[p.Symbol()] = newAddress
					newAddress++
				}
			}
			// A-instruction: the value rendered as a zero-padded 16-bit word.
			command := strconv.FormatInt(vInt, 2)
			leading := 16 - len(command)
			command = strings.Repeat("0", leading) + command
			fmt.Println(command)
			continue
		}
		if p.CommandType() == parser.CCommand {
			d := p.Dest()
			c := p.Comp()
			j := p.Jump()
			// C-instruction: fixed "111" prefix, then comp, dest and jump bits.
			command := "111" + code.Comp[c] + code.Dest[d] + code.Jump[j]
			fmt.Println(command)
		}
	}
}
|
package main
import (
"fmt"
"strconv"
"math"
)
// reverse returns num with its decimal digits reversed. The conversion error
// after reversal is discarded, so inputs whose reversal fails to parse (e.g.
// negative numbers, where the '-' ends up trailing) yield 0.
func reverse(num int) int {
	digits := []byte(strconv.Itoa(num))
	for left, right := 0, len(digits)-1; left < right; left, right = left+1, right-1 {
		digits[left], digits[right] = digits[right], digits[left]
	}
	reversed, _ := strconv.Atoi(string(digits))
	return reversed
}
// main reads a range [start, end] and a divisor from stdin, then prints how
// many numbers in the range differ from their own digit-reversal by a
// multiple of the divisor.
func main() {
	var start, end, div int
	fmt.Scanf("%d", &start)
	fmt.Scanf("%d", &end)
	fmt.Scanf("%d", &div)

	beauties := 0
	for n := start; n <= end; n++ {
		if int(math.Abs(float64(n-reverse(n))))%div == 0 {
			beauties++
		}
	}
	fmt.Println(beauties)
}
|
package app
import (
"github.com/mjibson/goon"
"appengine"
"appengine/datastore"
)
// Child is a datastore entity stored under a parent key (set in Save).
type Child struct {
	// ID is the goon string id; excluded from datastore properties and JSON.
	ID string `goon:"id" datastore:"-" json:"-"`
	// Parent is the ancestor key, managed by goon rather than stored.
	Parent *datastore.Key `goon:"parent" datastore:"-"`
	Text   string
}
// Save stores src in the datastore as a child of p: it derives p's key via
// goon, sets it as the parent, and puts the entity.
func (src *Child) Save(c appengine.Context, p *Person) error {
	g := goon.FromContext(c)
	src.Parent = g.Key(p)
	_, err := g.Put(src)
	return err
}
|
package drivers
import "testing"
// TestSQLColDefinitions checks that SQLColDefinitions selects exactly the
// named columns, in order, with their types.
func TestSQLColDefinitions(t *testing.T) {
	t.Parallel()

	cols := []Column{
		{Name: "one", Type: "int64"},
		{Name: "two", Type: "string"},
		{Name: "three", Type: "string"},
	}

	checks := []struct {
		names []string
		want  []string
	}{
		{[]string{"one"}, []string{"one int64"}},
		{[]string{"one", "three"}, []string{"one int64", "three string"}},
	}

	for _, check := range checks {
		defs := SQLColDefinitions(cols, check.names)
		if len(defs) != len(check.want) {
			t.Error("wrong number of defs:", len(defs))
		}
		for i, want := range check.want {
			if got := defs[i].String(); got != want {
				t.Error("wrong def:", got)
			}
		}
	}
}
// TestTypes checks that Types returns the Type of each definition in order.
func TestTypes(t *testing.T) {
	t.Parallel()

	defs := SQLColumnDefs{
		{Type: "thing1"},
		{Type: "thing2"},
	}

	ret := defs.Types()
	for i, want := range []string{"thing1", "thing2"} {
		if ret[i] != want {
			t.Error("wrong type:", ret[i])
		}
	}
}
// TestNames checks that Names returns the Name of each definition in order.
func TestNames(t *testing.T) {
	t.Parallel()

	defs := SQLColumnDefs{
		{Name: "thing1"},
		{Name: "thing2"},
	}

	ret := defs.Names()
	for i, want := range []string{"thing1", "thing2"} {
		if ret[i] != want {
			t.Error("wrong type:", ret[i])
		}
	}
}
|
package leetcode
/*You are given coins of different denominations and a total amount of money amount. Write a function to compute the fewest number of coins that you need to make up that amount. If that amount of money cannot be made up by any combination of the coins, return -1.
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/coin-change
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。*/
// coinChange returns the fewest coins needed to total amount, or -1 when no
// combination of the given denominations adds up to it. Classic bottom-up
// DP where dp[i] is the minimum coin count for amount i.
// Changes from the original: the redundant slice capacity argument and the
// else-after-return are gone, dp[0] is no longer overwritten by the
// initialization loop, and an early `coin > i` guard replaces the negative
// index check.
func coinChange(coins []int, amount int) int {
	// amount+1 acts as "infinity": any real combination uses at most
	// `amount` coins (all ones), so amount+1 is unreachable.
	dp := make([]int, amount+1)
	for i := 1; i <= amount; i++ {
		dp[i] = amount + 1
	}
	// base case: zero coins make amount 0 (dp[0] is already 0).
	for i := 1; i <= amount; i++ {
		for _, coin := range coins {
			if coin > i {
				continue
			}
			if candidate := dp[i-coin] + 1; candidate < dp[i] {
				dp[i] = candidate
			}
		}
	}
	if dp[amount] > amount {
		return -1
	}
	return dp[amount]
}
|
package tude
import (
"math"
)
const (
	// R is the Earth radius in metres used by Distance.
	R = 6371000
)

// Shape is any region that can answer point-membership queries.
type Shape interface {
	Contains(point *Point) bool
}

// Point is a geographic coordinate; lng and lat are in degrees (they are
// passed through Radians before any trigonometry).
type Point struct {
	lng, lat float64
}
func Radians(x float64) float64 {
return x * math.Pi / 180
}
// Distance returns the approximate distance in meters between p1 and p2
// using an equirectangular projection scaled by the mean latitude
// (suitable for short distances).
func Distance(p1, p2 *Point) float64 {
	meanLat := Radians(p1.lat+p2.lat) / 2
	dx := R * math.Cos(meanLat) * Radians(p1.lng-p2.lng)
	dy := R * Radians(p1.lat-p2.lat)
	return math.Sqrt(dx*dx + dy*dy)
}
// Angle returns an angle in degrees in [0, 360) from p1 to p2.
// NOTE(review): numerator/denominator match the standard initial-bearing
// formula, so this appears to be the bearing clockwise from north —
// confirm against callers.
func Angle(p1, p2 *Point) float64 {
	numerator := math.Sin(Radians(p2.lng-p1.lng)) * math.Cos(Radians(p2.lat))
	denominator := math.Cos(Radians(p1.lat))*math.Sin(Radians(p2.lat)) - math.Sin(Radians(p1.lat))*math.Cos(Radians(p2.lat))*math.Cos(Radians(p2.lng-p1.lng))
	// atan2 of absolute values yields the first-quadrant reference angle;
	// the branches below place it in the correct quadrant.
	angle := math.Atan2(math.Abs(numerator), math.Abs(denominator))
	if p2.lng > p1.lng {
		// destination is east of origin
		if p2.lat < p1.lat {
			angle = math.Pi - angle
		} else if p2.lat == p1.lat {
			// due east
			angle = math.Pi / 2
		}
	} else if p2.lng < p1.lng {
		// destination is west of origin
		if p2.lat > p1.lat {
			angle = 2*math.Pi - angle
		} else if p2.lat < p1.lat {
			angle = math.Pi + angle
		} else {
			// due west
			angle = math.Pi * 3 / 2
		}
	} else {
		// same longitude: due north (or zero) vs. due south
		if p2.lat >= p1.lat {
			angle = 0
		} else {
			angle = math.Pi
		}
	}
	// convert back to degrees
	return angle * 180 / math.Pi
}
|
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2020 Intel Corporation
package daemon
import (
"io/ioutil"
"os"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
	// nvmupdateOutput is a complete, well-formed nvmupdate inventory XML
	// sample: three modules (PXE, EFI, NVM with a Success status) and
	// NextUpdateAvailable=1. Used as the happy-path fixture.
	nvmupdateOutput = `<?xml version="1.0" encoding="UTF-8"?>
<DeviceUpdate lang="en">
<Instance vendor="8086" device="1572" subdevice="0" subvendor="8086" bus="7" dev="0" func="1" PBA="H58362-002" port_id="Port 2 of 2" display="Intel(R) Ethernet Converged Network Adapter X710">
<Module type="PXE" version="1.0.2" display="">
</Module>
<Module type="EFI" version="1.0.5" display="">
</Module>
<Module type="NVM" version="8000191B" previous_version="8000143F" display="">
<Status result="Success" id="0">All operations completed successfully.</Status>
</Module>
<VPD>
<VPDField type="String">XL710 40GbE Controller</VPDField>
<VPDField type="Readable" key="PN"></VPDField>
<VPDField type="Readable" key="EC"></VPDField>
<VPDField type="Readable" key="FG"></VPDField>
<VPDField type="Readable" key="LC"></VPDField>
<VPDField type="Readable" key="MN"></VPDField>
<VPDField type="Readable" key="PG"></VPDField>
<VPDField type="Readable" key="SN"></VPDField>
<VPDField type="Readable" key="V0"></VPDField>
<VPDField type="Checksum" key="RV">86</VPDField>
<VPDField type="Writable" key="V1"></VPDField>
</VPD>
<MACAddresses>
<MAC address="6805CA3AA725">
</MAC>
<SAN address="6805CA3AA727">
</SAN>
</MACAddresses>
</Instance>
<NextUpdateAvailable> 1 </NextUpdateAvailable>
<RebootRequired> 0 </RebootRequired>
<PowerCycleRequired> 1 </PowerCycleRequired>
</DeviceUpdate>`
	// nvmupdateOutputMissingModuleClose is a truncated fixture whose
	// <Module> element is never closed; parsing it must fail.
	nvmupdateOutputMissingModuleClose = `<?xml version="1.0" encoding="UTF-8"?>
<DeviceUpdate lang="en">
<Instance vendor="8086" device="1572" subdevice="0" subvendor="8086" bus="7" dev="0" func="1" PBA="H58362-002" port_id="Port 2 of 2" display="Intel(R) Ethernet Converged Network Adapter X710">
<Module type="" version="1.0.2" display="">
`
	// nvmupdateOutputMissingNextUpdateAvailableClose is a fixture that is
	// valid up to an unterminated <NextUpdateAvailable>; parsing it must
	// fail.
	nvmupdateOutputMissingNextUpdateAvailableClose = `<?xml version="1.0" encoding="UTF-8"?>
<DeviceUpdate lang="en">
<Instance vendor="8086" device="1572" subdevice="0" subvendor="8086" bus="7" dev="0" func="1" PBA="H58362-002" port_id="Port 2 of 2" display="Intel(R) Ethernet Converged Network Adapter X710">
<Module type="PXE" version="1.0.2" display="">
</Module>
<Module type="EFI" version="1.0.5" display="">
</Module>
<Module type="NVM" version="8000191B" previous_version="8000143F" display="">
<Status result="Success" id="0">All operations completed successfully.</Status>
</Module>
<VPD>
<VPDField type="String">XL710 40GbE Controller</VPDField>
<VPDField type="Readable" key="PN"></VPDField>
<VPDField type="Readable" key="EC"></VPDField>
<VPDField type="Readable" key="FG"></VPDField>
<VPDField type="Readable" key="LC"></VPDField>
<VPDField type="Readable" key="MN"></VPDField>
<VPDField type="Readable" key="PG"></VPDField>
<VPDField type="Readable" key="SN"></VPDField>
<VPDField type="Readable" key="V0"></VPDField>
<VPDField type="Checksum" key="RV">86</VPDField>
<VPDField type="Writable" key="V1"></VPDField>
</VPD>
<MACAddresses>
<MAC address="6805CA3AA725">
</MAC>
<SAN address="6805CA3AA727">
</SAN>
</MACAddresses>
</Instance>
<NextUpdateAvailable>`
)
// Happy path: a well-formed nvmupdate XML file parses into a DeviceUpdate
// with three modules (PXE/EFI/NVM) and NextUpdateAvailable == 1.
var _ = Describe("getDeviceUpdateFromFile", func() {
	var _ = It("will return valid DeviceUpdate ", func() {
		// Write the fixture to a temp file in the working directory.
		tmpfile, err := ioutil.TempFile(".", "update")
		Expect(err).ToNot(HaveOccurred())
		defer os.Remove(tmpfile.Name())
		_, err = tmpfile.Write([]byte(nvmupdateOutput))
		Expect(err).ToNot(HaveOccurred())
		result, err := getDeviceUpdateFromFile(tmpfile.Name())
		Expect(err).ToNot(HaveOccurred())
		// Modules must come back in document order with their statuses.
		Expect(len(result.Modules)).To(Equal(3))
		Expect(result.Modules[0].Type).To(Equal("PXE"))
		Expect(result.Modules[0].Version).To(Equal("1.0.2"))
		Expect(result.Modules[0].Status).To(Equal(moduleStatus{}))
		Expect(result.Modules[1].Type).To(Equal("EFI"))
		Expect(result.Modules[1].Version).To(Equal("1.0.5"))
		Expect(result.Modules[1].Status).To(Equal(moduleStatus{}))
		Expect(result.Modules[2].Type).To(Equal("NVM"))
		Expect(result.Modules[2].Version).To(Equal("8000191B"))
		Expect(result.Modules[2].Status).To(Equal(moduleStatus{Result: "Success"}))
		Expect(result.NextUpdateAvailable).To(Equal(1))
	})
})
// A file larger than the parser's size limit must be rejected.
// NOTE(review): the 50000-byte blob is presumably just past the limit
// enforced by getDeviceUpdateFromFile — confirm against its implementation.
var _ = Describe("getDeviceUpdateFromFile", func() {
	var _ = It("will return error if too large file is provided", func() {
		tmpfile, err := ioutil.TempFile(".", "update")
		Expect(err).ToNot(HaveOccurred())
		defer os.Remove(tmpfile.Name())
		blob := make([]byte, 50000)
		_, err = tmpfile.Write([]byte(blob))
		Expect(err).ToNot(HaveOccurred())
		_, err = getDeviceUpdateFromFile(tmpfile.Name())
		Expect(err).To(HaveOccurred())
	})
})
// Parsing must abort with an error when it exceeds updateXMLParseTimeout.
// NOTE(review): this spec mutates the package-level updateXMLParseTimeout
// and restores it afterwards; it is not safe if specs run in parallel —
// confirm the suite runs serially.
var _ = Describe("getDeviceUpdateFromFile", func() {
	var _ = It("will return error if parsing xml exceeds timeout value", func() {
		tmpfile, err := ioutil.TempFile(".", "update")
		Expect(err).ToNot(HaveOccurred())
		defer os.Remove(tmpfile.Name())
		_, err = tmpfile.Write([]byte(nvmupdateOutput))
		Expect(err).ToNot(HaveOccurred())
		// 1ns guarantees the deadline fires before parsing can finish.
		updateXMLParseTimeout = 1 * time.Nanosecond
		_, err = getDeviceUpdateFromFile(tmpfile.Name())
		updateXMLParseTimeout = 100 * time.Millisecond
		Expect(err).To(HaveOccurred())
	})
})
// An unopenable path (a "file" under /dev/null) must produce an error.
var _ = Describe("getDeviceUpdateFromFile", func() {
	var _ = It("will return error if unable to open file", func() {
		_, err := getDeviceUpdateFromFile("/dev/null/fake")
		Expect(err).To(HaveOccurred())
	})
})
// Malformed XML (unterminated <Module>) must produce a parse error.
var _ = Describe("getDeviceUpdateFromFile", func() {
	var _ = It("will return error if missing Module closing tag", func() {
		tmpfile, err := ioutil.TempFile(".", "update")
		Expect(err).ToNot(HaveOccurred())
		defer os.Remove(tmpfile.Name())
		_, err = tmpfile.Write([]byte(nvmupdateOutputMissingModuleClose))
		Expect(err).ToNot(HaveOccurred())
		_, err = getDeviceUpdateFromFile(tmpfile.Name())
		Expect(err).To(HaveOccurred())
	})
})
// Malformed XML (unterminated <NextUpdateAvailable>) must produce a parse
// error even though all modules parsed cleanly.
var _ = Describe("getDeviceUpdateFromFile", func() {
	var _ = It("will return error if missing NextUpdateAvailable closing tag", func() {
		tmpfile, err := ioutil.TempFile(".", "update")
		Expect(err).ToNot(HaveOccurred())
		defer os.Remove(tmpfile.Name())
		_, err = tmpfile.Write([]byte(nvmupdateOutputMissingNextUpdateAvailableClose))
		Expect(err).ToNot(HaveOccurred())
		_, err = getDeviceUpdateFromFile(tmpfile.Name())
		Expect(err).To(HaveOccurred())
	})
})
|
package types
// IndicatorsAndStatistics holds indicators and statistics data for Telco companies in Greece.
// The `fake` struct tags drive random fixture generation (gofakeit-style
// templates); the Greek indicator values mean: Σταθερές = landlines,
// Κινητές = mobile, ΕΕ = EU, Ελλάδα = Greece.
type IndicatorsAndStatistics struct {
	Category string `json:"category" fake:"{randomstring:[General,Landlines,Mobile]}"`
	Indicator string `json:"indicator" fake:"{randomstring:[Σταθερές,Κινητές,ΕΕ,Ελλάδα]}"`
	Year int `json:"year" fake:"{year}"`
	Value float64 `json:"value" fake:"{float64}"`
}
|
package ginja
import (
"encoding/json"
"log"
"net/http"
"reflect"
"strings"
"sync"
"github.com/gin-gonic/gin"
)
// GinApi couples a ginja Api with the gin route group it is mounted on,
// so handlers can be registered directly on the value.
type GinApi struct {
	*gin.RouterGroup
	Api
}
// New returns a new ginja.Api struct mounted on the given gin engine at
// the URL built from config, with the supplied middleware installed.
func New(server *gin.Engine, config Config, middleware ...gin.HandlerFunc) *GinApi {
	config.ApplyDefaults()
	g := &GinApi{
		RouterGroup: server.Group(config.buildUrl()),
		Api:         Api{Config: config},
	}
	g.init()
	g.Use(middleware...)
	// TODO extract!
	if g.MountStats {
		statsHandler := func(c *gin.Context) {
			c.JSON(http.StatusOK, gin.H{"config": g.Config})
		}
		g.GET(g.StatsURL, statsHandler)
	}
	return g
}
// contentTypeSetter returns middleware that stamps the JSON:API media type
// on every response (plain application/json while debugging).
func contentTypeSetter(isDebug bool) gin.HandlerFunc {
	contentType := "application/vnd.api+json"
	if isDebug {
		contentType = "application/json"
	}
	return func(c *gin.Context) {
		c.Header("Content-type", contentType)
		c.Next()
	}
}
// init configures gin's global mode from the Debug flag, installs the
// content-type middleware and, in debug mode, logs the effective config.
// It returns the receiver for chaining.
func (a *GinApi) init() *GinApi {
	// Idiom fix: test the flag directly instead of `== false`.
	if a.Debug {
		gin.SetMode(gin.DebugMode)
	} else {
		gin.SetMode(gin.ReleaseMode)
	}
	a.Use(contentTypeSetter(a.Debug))
	if a.Debug {
		// BUG FIX: the marshal error was previously discarded, which could
		// silently log an empty config.
		config, err := json.MarshalIndent(a.Config, " ", " ")
		if err != nil {
			log.Println("ginja: could not marshal API config:", err)
			return a
		}
		log.Println("ginja API Config:")
		log.Println(" " + string(config))
	}
	return a
}
// Resource is anything that can be rendered as a JSON:API resource:
// it exposes an id, an attributes payload, and a (package-internal)
// type string.
type Resource interface {
	GetId() string
	Attributes() interface{}
	// setType(interface{})
	getType() string
}
// ResourceObject adapts an arbitrary value to the Resource interface,
// deriving the JSON:API "type" string from the wrapped value's Go type.
type ResourceObject struct {
	Id     string
	Object interface{}
}

// GetId returns the resource identifier.
func (r ResourceObject) GetId() string {
	return r.Id
}

// getType returns the lower-cased Go type name of Object, dereferencing
// one level of pointer. (Calling reflect.TypeOf once instead of two or
// three times; behavior is unchanged.)
func (r ResourceObject) getType() string {
	t := reflect.TypeOf(r.Object)
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}
	return strings.ToLower(t.Name())
}

// Attributes returns a pointer to the wrapped object (a copy, since the
// receiver is a value) for serialization as the resource's attributes.
func (r ResourceObject) Attributes() interface{} {
	return &r.Object
}
// Fragment is a single JSON:API resource object: type, id, and the
// attributes payload (omitted when nil).
type Fragment struct {
	Type string `json:"type"`
	Id string `json:"id"`
	Attributes interface{} `json:"attributes,omitempty"`
}
// Document is the top-level JSON:API document being built: either primary
// Data or a list of Errors, plus optional Meta. isError forces the error
// form even while Errors is still empty (see NewErrorDocument).
type Document struct {
	Data interface{}
	Meta map[string]interface{}
	Errors []Error
	isError bool
}
// metaObject carries the optional "meta" member shared by both wire forms.
type metaObject struct {
	Meta map[string]interface{} `json:"meta,omitempty"`
}

// document is the wire form of a data document ("data" + optional "meta").
type document struct {
	Data interface{} `json:"data"`
	metaObject
}

// errorDocument is the wire form of an error document ("errors" + meta).
type errorDocument struct {
	Errors []Error `json:"errors"`
	metaObject
}

// ErrorDocument is a named alias of Document for error responses.
type ErrorDocument Document

// collection is a generic slice used for collection documents.
type collection []interface{}

// func (ic collection) String() string {
// 	return "a slice of interfaces"
// }
// NewDocument returns an empty single-resource document.
func NewDocument() Document {
	return Document{}
}

// NewCollectionDocument returns a document whose data is an (initially
// empty) collection, so it serializes as "data": [].
func NewCollectionDocument() Document {
	return Document{Data: []Fragment{}}
}

// NewErrorDocument returns a document pre-marked as an error response.
func NewErrorDocument() Document {
	return Document{Errors: []Error{}, isError: true}
}
// AddMeta replaces the document's meta map and returns the document for
// chaining.
func (d *Document) AddMeta(meta map[string]interface{}) *Document {
	d.Meta = meta
	return d
}
// AddData sets the document's primary data to a single resource fragment
// built from the given Resource. It now returns the document so calls can
// be chained, for consistency with AddMeta and AddError (callers that
// ignore the return value are unaffected).
func (d *Document) AddData(data Resource) *Document {
	d.Data = Fragment{
		Type:       data.getType(),
		Id:         data.GetId(),
		Attributes: data.Attributes(),
	}
	return d
}
// AddError appends err (wrapped as a JSON:API Error) to the document and
// returns the document for chaining.
func (d *Document) AddError(err error) *Document {
	d.Errors = append(d.Errors, NewError(err))
	return d
}
// documentPool and errorDocumentPool recycle the scratch wire structs used
// by MarshalJSON to avoid a per-marshal allocation.
var documentPool = sync.Pool{
	New: func() interface{} {
		return &document{}
	},
}
var errorDocumentPool = sync.Pool{
	New: func() interface{} {
		return &errorDocument{}
	},
}
// MarshalJSON renders the document as either a JSON:API error document
// (when errors are present or the document was created via
// NewErrorDocument) or a data document. Scratch payload structs are pooled
// to avoid a per-call allocation; every field is (re)assigned before use,
// so a recycled struct never leaks stale data into the output.
func (d Document) MarshalJSON() ([]byte, error) {
	if len(d.Errors) > 0 || d.isError {
		payload := errorDocumentPool.Get().(*errorDocument)
		defer errorDocumentPool.Put(payload)
		payload.Errors = d.Errors
		payload.Meta = nil
		if len(d.Meta) > 0 {
			payload.Meta = d.Meta
		}
		// Idiom fix: pass the pointer itself instead of a **errorDocument
		// (the extra indirection was harmless but pointless).
		return json.Marshal(payload)
	}
	payload := documentPool.Get().(*document)
	defer documentPool.Put(payload)
	payload.Data = d.Data
	payload.Meta = nil
	if len(d.Meta) > 0 {
		payload.Meta = d.Meta
	}
	return json.Marshal(payload)
}
// func (d Document) UnmarshalJSON(data []byte) error {
// log.Println("testing unmarshalling")
// return nil
// }
|
package nmsapi
import (
"github.com/Centny/gwf/log"
"github.com/Centny/gwf/routing"
"github.com/Centny/gwf/util"
"github.com/Centny/nms/nmsdb"
"html/template"
"path/filepath"
"strings"
)
// WWW is the directory containing the HTML templates (index.html,
// task.html) served by this package.
var WWW = ""

// Alias maps a configured value to its display alias; it is populated by
// LoadAlias from alias/uri_* and alias/alias_* config pairs.
var Alias = util.Map{}
// LoadAlias populates the package-level Alias map from config entries:
// for every key of the form alias/uri_<id>, the value of that key is
// mapped to the value of the matching alias/alias_<id> entry.
func LoadAlias(fcfg *util.Fcfg) {
	// Idiom fix: `for key := range` instead of `for key, _ := range`.
	for key := range fcfg.Map {
		if !strings.HasPrefix(key, "alias/uri_") {
			continue
		}
		var id = strings.TrimPrefix(key, "alias/uri_")
		Alias[fcfg.Val2(key, "")] = fcfg.Val2("alias/alias_"+id, "")
	}
	log.D("load %v alias success, alias->\n%v", len(Alias), util.S2Json(Alias))
}
// IndexHtml renders the index page: the node list plus, when nodes exist,
// per-node action averages and sub-counts since `beg`, joined into one map.
// On any failure the error text is written to the response instead.
func IndexHtml(hs *routing.HTTPSession) routing.HResult {
	var beg int64
	// beg is an optional integer argument defaulting to 0.
	var err = hs.ValidCheckVal(`
	beg,O|I,R:0;
	`, &beg)
	if err != nil {
		return hs.Printf("%v", err)
	}
	var data = util.Map{}
	nodes, err := nmsdb.ListNode_m()
	if err != nil {
		log.E("IndexHtml list node fail with error(%v)", err)
		return hs.Printf("%v", err)
	}
	// Statistics are only meaningful when at least one node is registered.
	if len(nodes) > 0 {
		avg, err := nmsdb.CountActionAvg(beg)
		if err != nil {
			log.E("IndexHtml count action avg fail with error(%v)", err)
			return hs.Printf("%v", err)
		}
		sub, err := nmsdb.CountActionSub(beg)
		if err != nil {
			log.E("IndexHtml count action sub fail with error(%v)", err)
			return hs.Printf("%v", err)
		}
		data = nmsdb.JoinAvgSub(avg, sub)
	}
	// The template is re-parsed per request from WWW/index.html with the
	// shared helper FuncMap.
	tmpl, err := template.New("index.html").Funcs(index_fm).ParseFiles(filepath.Join(WWW, "index.html"))
	if err != nil {
		log.E("IndexHtml parse template file(%v) fail with error(%v)", "index.html", err)
		return hs.Printf("%v", err)
	}
	err = tmpl.Execute(hs.W, util.Map{
		"alias": Alias,
		"nodes": nodes,
		"beg":   beg,
		"data":  data,
	})
	if err == nil {
		return routing.HRES_RETURN
	} else {
		return hs.Printf("%v", err)
	}
}
// index_fm is the helper FuncMap shared by the index and task templates.
var index_fm = template.FuncMap{
	// strval looks up p in v, substituting "-" for empty values.
	"strval": func(v util.Map, p string) string {
		var val = v.StrVal(p)
		if len(val) < 1 {
			val = "-"
		}
		return val
	},
	// strvalp joins the path segments with "/" before the lookup,
	// substituting "-" for empty values.
	"strvalp": func(v util.Map, p ...string) string {
		var val = v.StrValP(strings.Join(p, "/"))
		if len(val) < 1 {
			val = "-"
		}
		return val
	},
	// stime formats a timestamp as "2006-01-02 15:04:05".
	"stime": func(t int64) string {
		return util.Time(t).Format("2006-01-02 15:04:05")
	},
	// node_alias resolves a node id to its alias, or "-" when unknown.
	"node_alias": func(nodes map[string]*nmsdb.Node, nid string) string {
		if node, ok := nodes[nid]; ok {
			return node.Alias
		} else {
			return "-"
		}
	},
	// sjson serializes a value to JSON for embedding in the page.
	"sjson": util.S2Json,
}
// TaskHtml renders the task page: actions filtered by the query arguments
// (node id, uri, sub, code, used, beg) with pn/ps paging, alongside the
// node list. On any failure the error text is written to the response.
func TaskHtml(hs *routing.HTTPSession) routing.HResult {
	var act = &nmsdb.Action{}
	var pn, ps = 0, 50
	// uri is required; everything else is optional with the defaults
	// encoded after R: (note beg is bound into act.Time).
	var err = hs.ValidCheckVal(`
	nid,O|S,L:0;
	uri,R|S,L:0;
	sub,O|S,L:0;
	code,O|I,R:-999;
	used,O|I,R:0;
	beg,O|I,R:0;
	pn,O|I,R:-1;
	ps,O|I,R:0;
	`, &act.Nid, &act.Uri, &act.Sub, &act.Code, &act.Used, &act.Time, &pn, &ps)
	if err != nil {
		return hs.Printf("%v", err)
	}
	nodes, err := nmsdb.ListNode_m()
	if err != nil {
		log.E("TaskHtml list node fail with error(%v)", err)
		return hs.Printf("%v", err)
	}
	actions, err := nmsdb.ListAction(act, pn, ps)
	if err != nil {
		log.E("TaskHtml list action by action(%v) fail with error(%v)", util.S2Json(act), err)
		return hs.Printf("%v", err)
	}
	tmpl, err := template.New("task.html").Funcs(index_fm).ParseFiles(filepath.Join(WWW, "task.html"))
	if err != nil {
		log.E("TaskHtml parse template file(%v) fail with error(%v)", "task.html", err)
		return hs.Printf("%v", err)
	}
	err = tmpl.Execute(hs.W, util.Map{
		"nodes":   nodes,
		"actions": actions,
		"beg":     act.Time,
	})
	if err == nil {
		return routing.HRES_RETURN
	} else {
		return hs.Printf("%v", err)
	}
}
|
package translator
//go:generate mockgen -source=$GOFILE -destination=mock/mock_$GOFILE -package=mock
import (
"errors"
"fmt"
"github.com/goropikari/psqlittle/backend"
"github.com/goropikari/psqlittle/core"
)
// RelationalAlgebraNode is a node of the relational-algebra query plan;
// Eval executes the node against a database and yields a result table
// (which may be nil for statements that produce no rows).
type RelationalAlgebraNode interface {
	Eval(backend.DB) (backend.Table, error)
}
// TableNode is the leaf node for a base table reference.
type TableNode struct {
	TableName string
}

// Eval evaluates TableNode by looking the table up in the database.
func (t *TableNode) Eval(db backend.DB) (backend.Table, error) {
	tb, err := db.GetTable(t.TableName)
	if err != nil {
		return nil, err
	}
	// Idiom fix: return an explicit nil error instead of the stale err.
	return tb, nil
}
// RenameTableNode is the node for renaming a table (a FROM-clause alias).
type RenameTableNode struct {
	Alias string
	Table RelationalAlgebraNode
}

// Eval evaluates RenameTableNode: it evaluates the child, copies the
// result, and renames the copy so the original table is untouched.
func (rt *RenameTableNode) Eval(db backend.DB) (backend.Table, error) {
	if rt.Table == nil {
		return nil, errors.New("have to include table")
	}
	tb, err := rt.Table.Eval(db)
	if err != nil {
		return nil, err
	}
	newTable := tb.Copy()
	newTable.RenameTableName(rt.Alias)
	return newTable, nil
}
// ProjectionNode is Node of projection operation (the SELECT list).
type ProjectionNode struct {
	ResTargets []ExpressionNode
	TargetColNames core.ColumnNames
	RANode RelationalAlgebraNode
}

// Eval evaluates ProjectionNode: it evaluates the child, validates the
// requested columns, and projects a copy of the table through the compiled
// result expressions. A nil child table (no FROM clause) is served from a
// synthetic one-row table so bare expressions still evaluate.
func (p *ProjectionNode) Eval(db backend.DB) (backend.Table, error) {
	if p.RANode == nil {
		return nil, nil
	}
	tb, err := p.RANode.Eval(db)
	if err != nil {
		return nil, err
	}
	if tb == nil {
		return p.makeEmptyTable()
	}
	newTable := tb.Copy()
	resFuncs := p.constructResFunc()
	if err := validateTargetColumn(newTable.GetColNames(), p.TargetColNames); err != nil {
		return nil, err
	}
	return newTable.Project(p.TargetColNames, resFuncs)
}
// validateTargetColumn checks that every requested target column exists in
// the table. The "*" wildcard and the zero ColumnName (a computed
// expression target) are exempt from the check.
func validateTargetColumn(tbCols core.ColumnNames, targets core.ColumnNames) error {
	for _, tc := range targets {
		if (tc == core.ColumnName{Name: "*"}) {
			continue
		}
		if (tc == core.ColumnName{}) {
			// expression target, not a table column
			continue
		}
		if !haveColumn(tc, tbCols) {
			return fmt.Errorf(`column "%v" does not exist`, makeColName(tc))
		}
	}
	return nil
}
// haveColumn reports whether c appears in cs.
func haveColumn(c core.ColumnName, cs core.ColumnNames) bool {
	for i := range cs {
		if cs[i] == c {
			return true
		}
	}
	return false
}
// makeColName renders a column name, qualified as "table.name" when the
// column carries a table name.
func makeColName(c core.ColumnName) string {
	if c.TableName != "" {
		return c.TableName + "." + c.Name
	}
	return c.Name
}
// constructResFunc compiles each result-target expression into a
// row-evaluation function, preserving order.
func (p *ProjectionNode) constructResFunc() []func(row backend.Row) (core.Value, error) {
	fns := make([]func(backend.Row) (core.Value, error), len(p.ResTargets))
	for i, target := range p.ResTargets {
		fns[i] = target.Eval()
	}
	return fns
}
// makeEmptyTable evaluates the result expressions against a single empty
// row and wraps the values in a one-row pseudo table. This backs queries
// with no table clause (pure SELECT-expression queries).
func (p *ProjectionNode) makeEmptyTable() (backend.Table, error) {
	resFuncs := p.constructResFunc()
	row := &EmptyTableRow{
		ColNames: p.TargetColNames,
		Values:   make(core.Values, 0),
	}
	// Each expression is evaluated against the (still growing) empty row.
	for _, fn := range resFuncs {
		v, err := fn(row)
		if err != nil {
			return nil, err
		}
		row.Values = append(row.Values, v)
	}
	return &EmptyTable{
		ColNames: p.TargetColNames,
		Rows:     []*EmptyTableRow{row},
	}, nil
}
// EmptyTable is a synthetic table used for queries without a FROM clause;
// it carries pre-computed rows and stubs out the rest of backend.Table.
type EmptyTable struct {
	ColNames core.ColumnNames
	Rows []*EmptyTableRow
}
// Copy returns the receiver itself; no copy is made.
func (t *EmptyTable) Copy() backend.Table {
	return t
}

// GetName returns the empty string; the pseudo table has no name.
func (t *EmptyTable) GetName() string {
	return ""
}

// GetColNames returns the column names of the pseudo table.
func (t *EmptyTable) GetColNames() core.ColumnNames {
	return t.ColNames
}

// GetRows returns the rows widened to the backend.Row interface.
func (t *EmptyTable) GetRows() []backend.Row {
	rows := make([]backend.Row, 0, len(t.Rows))
	for _, row := range t.Rows {
		rows = append(rows, row)
	}
	return rows
}

// GetCols is not supported for the pseudo table and returns nil.
func (t *EmptyTable) GetCols() core.Cols {
	return nil
}

// The remaining methods are no-op stubs that exist only to satisfy the
// backend.Table interface; EmptyTable never takes part in these operations.
func (t *EmptyTable) RenameTableName(name string) {}
func (t *EmptyTable) InsertValues(cs core.ColumnNames, vs core.ValuesList) error { return nil }
func (t *EmptyTable) Project(cs core.ColumnNames, fns []func(backend.Row) (core.Value, error)) (backend.Table, error) {
	return nil, nil
}
func (t *EmptyTable) Where(fn func(backend.Row) (core.Value, error)) (backend.Table, error) {
	return nil, nil
}
func (t *EmptyTable) CrossJoin(backend.Table) (backend.Table, error) {
	return nil, nil
}
func (t *EmptyTable) OrderBy(ns core.ColumnNames, dirs []int) (backend.Table, error) {
	return nil, nil
}
func (t *EmptyTable) Limit(n int) (backend.Table, error) {
	return nil, nil
}
func (t *EmptyTable) Update(colNames core.ColumnNames, condFn func(backend.Row) (core.Value, error), assignValFns []func(backend.Row) (core.Value, error)) (backend.Table, error) {
	return nil, nil
}
func (t *EmptyTable) Delete(func(backend.Row) (core.Value, error)) (backend.Table, error) {
	return nil, nil
}
// EmptyTableRow is the single synthetic row backing an EmptyTable; only
// GetValues is meaningful, the rest are interface stubs.
type EmptyTableRow struct {
	ColNames core.ColumnNames
	Values core.Values
}

// GetValueByColName is a stub; the synthetic row has no named lookup.
func (r *EmptyTableRow) GetValueByColName(core.ColumnName) (core.Value, error) {
	return nil, nil
}

// GetValues returns the row's computed values.
func (r *EmptyTableRow) GetValues() core.Values {
	return r.Values
}

// GetColNames is a stub and returns nil.
func (r *EmptyTableRow) GetColNames() core.ColumnNames {
	return nil
}

// UpdateValue is a no-op stub.
func (r *EmptyTableRow) UpdateValue(name core.ColumnName, val core.Value) {}
// WhereNode is Node of where clause.
type WhereNode struct {
	Condition ExpressionNode
	Table RelationalAlgebraNode
}

// Eval evaluates WhereNode: with no child table it yields a nil table;
// with no condition it passes the child table through unchanged; otherwise
// it filters a copy of the child table with the compiled condition.
func (wn *WhereNode) Eval(db backend.DB) (backend.Table, error) {
	if wn.Table == nil {
		return backend.Table(nil), nil
	}
	tb, err := wn.Table.Eval(db)
	if err != nil {
		return nil, err
	}
	if wn.Condition == nil {
		return tb, nil
	}
	newTable := tb.Copy()
	condFunc := wn.Condition.Eval()
	return newTable.Where(condFunc)
}
// CrossJoinNode is a node of cross join.
type CrossJoinNode struct {
	RANodes []RelationalAlgebraNode
}

// Eval evaluates CrossJoinNode: every child node is evaluated, the table
// names are checked for duplicates, and the tables are folded together
// with repeated pairwise cross joins.
func (c *CrossJoinNode) Eval(db backend.DB) (backend.Table, error) {
	tables := make([]backend.Table, 0, len(c.RANodes))
	for _, node := range c.RANodes {
		tb, err := node.Eval(db)
		if err != nil {
			return nil, err
		}
		tables = append(tables, tb)
	}
	if err := validateTableName(tables); err != nil {
		return nil, err
	}
	// No table clause at all: pure SELECT-expression query.
	if len(tables) == 0 {
		return nil, nil
	}
	joined := tables[0]
	for _, next := range tables[1:] {
		var err error
		joined, err = crossJoinTable(joined, next)
		if err != nil {
			return nil, err
		}
	}
	return joined, nil
}
// validateTableName returns an error when two evaluated tables share the
// same non-empty name (the same table listed twice without aliases).
func validateTableName(tbs []backend.Table) error {
	seen := make(map[string]int)
	for _, tb := range tbs {
		name := tb.GetName()
		if name == "" {
			continue
		}
		if _, dup := seen[name]; dup {
			return fmt.Errorf(`ERROR: table name "%v" specified more than once`, name)
		}
		seen[name]++
	}
	return nil
}
// crossJoinTable joins two tables; a nil operand short-circuits to a nil
// result ("no table").
func crossJoinTable(tb1, tb2 backend.Table) (backend.Table, error) {
	if tb1 == nil || tb2 == nil {
		return nil, nil
	}
	return tb1.CrossJoin(tb2)
}
// OrderByNode is a Node for order by clause.
type OrderByNode struct {
	SortKeys core.ColumnNames
	SortDirs []int
	RANode   RelationalAlgebraNode
}

// Eval evaluates OrderByNode: it evaluates the child node and sorts the
// result by the configured keys and directions.
func (o *OrderByNode) Eval(db backend.DB) (backend.Table, error) {
	tb, err := o.RANode.Eval(db)
	if err != nil {
		return nil, err
	}
	// BUG FIX: both return values of OrderBy were previously discarded,
	// so sort errors went unnoticed and, for implementations that return
	// a new sorted table, the sort had no effect on the result.
	tb, err = tb.OrderBy(o.SortKeys, o.SortDirs)
	if err != nil {
		return nil, err
	}
	return tb, nil
}
// LimitNode is a Node for limit clause.
type LimitNode struct {
	Count  int
	RANode RelationalAlgebraNode
}

// Eval evaluates LimitNode: it evaluates the child node and truncates the
// result to at most Count rows.
func (l *LimitNode) Eval(db backend.DB) (backend.Table, error) {
	full, err := l.RANode.Eval(db)
	if err != nil {
		return nil, err
	}
	limited, err := full.Limit(l.Count)
	if err != nil {
		return nil, err
	}
	return limited, nil
}
// DropTableNode is a node of drop statement.
type DropTableNode struct {
	TableNames []string
}

// Eval evaluates DropTableNode: it drops each named table in order and
// stops at the first failure. DDL produces no result table.
func (d *DropTableNode) Eval(db backend.DB) (backend.Table, error) {
	for _, name := range d.TableNames {
		if err := db.DropTable(name); err != nil {
			return nil, err
		}
	}
	return nil, nil
}
// CreateTableNode is a node of create statement.
type CreateTableNode struct {
	TableName string
	ColumnDefs core.Cols
}

// Eval evaluates CreateTableNode: it creates the table with the given
// column definitions. DDL produces no result table.
func (c *CreateTableNode) Eval(db backend.DB) (backend.Table, error) {
	if err := db.CreateTable(c.TableName, c.ColumnDefs); err != nil {
		return nil, err
	}
	return nil, nil
}
// InsertNode is a node of insert statement. (The original comment said
// "create statement" — copy/paste slip.)
type InsertNode struct {
	TableName   string
	ColumnNames core.ColumnNames
	ValuesList  core.ValuesList
}

// Eval evaluates InsertNode: it inserts ValuesList into the named table's
// ColumnNames. Inserts produce no result table.
func (c *InsertNode) Eval(db backend.DB) (backend.Table, error) {
	tb, err := db.GetTable(c.TableName)
	if err != nil {
		return nil, err
	}
	// BUG FIX: the error returned by InsertValues was previously ignored,
	// so a failed insert was reported to the caller as success.
	if err := tb.InsertValues(c.ColumnNames, c.ValuesList); err != nil {
		return nil, err
	}
	return nil, nil
}
// UpdateNode is a node of update statement.
type UpdateNode struct {
	Condition  ExpressionNode
	ColNames   core.ColumnNames
	AssignExpr []ExpressionNode
	TableName  string
}

// Eval evaluates UpdateNode: rows matching Condition (every row when no
// condition is present) have ColNames reassigned from AssignExpr. Updates
// produce no result table.
func (u *UpdateNode) Eval(db backend.DB) (backend.Table, error) {
	var condFunc func(backend.Row) (core.Value, error)
	if u.Condition == nil {
		// No WHERE clause: match every row.
		condFunc = func(row backend.Row) (core.Value, error) {
			return core.True, nil
		}
	} else {
		condFunc = u.Condition.Eval()
	}
	tb, err := db.GetTable(u.TableName)
	if err != nil {
		return nil, err
	}
	assignValFns := make([]func(backend.Row) (core.Value, error), 0, len(u.AssignExpr))
	for _, expr := range u.AssignExpr {
		assignValFns = append(assignValFns, expr.Eval())
	}
	// BUG FIX: the error returned by Update was previously ignored, so a
	// failed update was reported to the caller as success.
	if _, err := tb.Update(u.ColNames, condFunc, assignValFns); err != nil {
		return nil, err
	}
	return nil, nil
}
// DeleteNode is a node of delete statement. (The original comment said
// "update statement" — copy/paste slip.)
type DeleteNode struct {
	Condition ExpressionNode
	TableName string
}

// Eval evaluates DeleteNode: rows matching Condition (every row when no
// condition is present) are removed from the named table.
func (d *DeleteNode) Eval(db backend.DB) (backend.Table, error) {
	var match func(backend.Row) (core.Value, error)
	if d.Condition != nil {
		match = d.Condition.Eval()
	} else {
		// No WHERE clause: match every row.
		match = func(backend.Row) (core.Value, error) {
			return core.True, nil
		}
	}
	tb, err := db.GetTable(d.TableName)
	if err != nil {
		return nil, err
	}
	return tb.Delete(match)
}
|
package db
import (
"encoding/json"
"errors"
"fmt"
"log"
pb "gopkg.in/cheggaaa/pb.v1"
"github.com/boltdb/bolt"
)
//NewBucketNotFoundError formatting for missing bucket
//
// Idiom fix: fmt.Errorf replaces the Sprintf+errors.New pair; the message
// is unchanged.
func NewBucketNotFoundError(bucketName string) error {
	return fmt.Errorf("Bucket not found: %s", bucketName)
}
//NewPutBucketError formatting for put failures (value, bucket|key, cause)
//
// Idiom fix: fmt.Errorf replaces the Sprintf+errors.New pair; the message
// is unchanged.
func NewPutBucketError(bucketName string, key string, value string, err error) error {
	return fmt.Errorf("Putting %s in %s|%s failed: %s", value, bucketName, key, err)
}
//NewCreateBucketError formatting for create bucket errors
//
// Idiom fix: fmt.Errorf replaces the Sprintf+errors.New pair; the message
// is unchanged.
func NewCreateBucketError(bucketName string, err error) error {
	return fmt.Errorf("Create bucket failed for %s: %s", bucketName, err)
}
//CreateBuckets Creates buckets if they don't exist
//
// All buckets are created in a single write transaction; any failure
// aborts the process via log.Fatal.
func CreateBuckets(db *bolt.DB, bucketNames []string) {
	err := db.Update(func(tx *bolt.Tx) error {
		for _, bucketName := range bucketNames {
			_, err := tx.CreateBucketIfNotExists([]byte(bucketName))
			if err != nil {
				return NewCreateBucketError(bucketName, err)
			}
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
//UpdateStringList Puts a map[string][]string into the db
//
// Each entry is JSON-encoded and stored under its key in bucketName, with
// a progress bar printed while importing. Failures abort via log.Fatal
// (matching the original behavior); the nil return exists to satisfy the
// declared signature.
func UpdateStringList(db *bolt.DB, bucketName string, data map[string][]string) error {
	log.Printf("Importing %s", bucketName)
	bar := pb.New(len(data)).SetWidth(80).Start()
	bar.ShowSpeed = true
	err := db.Update(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(bucketName))
		if bucket == nil {
			return NewBucketNotFoundError(bucketName)
		}
		for key, listValues := range data {
			listValuesBytes, err := json.Marshal(listValues)
			if err != nil {
				return err
			}
			// BUG FIX: Put's error was previously ignored, so a failed
			// write could be reported as a successful import.
			if err := bucket.Put([]byte(key), listValuesBytes); err != nil {
				return NewPutBucketError(bucketName, key, string(listValuesBytes), err)
			}
			bar.Increment()
		}
		return nil
	})
	bar.Finish()
	if err != nil {
		log.Fatal(err)
	}
	return nil
}
//InList Test membership of list
func InList(list []string, key string) bool {
	for i := range list {
		if list[i] == key {
			return true
		}
	}
	return false
}
//GetAllKeys Helper to get all keys in a bucket
//
// A missing bucket (or any read failure) aborts via log.Fatal, matching
// the original behavior.
func GetAllKeys(db *bolt.DB, bucketName string) []string {
	var result []string
	err := db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(bucketName))
		if bucket == nil {
			return NewBucketNotFoundError(bucketName)
		}
		// BUG FIX: ForEach's error was previously discarded.
		return bucket.ForEach(func(k, v []byte) error {
			result = append(result, string(k))
			return nil
		})
	})
	if err != nil {
		log.Fatal(err)
	}
	return result
}
//GetAll Helper to get all keys and values in a bucket
//
// A missing bucket (or any read failure) aborts via log.Fatal, matching
// the original behavior.
func GetAll(db *bolt.DB, bucketName string) map[string]string {
	result := make(map[string]string)
	err := db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(bucketName))
		if bucket == nil {
			return NewBucketNotFoundError(bucketName)
		}
		// BUG FIX: ForEach's error was previously discarded.
		return bucket.ForEach(func(k, v []byte) error {
			result[string(k)] = string(v)
			return nil
		})
	})
	if err != nil {
		log.Fatal(err)
	}
	return result
}
//GetAllLists Helper to get all lists in a bucket
//
// Each stored value is JSON-decoded back into a []string. Any failure
// still aborts via log.Fatal, matching the original behavior.
func GetAllLists(db *bolt.DB, bucketName string) map[string][]string {
	result := make(map[string][]string)
	err := db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket([]byte(bucketName))
		if bucket == nil {
			return NewBucketNotFoundError(bucketName)
		}
		return bucket.ForEach(func(k, v []byte) error {
			var newList []string
			// BUG FIX: the decode error was previously checked only after
			// the (possibly partial) value had been stored in the result;
			// decode first, then store, and propagate the error instead of
			// Fatal-ing inside the transaction.
			if err := json.Unmarshal(v, &newList); err != nil {
				return err
			}
			result[string(k)] = newList
			return nil
		})
	})
	if err != nil {
		log.Fatal(err)
	}
	return result
}
//UniqueStrings convert map[string][]string in a set
func UniqueStrings(data map[string][]string) map[string]bool {
	set := make(map[string]bool)
	for _, values := range data {
		for _, v := range values {
			set[v] = true
		}
	}
	return set
}
|
package git
import (
"git-get/pkg/run"
)
// ConfigGlobal represents a global gitconfig file.
type ConfigGlobal struct{}

// Get reads a value from global gitconfig file. Returns empty string when key is missing.
// It shells out to `git config --global <key>` and captures a single line.
func (c *ConfigGlobal) Get(key string) string {
	out, err := run.Git("config", "--global", key).AndCaptureLine()
	// In case of error return an empty string, the missing value will fall back to a default.
	if err != nil {
		return ""
	}
	return out
}
|
// Copyright 2018 Kuei-chun Chen. All rights reserved.
package sim
import (
"context"
"os"
"testing"
)
// TestGetSchema infers and logs the schema of the keyhole.favorites
// collection. NOTE(review): it requires a reachable local MongoDB and
// hard-codes placeholder credentials via DATABASE_URL — this is an
// integration test, not a unit test.
func TestGetSchema(t *testing.T) {
	var err error
	os.Setenv("DATABASE_URL", "mongodb://user:password@localhost/")
	var client = getMongoClient()
	defer client.Disconnect(context.Background())
	collection := client.Database("keyhole").Collection("favorites")
	var str string
	if str, err = GetSchema(collection, true); err != nil {
		t.Fatal(err)
	}
	t.Log(str)
}
|
package db
import (
"context"
"fmt"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"time"
)
// NewDatabase connects to MongoDB (a fixed local URI in dev, an SRV URI
// otherwise — see mountServerConnection) and returns a handle to dbname.
// The 10s context bounds the Connect call only.
// NOTE(review): the connection is never pinged here, so an unreachable
// server may not surface until first use — confirm this is intended.
func NewDatabase(env, user, password, hostname, dbname string) (*mongo.Database, error) {
	server := mountServerConnection(env, user, password, hostname, dbname)
	clientOptions := options.Client().ApplyURI(server)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	client, err := mongo.Connect(ctx, clientOptions)
	if err != nil {
		return nil, err
	}
	return client.Database(dbname), nil
}
// mountServerConnection builds the MongoDB connection string: a fixed
// local URI for the "dev" (or unset) environment, an Atlas-style SRV URI
// with credentials otherwise.
func mountServerConnection(env, user, password, hostname, dbname string) string {
	switch env {
	case "dev", "":
		return "mongodb://mongo:27017/dev_env"
	}
	return fmt.Sprintf("mongodb+srv://%s:%s@%s/%s?retryWrites=true&w=majority", user, password, hostname, dbname)
}
|
package accesslog
import (
"bufio"
"bytes"
"errors"
"net"
"net/http"
)
// ResponseProxy augments http.ResponseWriter with read access to the
// status code and (optionally) the bytes written to the response.
type ResponseProxy interface {
	http.ResponseWriter
	Status() int
	ResponseBytes() []byte
}

// responseRecorder wraps an http.ResponseWriter, remembering the status
// code and, when recordResponse is set, buffering a copy of the body.
type responseRecorder struct {
	writer http.ResponseWriter
	statusCode int
	recordResponse bool
	Body *bytes.Buffer
}

// NewResponseRecorder wraps w; pass recordResponse=true to also capture
// the response body for later inspection via ResponseBytes.
func NewResponseRecorder(w http.ResponseWriter, recordResponse bool) ResponseProxy {
	rec := &responseRecorder{
		writer:         w,
		statusCode:     http.StatusOK,
		recordResponse: recordResponse,
	}
	return rec
}

// WriteHeader records the status code and forwards it downstream.
func (r *responseRecorder) WriteHeader(statusCode int) {
	r.statusCode = statusCode
	r.writer.WriteHeader(statusCode)
}

// Header exposes the underlying writer's header map.
func (r *responseRecorder) Header() http.Header {
	return r.writer.Header()
}

// Write forwards buf downstream and, on success with recording enabled,
// appends it to the lazily-created capture buffer.
func (r *responseRecorder) Write(buf []byte) (int, error) {
	n, err := r.writer.Write(buf)
	if err != nil || !r.recordResponse {
		return n, err
	}
	if r.Body == nil {
		r.Body = bytes.NewBuffer(nil)
	}
	r.Body.Write(buf)
	return n, err
}

// Hijack delegates to the underlying writer when it supports hijacking.
func (r *responseRecorder) Hijack() (net.Conn, *bufio.ReadWriter, error) {
	hj, ok := r.writer.(http.Hijacker)
	if !ok {
		return nil, nil, errors.New("error in hijacker")
	}
	return hj.Hijack()
}

// Status returns the last status code written (200 by default).
func (r *responseRecorder) Status() int {
	return r.statusCode
}

var emptyBytesSlice []byte

// ResponseBytes returns the captured body, or a nil slice when nothing
// was recorded.
func (r *responseRecorder) ResponseBytes() []byte {
	if r.Body != nil {
		return r.Body.Bytes()
	}
	return emptyBytesSlice
}
|
package engine
import (
"io"
"mime/multipart"
"os"
)
// Scan is the main entry point: it opens the named file and (eventually)
// will run the analysis pipeline over its contents; the pipeline itself is
// still stubbed out, so a nil report is returned on success.
func (a *analysisService) Scan(FileName string) (*Report, error) {
	a.logger.Debug("Skade Entrypoint")
	var err error
	// start by opening the file
	// NOTE(review): suspiciousFile is assigned with "=", so it is
	// presumably a package-level variable — confirm.
	suspiciousFile, err = os.Open(FileName)
	if err != nil {
		a.logger.Error("Could not open file:")
		// BUG FIX: the open error was previously only logged; execution
		// continued (and the stale err was re-checked further down) with a
		// nil file handle. Fail fast instead.
		return nil, err
	}
	// make sure we close the file again once we are done
	// defer suspiciousFile.Close()
	// then get all the Bytes from the file
	// using susBytes because 'bytes' is libarry name
	// susBytes, err = getBytesFromFile()
	return nil, nil
}
// ScanBytes will analyze an in-memory payload; currently a stub that
// returns a nil report.
func (a *analysisService) ScanBytes(susBytes []byte) (*Report, error) {
	a.logger.Debug("Starting Scan")
	return nil, nil
}

// ScanFile will analyze a stream; currently an unimplemented stub.
func (a *analysisService) ScanFile(file io.Reader) (*Report, error) {
	return nil, nil
}

// ScanFileUpload will analyze an uploaded file; currently an
// unimplemented stub.
func (a *analysisService) ScanFileUpload(file multipart.File) (*Report, error) {
	return nil, nil
}
|
package generator
import (
"crypto/rand"
"log"
)
// The building blocks for the selectable password dictionaries.
const char_spec = "(){}[]<>?!`~*#$^%;:'\"\\/"
const char_alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
const char_number = "0123456789"

type (
	// PassInfo describes the password to generate: which character set to
	// draw from and how many characters to produce.
	PassInfo struct {
		CharacterSet string
		Length       int64
	}
)

// getDictionary maps a character-set name to its alphabet. An unknown
// name aborts the process, as before.
func getDictionary(characterSet string) string {
	switch characterSet {
	case "alphanumspec":
		return char_alpha + char_number + char_spec
	case "alphanum":
		return char_alpha + char_number
	case "alpha":
		return char_alpha
	case "number":
		return char_number
	}
	// BUG FIX: the original empty `default:` clause was dead code — an
	// unknown set fell through to this Fatal either way. The clause is
	// removed and the message grammar fixed.
	log.Fatal("Character set does not exist")
	return ""
}

// RandStr returns a random string of passInfo.Length characters drawn
// from the dictionary selected by passInfo.CharacterSet, using
// crypto/rand as the entropy source.
//
// NOTE(review): indexing with v % len(dictionary) has a slight modulo
// bias; kept as-is to preserve output behavior.
func RandStr(passInfo PassInfo) string {
	dictionary := getDictionary(passInfo.CharacterSet)
	var bytes = make([]byte, passInfo.Length)
	// BUG FIX: the error from rand.Read was previously ignored; a failed
	// read would silently produce a low-entropy string.
	if _, err := rand.Read(bytes); err != nil {
		log.Fatal(err)
	}
	for k, v := range bytes {
		bytes[k] = dictionary[v%byte(len(dictionary))]
	}
	return string(bytes)
}
|
package bazi
import "fmt"
// NewSiZhu builds the four pillars (SiZhu) from a Gregorian date and the
// corresponding Bazi-calendar date, then derives each pillar via init.
func NewSiZhu(pSolarDate *TSolarDate, pBaziDate *TBaziDate) *TSiZhu {
	p := &TSiZhu{
		pYearZhu: NewZhu(),
		pMonthZhu: NewZhu(),
		pDayZhu: NewZhu(),
		pHourZhu: NewZhu(),
		pSolarDate: pSolarDate,
		pBaziDate: pBaziDate,
	}
	p.init()
	return p
}
// TSiZhu holds the four pillars (year, month, day, hour) together with
// the two source dates they were derived from.
type TSiZhu struct {
	pYearZhu *TZhu // year pillar
	pMonthZhu *TZhu // month pillar
	pDayZhu *TZhu // day pillar
	pHourZhu *TZhu // hour pillar
	pSolarDate *TSolarDate // Gregorian (solar) date
	pBaziDate *TBaziDate // Bazi-calendar date
}
// init derives the four pillars in order; the year pillar feeds the month
// pillar and the day pillar feeds the hour pillar.
func (self *TSiZhu) init() *TSiZhu {
	// Year pillar from the Bazi year.
	self.pYearZhu.genYearGanZhi(self.pBaziDate.Year())
	// Month pillar from the Bazi month plus the year stem.
	self.pMonthZhu.genMonthGanZhi(self.pBaziDate.Month(), self.pYearZhu.Gan().Value())
	// Day pillar from the Gregorian year/month/day (as a day count).
	self.pDayZhu.genDayGanZhi(self.pSolarDate.GetAllDays())
	// Hour pillar from the hour plus the day stem.
	self.pHourZhu.genHourGanZhi(self.pSolarDate.Hour(), self.pDayZhu.Gan().Value())
	return self
}
// String renders the four pillars and a chart: each stem with its five
// element and its ten-god relation to the day master (the day pillar is
// labelled 主), each branch with its five element, then the hidden stems
// and NaYin of every pillar. (The previous comment named genShiShen,
// which was stale.)
func (self *TSiZhu) String() string {
	return fmt.Sprintf("四柱:%v %v %v %v\n命盘解析:\n%v(%v)[%v]\t%v(%v)[%v]\t%v(%v)[%v]\t%v(%v)[%v]\t\n%v(%v) \t%v(%v) \t%v(%v) \t%v(%v)\n%v %v %v %v\n%v %v %v %v",
		self.pYearZhu.GanZhi(),
		self.pMonthZhu.GanZhi(),
		self.pDayZhu.GanZhi(),
		self.pHourZhu.GanZhi(),
		// ----------------------------------------------------------------------------------
		self.pYearZhu.Gan(), self.pYearZhu.Gan().ToWuXing(), self.pYearZhu.Gan().ToShiShen(self.pDayZhu.Gan().Value()),
		self.pMonthZhu.Gan(), self.pMonthZhu.Gan().ToWuXing(), self.pMonthZhu.Gan().ToShiShen(self.pDayZhu.Gan().Value()),
		self.pDayZhu.Gan(), self.pDayZhu.Gan().ToWuXing(), "主",
		self.pHourZhu.Gan(), self.pHourZhu.Gan().ToWuXing(), self.pHourZhu.Gan().ToShiShen(self.pDayZhu.Gan().Value()),
		// ----------------------------------------------------------------------------------
		self.pYearZhu.Zhi(), self.pYearZhu.Zhi().ToWuXing(),
		self.pMonthZhu.Zhi(), self.pMonthZhu.Zhi().ToWuXing(),
		self.pDayZhu.Zhi(), self.pDayZhu.Zhi().ToWuXing(),
		self.pHourZhu.Zhi(), self.pHourZhu.Zhi().ToWuXing(),
		// ----------------------------------------------------------------------------------
		self.pYearZhu.Zhi().ToCangGan(self.pDayZhu.Gan().Value()),
		self.pMonthZhu.Zhi().ToCangGan(self.pDayZhu.Gan().Value()),
		self.pDayZhu.Zhi().ToCangGan(self.pDayZhu.Gan().Value()),
		self.pHourZhu.Zhi().ToCangGan(self.pDayZhu.Gan().Value()),
		self.pYearZhu.GanZhi().ToNaYin(),
		self.pMonthZhu.GanZhi().ToNaYin(),
		self.pDayZhu.GanZhi().ToNaYin(),
		self.pHourZhu.GanZhi().ToNaYin(),
	)
}
|
package lcd
import (
"github.com/gorilla/mux"
"github.com/irisnet/irishub/client/context"
"github.com/irisnet/irishub/client/utils"
"github.com/irisnet/irishub/codec"
)
// RegisterRoutes - Central function to define routes that get registered by the main application
//
// NOTE(review): /bank/token-stats and /bank/token-stats/{id} are wired to
// the same handler — confirm the {id} variant is meant to reuse it.
func RegisterRoutes(cliCtx context.CLIContext, r *mux.Router, cdc *codec.Codec) {
	// Query endpoints.
	r.HandleFunc("/bank/accounts/{address}",
		QueryAccountRequestHandlerFn(cdc, utils.GetAccountDecoder(cdc), cliCtx)).Methods("GET")
	r.HandleFunc("/bank/coins/{type}",
		QueryCoinTypeRequestHandlerFn(cdc, cliCtx)).Methods("GET")
	r.HandleFunc("/bank/token-stats",
		QueryTokenStatsRequestHandlerFn(cdc, utils.GetAccountDecoder(cdc), cliCtx)).Methods("GET")
	r.HandleFunc("/bank/token-stats/{id}",
		QueryTokenStatsRequestHandlerFn(cdc, utils.GetAccountDecoder(cdc), cliCtx)).Methods("GET")
	// Transaction endpoints.
	r.HandleFunc("/bank/accounts/{address}/send", SendRequestHandlerFn(cdc, cliCtx)).Methods("POST")
	r.HandleFunc("/bank/accounts/{address}/burn", BurnRequestHandlerFn(cdc, cliCtx)).Methods("POST")
	r.HandleFunc("/bank/accounts/{address}/set-memo-regexp", SetMemoRegexpRequestHandlerFn(cdc, cliCtx)).Methods("POST")
}
|
package hlf
import (
"fmt"
"time"
)
// Logger is the leveled logging interface of this package.
type Logger interface {
	// Child returns a sub-logger nested under this one.
	Child(string) Logger
	// To returns a logger with the same id writing to the given target.
	To(string) Logger
	// Ntf through Trc log a printf-formatted message at the
	// corresponding severity level.
	Ntf(string, ...interface{})
	Inf(string, ...interface{})
	Err(string, ...interface{})
	Wrn(string, ...interface{})
	Dbg(string, ...interface{})
	Trc(string, ...interface{})
}
// createLogger builds a logger with the given id and target, linked to
// parent when parent is this package's concrete *logger type. An empty
// target falls back to the configured default file.
func createLogger(id string, target string, parent Logger) Logger {
	if target == "" {
		target = _conf.DefaultFile
	}
	lg := &logger{
		id:     id,
		target: target,
	}
	// Only a concrete *logger can act as parent; anything else (or nil)
	// leaves lg.parent at its zero value, nil.
	if p, ok := parent.(*logger); ok {
		lg.parent = p
	}
	lg.loadConf()
	return lg
}
// MakeLogger init a logger: creates a top-level logger with the given id,
// attached to the package default logger and the default target file.
func MakeLogger(id string) Logger {
	return createLogger(id, "", _defaultLogger)
}
// logger is the concrete Logger implementation: a node in a parent chain,
// carrying its own config and output target.
type logger struct {
	id string // logger name; empty for the root
	conf loggerConf // resolved configuration (see loadConf)
	parent *logger // enclosing logger, nil at the root
	target string // output file target
}
// loadConf resolves this logger's configuration: an explicit entry in
// _conf.Loggers (keyed "_" for the empty id) wins, otherwise the parent's
// config is inherited, otherwise the package default applies.
func (me *logger) loadConf() {
	key := me.id
	if key == "" {
		key = "_"
	}
	if conf, ok := _conf.Loggers[key]; ok {
		me.conf = conf
		return
	}
	if me.parent != nil {
		me.conf = me.parent.conf
		return
	}
	me.conf = _defaultLogConf
}
// Child creates a sub-logger whose parent is this logger; its output
// cascades to ancestors according to their child-level settings.
func (me *logger) Child(id string) Logger {
	return createLogger(id, "", me)
}
// To returns a logger with the same id but a different output target.
// Note the new logger is a sibling (parent is me.parent, not me).
func (me *logger) To(target string) Logger {
	return createLogger(me.id, target, me.parent)
}
// Ntf logs a formatted message at notification level.
func (me *logger) Ntf(format string, a ...interface{}) {
	me.print(_LV_NOTIFICATION, format, a...)
}
// Inf logs a formatted message at info level.
func (me *logger) Inf(format string, a ...interface{}) {
	me.print(_LV_INFO, format, a...)
}
// Err logs a formatted message at error level.
func (me *logger) Err(format string, a ...interface{}) {
	me.print(_LV_ERROR, format, a...)
}
// Wrn logs a formatted message at warning level.
func (me *logger) Wrn(format string, a ...interface{}) {
	me.print(_LV_WARNING, format, a...)
}
// Dbg logs a formatted message at debug level.
func (me *logger) Dbg(format string, a ...interface{}) {
	me.print(_LV_DEBUG, format, a...)
}
// Trc logs a formatted message at trace level.
func (me *logger) Trc(format string, a ...interface{}) {
	me.print(_LV_TRACE, format, a...)
}
// print formats and dispatches one log record if lv is enabled.
//
// Console output is formatted without parent context (indent base nil);
// file output is re-formatted per receiver. After writing to this
// logger's own target the record also cascades to every ancestor whose
// applied child level admits it.
//
// NOTE(review): ancestors are written via log.send2file(me.target, ...),
// i.e. with the child's target, not log.target — confirm this is
// intentional.
func (me *logger) print(lv logLevel, format string, a ...interface{}) {
	if me.conf.Lv < lv {
		return
	}
	text := me.formati(nil, lv, format, a...)
	if me.conf.ToConsole {
		me.send2console(text)
	}
	if me.conf.ToFile {
		text = me.formati(me, lv, format, a...)
		me.send2file(me.target, text)
		for log := me.parent; log != nil; log = log.parent {
			clv := me.findAppliedLv(log)
			if clv >= lv {
				// Deliberately shadowed: re-format with this
				// ancestor's indentation base.
				text := me.formati(log, lv, format, a...)
				log.send2file(me.target, text)
			}
		}
	}
}
// findAppliedLv resolves the level that ancestor applies to this logger:
// walking up from me toward ancestor, the first intermediate id present
// in ancestor's ChildLv map wins; otherwise ancestor's DefaultChildLv.
func (me *logger) findAppliedLv(ancestor *logger) logLevel {
	var lv logLevel = _LV_UNKNOWN
	found := false
	for log := me; log != ancestor && log.parent != nil; log = log.parent {
		lv, found = ancestor.conf.ChildLv[log.id]
		if found {
			break
		}
	}
	if !found {
		lv = ancestor.conf.DefaultChildLv
	}
	return lv
}
// send2console queues text for the console target of the log server.
func (me *logger) send2console(text string) {
	me.send2Srv("console:", text)
}
// send2file queues text for a file target derived from this logger's path.
func (me *logger) send2file(target string, text string) {
	me.send2Srv(me.getFileTarget(target), text)
}
// send2Srv hands the record to the log server goroutine via its channel.
// NOTE(review): this send blocks when _logSrvCh is full — confirm the
// server drains fast enough for hot paths.
func (me *logger) send2Srv(target string, text string) {
	li := logItem{
		target: target,
		text: text,
	}
	_logSrvCh <- li
}
// toPrefix renders this logger's id as a bracketed prefix, or a single
// space when the logger has no id.
func (me *logger) toPrefix() string {
	if me.id != "" {
		return "[" + me.id + "] "
	}
	return " "
}
// formati assembles one record line: level prefix, timestamp, indentation
// relative to parent, the logger's own prefix, the formatted message, and
// a trailing newline.
func (me *logger) formati(parent *logger, lv logLevel, format string, a ...interface{}) string {
	msg := fmt.Sprintf(format, a...)
	return lv.toPrefix() + logTime() + me.indent(parent) + me.toPrefix() + msg + "\n"
}
// getIndent returns the indentation width for this logger relative to
// parent: _conf.Indent columns per ancestry step between them.
func (me *logger) getIndent(parent *logger) int {
	depth := 0
	for l := me; l != parent && l.parent != nil; l = l.parent {
		depth++
	}
	return depth * _conf.Indent
}
// indent renders the indentation of this logger relative to parent as a
// run of spaces (empty for non-positive widths).
func (me *logger) indent(parent *logger) string {
	pad := ""
	for n := me.getIndent(parent); n > 0; n-- {
		pad += " "
	}
	return pad
}
// getPath builds the slash-terminated id path from the root down to this
// logger (loggers with empty ids are skipped), prefixed with _logRoot.
func (me *logger) getPath() string {
	path := ""
	for l := me; l != nil; l = l.parent {
		if l.id != "" {
			path = l.id + "/" + path
		}
	}
	path = _logRoot + path
	return path
}
// getFileTarget prefixes the file name f with the "file:" scheme and this
// logger's path, producing the log-server target string.
func (me *logger) getFileTarget(f string) string {
	return "file:" + me.getPath() + f
}
func logTime() string {
return "[" + time.Now().Format(time.RFC3339) + "]"
}
|
package routers
import (
"github.com/byrnedo/apibase/controllers"
"github.com/byrnedo/oauthsvc/controllers/mq"
"github.com/byrnedo/oauthsvc/osinserver"
"github.com/byrnedo/apibase/natsio/defaultnats"
)
// init wires the oauth controller's NATS routes at package load time.
//
// NOTE(review): this assumes defaultnats.Conn is ready when the package
// is imported; init-time side effects make startup ordering implicit —
// consider an explicit setup function.
func init() {
	controllers.SubscribeNatsRoutes(defaultnats.Conn, "oauth_svc_worker", mq.NewOauthController(defaultnats.Conn, osinserver.Server))
}
|
package delivery
import (
"testing"
"github.com/stretchr/testify/suite"
)
// accountServiceTestSuite groups the account endpoint tests on top of the
// shared HTTP-mocking base suite.
type accountServiceTestSuite struct {
	baseTestSuite
}
// TestAccountService runs the account service suite under the standard
// testing framework.
func TestAccountService(t *testing.T) {
	suite.Run(t, new(accountServiceTestSuite))
}
// TestGetBalance verifies that GetBalanceService issues a signed request
// and parses the balances payload into Balance values.
// (Renamed from TestetBalance — a typo for TestGetBalance; testify still
// discovered the old name via the Test prefix, but it was misleading.)
func (s *accountServiceTestSuite) TestGetBalance() {
	data := []byte(`[
		{
			"accountAlias": "SgsR",
			"asset": "BTC",
			"balance": "0.00250000",
			"withdrawAvailable": "0.00250000",
			"crossWalletBalance": "0.00241969",
			"crossUnPnl": "0.00000000",
			"availableBalance": "0.00241969",
			"updateTime": 1592468353979
		}
	]`)
	s.mockDo(data, nil)
	defer s.assertDo()
	s.assertReq(func(r *request) {
		e := newSignedRequest()
		s.assertRequestEqual(e, r)
	})
	res, err := s.client.NewGetBalanceService().Do(newContext())
	s.r().NoError(err)
	s.r().Len(res, 1)
	e := &Balance{
		AccountAlias:       "SgsR",
		Asset:              "BTC",
		Balance:            "0.00250000",
		WithdrawAvailable:  "0.00250000",
		CrossWalletBalance: "0.00241969",
		CrossUnPnl:         "0.00000000",
		AvailableBalance:   "0.00241969",
		UpdateTime:         1592468353979,
	}
	s.assertBalanceEqual(e, res[0])
}
// assertBalanceEqual compares every field of the expected Balance e
// against the actual a, labelling each assertion with its field name.
func (s *accountServiceTestSuite) assertBalanceEqual(e, a *Balance) {
	r := s.r()
	r.Equal(e.AccountAlias, a.AccountAlias, "AccountAlias")
	r.Equal(e.Asset, a.Asset, "Asset")
	r.Equal(e.Balance, a.Balance, "Balance")
	r.Equal(e.WithdrawAvailable, a.WithdrawAvailable, "WithdrawAvailable")
	r.Equal(e.CrossWalletBalance, a.CrossWalletBalance, "CrossWalletBalance")
	r.Equal(e.CrossUnPnl, a.CrossUnPnl, "CrossUnPnl")
	r.Equal(e.AvailableBalance, a.AvailableBalance, "AvailableBalance")
	r.Equal(e.UpdateTime, a.UpdateTime, "UpdateTime")
}
// TestGetAccount verifies that GetAccountService issues a signed request
// and parses the account payload (assets, positions, permissions) into an
// Account value.
// (Renamed from TestetAccount — a typo for TestGetAccount.)
func (s *accountServiceTestSuite) TestGetAccount() {
	data := []byte(`{
		"assets": [
			{
				"asset": "BTC",
				"walletBalance": "0.00241969",
				"unrealizedProfit": "0.00000000",
				"marginBalance": "0.00241969",
				"maintMargin": "0.00000000",
				"initialMargin": "0.00000000",
				"positionInitialMargin": "0.00000000",
				"openOrderInitialMargin": "0.00000000",
				"maxWithdrawAmount": "0.00241969",
				"crossWalletBalance": "0.00241969",
				"crossUnPnl": "0.00000000",
				"availableBalance": "0.00241969"
			}
		],
		"positions": [
			{
				"symbol": "BTCUSD_201225",
				"positionAmt": "0",
				"initialMargin": "0",
				"maintMargin": "0",
				"unrealizedProfit": "0.00000000",
				"positionInitialMargin": "0",
				"openOrderInitialMargin": "0",
				"leverage": "125",
				"isolated": false,
				"positionSide": "BOTH",
				"entryPrice": "0.0",
				"maxQty": "50"
			},
			{
				"symbol": "BTCUSD_201225",
				"positionAmt": "0",
				"initialMargin": "0",
				"maintMargin": "0",
				"unrealizedProfit": "0.00000000",
				"positionInitialMargin": "0",
				"openOrderInitialMargin": "0",
				"leverage": "125",
				"isolated": false,
				"positionSide": "LONG",
				"entryPrice": "0.0",
				"maxQty": "50"
			},
			{
				"symbol": "BTCUSD_201225",
				"positionAmt": "0",
				"initialMargin": "0",
				"maintMargin": "0",
				"unrealizedProfit": "0.00000000",
				"positionInitialMargin": "0",
				"openOrderInitialMargin": "0",
				"leverage": "125",
				"isolated": false,
				"positionSide": "SHORT",
				"entryPrice": "0.0",
				"maxQty": "50"
			}
		],
		"canDeposit": true,
		"canTrade": true,
		"canWithdraw": true,
		"feeTier": 2,
		"updateTime": 0
	}`)
	s.mockDo(data, nil)
	defer s.assertDo()
	s.assertReq(func(r *request) {
		e := newSignedRequest()
		s.assertRequestEqual(e, r)
	})
	res, err := s.client.NewGetAccountService().Do(newContext())
	s.r().NoError(err)
	e := &Account{
		Assets: []*AccountAsset{
			{
				Asset:                  "BTC", // asset name
				WalletBalance:          "0.00241969",
				UnrealizedProfit:       "0.00000000",
				MarginBalance:          "0.00241969",
				MaintMargin:            "0.00000000",
				InitialMargin:          "0.00000000",
				PositionInitialMargin:  "0.00000000",
				OpenOrderInitialMargin: "0.00000000",
				MaxWithdrawAmount:      "0.00241969",
				CrossWalletBalance:     "0.00241969",
				CrossUnPnl:             "0.00000000",
				AvailableBalance:       "0.00241969",
			},
		},
		Positions: []*AccountPosition{
			{
				Symbol:                 "BTCUSD_201225",
				PositionAmt:            "0",
				InitialMargin:          "0",
				MaintMargin:            "0",
				UnrealizedProfit:       "0.00000000",
				PositionInitialMargin:  "0",
				OpenOrderInitialMargin: "0",
				Leverage:               "125",
				Isolated:               false,
				PositionSide:           "BOTH",
				EntryPrice:             "0.0",
				MaxQty:                 "50",
			},
			{
				Symbol:                 "BTCUSD_201225",
				PositionAmt:            "0",
				InitialMargin:          "0",
				MaintMargin:            "0",
				UnrealizedProfit:       "0.00000000",
				PositionInitialMargin:  "0",
				OpenOrderInitialMargin: "0",
				Leverage:               "125",
				Isolated:               false,
				PositionSide:           "LONG",
				EntryPrice:             "0.0",
				MaxQty:                 "50",
			},
			{
				Symbol:                 "BTCUSD_201225",
				PositionAmt:            "0",
				InitialMargin:          "0",
				MaintMargin:            "0",
				UnrealizedProfit:       "0.00000000",
				PositionInitialMargin:  "0",
				OpenOrderInitialMargin: "0",
				Leverage:               "125",
				Isolated:               false,
				PositionSide:           "SHORT",
				EntryPrice:             "0.0",
				MaxQty:                 "50",
			},
		},
		CanDeposit:  true,
		CanTrade:    true,
		CanWithdraw: true,
		FeeTier:     2,
		UpdateTime:  0,
	}
	s.assertAccountEqual(e, res)
}
// assertAccountEqual compares the expected Account e against the actual a
// field by field, including every asset and position.
//
// Fix: the PositionInitialMargin assertion previously compared
// e.Assets[i] against itself (e vs e), so it could never fail; it now
// checks the actual value. The label typo "Possition" is corrected too.
func (s *accountServiceTestSuite) assertAccountEqual(e, a *Account) {
	r := s.r()
	r.Equal(e.CanDeposit, a.CanDeposit, "CanDeposit")
	r.Equal(e.CanTrade, a.CanTrade, "CanTrade")
	r.Equal(e.CanWithdraw, a.CanWithdraw, "CanWithdraw")
	r.Equal(e.FeeTier, a.FeeTier, "FeeTier")
	r.Equal(e.UpdateTime, a.UpdateTime, "UpdateTime")
	r.Len(a.Assets, len(e.Assets))
	for i := 0; i < len(a.Assets); i++ {
		r.Equal(e.Assets[i].Asset, a.Assets[i].Asset, "Asset")
		r.Equal(e.Assets[i].AvailableBalance, a.Assets[i].AvailableBalance, "AvailableBalance")
		r.Equal(e.Assets[i].CrossUnPnl, a.Assets[i].CrossUnPnl, "CrossUnPnl")
		r.Equal(e.Assets[i].CrossWalletBalance, a.Assets[i].CrossWalletBalance, "CrossWalletBalance")
		r.Equal(e.Assets[i].InitialMargin, a.Assets[i].InitialMargin, "InitialMargin")
		r.Equal(e.Assets[i].MaintMargin, a.Assets[i].MaintMargin, "MaintMargin")
		r.Equal(e.Assets[i].MarginBalance, a.Assets[i].MarginBalance, "MarginBalance")
		r.Equal(e.Assets[i].MaxWithdrawAmount, a.Assets[i].MaxWithdrawAmount, "MaxWithdrawAmount")
		r.Equal(e.Assets[i].OpenOrderInitialMargin, a.Assets[i].OpenOrderInitialMargin, "OpenOrderInitialMargin")
		r.Equal(e.Assets[i].PositionInitialMargin, a.Assets[i].PositionInitialMargin, "PositionInitialMargin")
		r.Equal(e.Assets[i].UnrealizedProfit, a.Assets[i].UnrealizedProfit, "UnrealizedProfit")
		r.Equal(e.Assets[i].WalletBalance, a.Assets[i].WalletBalance, "WalletBalance")
	}
	r.Len(a.Positions, len(e.Positions))
	for i := 0; i < len(a.Positions); i++ {
		r.Equal(e.Positions[i].EntryPrice, a.Positions[i].EntryPrice, "EntryPrice")
		r.Equal(e.Positions[i].InitialMargin, a.Positions[i].InitialMargin, "InitialMargin")
		r.Equal(e.Positions[i].Isolated, a.Positions[i].Isolated, "Isolated")
		r.Equal(e.Positions[i].Leverage, a.Positions[i].Leverage, "Leverage")
		r.Equal(e.Positions[i].MaintMargin, a.Positions[i].MaintMargin, "MaintMargin")
		r.Equal(e.Positions[i].MaxQty, a.Positions[i].MaxQty, "MaxQty")
		r.Equal(e.Positions[i].OpenOrderInitialMargin, a.Positions[i].OpenOrderInitialMargin, "OpenOrderInitialMargin")
		r.Equal(e.Positions[i].PositionInitialMargin, a.Positions[i].PositionInitialMargin, "PositionInitialMargin")
		r.Equal(e.Positions[i].PositionSide, a.Positions[i].PositionSide, "PositionSide")
		r.Equal(e.Positions[i].Symbol, a.Positions[i].Symbol, "Symbol")
		r.Equal(e.Positions[i].UnrealizedProfit, a.Positions[i].UnrealizedProfit, "UnrealizedProfit")
		r.Equal(e.Positions[i].PositionAmt, a.Positions[i].PositionAmt, "PositionAmt")
	}
}
|
package main
// twoSum returns the indices of two distinct elements of nums summing to
// target, or nil when no such pair exists.
//
// A value-to-index map is built over the whole slice first (later
// occurrences win), then each position is checked for a complement at a
// different index — the same two-pass strategy as before, so the exact
// pair returned for inputs with several solutions is unchanged.
func twoSum(nums []int, target int) []int {
	lastIndex := make(map[int]int, len(nums))
	for i, v := range nums {
		lastIndex[v] = i
	}
	for i, v := range nums {
		if j, ok := lastIndex[target-v]; ok && j != i {
			return []int{i, j}
		}
	}
	return nil
}
|
package db
import (
"database/sql"
)
// Db is the package's shared database handle.
//
// NOTE(review): nothing here ever opens the connection — the init below
// is empty — so callers must assign Db before use or queries will
// nil-panic. Consider an explicit Open/Init function.
var Db *sql.DB
// init is intentionally empty; connection setup happens elsewhere.
func init() {
}
|
package main
// Lab 5. Order (Collections, and iterating)
// Requirements:
// As a lonely person, I would like to have a way to classify the store recipes in a cookbook
//
// Objective:
// 01 - Understand Iterating over collection
// 02 - Understand Package References
// 03 - Understand For loops
//
// Steps:
// 01 - Reference the cookbook and friends packages you have made.
// 02 - Create a new dish struct to be cooked
// 03 - Add a cake dish from your recipe book for each of your guests
func main() {
	// TODO: implement the lab steps listed in the header comments
	// (reference the cookbook/friends packages, create a dish, add a
	// cake per guest). Intentionally empty for now.
}
|
package pgsql
import (
"testing"
)
// TestInt4RangeArray exercises the int4rangearr valuer/scanner pairs for
// every supported Go element type (signed/unsigned ints, floats) plus raw
// string/[]byte passthrough. Each entry round-trips one input through the
// driver and compares it with the expected output.
func TestInt4RangeArray(t *testing.T) {
	testlist2{{
		valuer:  Int4RangeArrayFromIntArray2Slice,
		scanner: Int4RangeArrayToIntArray2Slice,
		data: []testdata{
			{
				input:  [][2]int{{-2147483648, 2147483647}, {0, 21}},
				output: [][2]int{{-2147483648, 2147483647}, {0, 21}}},
		},
	}, {
		valuer:  Int4RangeArrayFromInt8Array2Slice,
		scanner: Int4RangeArrayToInt8Array2Slice,
		data: []testdata{
			{
				input:  [][2]int8{{-128, 127}, {0, 21}},
				output: [][2]int8{{-128, 127}, {0, 21}}},
		},
	}, {
		valuer:  Int4RangeArrayFromInt16Array2Slice,
		scanner: Int4RangeArrayToInt16Array2Slice,
		data: []testdata{
			{
				input:  [][2]int16{{-32768, 32767}, {0, 21}},
				output: [][2]int16{{-32768, 32767}, {0, 21}}},
		},
	}, {
		valuer:  Int4RangeArrayFromInt32Array2Slice,
		scanner: Int4RangeArrayToInt32Array2Slice,
		data: []testdata{
			{
				input:  [][2]int32{{-2147483648, 2147483647}, {0, 21}},
				output: [][2]int32{{-2147483648, 2147483647}, {0, 21}}},
		},
	}, {
		valuer:  Int4RangeArrayFromInt64Array2Slice,
		scanner: Int4RangeArrayToInt64Array2Slice,
		data: []testdata{
			{
				input:  [][2]int64{{-2147483648, 2147483647}, {0, 21}},
				output: [][2]int64{{-2147483648, 2147483647}, {0, 21}}},
		},
	}, {
		valuer:  Int4RangeArrayFromUintArray2Slice,
		scanner: Int4RangeArrayToUintArray2Slice,
		data: []testdata{
			{
				input:  [][2]uint{{0, 2147483647}, {0, 21}},
				output: [][2]uint{{0, 2147483647}, {0, 21}}},
		},
	}, {
		valuer:  Int4RangeArrayFromUint8Array2Slice,
		scanner: Int4RangeArrayToUint8Array2Slice,
		data: []testdata{
			{
				input:  [][2]uint8{{0, 255}, {0, 21}},
				output: [][2]uint8{{0, 255}, {0, 21}}},
		},
	}, {
		valuer:  Int4RangeArrayFromUint16Array2Slice,
		scanner: Int4RangeArrayToUint16Array2Slice,
		data: []testdata{
			{
				input:  [][2]uint16{{0, 65535}, {0, 21}},
				output: [][2]uint16{{0, 65535}, {0, 21}}},
		},
	}, {
		valuer:  Int4RangeArrayFromUint32Array2Slice,
		scanner: Int4RangeArrayToUint32Array2Slice,
		data: []testdata{
			{
				input:  [][2]uint32{{0, 2147483647}, {0, 21}},
				output: [][2]uint32{{0, 2147483647}, {0, 21}}},
		},
	}, {
		valuer:  Int4RangeArrayFromUint64Array2Slice,
		scanner: Int4RangeArrayToUint64Array2Slice,
		data: []testdata{
			{
				input:  [][2]uint64{{0, 2147483647}, {0, 21}},
				output: [][2]uint64{{0, 2147483647}, {0, 21}}},
		},
	}, {
		valuer:  Int4RangeArrayFromFloat32Array2Slice,
		scanner: Int4RangeArrayToFloat32Array2Slice,
		data: []testdata{
			{
				input:  [][2]float32{{-2147483648.0, 214748364.0}, {0.0, 21.0}},
				output: [][2]float32{{-2147483648.0, 214748364.0}, {0.0, 21.0}}},
		},
	}, {
		valuer:  Int4RangeArrayFromFloat64Array2Slice,
		scanner: Int4RangeArrayToFloat64Array2Slice,
		data: []testdata{
			{
				input:  [][2]float64{{-2147483648.0, 2147483647.0}, {0.0, 21.0}},
				output: [][2]float64{{-2147483648.0, 2147483647.0}, {0.0, 21.0}}},
		},
	}, {
		data: []testdata{
			{
				input:  string(`{"[-2147483648,2147483647)","[0,21)"}`),
				output: string(`{"[-2147483648,2147483647)","[0,21)"}`)},
		},
	}, {
		data: []testdata{
			{
				input:  []byte(`{"[-2147483648,2147483647)","[0,21)"}`),
				output: []byte(`{"[-2147483648,2147483647)","[0,21)"}`)},
		},
	}}.execute(t, "int4rangearr")
}
|
package main
import (
"fmt"
"math"
)
// 410. 分割数组的最大值
// 给定一个非负整数数组和一个整数 m,你需要将这个数组分成 m 个非空的连续子数组。设计一个算法使得这 m 个子数组各自和的最大值最小。
// 注意:
// 数组长度 n 满足以下条件:
// 1 ≤ n ≤ 1000
// 1 ≤ m ≤ min(50, n)
// https://leetcode-cn.com/problems/split-array-largest-sum/
// main runs a quick sanity check of the binary-search solution.
func main() {
	fmt.Println(splitArray2([]int{7, 2, 5, 10, 8}, 2)) // expected: 18
}
// splitArray solves the problem with O(n^2*m) dynamic programming.
//
// dp[i][j] is the minimum achievable "largest subarray sum" when the
// first i numbers are split into j groups (j <= i). Letting the last
// group be elements k+1..i gives the transition
//
//	dp[i][j] = min over k of max(sums[i]-sums[k], dp[k][j-1])
//
// where sums is the prefix-sum array. (The unused named result parameter
// of the original signature has been dropped; the return type is
// unchanged.)
func splitArray(nums []int, m int) int {
	n := len(nums)
	// dp initialised to "infinity"; dp[0][0] = 0 is the only valid base.
	dp := make([][]int, n+1)
	for k := range dp {
		dp[k] = make([]int, m+1)
		for j := 0; j <= m; j++ {
			dp[k][j] = math.MaxInt64
		}
	}
	dp[0][0] = 0
	// sums[i] is the sum of the first i elements.
	sums := make([]int, n+1)
	for i := 0; i < n; i++ {
		sums[i+1] = sums[i] + nums[i]
	}
	for i := 1; i <= n; i++ {
		for j := 1; j <= m && j <= i; j++ {
			for k := 0; k < i; k++ {
				dp[i][j] = getMin(dp[i][j], getMax(sums[i]-sums[k], dp[k][j-1]))
			}
		}
	}
	return dp[n][m]
}
// splitArray2 solves the problem by binary searching on the answer — the
// better of the two approaches here.
//
// Any feasible answer lies in [max(nums), sum(nums)]. For a candidate cap
// mid, greedily count how many subarrays are needed so no subarray sum
// exceeds mid, then shrink or grow the search range. Runs in
// O(n * log(sum)). (The unused named result parameter of the original
// signature has been dropped; the return type is unchanged.)
func splitArray2(nums []int, m int) int {
	n := len(nums)
	left := nums[0]  // lower bound: the largest single element
	right := nums[0] // upper bound: the total sum
	for i := 1; i < n; i++ {
		right += nums[i]
		if nums[i] > left {
			left = nums[i]
		}
	}
	for left < right {
		mid := left + ((right - left) >> 1)
		// Greedily pack elements while the running sum stays <= mid.
		// count starts at 1 because the final, still-open group is never
		// counted inside the loop.
		count := 1
		subSum := 0
		for i := 0; i < n; i++ {
			subSum += nums[i]
			if subSum > mid {
				count++
				subSum = nums[i]
			}
		}
		if count > m { // too many groups: the candidate cap is too small
			left = mid + 1
		} else { // feasible: the cap may still be too large
			right = mid
		}
	}
	return left
}
// getMax returns the larger of its two arguments.
func getMax(a, b int) int {
	if b >= a {
		return b
	}
	return a
}
// getMin returns the smaller of its two arguments.
func getMin(a, b int) int {
	if b <= a {
		return b
	}
	return a
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.