text stringlengths 11 4.05M |
|---|
package main
import (
"fmt"
)
// test prints to stdout whether the given value is a nil interface.
// Note: an interface holding a typed nil pointer compares non-nil here;
// only a value with both nil type and nil value prints "cur is nil".
func test(cur interface{}) {
	msg := "cur is not nil"
	if cur == nil {
		msg = "cur is nil"
	}
	fmt.Println(msg)
}
// main demonstrates that a declared-but-unassigned interface variable is a
// true nil interface (nil type and nil value), so test prints "cur is nil".
func main() {
	var cur interface{}
	test(cur)
}
|
package main
import (
"bufio"
"fmt"
"io"
"os"
"strconv"
)
// main wires the solver to standard input and output.
func main() {
	solve(os.Stdin, os.Stdout)
}
// solve reads three lines from stdin — m (int), p (float64), x (int) —
// runs the probability DP in search, and prints the result.
// NOTE(review): scanner and parse errors are silently ignored; malformed
// input degrades to zero values — confirm inputs are always well-formed.
func solve(stdin io.Reader, stdout io.Writer) {
	sc := bufio.NewScanner(stdin)
	sc.Scan()
	m, _ := strconv.Atoi(sc.Text())
	sc.Scan()
	p, _ := strconv.ParseFloat(sc.Text(), 64)
	sc.Scan()
	x, _ := strconv.Atoi(sc.Text())
	ans := search(m, x, p)
	fmt.Fprintln(stdout, ans)
}
// search runs an m-level probability DP. The starting position x (scaled by
// target = 10^6) is mapped onto 1<<m + 1 discrete states; each level halves
// the state space, with the two boundary states absorbing (probability kept
// with weight 1) and interior states splitting with probability p upward and
// 1-p downward. Returns the probability mass at state 1 of the final level.
func search(m, x int, p float64) float64 {
	const target = 1000000
	// Level i has 1<<(m-i)+1 states.
	prob := make([][]float64, m+1)
	for level := range prob {
		prob[level] = make([]float64, 1<<(m-level)+1)
	}
	// All mass starts at the discretized position of x.
	prob[0][x*(1<<m)/target] = 1.0
	for level := 0; level < m; level++ {
		cur, next := prob[level], prob[level+1]
		for state, mass := range cur {
			switch {
			case state == 0:
				// Lower boundary is absorbing.
				next[0] += mass
			case state == len(cur)-1:
				// Upper boundary is absorbing.
				next[len(next)-1] += mass
			default:
				next[state/2+1] += mass * p
				next[(state-1)/2] += mass * (1 - p)
			}
		}
	}
	return prob[m][1]
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package graphics
import (
"context"
"github.com/google/go-cmp/cmp"
"chromiumos/tast/errors"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/display"
"chromiumos/tast/local/kmsvnc"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the KmsvncConnect test with the Tast framework. The test is
// restricted to devices with an internal display and requires Chrome.
func init() {
	testing.AddTest(&testing.Test{
		Func:         KmsvncConnect,
		LacrosStatus: testing.LacrosVariantUnknown,
		Desc:         "Connects to kmsvnc server and verifies server parameters",
		Contacts:     []string{"shaochuan@chromium.org", "uekawa@chromium.org"},
		Attr:         []string{"group:mainline"},
		HardwareDeps: hwdep.D(hwdep.InternalDisplay()),
		SoftwareDeps: []string{"chrome"},
		Fixture:      "chromeGraphics",
	})
}
// KmsvncConnect launches the kmsvnc server, connects to it, and verifies server parameters.
func KmsvncConnect(ctx context.Context, s *testing.State) {
	cr := s.FixtValue().(*chrome.Chrome)
	k, err := kmsvnc.NewKmsvnc(ctx, true)
	if err != nil {
		s.Fatal("Failed to start kmsvnc: ", err)
	}
	defer k.Stop(ctx)
	serverInit, err := k.Connect(ctx)
	if err != nil {
		s.Fatal("Failed to connect to kmsvnc server: ", err)
	}
	// Verify server parameters: framebuffer size must match the primary display.
	gotW, gotH := int(serverInit.FramebufferWidth), int(serverInit.FramebufferHeight)
	wantW, wantH, err := findDisplayWidthHeight(ctx, cr)
	if err != nil {
		// NOTE(review): s.Error does not abort, so the comparison below then
		// runs against zero want values — confirm this is intended.
		s.Error("Failed to find primary display size: ", err)
	}
	// The server pads the framebuffer width up to a multiple of 4.
	if wantW%4 != 0 {
		s.Logf("Screen width %d will be padded to be a multiple of 4", wantW)
		wantW += (4 - (wantW % 4))
	}
	if gotW != wantW || gotH != wantH {
		s.Errorf("Unexpected framebuffer size, got %dx%d, want %dx%d", gotW, gotH, wantW, wantH)
	}
	// Expected RFB pixel format advertised by the server: 32 bpp, depth 32,
	// little-endian true color, 8 bits per channel, shifts R=16/G=8/B=0.
	got := serverInit.PixelFormat
	want := []byte{
		0x20, // bits-per-pixel
		0x20, // depth
		0x00, // big-endian-flag
		0xff, // true-color-flag
		0x00, 0xff, // red-max
		0x00, 0xff, // green-max
		0x00, 0xff, // blue-max
		0x10, // red-shift
		0x08, // green-shift
		0x00, // blue-shift
	}
	if !cmp.Equal(got, want) {
		s.Errorf("Unexpected pixel format, got %v, want %v", got, want)
	}
}
// findDisplayWidthHeight returns the width/height of the primary display, which should match the VNC framebuffer size.
// Errors from the test API connection or display queries are wrapped with context.
func findDisplayWidthHeight(ctx context.Context, cr *chrome.Chrome) (int, int, error) {
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		return 0, 0, errors.Wrap(err, "failed to connect to test API")
	}
	info, err := display.GetPrimaryInfo(ctx, tconn)
	if err != nil {
		return 0, 0, errors.Wrap(err, "failed to get primary display info")
	}
	dm, err := info.GetSelectedMode()
	if err != nil {
		return 0, 0, errors.Wrap(err, "failed to get selected display mode")
	}
	// TODO(b/177965296): handle rotation on tablet devices?
	return dm.WidthInNativePixels, dm.HeightInNativePixels, nil
}
|
package main
import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"strings"
)
// FetchList will visit a plaintext URL and extract the list of links
// Suggested sources are gists or pastebin
//
// The body is split on newlines; each line that parses as an absolute URL
// (non-empty scheme) is included. Lines that fail to parse are skipped.
//
// Fix: the original ignored the HTTP status code, so an error page body
// (e.g. a 404 HTML page) would be parsed as a feed list.
func FetchList(source *url.URL) ([]url.URL, error) {
	resp, err := http.Get(source.String())
	if err != nil {
		return []url.URL{}, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return []url.URL{}, fmt.Errorf("fetching %s: unexpected status %s", source, resp.Status)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return []url.URL{}, err
	}
	rawFeeds := strings.Split(strings.TrimSpace(string(body)), "\n")
	var parsedFeeds []url.URL
	for _, v := range rawFeeds {
		// Trim per-line whitespace so CRLF sources parse cleanly.
		feedURL, err := url.Parse(strings.TrimSpace(v))
		if err == nil && feedURL.Scheme != "" {
			parsedFeeds = append(parsedFeeds, *feedURL)
		}
	}
	return parsedFeeds, nil
}
|
package main
// ListNode is a singly linked list node (LeetCode-style).
type ListNode struct {
	Val  int
	Next *ListNode
}
func main() {
	// Test cases (placeholder; nothing is executed here).
}
// removeNthFromEnd deletes the n-th node from the end of the list and
// returns the (possibly new) head. Uses the classic two-pointer technique
// with a dummy head so deleting the first node needs no special case.
// Assumes 1 <= n <= length of the list.
func removeNthFromEnd(head *ListNode, n int) *ListNode {
	dummy := &ListNode{Next: head}
	// Advance lead n+1 steps so trail stops just before the target node.
	lead := dummy
	for i := 0; i <= n; i++ {
		lead = lead.Next
	}
	// Move both pointers until lead runs off the end.
	trail := dummy
	for lead != nil {
		lead, trail = lead.Next, trail.Next
	}
	// Unlink the target node.
	trail.Next = trail.Next.Next
	return dummy.Next
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/jessevdk/go-flags"
"reflect"
"sort"
"strconv"
"time"
//"runtime/debug"
//"os"
"strings"
)
//type PrintOutput func(string)
// AnyType is an alias for the empty interface, used by If.
type AnyType interface{}

// If returns whenTrue when condition holds, otherwise whenFalse.
// Fix: the original parameters were named `true` and `false`, shadowing the
// predeclared boolean identifiers inside the function body. Renaming the
// parameters is call-site compatible (Go arguments are positional).
func If(condition bool, whenTrue AnyType, whenFalse AnyType) AnyType {
	if condition {
		return whenTrue
	}
	return whenFalse
}
// Collection is a minimal indexable sequence abstraction used by ToInterface.
type Collection interface {
	// Len reports the number of elements.
	Len() int
	// Get returns the element at index i.
	Get(i int) interface{}
}
// StringArray adapts []string to the Collection interface.
type StringArray []string
// Len reports the number of strings.
func (f StringArray) Len() int {
	return len(f)
}
// Get returns the string at index i, boxed as interface{}.
func (f StringArray) Get(i int) interface{} {
	return f[i]
}
// ToInterface copies every element of the Collection into a new
// []interface{} of the same length.
func ToInterface(s Collection) []interface{} {
	out := make([]interface{}, s.Len())
	for i := range out {
		out[i] = s.Get(i)
	}
	return out
}
//var colorableOut = colorable.NewColorableStdout()
// Write prints the arguments via the package-level write sink (fmt.Sprint semantics).
func Write(a ...interface{}) {
	write(fmt.Sprint(a...))
	//_, _ = colorableOut.Write([]byte(fmt.Sprint(a...)))
}
// WriteLn prints the arguments followed by a newline.
func WriteLn(a ...interface{}) {
	write(fmt.Sprint(a...) + "\n")
	//debug.PrintStack()
	//_, _ = colorableOut.Write([]byte(fmt.Sprint(a...) + "\n"))
}
// WriteF prints a formatted string (fmt.Sprintf semantics).
func WriteF(s string, a ...interface{}) {
	write(fmt.Sprintf(s, a...))
	//_, _ = colorableOut.Write([]byte(fmt.Sprintf(s, a...)))
}
// WriteFLn prints a formatted string followed by a newline.
func WriteFLn(s string, a ...interface{}) {
	write(fmt.Sprintf(s, a...) + "\n")
	//_, _ = colorableOut.Write([]byte(fmt.Sprintf(s, a...)))
}
// parseArg splits a command line into arguments, honoring quoted groups and
// backslash escapes. Rules, as implemented:
//   - unquoted spaces separate arguments; runs of spaces are collapsed
//   - a quote (' or ") opens a group only at the start of an argument; the
//     matching quote closes it (emitting the group, even if empty), while
//     the other quote kind inside a group is literal
//   - a quote in the middle of an unquoted word is kept literally
//   - a backslash escapes the next quote or backslash
//
// Fix: removed the redundant `break` statements — Go switch cases do not
// fall through, so they were no-ops (flagged by linters).
func parseArg(input string) []string {
	var cur []rune = []rune{}
	var list []string = []string{}
	escape := false
	group := false
	groupCh := '"'
	for _, char := range input {
		switch char {
		case ' ':
			if group {
				// Inside a quoted group spaces are literal.
				cur = append(cur, ' ')
			} else if len(cur) > 0 {
				// End of an unquoted argument.
				list = append(list, string(cur))
				cur = []rune{}
			}
			escape = false
		case '\'', '"':
			if escape {
				cur = append(cur, char)
				escape = false
			} else if group {
				if groupCh != char {
					// The other quote kind is literal inside a group.
					cur = append(cur, char)
				} else {
					// Closing quote: emit the group (possibly empty).
					group = false
					list = append(list, string(cur))
					cur = []rune{}
				}
			} else if len(cur) > 0 {
				// Quote in the middle of a word is literal.
				cur = append(cur, char)
			} else {
				// Quote at the start of an argument opens a group.
				group = true
				groupCh = char
			}
		case '\\':
			if escape {
				cur = append(cur, char)
				escape = false
			} else {
				escape = true
			}
		default:
			cur = append(cur, char)
			escape = false
		}
	}
	// Flush a trailing unquoted argument.
	if len(cur) > 0 {
		list = append(list, string(cur))
	}
	return list
}
// GetHostOpt parses host/port flags from args into a RedisHostOption
// (defaulting to localhost:6379) and returns the parse error, the option,
// and the remaining positional arguments. Unknown flags are passed through.
// NOTE(review): error-first return order is non-idiomatic Go, but callers
// depend on it; left unchanged.
func GetHostOpt(args []string) (error, *RedisHostOption, []string) {
	option := RedisHostOption{
		Host: "localhost",
		Port: 6379,
	}
	parser := flags.NewParser(&option, flags.PassDoubleDash|flags.IgnoreUnknown)
	cmd, e := parser.ParseArgs(args)
	return e, &option, cmd
}
// GetCmdOpt parses per-command flags (-f/--format, -r/--repeat, -d/--delay,
// --no-color) from args via callback setters and returns the populated
// RedisCommandOption together with the remaining positional arguments.
// Unknown flags are passed through untouched; parse errors are ignored.
//
// Fix: removed the redundant `break` statements inside the switch — Go
// cases do not fall through, so they were no-ops.
func GetCmdOpt(args []string) (*RedisCommandOption, []string) {
	commandOption := RedisCommandOption{}
	option := struct {
		FormatType func(string) `short:"f" long:"format" alias:"as" description:"format type: support: json, normal, raw"`
		Repeat func(uint) `short:"r" long:"repeat" description:"repeat time"`
		Delay func(float32) `short:"d" long:"delay" description:"delay in sec. (float)"`
		//RunAtEachNode bool `short:"e" long:"each-node" description:"run at each node"`
		//SplitResultForEachNode bool `short:"s" long:"split-node" description:"split result for each node"`
		NoColor bool `long:"no-color" description:"no color output"`
	}{
		FormatType: func(s string) {
			// Accept numeric or named aliases for each format.
			switch strings.ToLower(s) {
			case "1", "json":
				commandOption.FormatType = FormatJson
			case "2", "raw", "rawstring":
				commandOption.FormatType = FormatRawString
			}
		},
		Repeat: func(u uint) {
			commandOption.Repeat = u
		},
		Delay: func(u float32) {
			commandOption.Delay = u
		},
	}
	parser := flags.NewParser(&option, flags.PassDoubleDash|flags.IgnoreUnknown)
	parser.Name = "command [args[]]"
	argsx, _ := parser.ParseArgs(args)
	//commandOption.RunAtEachNode = option.RunAtEachNode
	//commandOption.SplitResultForEachNode = option.SplitResultForEachNode
	return &commandOption, argsx
}
// _DEBUG_ globally enables Debug output.
var _DEBUG_ = true
// Debug prints a green, timestamped debug line tagged with tag, when
// _DEBUG_ is enabled. The trailing escape sequence resets terminal color.
func Debug(tag string, a ...interface{}) {
	if _DEBUG_ {
		var k = append(append([]interface{}{interface{}("\033[32mDEBUG: " + time.Now().Format("2006-01-02 15:04:05") + " [" + tag + "] ")}, a...), "\033[0m")
		WriteLn(k...)
	}
}
// Format renders the result according to the requested format type.
// NOTE(review): all three cases currently delegate to FormatNormal (which
// itself branches on the format type); the `nc` (no-color) flag is unused.
// Unknown format values are silently ignored — confirm that is intended.
func (result *RedisExecuteResult) Format(f EnumFormatType, nc bool) {
	switch f {
	case FormatJson:
		result.FormatNormal(f)
	case FormatNormal:
		result.FormatNormal(f)
	case FormatRawString:
		result.FormatNormal(f)
	}
}
// FormatNormal prints the wrapped value via the recursive formatNormal
// renderer. A nil value prints "null" (JSON format) or "(nil)".
func (result *RedisExecuteResult) FormatNormal(ft EnumFormatType) {
	if result.Value == nil || *result.Value == nil {
		WriteLn(If(ft == FormatJson, "null", "(nil)"))
		return
	}
	value := reflect.ValueOf(*result.Value)
	formatNormal(value, "", 0, 0, ft)
}
// makePrefix builds the "N) " numbering prefix used when rendering list
// elements. With count == 0 (scalar context) the caller's prefix is returned
// unchanged. Otherwise indices are right-aligned to the width of count;
// element 0 keeps the caller's prefix text, later elements replace it with
// matching whitespace so nested lists line up.
func makePrefix(prefix string, ix int, count int) string {
	if count == 0 {
		return prefix
	}
	// Right-align the index to the digit width of count.
	pad := len(strconv.Itoa(count)) - len(strconv.Itoa(ix))
	label := strconv.Itoa(ix) + ") "
	if ix == 0 {
		// First element carries the visible prefix.
		return prefix + strings.Repeat(" ", pad) + label
	}
	// Subsequent elements indent by the prefix width instead.
	return strings.Repeat(" ", pad+len(prefix)) + label
}
// formatNormal recursively pretty-prints a reflected value. prefix/ix/count
// drive the "N) " numbering produced by makePrefix; ft selects the string
// rendering (raw / JSON / normal) in valueToString.
func formatNormal(value reflect.Value, prefix string, ix int, count int, ft EnumFormatType) {
	kind := value.Kind()
	switch kind {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
		reflect.Float32, reflect.Float64,
		reflect.Bool:
		// Scalars print directly with their numbering prefix.
		WriteLn(makePrefix(prefix, ix, count), value)
	case reflect.String:
		_s := valueToString(ft, value)
		WriteLn(makePrefix(prefix, ix, count), _s)
	case reflect.Array, reflect.Slice:
		_len := value.Len()
		px := makePrefix(prefix, ix, count)
		if _len == 0 {
			WriteLn(px, "(empty)")
			break
		}
		// Recurse with this element's prefix so nested lists indent.
		for i := 0; i < _len; i++ {
			v2 := value.Index(i)
			formatNormal(v2, px, i, _len, ft)
		}
	case reflect.Map:
		// Sort keys for deterministic output.
		// NOTE(review): assumes all map keys are strings; a non-string key
		// would panic in the Keys sort — confirm upstream guarantees this.
		iter := Keys(value.MapKeys())
		sort.Sort(iter)
		for _, v := range iter {
			WriteLn(makePrefix(prefix, ix, count) + v.Interface().(string) + " : " + valueToString(ft, value.MapIndex(v)))
		}
	case reflect.Interface:
		// Unwrap and format the concrete value.
		formatNormal(value.Elem(), prefix, ix, count, ft)
	case reflect.Invalid:
		WriteLn(makePrefix(prefix, ix, count), "(nil)")
	default:
		if value.IsNil() {
			WriteLn(makePrefix(prefix, ix, count), "(nil)")
		} else {
			WriteLn(makePrefix(prefix, ix, count), value)
		}
	}
}
// Keys implements sort.Interface over reflected map keys.
// Less assumes every key holds a string and will panic otherwise.
type Keys []reflect.Value
// Len reports the number of keys.
func (a Keys) Len() int {
	return len(a)
}
// Less orders keys lexicographically by their string values.
func (a Keys) Less(i, j int) bool {
	iv := a[i].Interface().(string)
	jv := a[j].Interface().(string)
	return iv < jv
}
// Swap exchanges two keys.
func (a Keys) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}
// valueToString renders a reflected (possibly interface-wrapped) string
// value according to formatType:
//   - FormatRawString: the string as-is
//   - FormatJson: if the string parses as JSON, re-indent it; otherwise
//     emit it as a JSON-quoted string
//   - anything else (and non-strings): MarshalIndent of the value
func valueToString(formatType EnumFormatType, value reflect.Value) string {
	var ifv interface{}
	if value.Kind() == reflect.Interface {
		// Unwrap so the Kind checks below see the concrete value.
		ifv = value.Interface()
	}
	Debug("vts", formatType, value.Kind(), value.Type(), reflect.ValueOf(ifv).Kind(), value)
	if value.Kind() == reflect.String || reflect.ValueOf(ifv).Kind() == reflect.String {
		if formatType == FormatRawString {
			return value.Interface().(string)
		}
		if formatType == FormatJson {
			var data interface{}
			sss := value.Interface().(string)
			e := json.Unmarshal([]byte(sss), &data)
			if e != nil {
				// Not valid JSON: quote it as a JSON string literal.
				bytes, _ := json.Marshal(sss)
				return string(bytes)
			}
			// Valid JSON: pretty-print with two-space indent.
			s, _ := json.MarshalIndent(&data, "", "  ")
			return string(s)
		}
	}
	// Fallback for FormatNormal and non-string values.
	s, _ := json.MarshalIndent(value.Interface(), "", "  ")
	return string(s)
}
|
package models
// Location describes a geographic search area as exchanged with the API.
// Field names keep their underscore form for compatibility with existing
// callers; JSON naming is handled by the struct tags.
type Location struct {
	Distance_unit string `json:"distance_unit"`
	Key string `json:"key"`
	Name string `json:"name"`
	Region string `json:"region"`
	Region_id string `json:"region_id"`
	Country string `json:"country"`
	Radius int `json:"radius"`
}
|
package srffwu
import (
"errors"
"fmt"
"math"
"strings"
"time"
)
// state tracks progress of the firmware-upgrade state machine in Start.
type state int
const (
	stateInit state = iota // waiting to identify the bootloader
	stateWaitingForFirstStatus // bootloader found, waiting for its status line
	stateSending // streaming firmware hex chunks
)
// Settings stores parameters for the firmware upgrade process.
type Settings struct {
	PortName string // serial port to open (e.g. /dev/ttyUSB0)
	FwFileName string // firmware file to flash
	Verbose bool // echo all serial traffic to stdout
}
// printProgress renders an in-place (carriage-return) upload progress line
// from the bootloader status. A zero fwSize would yield NaN, which is
// displayed as 0%.
func printProgress(bts BootloaderStatus) {
	p := (float64(bts.fwProcessed) / float64(bts.fwSize)) * 100.0
	if math.IsNaN(p) {
		p = 0
	}
	if p >= 100 {
		fmt.Printf("\rprogress: 100%% (waiting for result...)\n")
	} else {
		fmt.Printf("\rprogress: %.2f%%", p)
	}
}
// sendHexChunk writes the next firmware hex chunk (if any remain) to the
// serial port as a "dta" command.
func sendHexChunk(settings Settings) {
	// Sending the next hex chunk (if needed).
	hexChunk := FwDataGetHexChunk()
	if hexChunk != "" {
		if settings.Verbose {
			fmt.Println("out: dta " + hexChunk + "\r")
		}
		SerialPortWrite("dta " + hexChunk + "\r")
	}
}
// Start starts the firmware upgrade process. Returns true if a retry
// (function recall) is needed.
//
// State machine:
//   stateInit (default case)      — probe for the bootloader banner; if the
//                                   app is running instead, reboot to the
//                                   bootloader and request a retry.
//   stateWaitingForFirstStatus    — first status line must say "ready",
//                                   otherwise reboot and retry.
//   stateSending                  — stream hex chunks, printing progress,
//                                   until the status reports a final result.
// A 5-second silence triggers up to 3 "sta" status re-requests while
// sending; any other timeout aborts with an error.
func Start(settings Settings) (bool, error) {
	if err := SerialPortOpen(settings.PortName); err != nil {
		return false, fmt.Errorf("error opening serial port %s (%v), exiting\n", settings.PortName, err.Error())
	}
	defer SerialPortClose()
	c := make(chan string)
	go SerialPortReader(c)
	fmt.Println("identifying bootloader...")
	var fwuState state
	var bts BootloaderStatus
	var err error
	var deviceIdentifier string
	var triesOnTimeout int
	// Nudge the device so it prints its banner.
	if settings.Verbose {
		fmt.Println("out: \r\r\r")
	}
	SerialPortWrite("\r\r\r")
	for {
		select {
		case line := <-c:
			if settings.Verbose {
				fmt.Println("in: " + line)
			}
			toks := strings.Split(line, " ")
			switch fwuState {
			default:
				// stateInit: look for the "sercon: inf: SharkRF ..." banner.
				// NOTE(review): toks[5] is read after only checking len >= 4 —
				// a short banner line would panic; confirm banner format.
				if len(toks) >= 4 && toks[0] == "sercon:" && toks[1] == "inf:" && toks[2] == "SharkRF" {
					if toks[3] == "Bootloader" {
						fmt.Println("found bootloader: " + strings.Join(toks[2:5], " "))
						deviceIdentifier = toks[5]
						fmt.Println("device identifier: " + deviceIdentifier)
						fwuState = stateWaitingForFirstStatus
					} else {
						fmt.Println("app is running, rebooting device to bootloader")
						if settings.Verbose {
							fmt.Println("out: rbb\r")
						}
						SerialPortWrite("rbb\r")
						return true, nil
					}
				}
			case stateWaitingForFirstStatus:
				bts, err = BootloaderStatusLineParse(toks)
				if err == nil {
					if bts.dataproc != "ready" {
						BootloaderStatusPrint(bts)
						fmt.Println("bootloader is not in ready state, rebooting device")
						if settings.Verbose {
							fmt.Println("out: rbb\r")
						}
						SerialPortWrite("rbb\r")
						return true, nil
					}
					fmt.Println("bootloader is ready, starting firmware upgrade")
					fwuState = stateSending
					sendHexChunk(settings)
				}
			case stateSending:
				bts, err = BootloaderStatusLineParse(toks)
				if err == nil {
					printProgress(bts)
					// Checking results: anything other than a clean
					// "working" status means the upload has finished
					// (successfully or not).
					if bts.flash != "ok" || bts.configarea != "ok" || bts.dataproc != "working" {
						BootloaderStatusPrint(bts)
						if bts.flash == "ok" && bts.configarea == "ok" && bts.dataproc == "success" {
							fmt.Println("\nfirmware upgraded successfully! starting app.")
							if settings.Verbose {
								fmt.Println("out: rbt\r")
							}
							SerialPortWrite("rbt\r")
						} else {
							fmt.Println("\nfirmware upgrade failed!")
						}
						return false, nil
					}
					sendHexChunk(settings)
				}
			}
		case <-time.After(time.Second * 5):
			// Silence on the serial line: while sending, re-request status a
			// few times before giving up.
			if fwuState == stateSending && triesOnTimeout < 3 {
				triesOnTimeout++
				if settings.Verbose {
					fmt.Println("out: sta\r")
				}
				SerialPortWrite("sta\r")
			} else {
				return false, errors.New("timeout")
			}
		}
	}
}
|
package main
import (
"fmt"
"lib/util"
)
// main fires an asynchronous HTTP GET via util.CallURL and blocks until the
// goroutine signals completion on the done channel.
func main() {
	done := make(chan bool)
	fmt.Println("Starting http get...")
	go util.CallURL(done)
	<-done
	fmt.Println("Finished http get...")
}
|
package server
import (
"fmt"
"github.com/yacen/gong/context"
"net/http"
)
// serverHandler is an http.Handler that dispatches each request through the
// registered middleware chain.
type serverHandler struct {
	middlewares []Middleware
}
// ServeHTTP starts the middleware chain for the request. With no middleware
// registered it is a no-op (the response is left untouched).
// Restructured to an early return — `else` after a terminating `if` is
// non-idiomatic Go; behavior is unchanged.
func (h *serverHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) {
	if len(h.middlewares) == 0 {
		return
	}
	chain := &RealMiddlewareChain{middlewares: h.middlewares, index: 0}
	ctx := context.NewContext(res, req)
	chain.Next(ctx)
}
// Middleware processes a request context and may delegate to the rest of
// the chain via chain.Next.
type Middleware interface {
	Do(ctx *context.Context, chain Chain)
}
// Chain advances execution to the next middleware.
type Chain interface {
	// call next middleware
	Next(ctx *context.Context)
}
// RealMiddlewareChain is the concrete Chain: a middleware slice plus the
// index of the middleware to run next.
type RealMiddlewareChain struct {
	middlewares []Middleware
	index int
}
// Next runs the middleware at the current index, handing it a chain
// positioned at index+1; past the end it is a no-op.
// NOTE(review): the two fmt.Println calls look like leftover debug output —
// consider removing them (and the then-unused fmt import) in a follow-up.
func (c *RealMiddlewareChain) Next(ctx *context.Context) {
	if c.index >= len(c.middlewares) {
		return
	}
	fmt.Println(c.index)
	next := &RealMiddlewareChain{middlewares: c.middlewares, index: c.index + 1}
	middleware := c.middlewares[c.index]
	middleware.Do(ctx, next)
	fmt.Println(ctx)
}
// add appends a middleware to the chain's slice.
// NOTE(review): mutating a chain that is also used as a cursor (index) is
// unusual — confirm this is only called during setup.
func (c *RealMiddlewareChain) add(middleware Middleware) {
	c.middlewares = append(c.middlewares, middleware)
}
// MiddlewareFunc is the signature of a plain-function middleware.
type MiddlewareFunc func(ctx *context.Context, chain Chain)
// FunctionMiddleware adapts a MiddlewareFunc to the Middleware interface.
type FunctionMiddleware struct {
	Fn MiddlewareFunc
}
// Do invokes the wrapped function.
func (m *FunctionMiddleware) Do(ctx *context.Context, chain Chain) {
	m.Fn(ctx, chain)
}
|
package http
import (
"marketplace/transactions/domain"
"marketplace/transactions/internal/request"
"marketplace/transactions/internal/usecase"
"net/http"
"github.com/gin-gonic/gin"
"github.com/go-pg/pg/v10"
"github.com/sirupsen/logrus"
)
// GetMyTransactionsHandler returns a gin handler that loads the
// authenticated account's transactions (account is expected under the "acc"
// context key) and writes them as JSON. Failures are logged and reported as
// a bare 500.
// Fix: corrected the misspelled log message ("occured" -> "occurred").
func GetMyTransactionsHandler(db *pg.DB, cmd usecase.GetMyTransactionsCmd) gin.HandlerFunc {
	return func(c *gin.Context) {
		user := c.MustGet("acc").(domain.Account)
		transacs, err := cmd(db, user.Id)
		if err != nil {
			logrus.WithError(err).Error("An error has occurred.")
			c.Status(http.StatusInternalServerError)
			return
		}
		c.JSON(http.StatusOK, request.ConvertToResponse(transacs))
	}
}
|
package admin
import (
"errors"
"github.com/golang/mock/gomock"
"github.com/williamchang80/sea-apd/domain/user"
"github.com/williamchang80/sea-apd/dto/request/admin"
)
// Zero-value sentinels used to detect "empty" inputs in the mock methods.
var emptyAdmin = user.User{}
var emptyAdminRequest = admin.Admin{}
// MockUsecase is a hand-written gomock-style stub of the admin usecase.
type MockUsecase struct {
	ctrl *gomock.Controller
}
// RegisterAdmin mimics the usecase: an empty (zero-value) request fails,
// anything else succeeds.
func (m MockUsecase) RegisterAdmin(req admin.Admin) error {
	if req == emptyAdminRequest {
		return errors.New("Cannot register admin")
	}
	return nil
}
// NewMockUsecase constructs a MockUsecase bound to the given gomock
// controller. (Parameter renamed from `repo`: it is a controller, not a
// repository; call sites are positional and unaffected.)
func NewMockUsecase(ctrl *gomock.Controller) *MockUsecase {
	return &MockUsecase{
		ctrl: ctrl,
	}
}
|
package page_index
import (
. "Web/main_definitions"
"html/template"
"log"
"net/http"
)
// ------------------------------------------- Types ------------------------------------------- //
//
// IndexWebPage embeds the *WebPage type
// IndexWebPage implements the WebPageInterface via its Init() function
// More details in web_definitions.go
//
type IndexWebPage struct {
	*PageData // shared page data (embedded; see web_definitions.go)
	Formatting string // URL of the site CSS file
	AristosPicture string // URL of the headshot image
	ResumePage string // URL extension of the resume page (lazily resolved)
	ContactPage string // URL extension of the contact page (lazily resolved)
	ProjectPage string // URL extension of the projects page (lazily resolved)
	AboutPage string // URL extension of the about page (lazily resolved)
}
// ------------------------------------------- Public ------------------------------------------- //
// Init initializes the index page: registers it under "index" at URL
// "home/" with Handler as its handler, and resolves its static asset URLs.
func (p *IndexWebPage) Init(baseData PageData) WebPageInterface {
	p.PageData = NewWebPage(baseData, "index", "home/", p.Handler)
	p.AristosPicture = p.UrlStaticFolder + "Aristos_Headshot.jpg"
	p.Formatting = p.UrlStaticFolder + "formatting.css"
	return p
}
// Data exposes the page's common data fields.
func (p *IndexWebPage) Data() *PageData {
	return p.PageData
}
// Handler renders the index page: it lazily resolves links to the sibling
// pages on first request, parses the page's HTML template, and executes it
// with the page data.
// Fix: the original continued after a template parse error, so t.Execute
// would be called on a nil template and panic; now it logs and returns.
func (p *IndexWebPage) Handler(w http.ResponseWriter, r *http.Request) {
	// If this is the first time, get data from other pages.
	if p.ResumePage == "" {
		p.ResumePage = (*p.PageDict)["resume"].Data().UrlExtension
		p.ContactPage = (*p.PageDict)["contact"].Data().UrlExtension
		p.ProjectPage = (*p.PageDict)["projects"].Data().UrlExtension
		p.AboutPage = (*p.PageDict)["about"].Data().UrlExtension
	}
	// Create Golang http template from html file.
	t, err := template.ParseFiles(p.LocalHtmlFile)
	if err != nil {
		log.Print("template parsing error: ", err)
		return // executing a nil template would panic
	}
	// Pass in the page's data and execute the template.
	if err = t.Execute(w, *p); err != nil {
		log.Print("template executing error: ", err)
	}
}
|
package exchange
import (
"testing"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/exchange/entities"
"github.com/prebid/prebid-server/openrtb_ext"
"github.com/stretchr/testify/assert"
)
// TestSeatNonBidsAdd verifies nonBids.addBid: a nil map stays nil when no
// bid is supplied, is lazily created for the first bid, and accumulates
// multiple non-bids under the same seat.
func TestSeatNonBidsAdd(t *testing.T) {
	type fields struct {
		seatNonBidsMap map[string][]openrtb_ext.NonBid
	}
	type args struct {
		bid *entities.PbsOrtbBid
		nonBidReason int
		seat string
	}
	tests := []struct {
		name string
		fields fields
		args args
		want map[string][]openrtb_ext.NonBid
	}{
		{
			name: "nil-seatNonBidsMap",
			fields: fields{seatNonBidsMap: nil},
			args: args{},
			want: nil,
		},
		{
			name: "nil-seatNonBidsMap-with-bid-object",
			fields: fields{seatNonBidsMap: nil},
			args: args{bid: &entities.PbsOrtbBid{Bid: &openrtb2.Bid{}}, seat: "bidder1"},
			want: sampleSeatNonBidMap("bidder1", 1),
		},
		{
			name: "multiple-nonbids-for-same-seat",
			fields: fields{seatNonBidsMap: sampleSeatNonBidMap("bidder2", 1)},
			args: args{bid: &entities.PbsOrtbBid{Bid: &openrtb2.Bid{}}, seat: "bidder2"},
			want: sampleSeatNonBidMap("bidder2", 2),
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			snb := &nonBids{
				seatNonBidsMap: tt.fields.seatNonBidsMap,
			}
			snb.addBid(tt.args.bid, tt.args.nonBidReason, tt.args.seat)
			assert.Equalf(t, tt.want, snb.seatNonBidsMap, "expected seatNonBidsMap not nil")
		})
	}
}
// TestSeatNonBidsGet verifies nonBids.get: a populated receiver returns its
// seat non-bids; a nil receiver returns the zero value (nil slice).
func TestSeatNonBidsGet(t *testing.T) {
	type fields struct {
		snb *nonBids
	}
	tests := []struct {
		name string
		fields fields
		want []openrtb_ext.SeatNonBid
	}{
		{
			name: "get-seat-nonbids",
			fields: fields{&nonBids{sampleSeatNonBidMap("bidder1", 2)}},
			want: sampleSeatBids("bidder1", 2),
		},
		{
			name: "nil-seat-nonbids",
			fields: fields{nil},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := tt.fields.snb.get(); !assert.Equal(t, tt.want, got) {
				t.Errorf("seatNonBids.get() = %v, want %v", got, tt.want)
			}
		})
	}
}
// sampleSeatNonBidMap builds a one-seat map holding nonBidCount NonBid
// entries, each with a default (empty) prebid ext.
var sampleSeatNonBidMap = func(seat string, nonBidCount int) map[string][]openrtb_ext.NonBid {
	entries := make([]openrtb_ext.NonBid, nonBidCount)
	for i := range entries {
		entries[i] = openrtb_ext.NonBid{
			Ext: openrtb_ext.NonBidExt{Prebid: openrtb_ext.ExtResponseNonBidPrebid{Bid: openrtb_ext.NonBidObject{}}},
		}
	}
	return map[string][]openrtb_ext.NonBid{seat: entries}
}
// sampleSeatBids builds a single-element SeatNonBid slice for the given
// seat, containing nonBidCount default NonBid entries.
var sampleSeatBids = func(seat string, nonBidCount int) []openrtb_ext.SeatNonBid {
	entry := openrtb_ext.SeatNonBid{
		Seat: seat,
		NonBid: make([]openrtb_ext.NonBid, 0),
	}
	for i := 0; i < nonBidCount; i++ {
		entry.NonBid = append(entry.NonBid, openrtb_ext.NonBid{
			Ext: openrtb_ext.NonBidExt{Prebid: openrtb_ext.ExtResponseNonBidPrebid{Bid: openrtb_ext.NonBidObject{}}},
		})
	}
	return []openrtb_ext.SeatNonBid{entry}
}
|
package main
import (
"bytes"
"fmt"
"log"
"os"
"text/template"
"time"
"github.com/codegangsta/cli"
"github.com/nsf/termbox-go"
"github.com/yanfali/go-tvdb"
)
var (
	// RowTemplate renders one series result as a fixed-width table row.
	RowTemplate = `{{.SeriesName | printf "%-40s"}} {{.Language | printf "%-10s"}} {{.FirstAired | printf "%-15s"}} {{.Genre}}`
	// RowCompiled is RowTemplate compiled once at startup (panics on error).
	RowCompiled = template.Must(template.New("row").Parse(RowTemplate))
	// config holds the tvdb client configuration built from CLI flags.
	config tvdb.TvdbConfig
)
// ellipsisString truncates source so the result is at most length bytes,
// replacing the cut tail with "...". Strings that already fit are returned
// unchanged. Assumes length >= 4.
//
// Fix: the original built the format string with string(length-4), which
// converts the int to a single rune (e.g. 36 -> '$'), producing a malformed
// verb and garbage output. Use an explicit '*' precision argument instead.
func ellipsisString(source string, length int) string {
	if len(source) > length {
		return fmt.Sprintf("%.*s...", length-4, source)
	}
	return source
}
// printSeries renders one series as a table row using RowCompiled.
// Note: it mutates series.SeriesName, truncating it to 40 characters.
// Panics if template execution fails.
func printSeries(series *tvdb.Series) string {
	w := bytes.NewBuffer([]byte{})
	series.SeriesName = ellipsisString(series.SeriesName, 40)
	err := RowCompiled.Execute(w, series)
	if err != nil {
		panic(err)
	}
	return w.String()
}
// init installs a custom CLI help template before the app is constructed.
func init() {
	cli.AppHelpTemplate = `NAME:
{{.Name}} - {{.Usage}}
USAGE:
{{.Name}} {{if .Flags}}[global options] {{end}} "search title"
VERSION:
{{.Version}}{{if or .Author .Email}}
AUTHOR:{{if .Author}}
{{.Author}}{{if .Email}} - <{{.Email}}>{{end}}{{else}}
{{.Email}}{{end}}{{end}}
COMMANDS:
{{range .Commands}}{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}
{{end}}{{if .Flags}}
GLOBAL OPTIONS:
{{range .Flags}}{{.}}
{{end}}{{end}}
`
}
// main configures and runs the go-tvdb-cli app: it parses flags, searches
// thetvdb.com for the given title, and drives a termbox UI over the results
// until the user exits.
// Fix: the original called fmt.Errorf("error", err) and discarded the
// result, silently swallowing search errors; the error is now logged.
func main() {
	app := cli.NewApp()
	app.Name = "go-tvdb-cli"
	app.Usage = "make CLI queries against the thetvdb.com"
	app.Flags = []cli.Flag{
		cli.IntFlag{
			Name: "max-results, m",
			Value: 10,
			Usage: "Maximum Number of Results to Show",
		},
		cli.StringFlag{
			Name: "apikey, k",
			Value: "90CCCAB7A2B7509E",
			Usage: "thetvdb.com API key",
		},
		cli.StringFlag{
			Name: "language, l",
			Value: "en",
			Usage: "Default Language to search using",
		},
	}
	app.Action = func(c *cli.Context) {
		if len(c.Args()) == 0 {
			fmt.Printf("Error: Not enough parameters\n")
			cli.ShowAppHelp(c)
			return
		}
		// Build the tvdb config from flags.
		config = tvdb.NewDefaultTvdbConfig()
		if apiKey := c.String("apikey"); apiKey != "" {
			log.Printf("Using APIKEY %q\n", apiKey)
			config.ApiKey = apiKey
		}
		if lang := c.String("language"); lang != "en" {
			log.Printf("Using Language %q", lang)
			config.Language = lang
		}
		myTvdb := tvdb.NewTvdbWithConfig(config)
		results, err := myTvdb.SearchSeries(c.Args()[0], c.Int("max-results"))
		if err != nil {
			log.Printf("error searching series: %v", err)
		}
		var tx = &termboxState{results: &results}
		tx.consoleFn = func(*termboxState) string {
			return fmt.Sprintf("Displaying %d results out of %d", len(results.Series), c.Int("max-results"))
		}
		if len(results.Series) == 0 {
			log.Printf("No results found for %q", c.Args()[0])
			return
		}
		// Run the termbox event loop until the state machine returns nil.
		if err := termbox.Init(); err != nil {
			panic(err)
		}
		defer termbox.Close()
		updateScreen(tx, drawSeries)
		currentState := SeriesEventHandler
		ch := getPollEventChan()
	loop:
		for {
			select {
			case tx.ev = <-ch:
				if currentState = currentState(tx); currentState == nil {
					break loop
				}
			case <-time.After(time.Second / 2):
				// Idle tick: blink the cursor.
				cursorBlink(tx)
				termbox.Flush()
			}
		}
	}
	app.Run(os.Args)
}
// getPollEventChan pumps termbox events into a channel so the caller can
// select over them alongside timers.
// NOTE(review): the goroutine has no stop signal and blocks in PollEvent;
// it lives for the remainder of the process — acceptable here since the
// program exits shortly after the UI loop ends.
func getPollEventChan() chan termbox.Event {
	var ch = make(chan termbox.Event)
	go func() {
		for {
			ch <- termbox.PollEvent()
		}
	}()
	return ch
}
|
package animatedArr
import (
"time"
)
// InsertionSort sorts a.Data in place while maintaining the animation
// bookkeeping: highlighted indices, comparison/access counters, and a sleep
// after each swap so the UI can draw every step. Stops early if a.Sorted
// becomes true.
func (a *AnimArr) InsertionSort() {
	for i := 1; !a.Sorted && i < len(a.Data); i++ {
		a.PivotInd = i
		// Bubble Data[i] left while it is smaller than its left neighbor.
		for j := i; j > 0 && a.Data[j-1] > a.Data[j]; j-- {
			// NOTE(review): only iterations that swap are counted; the final
			// failing comparison of each pass is not — confirm intended.
			a.Comparisons++
			a.Active = j
			a.Active2 = j - 1
			a.ArrayAccesses += 2 // the two reads in the loop condition
			a.swapElements(j, j-1)
			time.Sleep(INST_SLEEP)
			a.totalSleepTime += INST_SLEEP.Seconds()
		}
	}
}
|
package base
import (
"log"
"net"
"time"
)
// Server accepts verified connections and hands them to sessions.
type Server struct {
	netSrv *NetServer // underlying TCP acceptor
	Proc *Processor // message processor shared by sessions
	exit chan bool // closed on Stop
	Manager *SessionManager // tracks live sessions
	verifyFunc func(pkt *Packet) bool // checks whether a connection is legitimate
}
// SetNetServer implements the ConnectHandler interface: it stores the
// owning NetServer.
func (self *Server) SetNetServer(ns *NetServer) {
	self.netSrv = ns
}
// NewConnect implements the ConnectHandler interface: each new connection
// is verified on its own goroutine.
func (self *Server) NewConnect(conn net.Conn) {
	go self.Verify(conn)
}
// Verify checks the legitimacy of a new connection.
// The first packet received is treated as the verification packet; on
// success a session is created and run, on failure (or a 10-second timeout)
// the connection is closed.
func (self *Server) Verify(conn net.Conn) {
	stream := NewPacketStream(conn)
	for {
		select {
		case pkt, ok := <-stream.ReadChan():
			if !ok {
				// Stream closed before any packet arrived.
				log.Printf("Session verify failed, can't get packet")
				return
			}
			if self.verifyFunc(pkt) {
				log.Printf("Session verify ok")
				session := newSession(stream, self)
				// The verification packet's MsgID doubles as the session id.
				session.SetID(int64(pkt.MsgID))
				self.Manager.AddSession(session)
				go session.Run()
			} else {
				stream.Close()
				log.Printf("Session verify failed, invalid token")
			}
			return
		case <-time.After(time.Second * 10):
			stream.Close()
			log.Printf("Session verify failed, timeout")
			return
		}
	}
}
// Stop signals shutdown by closing the exit channel. The argument is unused
// (required by the shutdown-callback signature).
func (self *Server) Stop(arg interface{}) {
	close(self.exit)
}
// CheckAccount is the default verify function: the packet payload must
// equal the hard-coded token "hehe".
func CheckAccount(pkt *Packet) bool {
	token := string(pkt.Data)
	return token == "hehe"
}
// NewServer builds a Server with the default account check and a
// 1000-session manager, registers its Stop with the application exit hook,
// and blocks serving on the given port. Returns the listen/serve error.
func NewServer(port int, pro *Processor) error {
	server := &Server{
		Proc: pro,
		exit: make(chan bool),
		Manager: NewSessionManager(1000),
		verifyFunc: CheckAccount,
	}
	ExitApplication(server.Stop, nil)
	ns := &NetServer{}
	err := ns.ListenAndServe(port, server)
	return err
}
|
package requests
import (
"encoding/json"
"fmt"
"net/url"
"strings"
"github.com/google/go-querystring/query"
"github.com/atomicjolt/canvasapi"
"github.com/atomicjolt/string_utils"
)
// UpdateSingleRubric Returns the rubric with the given id.
//
// Unfortuantely this endpoint does not return a standard Rubric object,
// instead it returns a hash that looks like
// { 'rubric': Rubric, 'rubric_association': RubricAssociation }
//
// This may eventually be deprecated in favor of a more standardized return
// value, but that is not currently planned.
// https://canvas.instructure.com/doc/api/rubrics.html
//
// Path Parameters:
// # Path.CourseID (Required) ID
// # Path.ID (Required) The id of the rubric
//
// Form Parameters:
// # Form.RubricAssociationID (Optional) The id of the object with which this rubric is associated
// # Form.Rubric.Title (Optional) The title of the rubric
// # Form.Rubric.FreeFormCriterionComments (Optional) Whether or not you can write custom comments in the ratings field for a rubric
// # Form.Rubric.SkipUpdatingPointsPossible (Optional) Whether or not to update the points possible
// # Form.RubricAssociation.AssociationID (Optional) The id of the object with which this rubric is associated
// # Form.RubricAssociation.AssociationType (Optional) . Must be one of Assignment, Course, AccountThe type of object this rubric is associated with
// # Form.RubricAssociation.UseForGrading (Optional) Whether or not the associated rubric is used for grade calculation
// # Form.RubricAssociation.HideScoreTotal (Optional) Whether or not the score total is displayed within the rubric.
// This option is only available if the rubric is not used for grading.
// # Form.RubricAssociation.Purpose (Optional) . Must be one of grading, bookmarkWhether or not the association is for grading (and thus linked to an assignment)
// or if it's to indicate the rubric should appear in its context
// # Form.Rubric.Criteria (Optional) An indexed Hash of RubricCriteria objects where the keys are integer ids and the values are the RubricCriteria objects
//
// UpdateSingleRubric carries the path and form parameters for the
// "update a single rubric" endpoint (see the comment block above for the
// full parameter documentation).
type UpdateSingleRubric struct {
	Path struct {
		CourseID string `json:"course_id" url:"course_id,omitempty"` // (Required)
		ID int64 `json:"id" url:"id,omitempty"` // (Required)
	} `json:"path"`
	Form struct {
		RubricAssociationID int64 `json:"rubric_association_id" url:"rubric_association_id,omitempty"` // (Optional)
		Rubric struct {
			Title string `json:"title" url:"title,omitempty"` // (Optional)
			FreeFormCriterionComments bool `json:"free_form_criterion_comments" url:"free_form_criterion_comments,omitempty"` // (Optional)
			SkipUpdatingPointsPossible bool `json:"skip_updating_points_possible" url:"skip_updating_points_possible,omitempty"` // (Optional)
			Criteria map[string](interface{}) `json:"criteria" url:"criteria,omitempty"` // (Optional)
		} `json:"rubric" url:"rubric,omitempty"`
		RubricAssociation struct {
			AssociationID int64 `json:"association_id" url:"association_id,omitempty"` // (Optional)
			AssociationType string `json:"association_type" url:"association_type,omitempty"` // (Optional) . Must be one of Assignment, Course, Account
			UseForGrading bool `json:"use_for_grading" url:"use_for_grading,omitempty"` // (Optional)
			HideScoreTotal bool `json:"hide_score_total" url:"hide_score_total,omitempty"` // (Optional)
			Purpose string `json:"purpose" url:"purpose,omitempty"` // (Optional) . Must be one of grading, bookmark
		} `json:"rubric_association" url:"rubric_association,omitempty"`
	} `json:"form"`
}
// GetMethod returns the HTTP method for this request.
func (t *UpdateSingleRubric) GetMethod() string {
	return "PUT"
}
// GetURLPath returns the endpoint path with the course and rubric ids
// substituted in.
func (t *UpdateSingleRubric) GetURLPath() string {
	return fmt.Sprintf("courses/%v/rubrics/%v", t.Path.CourseID, t.Path.ID)
}
// GetQuery returns the query string; this endpoint uses none.
func (t *UpdateSingleRubric) GetQuery() (string, error) {
	return "", nil
}
// GetBody encodes the form parameters as url.Values for the request body.
func (t *UpdateSingleRubric) GetBody() (url.Values, error) {
	return query.Values(t.Form)
}
// GetJSON marshals the form parameters to JSON.
// Fix: the original returned (nil, nil) on a marshal failure, silently
// swallowing the error; the error is now propagated.
func (t *UpdateSingleRubric) GetJSON() ([]byte, error) {
	j, err := json.Marshal(t.Form)
	if err != nil {
		return nil, err
	}
	return j, nil
}
// HasErrors validates the required path parameter and the
// enum-constrained form fields, returning a single error that lists
// every violation, or nil when the request is valid.
func (t *UpdateSingleRubric) HasErrors() error {
	errs := []string{}
	if t.Path.CourseID == "" {
		errs = append(errs, "'Path.CourseID' is required")
	}
	if t.Form.RubricAssociation.AssociationType != "" && !string_utils.Include([]string{"Assignment", "Course", "Account"}, t.Form.RubricAssociation.AssociationType) {
		// Message names the actual field; the old text only said
		// "RubricAssociation", which was ambiguous with Purpose below.
		errs = append(errs, "RubricAssociation.AssociationType must be one of Assignment, Course, Account")
	}
	if t.Form.RubricAssociation.Purpose != "" && !string_utils.Include([]string{"grading", "bookmark"}, t.Form.RubricAssociation.Purpose) {
		errs = append(errs, "RubricAssociation.Purpose must be one of grading, bookmark")
	}
	if len(errs) > 0 {
		// Constant format string so joined messages can never be
		// misread as printf directives (flagged by go vet otherwise).
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do sends this request through the given Canvas client, discarding
// the response body and reporting only the transport/API error.
func (t *UpdateSingleRubric) Do(c *canvasapi.Canvas) error {
	if _, err := c.SendRequest(t); err != nil {
		return err
	}
	return nil
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mongo
import (
"context"
"github.com/apache/servicecomb-kie/server/datasource/mongo/rbac"
rbacdao "github.com/apache/servicecomb-kie/server/datasource/rbac"
dmongo "github.com/go-chassis/cari/db/mongo"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/x/bsonx"
"github.com/apache/servicecomb-kie/server/datasource"
"github.com/apache/servicecomb-kie/server/datasource/mongo/counter"
"github.com/apache/servicecomb-kie/server/datasource/mongo/history"
"github.com/apache/servicecomb-kie/server/datasource/mongo/kv"
"github.com/apache/servicecomb-kie/server/datasource/mongo/model"
"github.com/apache/servicecomb-kie/server/datasource/mongo/track"
)
// Broker is the MongoDB-backed datasource broker; it is stateless and
// hands out per-entity DAO implementations.
type Broker struct {
}
// NewFrom constructs the mongo Broker after initializing all required
// collections. The config parameter is currently unused; it is kept to
// satisfy the datasource plugin constructor signature.
func NewFrom(c *datasource.Config) (datasource.Broker, error) {
	if err := ensureDB(); err != nil {
		return nil, err
	}
	return &Broker{}, nil
}
// GetRevisionDao returns the mongo-backed revision counter DAO.
func (*Broker) GetRevisionDao() datasource.RevisionDao {
	return new(counter.Dao)
}
// GetKVDao returns the mongo-backed key-value DAO.
func (*Broker) GetKVDao() datasource.KVDao {
	return new(kv.Dao)
}
// GetHistoryDao returns the mongo-backed kv history DAO.
func (*Broker) GetHistoryDao() datasource.HistoryDao {
	return new(history.Dao)
}
// GetTrackDao returns the mongo-backed polling track DAO.
func (*Broker) GetTrackDao() datasource.TrackDao {
	return new(track.Dao)
}
// GetRbacDao returns the mongo-backed RBAC DAO.
func (*Broker) GetRbacDao() rbacdao.Dao {
	return new(rbac.Dao)
}
// ensureDB initializes every MongoDB collection (validators and
// indexes) the broker depends on.
//
// It fails fast if the revision counter cannot be initialized: the
// original code captured that error first but still ran all remaining
// setup before returning it, doing work against a store that had
// already failed.
func ensureDB() error {
	if err := ensureRevisionCounter(); err != nil {
		return err
	}
	ensureKV()
	ensureKVRevision()
	ensureView()
	ensureKVLongPolling()
	return nil
}
// ensureRevisionCounter creates the revision counter collection with a
// JSON-schema validator and a unique (name, domain) index, then upserts
// the "revision_counter" document for the "default" domain with
// count=1. It returns the error from the upsert, if any.
func ensureRevisionCounter() error {
	jsonSchema := bson.M{
		"bsonType": "object",
		"required": []string{"name", "domain", "count"},
	}
	validator := bson.M{
		"$jsonSchema": jsonSchema,
	}
	// Unique index guarantees exactly one counter per (name, domain).
	revisionCounterIndex := buildIndexDoc("name", "domain")
	revisionCounterIndex.Options = options.Index().SetUnique(true)
	dmongo.EnsureCollection(model.CollectionCounter, validator, []mongo.IndexModel{revisionCounterIndex})
	// NOTE(review): $set resets count to 1 on every startup, even for an
	// existing counter; $setOnInsert may have been intended — confirm.
	_, err := dmongo.GetClient().GetDB().Collection(model.CollectionCounter).UpdateOne(context.Background(),
		bson.M{"name": "revision_counter", "domain": "default"},
		bson.D{
			{Key: "$set", Value: bson.D{
				{Key: "count", Value: 1},
			}},
		}, options.Update().SetUpsert(true))
	return err
}
// ensureKV sets up the key-value collection: a schema validator
// requiring the core fields and a unique index on the kv id.
func ensureKV() {
	validator := bson.M{
		"$jsonSchema": bson.M{
			"bsonType": "object",
			"required": []string{"key", "domain", "project", "id"},
		},
	}
	idIndex := buildIndexDoc("id")
	idIndex.Options = options.Index().SetUnique(true)
	dmongo.EnsureCollection(model.CollectionKV, validator, []mongo.IndexModel{idIndex})
}
// ensureKVRevision sets up the kv revision collection with a TTL index
// that expires documents seven days after their delete_time.
func ensureKVRevision() {
	const weekInSeconds = 7 * 24 * 3600
	ttlIndex := buildIndexDoc("delete_time")
	ttlIndex.Options = options.Index().SetExpireAfterSeconds(weekInSeconds)
	dmongo.EnsureCollection(model.CollectionKVRevision, nil, []mongo.IndexModel{ttlIndex})
}
// ensureView sets up the view collection: a schema validator, a unique
// index on the view id, and a unique compound index over
// (display, domain, project) so display names are unique per project.
func ensureView() {
	validator := bson.M{
		"$jsonSchema": bson.M{
			"bsonType": "object",
			"required": []string{"id", "domain", "project", "display"},
		},
	}
	idIndex := buildIndexDoc("id")
	idIndex.Options = options.Index().SetUnique(true)
	displayIndex := buildIndexDoc("display", "domain", "project")
	displayIndex.Options = options.Index().SetUnique(true)
	dmongo.EnsureCollection(model.CollectionView, validator, []mongo.IndexModel{idIndex, displayIndex})
}
// ensureKVLongPolling sets up the long-polling detail collection: a
// schema validator, a 7-day TTL index on timestamp, and a unique
// compound index over (revision, domain, session_id).
func ensureKVLongPolling() {
	validator := bson.M{
		"$jsonSchema": bson.M{
			"bsonType": "object",
			"required": []string{"id", "revision", "session_id", "url_path"},
		},
	}
	ttlIndex := buildIndexDoc("timestamp")
	ttlIndex.Options = options.Index().SetExpireAfterSeconds(7 * 24 * 3600)
	sessionIndex := buildIndexDoc("revision", "domain", "session_id")
	sessionIndex.Options = options.Index().SetUnique(true)
	dmongo.EnsureCollection(model.CollectionPollingDetail, validator, []mongo.IndexModel{ttlIndex, sessionIndex})
}
// buildIndexDoc builds an ascending compound index model over the
// given field names, in the order provided.
func buildIndexDoc(keys ...string) mongo.IndexModel {
	doc := bsonx.Doc{}
	for _, k := range keys {
		doc = doc.Append(k, bsonx.Int32(1))
	}
	return mongo.IndexModel{Keys: doc}
}
// init registers this implementation as the "mongo" datasource plugin
// so it can be selected via configuration.
func init() {
	datasource.RegisterPlugin("mongo", NewFrom)
}
|
package dvid
import (
"reflect"
"testing"
. "github.com/janelia-flyem/go/gocheck"
)
// VolumeTest is the shared fixture for the gocheck-based RLE tests.
type VolumeTest struct {
	// rles is the run-length set built once in SetUpSuite.
	rles RLEs
	// encoding is the MarshalBinary serialization of rles.
	encoding []byte
}

// Register the suite with gocheck so its Test* methods run via "go test".
var _ = Suite(&VolumeTest{})
// SetUpSuite builds the shared RLE fixture and its serialized form
// once, before any test in the suite runs.
func (s *VolumeTest) SetUpSuite(c *C) {
	s.rles = RLEs{
		{Point3d{2, 3, 4}, 20},
		{Point3d{4, 4, 4}, 14},
		{Point3d{1, 3, 5}, 20},
	}
	encoding, err := s.rles.MarshalBinary()
	c.Assert(err, IsNil)
	s.encoding = encoding
}
// TestRLE round-trips the fixture through MarshalBinary/UnmarshalBinary,
// checks the voxel/run statistics, and verifies that Add merges an
// overlapping RLE set correctly.
func (s *VolumeTest) TestRLE(c *C) {
	serialization, err := s.rles.MarshalBinary()
	c.Assert(err, IsNil)
	var obtained RLEs
	err = obtained.UnmarshalBinary(serialization)
	c.Assert(err, IsNil)
	// Idiomatic index-only range; "for i, _ := range" is flagged by gofmt -s.
	for i := range s.rles {
		c.Assert(s.rles[i], DeepEquals, obtained[i])
	}
	numVoxels, numRuns := obtained.Stats()
	c.Assert(numVoxels, Equals, uint64(54))
	c.Assert(numRuns, Equals, int32(3))
	toAdd := RLEs{
		{Point3d{0, 3, 4}, 14},
		{Point3d{10, 3, 4}, 14},
		{Point3d{8, 5, 7}, 13},
	}
	// The first added run abuts/overlaps the fixture's first run, so the
	// merged result coalesces them into a single 24-long run.
	expectedRLEs := RLEs{
		{Point3d{0, 3, 4}, 24},
		{Point3d{4, 4, 4}, 14},
		{Point3d{1, 3, 5}, 20},
		{Point3d{8, 5, 7}, 13},
	}
	voxelsAdded := s.rles.Add(toAdd)
	c.Assert(voxelsAdded, Equals, int64(17))
	c.Assert(s.rles, DeepEquals, expectedRLEs)
}
// TestSparseVol loads serialized RLEs into a SparseVol and checks its
// size and bounding box, then clears it and reloads a second RLE set to
// verify Clear fully resets the volume state.
func (s *VolumeTest) TestSparseVol(c *C) {
	var vol SparseVol
	err := vol.AddSerializedRLEs(s.encoding)
	c.Assert(err, IsNil)
	c.Assert(vol.Size(), Equals, Point3d{21, 2, 2})
	c.Assert(vol.MinimumPoint3d(), Equals, Point3d{1, 3, 4})
	c.Assert(vol.MaximumPoint3d(), Equals, Point3d{21, 4, 5})
	vol.Clear()
	// After Clear, the bounds must reflect only the new RLEs, with no
	// residue from the first load.
	newrles := RLEs{
		{Point3d{32, 43, 54}, 20},
		{Point3d{34, 44, 54}, 14},
	}
	encoding, err := newrles.MarshalBinary()
	c.Assert(err, IsNil)
	err = vol.AddSerializedRLEs(encoding)
	c.Assert(err, IsNil)
	c.Assert(vol.Size(), Equals, Point3d{20, 2, 1})
	c.Assert(vol.MinimumPoint3d(), Equals, Point3d{32, 43, 54})
	c.Assert(vol.MaximumPoint3d(), Equals, Point3d{51, 44, 54})
}
// TestNormalization checks that Normalize coalesces adjacent and
// overlapping runs into the expected canonical RLE set.
func TestNormalization(t *testing.T) {
	got := denormRLEs.Normalize()
	if !reflect.DeepEqual(got, expectedNorm) {
		t.Errorf("Normalization of RLEs failed. Expected:\n%s\nGot:%s\n", expectedNorm, got)
	}
}
var (
	// denormRLEs holds overlapping and adjacent runs on the same scanlines
	// that Normalize should merge.
	denormRLEs = RLEs{
		NewRLE(Point3d{20, 30, 40}, 10),
		NewRLE(Point3d{30, 30, 40}, 2),
		NewRLE(Point3d{32, 30, 40}, 3),
		NewRLE(Point3d{38, 30, 40}, 7),
		NewRLE(Point3d{45, 30, 41}, 4),
		NewRLE(Point3d{17, 30, 42}, 1),
		NewRLE(Point3d{18, 30, 42}, 5),
	}
	// expectedNorm is the canonical form: the first three runs coalesce
	// into one (20..34), as do the last two (17..22).
	expectedNorm = RLEs{
		NewRLE(Point3d{20, 30, 40}, 15),
		NewRLE(Point3d{38, 30, 40}, 7),
		NewRLE(Point3d{45, 30, 41}, 4),
		NewRLE(Point3d{17, 30, 42}, 6),
	}
)
// TestSplit checks RLEs.Split for two cases: splitting a body by an
// identical copy (which must erase it completely), and splitting by a
// set with edge-case differences (which must yield the expected
// normalized remainder).
func TestSplit(t *testing.T) {
	modified, err := bodyRLEs.Split(splitDupRLEs)
	if err != nil {
		t.Errorf("Bad split on dup case: %v\n", err)
	}
	// The original had two identical len(modified) != 0 checks here,
	// reporting the same condition twice; a single check suffices.
	if len(modified) != 0 {
		t.Errorf("Expected split to give no modified RLEs, but got %d RLEs\n", len(modified))
	}
	// Test edge cases
	modified, err = bodyRLEs.Split(splitTestRLEs)
	if err != nil {
		t.Errorf("Bad split on edge cases: %v\n", err)
	}
	if len(modified) == 0 {
		t.Errorf("Expected split to say edge cases were not duplicate but got no modifications\n")
	}
	// Compare against the normalized expectation and report the first
	// mismatching run for easier debugging.
	expectedNorm := expectedModRLEs.Normalize()
	if !reflect.DeepEqual(modified, expectedNorm) {
		for i, rle := range modified {
			if !reflect.DeepEqual(rle, expectedNorm[i]) {
				t.Errorf("Edge case error found. Expected RLE at pos %d: %s, got %s\n", i, expectedNorm[i], rle)
				break
			}
		}
	}
}
var (
splitTestRLEs = RLEs{
NewRLE(Point3d{3902, 5007, 6594}, 1),
NewRLE(Point3d{3904, 5007, 6594}, 1),
NewRLE(Point3d{3912, 5007, 6594}, 1),
NewRLE(Point3d{3869, 5018, 6594}, 3),
NewRLE(Point3d{3872, 5018, 6594}, 2),
NewRLE(Point3d{3886, 5018, 6594}, 9),
NewRLE(Point3d{3905, 5018, 6594}, 1),
NewRLE(Point3d{3912, 5018, 6594}, 3),
NewRLE(Point3d{3936, 5018, 6594}, 2),
NewRLE(Point3d{3866, 5019, 6594}, 6),
NewRLE(Point3d{3872, 5019, 6594}, 2),
}
expectedModRLEs = RLEs{
NewRLE(Point3d{3885, 5001, 6594}, 2),
NewRLE(Point3d{3922, 5001, 6594}, 2),
NewRLE(Point3d{3884, 5002, 6594}, 5),
NewRLE(Point3d{3922, 5002, 6594}, 4),
NewRLE(Point3d{3928, 5002, 6594}, 2),
NewRLE(Point3d{3883, 5003, 6594}, 7),
NewRLE(Point3d{3921, 5003, 6594}, 10),
NewRLE(Point3d{3881, 5004, 6594}, 9),
NewRLE(Point3d{3920, 5004, 6594}, 13),
NewRLE(Point3d{3879, 5005, 6594}, 11),
NewRLE(Point3d{3920, 5005, 6594}, 14),
NewRLE(Point3d{3877, 5006, 6594}, 13),
NewRLE(Point3d{3918, 5006, 6594}, 16),
NewRLE(Point3d{3875, 5007, 6594}, 13),
NewRLE(Point3d{3901, 5007, 6594}, 1),
NewRLE(Point3d{3903, 5007, 6594}, 1),
NewRLE(Point3d{3914, 5007, 6594}, 19),
NewRLE(Point3d{3873, 5008, 6594}, 14),
NewRLE(Point3d{3901, 5008, 6594}, 3),
NewRLE(Point3d{3904, 5008, 6594}, 29),
NewRLE(Point3d{3871, 5009, 6594}, 1),
NewRLE(Point3d{3872, 5009, 6594}, 16),
NewRLE(Point3d{3901, 5009, 6594}, 3),
NewRLE(Point3d{3904, 5009, 6594}, 29),
NewRLE(Point3d{3870, 5010, 6594}, 2),
NewRLE(Point3d{3872, 5010, 6594}, 16),
NewRLE(Point3d{3900, 5010, 6594}, 4),
NewRLE(Point3d{3904, 5010, 6594}, 29),
NewRLE(Point3d{3868, 5011, 6594}, 4),
NewRLE(Point3d{3872, 5011, 6594}, 15),
NewRLE(Point3d{3901, 5011, 6594}, 3),
NewRLE(Point3d{3904, 5011, 6594}, 30),
NewRLE(Point3d{3867, 5012, 6594}, 5),
NewRLE(Point3d{3872, 5012, 6594}, 15),
NewRLE(Point3d{3901, 5012, 6594}, 3),
NewRLE(Point3d{3904, 5012, 6594}, 31),
NewRLE(Point3d{3867, 5013, 6594}, 5),
NewRLE(Point3d{3872, 5013, 6594}, 16),
NewRLE(Point3d{3900, 5013, 6594}, 4),
NewRLE(Point3d{3904, 5013, 6594}, 31),
NewRLE(Point3d{3867, 5014, 6594}, 5),
NewRLE(Point3d{3872, 5014, 6594}, 16),
NewRLE(Point3d{3897, 5014, 6594}, 7),
NewRLE(Point3d{3904, 5014, 6594}, 31),
NewRLE(Point3d{3867, 5015, 6594}, 5),
NewRLE(Point3d{3872, 5015, 6594}, 8),
NewRLE(Point3d{3884, 5015, 6594}, 9),
NewRLE(Point3d{3897, 5015, 6594}, 7),
NewRLE(Point3d{3904, 5015, 6594}, 32),
NewRLE(Point3d{3867, 5016, 6594}, 5),
NewRLE(Point3d{3872, 5016, 6594}, 5),
NewRLE(Point3d{3885, 5016, 6594}, 19),
NewRLE(Point3d{3904, 5016, 6594}, 32),
NewRLE(Point3d{3936, 5016, 6594}, 1),
NewRLE(Point3d{3867, 5017, 6594}, 5),
NewRLE(Point3d{3872, 5017, 6594}, 3),
NewRLE(Point3d{3885, 5017, 6594}, 19),
NewRLE(Point3d{3904, 5017, 6594}, 3),
NewRLE(Point3d{3908, 5017, 6594}, 28),
NewRLE(Point3d{3936, 5017, 6594}, 2),
NewRLE(Point3d{3866, 5018, 6594}, 3),
NewRLE(Point3d{3874, 5018, 6594}, 1),
NewRLE(Point3d{3895, 5018, 6594}, 9),
NewRLE(Point3d{3904, 5018, 6594}, 1),
NewRLE(Point3d{3909, 5018, 6594}, 3),
NewRLE(Point3d{3915, 5018, 6594}, 21),
NewRLE(Point3d{3938, 5018, 6594}, 1),
NewRLE(Point3d{3881, 5019, 6594}, 2),
NewRLE(Point3d{3886, 5019, 6594}, 18),
NewRLE(Point3d{3904, 5019, 6594}, 1),
NewRLE(Point3d{3910, 5019, 6594}, 26),
NewRLE(Point3d{3936, 5019, 6594}, 4),
NewRLE(Point3d{3865, 5020, 6594}, 7),
NewRLE(Point3d{3872, 5020, 6594}, 2),
NewRLE(Point3d{3880, 5020, 6594}, 5),
NewRLE(Point3d{3886, 5020, 6594}, 18),
NewRLE(Point3d{3904, 5020, 6594}, 2),
NewRLE(Point3d{3911, 5020, 6594}, 2),
NewRLE(Point3d{3914, 5020, 6594}, 22),
NewRLE(Point3d{3936, 5020, 6594}, 5),
NewRLE(Point3d{3865, 5021, 6594}, 7),
NewRLE(Point3d{3872, 5021, 6594}, 1),
NewRLE(Point3d{3879, 5021, 6594}, 25),
NewRLE(Point3d{3904, 5021, 6594}, 5),
NewRLE(Point3d{3914, 5021, 6594}, 22),
NewRLE(Point3d{3936, 5021, 6594}, 5),
NewRLE(Point3d{3865, 5022, 6594}, 7),
NewRLE(Point3d{3872, 5022, 6594}, 1),
NewRLE(Point3d{3879, 5022, 6594}, 25),
NewRLE(Point3d{3904, 5022, 6594}, 9),
NewRLE(Point3d{3914, 5022, 6594}, 22),
NewRLE(Point3d{3936, 5022, 6594}, 6),
NewRLE(Point3d{3865, 5023, 6594}, 7),
NewRLE(Point3d{3872, 5023, 6594}, 1),
NewRLE(Point3d{3880, 5023, 6594}, 24),
NewRLE(Point3d{3904, 5023, 6594}, 32),
NewRLE(Point3d{3936, 5023, 6594}, 7),
NewRLE(Point3d{3864, 5024, 6594}, 8),
NewRLE(Point3d{3872, 5024, 6594}, 1),
NewRLE(Point3d{3880, 5024, 6594}, 24),
NewRLE(Point3d{3904, 5024, 6594}, 32),
NewRLE(Point3d{3936, 5024, 6594}, 7),
NewRLE(Point3d{3864, 5025, 6594}, 8),
NewRLE(Point3d{3872, 5025, 6594}, 1),
NewRLE(Point3d{3880, 5025, 6594}, 24),
NewRLE(Point3d{3904, 5025, 6594}, 32),
NewRLE(Point3d{3936, 5025, 6594}, 8),
NewRLE(Point3d{3864, 5026, 6594}, 8),
NewRLE(Point3d{3872, 5026, 6594}, 1),
NewRLE(Point3d{3880, 5026, 6594}, 24),
NewRLE(Point3d{3904, 5026, 6594}, 32),
NewRLE(Point3d{3936, 5026, 6594}, 9),
NewRLE(Point3d{3864, 5027, 6594}, 8),
NewRLE(Point3d{3872, 5027, 6594}, 1),
NewRLE(Point3d{3879, 5027, 6594}, 25),
NewRLE(Point3d{3904, 5027, 6594}, 32),
NewRLE(Point3d{3936, 5027, 6594}, 9),
NewRLE(Point3d{3864, 5028, 6594}, 8),
NewRLE(Point3d{3872, 5028, 6594}, 2),
NewRLE(Point3d{3879, 5028, 6594}, 25),
NewRLE(Point3d{3904, 5028, 6594}, 32),
NewRLE(Point3d{3936, 5028, 6594}, 9),
NewRLE(Point3d{3864, 5029, 6594}, 8),
NewRLE(Point3d{3872, 5029, 6594}, 2),
NewRLE(Point3d{3880, 5029, 6594}, 24),
NewRLE(Point3d{3904, 5029, 6594}, 32),
NewRLE(Point3d{3936, 5029, 6594}, 9),
NewRLE(Point3d{3863, 5030, 6594}, 9),
NewRLE(Point3d{3872, 5030, 6594}, 1),
NewRLE(Point3d{3880, 5030, 6594}, 24),
NewRLE(Point3d{3904, 5030, 6594}, 32),
NewRLE(Point3d{3936, 5030, 6594}, 9),
NewRLE(Point3d{3863, 5031, 6594}, 9),
NewRLE(Point3d{3872, 5031, 6594}, 1),
NewRLE(Point3d{3881, 5031, 6594}, 23),
NewRLE(Point3d{3904, 5031, 6594}, 32),
NewRLE(Point3d{3936, 5031, 6594}, 9),
NewRLE(Point3d{3863, 5032, 6594}, 9),
NewRLE(Point3d{3872, 5032, 6594}, 1),
NewRLE(Point3d{3881, 5032, 6594}, 23),
NewRLE(Point3d{3904, 5032, 6594}, 32),
NewRLE(Point3d{3936, 5032, 6594}, 7),
NewRLE(Point3d{3862, 5033, 6594}, 10),
NewRLE(Point3d{3872, 5033, 6594}, 1),
NewRLE(Point3d{3881, 5033, 6594}, 23),
NewRLE(Point3d{3904, 5033, 6594}, 32),
NewRLE(Point3d{3936, 5033, 6594}, 7),
NewRLE(Point3d{3862, 5034, 6594}, 10),
NewRLE(Point3d{3872, 5034, 6594}, 2),
NewRLE(Point3d{3882, 5034, 6594}, 22),
NewRLE(Point3d{3904, 5034, 6594}, 32),
NewRLE(Point3d{3936, 5034, 6594}, 7),
NewRLE(Point3d{3861, 5035, 6594}, 11),
NewRLE(Point3d{3872, 5035, 6594}, 2),
NewRLE(Point3d{3882, 5035, 6594}, 22),
NewRLE(Point3d{3904, 5035, 6594}, 32),
NewRLE(Point3d{3936, 5035, 6594}, 7),
NewRLE(Point3d{3861, 5036, 6594}, 11),
NewRLE(Point3d{3872, 5036, 6594}, 2),
NewRLE(Point3d{3882, 5036, 6594}, 22),
NewRLE(Point3d{3904, 5036, 6594}, 32),
NewRLE(Point3d{3936, 5036, 6594}, 7),
NewRLE(Point3d{3860, 5037, 6594}, 12),
NewRLE(Point3d{3872, 5037, 6594}, 1),
NewRLE(Point3d{3882, 5037, 6594}, 22),
NewRLE(Point3d{3904, 5037, 6594}, 32),
NewRLE(Point3d{3936, 5037, 6594}, 7),
NewRLE(Point3d{3860, 5038, 6594}, 12),
NewRLE(Point3d{3872, 5038, 6594}, 2),
NewRLE(Point3d{3883, 5038, 6594}, 21),
NewRLE(Point3d{3904, 5038, 6594}, 32),
NewRLE(Point3d{3936, 5038, 6594}, 7),
NewRLE(Point3d{3860, 5039, 6594}, 12),
NewRLE(Point3d{3872, 5039, 6594}, 4),
NewRLE(Point3d{3884, 5039, 6594}, 20),
NewRLE(Point3d{3904, 5039, 6594}, 32),
NewRLE(Point3d{3936, 5039, 6594}, 7),
NewRLE(Point3d{3858, 5040, 6594}, 14),
NewRLE(Point3d{3872, 5040, 6594}, 7),
NewRLE(Point3d{3885, 5040, 6594}, 12),
NewRLE(Point3d{3897, 5040, 6594}, 1),
NewRLE(Point3d{3898, 5040, 6594}, 6),
NewRLE(Point3d{3904, 5040, 6594}, 2),
NewRLE(Point3d{3906, 5040, 6594}, 1),
NewRLE(Point3d{3907, 5040, 6594}, 29),
NewRLE(Point3d{3936, 5040, 6594}, 5),
NewRLE(Point3d{3858, 5041, 6594}, 14),
NewRLE(Point3d{3872, 5041, 6594}, 8),
NewRLE(Point3d{3885, 5041, 6594}, 11),
NewRLE(Point3d{3896, 5041, 6594}, 8),
NewRLE(Point3d{3904, 5041, 6594}, 3),
NewRLE(Point3d{3907, 5041, 6594}, 29),
NewRLE(Point3d{3936, 5041, 6594}, 5),
NewRLE(Point3d{3858, 5042, 6594}, 14),
NewRLE(Point3d{3872, 5042, 6594}, 8),
NewRLE(Point3d{3885, 5042, 6594}, 9),
NewRLE(Point3d{3894, 5042, 6594}, 10),
NewRLE(Point3d{3904, 5042, 6594}, 6),
NewRLE(Point3d{3910, 5042, 6594}, 26),
NewRLE(Point3d{3936, 5042, 6594}, 4),
NewRLE(Point3d{3859, 5043, 6594}, 13),
NewRLE(Point3d{3872, 5043, 6594}, 9),
NewRLE(Point3d{3884, 5043, 6594}, 8),
NewRLE(Point3d{3892, 5043, 6594}, 12),
NewRLE(Point3d{3904, 5043, 6594}, 8),
NewRLE(Point3d{3912, 5043, 6594}, 24),
NewRLE(Point3d{3936, 5043, 6594}, 4),
NewRLE(Point3d{3858, 5044, 6594}, 14),
NewRLE(Point3d{3872, 5044, 6594}, 10),
NewRLE(Point3d{3884, 5044, 6594}, 8),
NewRLE(Point3d{3892, 5044, 6594}, 12),
NewRLE(Point3d{3904, 5044, 6594}, 12),
NewRLE(Point3d{3916, 5044, 6594}, 20),
NewRLE(Point3d{3936, 5044, 6594}, 4),
NewRLE(Point3d{3858, 5045, 6594}, 14),
NewRLE(Point3d{3872, 5045, 6594}, 11),
NewRLE(Point3d{3884, 5045, 6594}, 9),
NewRLE(Point3d{3893, 5045, 6594}, 11),
NewRLE(Point3d{3904, 5045, 6594}, 12),
NewRLE(Point3d{3916, 5045, 6594}, 20),
NewRLE(Point3d{3936, 5045, 6594}, 2),
NewRLE(Point3d{3859, 5046, 6594}, 13),
NewRLE(Point3d{3872, 5046, 6594}, 22),
NewRLE(Point3d{3894, 5046, 6594}, 10),
NewRLE(Point3d{3904, 5046, 6594}, 13),
NewRLE(Point3d{3917, 5046, 6594}, 19),
NewRLE(Point3d{3936, 5046, 6594}, 1),
NewRLE(Point3d{3859, 5047, 6594}, 13),
NewRLE(Point3d{3872, 5047, 6594}, 22),
NewRLE(Point3d{3894, 5047, 6594}, 10),
NewRLE(Point3d{3904, 5047, 6594}, 13),
NewRLE(Point3d{3917, 5047, 6594}, 19),
NewRLE(Point3d{3858, 5048, 6594}, 14),
NewRLE(Point3d{3872, 5048, 6594}, 22),
NewRLE(Point3d{3894, 5048, 6594}, 10),
NewRLE(Point3d{3904, 5048, 6594}, 13),
NewRLE(Point3d{3917, 5048, 6594}, 19),
NewRLE(Point3d{3858, 5049, 6594}, 14),
NewRLE(Point3d{3872, 5049, 6594}, 22),
NewRLE(Point3d{3894, 5049, 6594}, 10),
NewRLE(Point3d{3904, 5049, 6594}, 13),
NewRLE(Point3d{3917, 5049, 6594}, 17),
NewRLE(Point3d{3858, 5050, 6594}, 14),
NewRLE(Point3d{3872, 5050, 6594}, 23),
NewRLE(Point3d{3895, 5050, 6594}, 9),
NewRLE(Point3d{3904, 5050, 6594}, 14),
NewRLE(Point3d{3918, 5050, 6594}, 16),
NewRLE(Point3d{3858, 5051, 6594}, 14),
NewRLE(Point3d{3872, 5051, 6594}, 26),
NewRLE(Point3d{3898, 5051, 6594}, 6),
NewRLE(Point3d{3904, 5051, 6594}, 8),
NewRLE(Point3d{3912, 5051, 6594}, 21),
NewRLE(Point3d{3859, 5052, 6594}, 13),
NewRLE(Point3d{3872, 5052, 6594}, 25),
NewRLE(Point3d{3897, 5052, 6594}, 3),
NewRLE(Point3d{3900, 5052, 6594}, 1),
NewRLE(Point3d{3901, 5052, 6594}, 2),
NewRLE(Point3d{3903, 5052, 6594}, 1),
NewRLE(Point3d{3904, 5052, 6594}, 2),
NewRLE(Point3d{3906, 5052, 6594}, 25),
NewRLE(Point3d{3859, 5053, 6594}, 13),
NewRLE(Point3d{3872, 5053, 6594}, 32),
NewRLE(Point3d{3904, 5053, 6594}, 27),
NewRLE(Point3d{3860, 5054, 6594}, 12),
NewRLE(Point3d{3872, 5054, 6594}, 32),
NewRLE(Point3d{3904, 5054, 6594}, 26),
NewRLE(Point3d{3861, 5055, 6594}, 11),
NewRLE(Point3d{3872, 5055, 6594}, 32),
NewRLE(Point3d{3904, 5055, 6594}, 25),
NewRLE(Point3d{3862, 5056, 6594}, 10),
NewRLE(Point3d{3872, 5056, 6594}, 32),
NewRLE(Point3d{3904, 5056, 6594}, 24),
NewRLE(Point3d{3862, 5057, 6594}, 10),
NewRLE(Point3d{3872, 5057, 6594}, 32),
NewRLE(Point3d{3904, 5057, 6594}, 22),
NewRLE(Point3d{3862, 5058, 6594}, 10),
NewRLE(Point3d{3872, 5058, 6594}, 32),
NewRLE(Point3d{3904, 5058, 6594}, 21),
NewRLE(Point3d{3864, 5059, 6594}, 8),
NewRLE(Point3d{3872, 5059, 6594}, 32),
NewRLE(Point3d{3904, 5059, 6594}, 21),
NewRLE(Point3d{3863, 5060, 6594}, 9),
NewRLE(Point3d{3872, 5060, 6594}, 32),
NewRLE(Point3d{3904, 5060, 6594}, 20),
NewRLE(Point3d{3863, 5061, 6594}, 9),
NewRLE(Point3d{3872, 5061, 6594}, 32),
NewRLE(Point3d{3904, 5061, 6594}, 19),
NewRLE(Point3d{3864, 5062, 6594}, 8),
NewRLE(Point3d{3872, 5062, 6594}, 32),
NewRLE(Point3d{3904, 5062, 6594}, 19),
NewRLE(Point3d{3865, 5063, 6594}, 7),
NewRLE(Point3d{3872, 5063, 6594}, 32),
NewRLE(Point3d{3904, 5063, 6594}, 17),
NewRLE(Point3d{3866, 5064, 6594}, 6),
NewRLE(Point3d{3872, 5064, 6594}, 32),
NewRLE(Point3d{3904, 5064, 6594}, 17),
NewRLE(Point3d{3868, 5065, 6594}, 4),
NewRLE(Point3d{3872, 5065, 6594}, 32),
NewRLE(Point3d{3904, 5065, 6594}, 17),
NewRLE(Point3d{3868, 5066, 6594}, 4),
NewRLE(Point3d{3872, 5066, 6594}, 32),
NewRLE(Point3d{3904, 5066, 6594}, 16),
NewRLE(Point3d{3869, 5067, 6594}, 1),
NewRLE(Point3d{3871, 5067, 6594}, 1),
NewRLE(Point3d{3872, 5067, 6594}, 32),
NewRLE(Point3d{3904, 5067, 6594}, 16),
NewRLE(Point3d{3871, 5068, 6594}, 1),
NewRLE(Point3d{3872, 5068, 6594}, 1),
NewRLE(Point3d{3875, 5068, 6594}, 29),
NewRLE(Point3d{3904, 5068, 6594}, 14),
NewRLE(Point3d{3872, 5069, 6594}, 1),
NewRLE(Point3d{3876, 5069, 6594}, 28),
NewRLE(Point3d{3904, 5069, 6594}, 10),
NewRLE(Point3d{3916, 5069, 6594}, 1),
NewRLE(Point3d{3876, 5070, 6594}, 28),
NewRLE(Point3d{3904, 5070, 6594}, 8),
NewRLE(Point3d{3877, 5071, 6594}, 27),
NewRLE(Point3d{3904, 5071, 6594}, 8),
NewRLE(Point3d{3879, 5072, 6594}, 25),
NewRLE(Point3d{3904, 5072, 6594}, 2),
NewRLE(Point3d{3881, 5073, 6594}, 22),
NewRLE(Point3d{3883, 5074, 6594}, 18),
NewRLE(Point3d{3884, 5075, 6594}, 13),
NewRLE(Point3d{3887, 5076, 6594}, 7),
}
bodyRLEs = RLEs{
NewRLE(Point3d{3885, 5001, 6594}, 2),
NewRLE(Point3d{3922, 5001, 6594}, 2),
NewRLE(Point3d{3884, 5002, 6594}, 5),
NewRLE(Point3d{3922, 5002, 6594}, 4),
NewRLE(Point3d{3928, 5002, 6594}, 2),
NewRLE(Point3d{3883, 5003, 6594}, 7),
NewRLE(Point3d{3921, 5003, 6594}, 10),
NewRLE(Point3d{3881, 5004, 6594}, 9),
NewRLE(Point3d{3920, 5004, 6594}, 13),
NewRLE(Point3d{3879, 5005, 6594}, 11),
NewRLE(Point3d{3920, 5005, 6594}, 14),
NewRLE(Point3d{3877, 5006, 6594}, 13),
NewRLE(Point3d{3918, 5006, 6594}, 16),
NewRLE(Point3d{3875, 5007, 6594}, 13),
NewRLE(Point3d{3901, 5007, 6594}, 3),
NewRLE(Point3d{3904, 5007, 6594}, 1),
NewRLE(Point3d{3912, 5007, 6594}, 1),
NewRLE(Point3d{3914, 5007, 6594}, 19),
NewRLE(Point3d{3873, 5008, 6594}, 14),
NewRLE(Point3d{3901, 5008, 6594}, 3),
NewRLE(Point3d{3904, 5008, 6594}, 29),
NewRLE(Point3d{3871, 5009, 6594}, 1),
NewRLE(Point3d{3872, 5009, 6594}, 16),
NewRLE(Point3d{3901, 5009, 6594}, 3),
NewRLE(Point3d{3904, 5009, 6594}, 29),
NewRLE(Point3d{3870, 5010, 6594}, 2),
NewRLE(Point3d{3872, 5010, 6594}, 16),
NewRLE(Point3d{3900, 5010, 6594}, 4),
NewRLE(Point3d{3904, 5010, 6594}, 29),
NewRLE(Point3d{3868, 5011, 6594}, 4),
NewRLE(Point3d{3872, 5011, 6594}, 15),
NewRLE(Point3d{3901, 5011, 6594}, 3),
NewRLE(Point3d{3904, 5011, 6594}, 30),
NewRLE(Point3d{3867, 5012, 6594}, 5),
NewRLE(Point3d{3872, 5012, 6594}, 15),
NewRLE(Point3d{3901, 5012, 6594}, 3),
NewRLE(Point3d{3904, 5012, 6594}, 31),
NewRLE(Point3d{3867, 5013, 6594}, 5),
NewRLE(Point3d{3872, 5013, 6594}, 16),
NewRLE(Point3d{3900, 5013, 6594}, 4),
NewRLE(Point3d{3904, 5013, 6594}, 31),
NewRLE(Point3d{3867, 5014, 6594}, 5),
NewRLE(Point3d{3872, 5014, 6594}, 16),
NewRLE(Point3d{3897, 5014, 6594}, 7),
NewRLE(Point3d{3904, 5014, 6594}, 31),
NewRLE(Point3d{3867, 5015, 6594}, 5),
NewRLE(Point3d{3872, 5015, 6594}, 8),
NewRLE(Point3d{3884, 5015, 6594}, 9),
NewRLE(Point3d{3897, 5015, 6594}, 7),
NewRLE(Point3d{3904, 5015, 6594}, 32),
NewRLE(Point3d{3867, 5016, 6594}, 5),
NewRLE(Point3d{3872, 5016, 6594}, 5),
NewRLE(Point3d{3885, 5016, 6594}, 19),
NewRLE(Point3d{3904, 5016, 6594}, 32),
NewRLE(Point3d{3936, 5016, 6594}, 1),
NewRLE(Point3d{3867, 5017, 6594}, 5),
NewRLE(Point3d{3872, 5017, 6594}, 3),
NewRLE(Point3d{3885, 5017, 6594}, 19),
NewRLE(Point3d{3904, 5017, 6594}, 3),
NewRLE(Point3d{3908, 5017, 6594}, 28),
NewRLE(Point3d{3936, 5017, 6594}, 2),
NewRLE(Point3d{3866, 5018, 6594}, 6),
NewRLE(Point3d{3872, 5018, 6594}, 3),
NewRLE(Point3d{3886, 5018, 6594}, 18),
NewRLE(Point3d{3904, 5018, 6594}, 2),
NewRLE(Point3d{3909, 5018, 6594}, 27),
NewRLE(Point3d{3936, 5018, 6594}, 3),
NewRLE(Point3d{3866, 5019, 6594}, 6),
NewRLE(Point3d{3872, 5019, 6594}, 2),
NewRLE(Point3d{3881, 5019, 6594}, 2),
NewRLE(Point3d{3886, 5019, 6594}, 18),
NewRLE(Point3d{3904, 5019, 6594}, 1),
NewRLE(Point3d{3910, 5019, 6594}, 26),
NewRLE(Point3d{3936, 5019, 6594}, 4),
NewRLE(Point3d{3865, 5020, 6594}, 7),
NewRLE(Point3d{3872, 5020, 6594}, 2),
NewRLE(Point3d{3880, 5020, 6594}, 5),
NewRLE(Point3d{3886, 5020, 6594}, 18),
NewRLE(Point3d{3904, 5020, 6594}, 2),
NewRLE(Point3d{3911, 5020, 6594}, 2),
NewRLE(Point3d{3914, 5020, 6594}, 22),
NewRLE(Point3d{3936, 5020, 6594}, 5),
NewRLE(Point3d{3865, 5021, 6594}, 7),
NewRLE(Point3d{3872, 5021, 6594}, 1),
NewRLE(Point3d{3879, 5021, 6594}, 25),
NewRLE(Point3d{3904, 5021, 6594}, 5),
NewRLE(Point3d{3914, 5021, 6594}, 22),
NewRLE(Point3d{3936, 5021, 6594}, 5),
NewRLE(Point3d{3865, 5022, 6594}, 7),
NewRLE(Point3d{3872, 5022, 6594}, 1),
NewRLE(Point3d{3879, 5022, 6594}, 25),
NewRLE(Point3d{3904, 5022, 6594}, 9),
NewRLE(Point3d{3914, 5022, 6594}, 22),
NewRLE(Point3d{3936, 5022, 6594}, 6),
NewRLE(Point3d{3865, 5023, 6594}, 7),
NewRLE(Point3d{3872, 5023, 6594}, 1),
NewRLE(Point3d{3880, 5023, 6594}, 24),
NewRLE(Point3d{3904, 5023, 6594}, 32),
NewRLE(Point3d{3936, 5023, 6594}, 7),
NewRLE(Point3d{3864, 5024, 6594}, 8),
NewRLE(Point3d{3872, 5024, 6594}, 1),
NewRLE(Point3d{3880, 5024, 6594}, 24),
NewRLE(Point3d{3904, 5024, 6594}, 32),
NewRLE(Point3d{3936, 5024, 6594}, 7),
NewRLE(Point3d{3864, 5025, 6594}, 8),
NewRLE(Point3d{3872, 5025, 6594}, 1),
NewRLE(Point3d{3880, 5025, 6594}, 24),
NewRLE(Point3d{3904, 5025, 6594}, 32),
NewRLE(Point3d{3936, 5025, 6594}, 8),
NewRLE(Point3d{3864, 5026, 6594}, 8),
NewRLE(Point3d{3872, 5026, 6594}, 1),
NewRLE(Point3d{3880, 5026, 6594}, 24),
NewRLE(Point3d{3904, 5026, 6594}, 32),
NewRLE(Point3d{3936, 5026, 6594}, 9),
NewRLE(Point3d{3864, 5027, 6594}, 8),
NewRLE(Point3d{3872, 5027, 6594}, 1),
NewRLE(Point3d{3879, 5027, 6594}, 25),
NewRLE(Point3d{3904, 5027, 6594}, 32),
NewRLE(Point3d{3936, 5027, 6594}, 9),
NewRLE(Point3d{3864, 5028, 6594}, 8),
NewRLE(Point3d{3872, 5028, 6594}, 2),
NewRLE(Point3d{3879, 5028, 6594}, 25),
NewRLE(Point3d{3904, 5028, 6594}, 32),
NewRLE(Point3d{3936, 5028, 6594}, 9),
NewRLE(Point3d{3864, 5029, 6594}, 8),
NewRLE(Point3d{3872, 5029, 6594}, 2),
NewRLE(Point3d{3880, 5029, 6594}, 24),
NewRLE(Point3d{3904, 5029, 6594}, 32),
NewRLE(Point3d{3936, 5029, 6594}, 9),
NewRLE(Point3d{3863, 5030, 6594}, 9),
NewRLE(Point3d{3872, 5030, 6594}, 1),
NewRLE(Point3d{3880, 5030, 6594}, 24),
NewRLE(Point3d{3904, 5030, 6594}, 32),
NewRLE(Point3d{3936, 5030, 6594}, 9),
NewRLE(Point3d{3863, 5031, 6594}, 9),
NewRLE(Point3d{3872, 5031, 6594}, 1),
NewRLE(Point3d{3881, 5031, 6594}, 23),
NewRLE(Point3d{3904, 5031, 6594}, 32),
NewRLE(Point3d{3936, 5031, 6594}, 9),
NewRLE(Point3d{3863, 5032, 6594}, 9),
NewRLE(Point3d{3872, 5032, 6594}, 1),
NewRLE(Point3d{3881, 5032, 6594}, 23),
NewRLE(Point3d{3904, 5032, 6594}, 32),
NewRLE(Point3d{3936, 5032, 6594}, 7),
NewRLE(Point3d{3862, 5033, 6594}, 10),
NewRLE(Point3d{3872, 5033, 6594}, 1),
NewRLE(Point3d{3881, 5033, 6594}, 23),
NewRLE(Point3d{3904, 5033, 6594}, 32),
NewRLE(Point3d{3936, 5033, 6594}, 7),
NewRLE(Point3d{3862, 5034, 6594}, 10),
NewRLE(Point3d{3872, 5034, 6594}, 2),
NewRLE(Point3d{3882, 5034, 6594}, 22),
NewRLE(Point3d{3904, 5034, 6594}, 32),
NewRLE(Point3d{3936, 5034, 6594}, 7),
NewRLE(Point3d{3861, 5035, 6594}, 11),
NewRLE(Point3d{3872, 5035, 6594}, 2),
NewRLE(Point3d{3882, 5035, 6594}, 22),
NewRLE(Point3d{3904, 5035, 6594}, 32),
NewRLE(Point3d{3936, 5035, 6594}, 7),
NewRLE(Point3d{3861, 5036, 6594}, 11),
NewRLE(Point3d{3872, 5036, 6594}, 2),
NewRLE(Point3d{3882, 5036, 6594}, 22),
NewRLE(Point3d{3904, 5036, 6594}, 32),
NewRLE(Point3d{3936, 5036, 6594}, 7),
NewRLE(Point3d{3860, 5037, 6594}, 12),
NewRLE(Point3d{3872, 5037, 6594}, 1),
NewRLE(Point3d{3882, 5037, 6594}, 22),
NewRLE(Point3d{3904, 5037, 6594}, 32),
NewRLE(Point3d{3936, 5037, 6594}, 7),
NewRLE(Point3d{3860, 5038, 6594}, 12),
NewRLE(Point3d{3872, 5038, 6594}, 2),
NewRLE(Point3d{3883, 5038, 6594}, 21),
NewRLE(Point3d{3904, 5038, 6594}, 32),
NewRLE(Point3d{3936, 5038, 6594}, 7),
NewRLE(Point3d{3860, 5039, 6594}, 12),
NewRLE(Point3d{3872, 5039, 6594}, 4),
NewRLE(Point3d{3884, 5039, 6594}, 20),
NewRLE(Point3d{3904, 5039, 6594}, 32),
NewRLE(Point3d{3936, 5039, 6594}, 7),
NewRLE(Point3d{3858, 5040, 6594}, 14),
NewRLE(Point3d{3872, 5040, 6594}, 7),
NewRLE(Point3d{3885, 5040, 6594}, 12),
NewRLE(Point3d{3897, 5040, 6594}, 1),
NewRLE(Point3d{3898, 5040, 6594}, 6),
NewRLE(Point3d{3904, 5040, 6594}, 2),
NewRLE(Point3d{3906, 5040, 6594}, 1),
NewRLE(Point3d{3907, 5040, 6594}, 29),
NewRLE(Point3d{3936, 5040, 6594}, 5),
NewRLE(Point3d{3858, 5041, 6594}, 14),
NewRLE(Point3d{3872, 5041, 6594}, 8),
NewRLE(Point3d{3885, 5041, 6594}, 11),
NewRLE(Point3d{3896, 5041, 6594}, 8),
NewRLE(Point3d{3904, 5041, 6594}, 3),
NewRLE(Point3d{3907, 5041, 6594}, 29),
NewRLE(Point3d{3936, 5041, 6594}, 5),
NewRLE(Point3d{3858, 5042, 6594}, 14),
NewRLE(Point3d{3872, 5042, 6594}, 8),
NewRLE(Point3d{3885, 5042, 6594}, 9),
NewRLE(Point3d{3894, 5042, 6594}, 10),
NewRLE(Point3d{3904, 5042, 6594}, 6),
NewRLE(Point3d{3910, 5042, 6594}, 26),
NewRLE(Point3d{3936, 5042, 6594}, 4),
NewRLE(Point3d{3859, 5043, 6594}, 13),
NewRLE(Point3d{3872, 5043, 6594}, 9),
NewRLE(Point3d{3884, 5043, 6594}, 8),
NewRLE(Point3d{3892, 5043, 6594}, 12),
NewRLE(Point3d{3904, 5043, 6594}, 8),
NewRLE(Point3d{3912, 5043, 6594}, 24),
NewRLE(Point3d{3936, 5043, 6594}, 4),
NewRLE(Point3d{3858, 5044, 6594}, 14),
NewRLE(Point3d{3872, 5044, 6594}, 10),
NewRLE(Point3d{3884, 5044, 6594}, 8),
NewRLE(Point3d{3892, 5044, 6594}, 12),
NewRLE(Point3d{3904, 5044, 6594}, 12),
NewRLE(Point3d{3916, 5044, 6594}, 20),
NewRLE(Point3d{3936, 5044, 6594}, 4),
NewRLE(Point3d{3858, 5045, 6594}, 14),
NewRLE(Point3d{3872, 5045, 6594}, 11),
NewRLE(Point3d{3884, 5045, 6594}, 9),
NewRLE(Point3d{3893, 5045, 6594}, 11),
NewRLE(Point3d{3904, 5045, 6594}, 12),
NewRLE(Point3d{3916, 5045, 6594}, 20),
NewRLE(Point3d{3936, 5045, 6594}, 2),
NewRLE(Point3d{3859, 5046, 6594}, 13),
NewRLE(Point3d{3872, 5046, 6594}, 22),
NewRLE(Point3d{3894, 5046, 6594}, 10),
NewRLE(Point3d{3904, 5046, 6594}, 13),
NewRLE(Point3d{3917, 5046, 6594}, 19),
NewRLE(Point3d{3936, 5046, 6594}, 1),
NewRLE(Point3d{3859, 5047, 6594}, 13),
NewRLE(Point3d{3872, 5047, 6594}, 22),
NewRLE(Point3d{3894, 5047, 6594}, 10),
NewRLE(Point3d{3904, 5047, 6594}, 13),
NewRLE(Point3d{3917, 5047, 6594}, 19),
NewRLE(Point3d{3858, 5048, 6594}, 14),
NewRLE(Point3d{3872, 5048, 6594}, 22),
NewRLE(Point3d{3894, 5048, 6594}, 10),
NewRLE(Point3d{3904, 5048, 6594}, 13),
NewRLE(Point3d{3917, 5048, 6594}, 19),
NewRLE(Point3d{3858, 5049, 6594}, 14),
NewRLE(Point3d{3872, 5049, 6594}, 22),
NewRLE(Point3d{3894, 5049, 6594}, 10),
NewRLE(Point3d{3904, 5049, 6594}, 13),
NewRLE(Point3d{3917, 5049, 6594}, 17),
NewRLE(Point3d{3858, 5050, 6594}, 14),
NewRLE(Point3d{3872, 5050, 6594}, 23),
NewRLE(Point3d{3895, 5050, 6594}, 9),
NewRLE(Point3d{3904, 5050, 6594}, 14),
NewRLE(Point3d{3918, 5050, 6594}, 16),
NewRLE(Point3d{3858, 5051, 6594}, 14),
NewRLE(Point3d{3872, 5051, 6594}, 26),
NewRLE(Point3d{3898, 5051, 6594}, 6),
NewRLE(Point3d{3904, 5051, 6594}, 8),
NewRLE(Point3d{3912, 5051, 6594}, 21),
NewRLE(Point3d{3859, 5052, 6594}, 13),
NewRLE(Point3d{3872, 5052, 6594}, 25),
NewRLE(Point3d{3897, 5052, 6594}, 3),
NewRLE(Point3d{3900, 5052, 6594}, 1),
NewRLE(Point3d{3901, 5052, 6594}, 2),
NewRLE(Point3d{3903, 5052, 6594}, 1),
NewRLE(Point3d{3904, 5052, 6594}, 2),
NewRLE(Point3d{3906, 5052, 6594}, 25),
NewRLE(Point3d{3859, 5053, 6594}, 13),
NewRLE(Point3d{3872, 5053, 6594}, 32),
NewRLE(Point3d{3904, 5053, 6594}, 27),
NewRLE(Point3d{3860, 5054, 6594}, 12),
NewRLE(Point3d{3872, 5054, 6594}, 32),
NewRLE(Point3d{3904, 5054, 6594}, 26),
NewRLE(Point3d{3861, 5055, 6594}, 11),
NewRLE(Point3d{3872, 5055, 6594}, 32),
NewRLE(Point3d{3904, 5055, 6594}, 25),
NewRLE(Point3d{3862, 5056, 6594}, 10),
NewRLE(Point3d{3872, 5056, 6594}, 32),
NewRLE(Point3d{3904, 5056, 6594}, 24),
NewRLE(Point3d{3862, 5057, 6594}, 10),
NewRLE(Point3d{3872, 5057, 6594}, 32),
NewRLE(Point3d{3904, 5057, 6594}, 22),
NewRLE(Point3d{3862, 5058, 6594}, 10),
NewRLE(Point3d{3872, 5058, 6594}, 32),
NewRLE(Point3d{3904, 5058, 6594}, 21),
NewRLE(Point3d{3864, 5059, 6594}, 8),
NewRLE(Point3d{3872, 5059, 6594}, 32),
NewRLE(Point3d{3904, 5059, 6594}, 21),
NewRLE(Point3d{3863, 5060, 6594}, 9),
NewRLE(Point3d{3872, 5060, 6594}, 32),
NewRLE(Point3d{3904, 5060, 6594}, 20),
NewRLE(Point3d{3863, 5061, 6594}, 9),
NewRLE(Point3d{3872, 5061, 6594}, 32),
NewRLE(Point3d{3904, 5061, 6594}, 19),
NewRLE(Point3d{3864, 5062, 6594}, 8),
NewRLE(Point3d{3872, 5062, 6594}, 32),
NewRLE(Point3d{3904, 5062, 6594}, 19),
NewRLE(Point3d{3865, 5063, 6594}, 7),
NewRLE(Point3d{3872, 5063, 6594}, 32),
NewRLE(Point3d{3904, 5063, 6594}, 17),
NewRLE(Point3d{3866, 5064, 6594}, 6),
NewRLE(Point3d{3872, 5064, 6594}, 32),
NewRLE(Point3d{3904, 5064, 6594}, 17),
NewRLE(Point3d{3868, 5065, 6594}, 4),
NewRLE(Point3d{3872, 5065, 6594}, 32),
NewRLE(Point3d{3904, 5065, 6594}, 17),
NewRLE(Point3d{3868, 5066, 6594}, 4),
NewRLE(Point3d{3872, 5066, 6594}, 32),
NewRLE(Point3d{3904, 5066, 6594}, 16),
NewRLE(Point3d{3869, 5067, 6594}, 1),
NewRLE(Point3d{3871, 5067, 6594}, 1),
NewRLE(Point3d{3872, 5067, 6594}, 32),
NewRLE(Point3d{3904, 5067, 6594}, 16),
NewRLE(Point3d{3871, 5068, 6594}, 1),
NewRLE(Point3d{3872, 5068, 6594}, 1),
NewRLE(Point3d{3875, 5068, 6594}, 29),
NewRLE(Point3d{3904, 5068, 6594}, 14),
NewRLE(Point3d{3872, 5069, 6594}, 1),
NewRLE(Point3d{3876, 5069, 6594}, 28),
NewRLE(Point3d{3904, 5069, 6594}, 10),
NewRLE(Point3d{3916, 5069, 6594}, 1),
NewRLE(Point3d{3876, 5070, 6594}, 28),
NewRLE(Point3d{3904, 5070, 6594}, 8),
NewRLE(Point3d{3877, 5071, 6594}, 27),
NewRLE(Point3d{3904, 5071, 6594}, 8),
NewRLE(Point3d{3879, 5072, 6594}, 25),
NewRLE(Point3d{3904, 5072, 6594}, 2),
NewRLE(Point3d{3881, 5073, 6594}, 22),
NewRLE(Point3d{3883, 5074, 6594}, 18),
NewRLE(Point3d{3884, 5075, 6594}, 13),
NewRLE(Point3d{3887, 5076, 6594}, 7),
}
splitDupRLEs = RLEs{
NewRLE(Point3d{3885, 5001, 6594}, 2),
NewRLE(Point3d{3922, 5001, 6594}, 2),
NewRLE(Point3d{3884, 5002, 6594}, 5),
NewRLE(Point3d{3922, 5002, 6594}, 4),
NewRLE(Point3d{3928, 5002, 6594}, 2),
NewRLE(Point3d{3883, 5003, 6594}, 7),
NewRLE(Point3d{3921, 5003, 6594}, 10),
NewRLE(Point3d{3881, 5004, 6594}, 9),
NewRLE(Point3d{3920, 5004, 6594}, 13),
NewRLE(Point3d{3879, 5005, 6594}, 11),
NewRLE(Point3d{3920, 5005, 6594}, 14),
NewRLE(Point3d{3877, 5006, 6594}, 13),
NewRLE(Point3d{3918, 5006, 6594}, 16),
NewRLE(Point3d{3875, 5007, 6594}, 13),
NewRLE(Point3d{3901, 5007, 6594}, 4),
NewRLE(Point3d{3912, 5007, 6594}, 1),
NewRLE(Point3d{3914, 5007, 6594}, 19),
NewRLE(Point3d{3873, 5008, 6594}, 14),
NewRLE(Point3d{3901, 5008, 6594}, 32),
NewRLE(Point3d{3871, 5009, 6594}, 17),
NewRLE(Point3d{3901, 5009, 6594}, 32),
NewRLE(Point3d{3870, 5010, 6594}, 18),
NewRLE(Point3d{3900, 5010, 6594}, 33),
NewRLE(Point3d{3868, 5011, 6594}, 19),
NewRLE(Point3d{3901, 5011, 6594}, 33),
NewRLE(Point3d{3867, 5012, 6594}, 20),
NewRLE(Point3d{3901, 5012, 6594}, 34),
NewRLE(Point3d{3867, 5013, 6594}, 21),
NewRLE(Point3d{3900, 5013, 6594}, 35),
NewRLE(Point3d{3867, 5014, 6594}, 21),
NewRLE(Point3d{3897, 5014, 6594}, 38),
NewRLE(Point3d{3867, 5015, 6594}, 13),
NewRLE(Point3d{3884, 5015, 6594}, 9),
NewRLE(Point3d{3897, 5015, 6594}, 39),
NewRLE(Point3d{3867, 5016, 6594}, 10),
NewRLE(Point3d{3885, 5016, 6594}, 52),
NewRLE(Point3d{3867, 5017, 6594}, 8),
NewRLE(Point3d{3885, 5017, 6594}, 22),
NewRLE(Point3d{3908, 5017, 6594}, 30),
NewRLE(Point3d{3866, 5018, 6594}, 9),
NewRLE(Point3d{3886, 5018, 6594}, 20),
NewRLE(Point3d{3909, 5018, 6594}, 30),
NewRLE(Point3d{3866, 5019, 6594}, 8),
NewRLE(Point3d{3881, 5019, 6594}, 2),
NewRLE(Point3d{3886, 5019, 6594}, 19),
NewRLE(Point3d{3910, 5019, 6594}, 30),
NewRLE(Point3d{3865, 5020, 6594}, 9),
NewRLE(Point3d{3880, 5020, 6594}, 5),
NewRLE(Point3d{3886, 5020, 6594}, 20),
NewRLE(Point3d{3911, 5020, 6594}, 2),
NewRLE(Point3d{3914, 5020, 6594}, 27),
NewRLE(Point3d{3865, 5021, 6594}, 8),
NewRLE(Point3d{3879, 5021, 6594}, 30),
NewRLE(Point3d{3914, 5021, 6594}, 27),
NewRLE(Point3d{3865, 5022, 6594}, 8),
NewRLE(Point3d{3879, 5022, 6594}, 34),
NewRLE(Point3d{3914, 5022, 6594}, 28),
NewRLE(Point3d{3865, 5023, 6594}, 8),
NewRLE(Point3d{3880, 5023, 6594}, 63),
NewRLE(Point3d{3864, 5024, 6594}, 9),
NewRLE(Point3d{3880, 5024, 6594}, 63),
NewRLE(Point3d{3864, 5025, 6594}, 9),
NewRLE(Point3d{3880, 5025, 6594}, 64),
NewRLE(Point3d{3864, 5026, 6594}, 9),
NewRLE(Point3d{3880, 5026, 6594}, 65),
NewRLE(Point3d{3864, 5027, 6594}, 9),
NewRLE(Point3d{3879, 5027, 6594}, 66),
NewRLE(Point3d{3864, 5028, 6594}, 10),
NewRLE(Point3d{3879, 5028, 6594}, 66),
NewRLE(Point3d{3864, 5029, 6594}, 10),
NewRLE(Point3d{3880, 5029, 6594}, 65),
NewRLE(Point3d{3863, 5030, 6594}, 10),
NewRLE(Point3d{3880, 5030, 6594}, 65),
NewRLE(Point3d{3863, 5031, 6594}, 10),
NewRLE(Point3d{3881, 5031, 6594}, 64),
NewRLE(Point3d{3863, 5032, 6594}, 10),
NewRLE(Point3d{3881, 5032, 6594}, 62),
NewRLE(Point3d{3862, 5033, 6594}, 11),
NewRLE(Point3d{3881, 5033, 6594}, 62),
NewRLE(Point3d{3862, 5034, 6594}, 12),
NewRLE(Point3d{3882, 5034, 6594}, 61),
NewRLE(Point3d{3861, 5035, 6594}, 13),
NewRLE(Point3d{3882, 5035, 6594}, 61),
NewRLE(Point3d{3861, 5036, 6594}, 13),
NewRLE(Point3d{3882, 5036, 6594}, 61),
NewRLE(Point3d{3860, 5037, 6594}, 13),
NewRLE(Point3d{3882, 5037, 6594}, 61),
NewRLE(Point3d{3860, 5038, 6594}, 14),
NewRLE(Point3d{3883, 5038, 6594}, 60),
NewRLE(Point3d{3860, 5039, 6594}, 16),
NewRLE(Point3d{3884, 5039, 6594}, 59),
NewRLE(Point3d{3858, 5040, 6594}, 21),
NewRLE(Point3d{3885, 5040, 6594}, 56),
NewRLE(Point3d{3858, 5041, 6594}, 22),
NewRLE(Point3d{3885, 5041, 6594}, 56),
NewRLE(Point3d{3858, 5042, 6594}, 22),
NewRLE(Point3d{3885, 5042, 6594}, 55),
NewRLE(Point3d{3859, 5043, 6594}, 22),
NewRLE(Point3d{3884, 5043, 6594}, 56),
NewRLE(Point3d{3858, 5044, 6594}, 24),
NewRLE(Point3d{3884, 5044, 6594}, 56),
NewRLE(Point3d{3858, 5045, 6594}, 25),
NewRLE(Point3d{3884, 5045, 6594}, 54),
NewRLE(Point3d{3859, 5046, 6594}, 78),
NewRLE(Point3d{3859, 5047, 6594}, 77),
NewRLE(Point3d{3858, 5048, 6594}, 78),
NewRLE(Point3d{3858, 5049, 6594}, 76),
NewRLE(Point3d{3858, 5050, 6594}, 76),
NewRLE(Point3d{3858, 5051, 6594}, 75),
NewRLE(Point3d{3859, 5052, 6594}, 72),
NewRLE(Point3d{3859, 5053, 6594}, 72),
NewRLE(Point3d{3860, 5054, 6594}, 70),
NewRLE(Point3d{3861, 5055, 6594}, 68),
NewRLE(Point3d{3862, 5056, 6594}, 66),
NewRLE(Point3d{3862, 5057, 6594}, 64),
NewRLE(Point3d{3862, 5058, 6594}, 63),
NewRLE(Point3d{3864, 5059, 6594}, 61),
NewRLE(Point3d{3863, 5060, 6594}, 61),
NewRLE(Point3d{3863, 5061, 6594}, 60),
NewRLE(Point3d{3864, 5062, 6594}, 59),
NewRLE(Point3d{3865, 5063, 6594}, 56),
NewRLE(Point3d{3866, 5064, 6594}, 55),
NewRLE(Point3d{3868, 5065, 6594}, 53),
NewRLE(Point3d{3868, 5066, 6594}, 52),
NewRLE(Point3d{3869, 5067, 6594}, 1),
NewRLE(Point3d{3871, 5067, 6594}, 49),
NewRLE(Point3d{3871, 5068, 6594}, 2),
NewRLE(Point3d{3875, 5068, 6594}, 43),
NewRLE(Point3d{3872, 5069, 6594}, 1),
NewRLE(Point3d{3876, 5069, 6594}, 38),
NewRLE(Point3d{3916, 5069, 6594}, 1),
NewRLE(Point3d{3876, 5070, 6594}, 36),
NewRLE(Point3d{3877, 5071, 6594}, 35),
NewRLE(Point3d{3879, 5072, 6594}, 27),
NewRLE(Point3d{3881, 5073, 6594}, 22),
NewRLE(Point3d{3883, 5074, 6594}, 18),
NewRLE(Point3d{3884, 5075, 6594}, 13),
NewRLE(Point3d{3887, 5076, 6594}, 7),
}
)
|
package main
import (
"bytes"
"encoding/json"
"github.com/ixchi/foxbot/bot"
"github.com/syfaro/haste-client"
"github.com/syfaro/telegram-bot-api"
"os"
"strconv"
)
// pluginUtils bundles the bot's general-purpose utility commands
// (ID lookup, debug dumps, admin/seen-user listings). It is stateless.
type pluginUtils struct {
}
// Name returns the human-readable name of this plugin.
func (plugin *pluginUtils) Name() string {
	const pluginName = "General utilities"
	return pluginName
}
// displayMyID replies to the sender with their numeric Telegram user ID.
func (plugin *pluginUtils) displayMyID(handler foxbot.Handler) error {
	reply := tgbotapi.NewMessage(
		handler.Update.Message.Chat.ID,
		strconv.Itoa(handler.Update.Message.From.ID),
	)
	reply.ReplyToMessageID = handler.Update.Message.MessageID

	_, err := handler.API.SendMessage(reply)
	return err
}
// displayChatID replies with the current chat's numeric ID and whether the
// chat is a group.
func (plugin *pluginUtils) displayChatID(handler foxbot.Handler) error {
	body := strconv.Itoa(handler.Update.Message.Chat.ID) +
		"\ngroup: " + strconv.FormatBool(handler.Update.Message.IsGroup())

	reply := tgbotapi.NewMessage(handler.Update.Message.Chat.ID, body)
	reply.ReplyToMessageID = handler.Update.Message.MessageID

	_, err := handler.API.SendMessage(reply)
	return err
}
// pasteRawJSON uploads the raw JSON of the current update to a hastebin
// instance and replies with the resulting link.
func (plugin *pluginUtils) pasteRawJSON(handler foxbot.Handler) error {
	// BUG FIX: NewChatAction takes a chat ID; the original passed the
	// message ID, so the "typing" indicator targeted the wrong chat.
	handler.API.SendChatAction(tgbotapi.NewChatAction(handler.Update.Message.Chat.ID, tgbotapi.ChatTyping))
	data, err := json.MarshalIndent(*handler.Update, "", "    ")
	if err != nil {
		return err
	}
	hasteClient := haste.NewHaste("http://paste.syfaro.net")
	resp, err := hasteClient.UploadBytes(data)
	if err != nil {
		return err
	}
	link := resp.GetLink(hasteClient)
	msg := tgbotapi.NewMessage(handler.Update.Message.Chat.ID, link)
	msg.ReplyToMessageID = handler.Update.Message.MessageID
	_, err = handler.API.SendMessage(msg)
	return err
}
// pasteEnvVariables uploads the process environment (one KEY=VALUE per line)
// to a hastebin instance and replies with the resulting link. Registered as
// admin-only in GetCommands.
func (plugin *pluginUtils) pasteEnvVariables(handler foxbot.Handler) error {
	var envDump bytes.Buffer
	for _, entry := range os.Environ() {
		envDump.WriteString(entry)
		envDump.WriteString("\n")
	}

	paster := haste.NewHaste("http://paste.syfaro.net")
	uploaded, err := paster.UploadString(envDump.String())
	if err != nil {
		return err
	}

	reply := tgbotapi.NewMessage(handler.Update.Message.Chat.ID, uploaded.GetLink(paster))
	reply.ReplyToMessageID = handler.Update.Message.MessageID
	_, err = handler.API.SendMessage(reply)
	return err
}
// displayAdminStatus replies with "true"/"false" depending on whether the
// sender is listed in bot.Admins.
func (plugin *pluginUtils) displayAdminStatus(handler foxbot.Handler) error {
	isAdmin := bot.Admins[handler.Update.Message.From.ID]

	reply := tgbotapi.NewMessage(handler.Update.Message.Chat.ID, strconv.FormatBool(isAdmin))
	reply.ReplyToMessageID = handler.Update.Message.MessageID

	_, err := handler.API.SendMessage(reply)
	return err
}
// displaySeenUsers replies with every user the bot has seen, one per line,
// annotated with that user's admin status.
func (plugin *pluginUtils) displaySeenUsers(handler foxbot.Handler) error {
	var listing bytes.Buffer
	for _, user := range bot.Users {
		listing.WriteString(user.String() + " - " + strconv.FormatBool(bot.Admins[user.ID]) + "\n")
	}

	reply := tgbotapi.NewMessage(handler.Update.Message.Chat.ID, listing.String())
	reply.ReplyToMessageID = handler.Update.Message.MessageID

	_, err := handler.API.SendMessage(reply)
	return err
}
// GetCommands returns the commands provided by this plugin.
func (plugin *pluginUtils) GetCommands() []*foxbot.Command {
	// gofmt -s: the `&foxbot.Command` element type is elided in the composite
	// literals below, as the slice's element type already implies it.
	return []*foxbot.Command{
		{
			Name:    "My ID",
			Help:    "Displays your ID",
			Example: "/myid",
			Command: "myid",
			Handler: plugin.displayMyID,
		},
		{
			Name:    "Current chat ID",
			Help:    "Displays current chat ID",
			Example: "/chatid",
			Command: "chatid",
			Handler: plugin.displayChatID,
		},
		{
			Name:    "Raw update JSON",
			Help:    "Displays raw JSON version of the update event for this message",
			Example: "/updatejson",
			Command: "updatejson",
			Admin:   true,
			Handler: plugin.pasteRawJSON,
		},
		{
			Name:    "Environment",
			Help:    "Displays environment info",
			Example: "/env",
			Command: "env",
			Admin:   true,
			Handler: plugin.pasteEnvVariables,
		},
		{
			Name:    "Display admin status",
			Help:    "Displays if a user is an admin",
			Example: "/isadmin",
			Command: "isadmin",
			Handler: plugin.displayAdminStatus,
		},
		{
			Name:    "Seen users",
			Help:    "Displays users the bot has seen",
			Example: "/seen",
			Command: "seen",
			Admin:   true,
			Handler: plugin.displaySeenUsers,
		},
	}
}
|
package idservice_test
import (
"fmt"
"log"
"net/http"
"gopkg.in/errgo.v1"
"gopkg.in/macaroon-bakery.v0/bakery"
"gopkg.in/macaroon-bakery.v0/bakery/checkers"
"gopkg.in/macaroon-bakery.v0/httpbakery"
)
// targetServiceHandler carries the state the example target service's HTTP
// handlers need to verify and mint macaroons.
type targetServiceHandler struct {
	svc          *bakery.Service // mints and verifies macaroons for this service
	authEndpoint string          // location of the third-party authorization service
	endpoint     string          // NOTE(review): never assigned in the visible code — confirm it is needed
	mux          *http.ServeMux  // NOTE(review): never assigned in the visible code — confirm it is needed
}
// targetService implements a "target service", representing
// an arbitrary web service that wants to delegate authorization
// to third parties. It returns an http.Handler serving /gold/ and /silver/,
// each guarded by macaroon checks.
func targetService(endpoint, authEndpoint string, authPK *bakery.PublicKey) (http.Handler, error) {
	key, err := bakery.GenerateKey()
	if err != nil {
		return nil, err
	}
	pkLocator := bakery.NewPublicKeyRing()
	svc, err := bakery.NewService(bakery.NewServiceParams{
		Key:      key,
		Location: endpoint,
		Locator:  pkLocator,
	})
	if err != nil {
		return nil, err
	}
	log.Printf("adding public key for location %s: %v", authEndpoint, authPK)
	// Trust authPK for third-party caveats addressed to authEndpoint.
	pkLocator.AddPublicKeyForLocation(authEndpoint, true, authPK)
	mux := http.NewServeMux()
	// NOTE(review): the handler's endpoint and mux struct fields are left
	// unset here — confirm they are unused.
	srv := &targetServiceHandler{
		svc:          svc,
		authEndpoint: authEndpoint,
	}
	mux.HandleFunc("/gold/", srv.serveGold)
	mux.HandleFunc("/silver/", srv.serveSilver)
	return mux, nil
}
// serveGold authorizes the request for the "gold" operation and, on success,
// writes the gold response body.
func (srv *targetServiceHandler) serveGold(w http.ResponseWriter, req *http.Request) {
	if _, err := httpbakery.CheckRequest(srv.svc, req, nil, srv.checkers(req, "gold")); err != nil {
		srv.writeError(w, "gold", err)
		return
	}
	fmt.Fprintf(w, "all is golden")
}
// serveSilver authorizes the request for the "silver" operation and, on
// success, writes the silver response body.
func (srv *targetServiceHandler) serveSilver(w http.ResponseWriter, req *http.Request) {
	if _, err := httpbakery.CheckRequest(srv.svc, req, nil, srv.checkers(req, "silver")); err != nil {
		srv.writeError(w, "silver", err)
		return
	}
	fmt.Fprintf(w, "every cloud has a silver lining")
}
// checkers implements the caveat checking for the service: it accepts only
// "operation" caveats whose value matches the operation being served.
//
// Consistency fix: the receiver is named srv to match every other method on
// targetServiceHandler (the original named it svc here).
func (srv *targetServiceHandler) checkers(req *http.Request, operation string) checkers.Checker {
	return checkers.CheckerFunc{
		Condition_: "operation",
		Check_: func(_, op string) error {
			if op != operation {
				return fmt.Errorf("macaroon not valid for operation")
			}
			return nil
		},
	}
}
// writeError writes an error to w. If the error was generated because
// of a required macaroon that the client does not have, we mint a
// macaroon that, when discharged, will grant the client the
// right to execute the given operation.
//
// The logic in this function is crucial to the security of the service
// - it must determine for a given operation what caveats to attach.
func (srv *targetServiceHandler) writeError(w http.ResponseWriter, operation string, verr error) {
	// fail formats an error response, prefixing internal errors for clarity.
	fail := func(code int, msg string, args ...interface{}) {
		if code == http.StatusInternalServerError {
			msg = "internal error: " + msg
		}
		http.Error(w, fmt.Sprintf(msg, args...), code)
	}
	// Anything other than a macaroon verification error is a plain 403.
	if _, ok := errgo.Cause(verr).(*bakery.VerificationError); !ok {
		fail(http.StatusForbidden, "%v", verr)
		return
	}
	// Work out what caveats we need to apply for the given operation.
	// Could special-case the operation here if desired.
	caveats := []checkers.Caveat{{
		Location:  srv.authEndpoint,
		Condition: "member-of-group target-service-users",
	}, {
		Condition: "operation " + operation,
	}}
	// Mint an appropriate macaroon and send it back to the client.
	m, err := srv.svc.NewMacaroon("", nil, caveats)
	if err != nil {
		fail(http.StatusInternalServerError, "cannot mint macaroon: %v", err)
		return
	}
	httpbakery.WriteDischargeRequiredError(w, m, verr)
}
|
package main
import "strings"
// Command prefixes understood by the parser. Prefixes ending in ':' carry a
// payload after the prefix; split returns that payload trimmed.
const (
	NONCOMMAND   = "non-command"
	SIGNUP       = "signup:"
	SIGNIN       = "signin:"
	SIGNOUT      = "signout"
	TOUSER       = "to user:"
	TOGROUP      = "to group:"
	CREATEGROUP  = "create group:"
	JOINGROUP    = "join group:"
	INVITEGROUP  = "invite group:"
	RESTORENOTES = "restore:"
	CLOSE        = "close" // substituted by the system (original note: "system replace")
	NULL         = ""
)

// cmds lists every recognized command prefix.
var cmds = [...]string{SIGNUP, SIGNIN, SIGNOUT, TOUSER, TOGROUP, CREATEGROUP, JOINGROUP, INVITEGROUP, RESTORENOTES, CLOSE}
// parser matches incoming messages against the known command prefixes.
type parser struct {
	cmdMap map[int][]string // command prefixes grouped by prefix length
	minLen int              // shortest command length, used by split for early rejection
}
// newParser builds a parser whose command table groups prefixes by length,
// and records the length of the shortest command so split can reject
// too-short input early.
func newParser() *parser {
	cmdMap := make(map[int][]string)
	// BUG FIX: minLen was initialised to 0, so `len(cmd) < minLen` could
	// never be true and minLen stayed 0 forever, defeating split's early
	// rejection. Seed it from the first command instead.
	minLen := len(cmds[0])
	for _, cmd := range cmds {
		if len(cmd) < minLen {
			minLen = len(cmd)
		}
		// append handles the missing-key case (nil slice) directly.
		cmdMap[len(cmd)] = append(cmdMap[len(cmd)], cmd)
	}
	return &parser{cmdMap: cmdMap, minLen: minLen}
}
// parse starts 100 worker goroutines that drain the global message queue and
// dispatch each message to the session-state index.
//
// NOTE(review): the workers have no stop signal and run for the process
// lifetime — confirm that is intended.
func (p *parser) parse() {
	for i := 0; i < 100; i++ {
		go func() {
			for {
				select {
				case message := <-getQueue().pullUp():
					// A CLOSED message carries no payload: dispatch the
					// CLOSE command with an empty suffix.
					if message.mtype == CLOSED {
						getSessionStatesIndex().dispatch(message.chid, CLOSE, NULL)
						continue
					}
					cmd, suffix := p.split(message.data)
					getSessionStatesIndex().dispatch(message.chid, cmd, suffix)
				}
			}
		}()
	}
}
// split matches str against the known command prefixes and returns the
// matched command plus the remainder with surrounding whitespace trimmed.
// Unrecognised input is returned as NONCOMMAND.
func (p *parser) split(str string) (string, string) {
	// Shorter than the shortest command: cannot match anything.
	if len(str) < p.minLen {
		return NONCOMMAND, str
	}
	for k, v := range p.cmdMap {
		if len(str) < k {
			continue
		}
		for _, cmd := range v {
			// Idiom: str[:k] / str[k:] instead of str[0:k] / str[k:len(str)].
			if str[:k] == cmd {
				return cmd, strings.TrimSpace(str[k:])
			}
		}
	}
	return NONCOMMAND, strings.TrimSpace(str)
}
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"log"
"net/http"
"github.com/josetom/go-chain/common"
"github.com/josetom/go-chain/core"
"github.com/josetom/go-chain/node"
"github.com/spf13/cobra"
)
// CLI flag names used by the "tx add" command.
const flagFrom = "from"
const flagTo = "to"
const flagValue = "value"
const flagData = "data"
// txCmd returns the parent "tx" command. Running it without a subcommand is
// reported as incorrect usage via PreRunE.
func txCmd() *cobra.Command {
	root := &cobra.Command{
		Use:   "tx",
		Short: "Interact with txs (add...).",
		PreRunE: func(cmd *cobra.Command, args []string) error {
			return incorrectUsageErr()
		},
		Run: func(cmd *cobra.Command, args []string) {},
	}

	root.AddCommand(txAddCmd())

	return root
}
// txAddCmd returns the "tx add" subcommand, which posts a new transaction to
// the node's HTTP transactions endpoint and logs the resulting hash.
func txAddCmd() *cobra.Command {
	var cmd = &cobra.Command{
		Use:   "add",
		Short: "Adds new TX to database.",
		Run: func(cmd *cobra.Command, args []string) {
			from, _ := cmd.Flags().GetString(flagFrom)
			to, _ := cmd.Flags().GetString(flagTo)
			value, _ := cmd.Flags().GetUint(flagValue)
			data, _ := cmd.Flags().GetString(flagData)
			fromAcc := common.NewAddress(from)
			toAcc := common.NewAddress(to)
			url := fmt.Sprintf(
				"%s%s",
				node.Config.Http.Host,
				node.RequestTransactions,
			)
			body := &core.TransactionData{
				From:  fromAcc,
				To:    toAcc,
				Value: value,
				Data:  data,
			}
			payloadBuf := new(bytes.Buffer)
			// BUG FIX: the encode error was silently discarded.
			if err := json.NewEncoder(payloadBuf).Encode(body); err != nil {
				log.Panicln(err)
			}
			res, err := http.Post(url, "application/json", payloadBuf)
			if err != nil {
				log.Panicln(err)
			}
			// BUG FIX: the response body was never closed, leaking the
			// underlying connection.
			defer res.Body.Close()
			txnRes := core.Transaction{}
			node.ReadRes(res, &txnRes)
			log.Println("TX successfully added to the ledger.", txnRes.TxnHash)
		},
	}
	cmd.Flags().String(flagFrom, "", "From what address to send tokens")
	cmd.MarkFlagRequired(flagFrom)
	cmd.Flags().String(flagTo, "", "To what address to send tokens")
	cmd.MarkFlagRequired(flagTo)
	cmd.Flags().Uint(flagValue, 0, "How many tokens to send")
	cmd.MarkFlagRequired(flagValue)
	cmd.Flags().String(flagData, "", "Transaction data")
	return cmd
}
|
package selector
import "context"
// SelectOptions holds the options applied during node selection.
type SelectOptions struct {
	Filters []Filter // node filters applied in order
}

// SelectOption mutates a SelectOptions; it is the functional option type for
// selectors.
type SelectOption func(*SelectOptions)

// Filter is a node filter function: it receives the candidate nodes and
// returns the subset that remain eligible.
type Filter func(context.Context, []Node) []Node
// WithFilter returns a SelectOption that sets the selector's filter chain to fn.
func WithFilter(fn ...Filter) SelectOption {
	return func(o *SelectOptions) {
		o.Filters = fn
	}
}
|
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"os"
"strconv"
)
var version = "undefined"
// main reads JSON from a file argument (or stdin when no argument is given)
// and re-emits it: compact by default, indented with -p, quoted with -e.
// -v prints the build version and exits.
//
// Exit codes: 2 read failure, 3 parse failure, 4/5 re-encode failure.
func main() {
	pretty := flag.Bool("p", false, "pretty print")
	escape := flag.Bool("e", false, "quote print")
	vv := flag.Bool("v", false, "print version")
	flag.Parse()
	if *vv {
		fmt.Fprintln(os.Stdout, version)
		os.Exit(0)
	}
	var readFromStdinFlag bool
	var bs []byte
	if flag.NArg() < 1 {
		inBS, err := ioutil.ReadAll(os.Stdin)
		if err != nil {
			fmt.Fprintf(os.Stderr, "read from stdin fail: %v\n", err)
			os.Exit(2)
		}
		bs = inBS
		readFromStdinFlag = true
	} else {
		filepath := flag.Arg(0)
		fileBS, err := ioutil.ReadFile(filepath)
		if err != nil {
			fmt.Fprintf(os.Stderr, "read file fail: %v\n", err)
			os.Exit(2)
		}
		bs = fileBS
	}
	// DEAD CODE REMOVED: json.Unmarshal into a *interface{} always stores a
	// freshly decoded value, so pre-seeding the interface with a map (or,
	// after sniffing the first byte for '[', a slice) had no effect — and the
	// sniff broke on leading whitespace. A nil interface behaves identically.
	var jsonObj interface{}
	if err := json.Unmarshal(bs, &jsonObj); err != nil {
		fmt.Fprintf(os.Stderr, "parse json fail: %v\n", err)
		os.Exit(3)
	}
	if *pretty {
		r, err := json.MarshalIndent(jsonObj, "", " ")
		if err != nil {
			fmt.Fprintf(os.Stderr, "print json fail: %v\n", err)
			os.Exit(4)
		}
		if readFromStdinFlag {
			// Separate the output from the piped input with a blank line.
			fmt.Fprintln(os.Stdout)
		}
		fmt.Fprintf(os.Stdout, "%s\n", string(r))
		os.Exit(0)
	} else {
		r, err := json.Marshal(jsonObj)
		if err != nil {
			fmt.Fprintf(os.Stderr, "print json fail: %v\n", err)
			os.Exit(5)
		}
		s := string(r)
		if *escape {
			s = strconv.Quote(s)
		}
		if readFromStdinFlag {
			fmt.Fprintln(os.Stdout)
		}
		fmt.Fprintf(os.Stdout, "%s\n", s)
		os.Exit(0)
	}
}
|
package util
// StringValue dereferences p, treating a nil pointer as the empty string.
func StringValue(p *string) string {
	if p != nil {
		return *p
	}
	return ""
}
|
package action
import (
"encoding/json"
"fmt"
"strings"
"github.com/iris-contrib/blackfriday"
"github.com/microcosm-cc/bluemonday"
"github.com/mylxsw/adanos-alert/configs"
"github.com/mylxsw/adanos-alert/internal/repository"
"github.com/mylxsw/adanos-alert/pkg/messager/email"
"github.com/mylxsw/asteria/log"
"github.com/mylxsw/glacier/infra"
"github.com/mylxsw/go-utils/array"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// EmailAction delivers rule notifications by email via an SMTP client.
type EmailAction struct {
	manager Manager       // resolves dependencies and renders templates in Handle
	client  *email.Client // SMTP client used to send the message
}

// EmailMeta is the email-sending metadata attached to a trigger
// (original comment: 邮件发送元数据).
type EmailMeta struct {
	Template string `json:"template"` // optional body template; empty means use the default summary
}
// Validate implements the action's configuration validation hook. The email
// action accepts any meta and user references, so it always returns nil.
func (e EmailAction) Validate(meta string, userRefs []string) error {
	return nil
}
// NewEmailAction builds an EmailAction whose SMTP client is configured from
// the application's EmailSMTP settings.
func NewEmailAction(manager Manager, conf *configs.Config) *EmailAction {
	smtp := conf.EmailSMTP
	return &EmailAction{
		manager: manager,
		client:  email.NewClient(smtp.Host, smtp.Port, smtp.Username, smtp.Password),
	}
}
// Handle renders the event group as an HTML email body and sends it to every
// user referenced by the trigger or its matched events.
func (e EmailAction) Handle(rule repository.Rule, trigger repository.Trigger, grp repository.EventGroup) error {
	var meta EmailMeta
	if err := json.Unmarshal([]byte(trigger.Meta), &meta); err != nil {
		// %w (instead of %v) keeps the cause inspectable via errors.Is/As.
		return fmt.Errorf("parse email meta failed: %w", err)
	}
	return e.manager.Resolve(func(resolver infra.Resolver, conf *configs.Config, msgRepo repository.EventRepo, userRepo repository.UserRepo) error {
		payload, summary := createPayloadAndSummary(e.manager, "email", conf, msgRepo, rule, trigger, grp)
		// An explicit template in the trigger meta overrides the default summary.
		if strings.TrimSpace(meta.Template) != "" {
			summary = parseTemplate(e.manager, meta.Template, payload)
		}
		// Render markdown to HTML, then sanitize the result.
		summary = string(bluemonday.UGCPolicy().SanitizeBytes(blackfriday.Run([]byte(summary))))
		emails := extractEmailsFromUserRefs(userRepo, getUserRefs(resolver, trigger, grp, msgRepo))
		if err := e.client.Send(rule.Name, summary, emails...); err != nil {
			log.WithFields(log.Fields{
				"subject": rule.Name,
				"body":    summary,
				"emails":  emails,
				"err":     err,
			}).Errorf("send message to email failed: %v", err)
			return err
		}
		if log.DebugEnabled() {
			log.WithFields(log.Fields{
				"title": rule.Name,
			}).Debug("send message to email succeed")
		}
		return nil
	})
}
// extractEmailsFromUserRefs resolves the referenced users and returns an email
// address for each: the user's Email field when set, otherwise the value of
// the first user meta whose key is "email" (case-insensitive). Users with
// neither are skipped. Lookup failures yield an empty slice.
func extractEmailsFromUserRefs(userRepo repository.UserRepo, userRefs []primitive.ObjectID) []string {
	if len(userRefs) == 0 {
		return []string{}
	}

	users, err := userRepo.Find(bson.M{"_id": bson.M{"$in": userRefs}})
	if err != nil {
		log.WithFields(log.Fields{
			"err":      err.Error(),
			"userRefs": userRefs,
		}).Errorf("load user from repo failed: %s", err)
		return []string{}
	}

	// Single pass replacing the original Filter+Map pair; the selection and
	// mapping rules are identical.
	emails := make([]string, 0, len(users))
	for _, user := range users {
		if user.Email != "" {
			emails = append(emails, user.Email)
			continue
		}
		for _, m := range user.Metas {
			if strings.ToLower(m.Key) == "email" {
				emails = append(emails, m.Value)
				break
			}
		}
	}
	return emails
}
|
package utils
// TreeNode is a binary tree node.
type TreeNode struct {
	Val   int       // node value
	Left  *TreeNode // left child, nil when absent
	Right *TreeNode // right child, nil when absent
}

// Example level-order (hierarchical) encoding of a tree:
// [1,2,5,3,4,null,6]
|
package exchange
import (
"github.com/stretchr/testify/assert"
"log"
"testing"
)
// TestBittrex_SetPairs verifies that SetPairs populates the exchange config
// with the expected BTC markets.
func TestBittrex_SetPairs(t *testing.T) {
	var bittrex Bittrex
	bittrex.SetPairs()

	pairs := bittrex.GetConfig().Pairs
	for _, want := range []*Pair{{"BTC", "ETH"}, {"BTC", "LTC"}} {
		assert.Contains(t, pairs, want)
	}
}
// TestBittrex_GetResponse fetches the live BTC-ETH ticker and sanity-checks
// that the returned price and volume are positive.
func TestBittrex_GetResponse(t *testing.T) {
	bittrex := Bittrex{}
	price, err := bittrex.GetResponse("BTC", "ETH")
	if err != nil {
		// BUG FIX: log.Fatal calls os.Exit, which aborts the whole test
		// binary and skips cleanup; t.Fatal fails only this test.
		t.Fatal(err)
	}
	assert.True(t, price.Price > 0, "price from Bittrex isn't greater than 0")
	assert.True(t, price.Volume > 0, "volume from Bittrex isn't greater than 0")
}
|
package middlewares
import (
"net/http"
"github.com/julienschmidt/httprouter"
)
// AuthMiddleware wraps handler with (currently disabled) bearer-token
// authorization; for now every request is passed straight through.
func AuthMiddleware(handler httprouter.Handle) httprouter.Handle {
	// The func literal already has type httprouter.Handle, so the explicit
	// conversion in the original was redundant.
	return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
		// TODO: re-enable the Authorization header check, e.g.:
		//   // Authorization: Bearer <token>
		//   authHeader, ok := r.Header["Authorization"]
		//   if !ok {
		//       w.WriteHeader(403)
		//       fmt.Fprintf(w, "Access denied")
		//       return
		//   }
		handler(w, r, p)
	}
}
|
package manifest
import (
"crypto/md5"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/ghodss/yaml"
)
// Entry describes a single managed file in the manifest.
type Entry struct {
	Path      string `json:"path" jsonschema_description:"relative storage path"`
	MD5       string `json:"md5" jsonschema_description:"MD5 of file"`
	Source    string `json:"source" jsonschema_description:"URL of original download"`
	Timestamp string `json:"timestamp" jsonschema_description:"timestamp of file"`
	realPath  string // Path resolved against the manifest's directory; set in Load
}

// File is a parsed manifest: its entries plus the path it was loaded from.
type File struct {
	Entries []Entry
	path    string
}
// Load reads and parses the manifest at relpath. Each entry's realPath is
// resolved relative to the manifest file's directory.
func Load(relpath string) (File, error) {
	// Try to get absolute path. If it fails, fall back to relative path.
	path, abserr := filepath.Abs(relpath)
	if abserr != nil {
		path = relpath
	}
	// Read file
	source, err := ioutil.ReadFile(path)
	if err != nil {
		// %w (instead of %v) keeps the cause inspectable via errors.Is/As.
		return File{}, fmt.Errorf("failed to read config at path %s: \n%w", path, err)
	}
	entlist := []Entry{}
	if err := yaml.Unmarshal(source, &entlist); err != nil {
		return File{}, fmt.Errorf("failed to parse config at path %s: \n%w", path, err)
	}
	baseDir := filepath.Dir(path)
	for i := range entlist {
		entlist[i].realPath = filepath.Join(baseDir, entlist[i].Path)
	}
	return File{entlist, path}, nil
}
// ParseDataFile parses the YAML file at path into data.
func ParseDataFile(path string, data *map[string]interface{}) error {
	raw, err := ioutil.ReadFile(path)
	if err != nil {
		// %w (instead of %v) keeps the cause inspectable via errors.Is/As.
		return fmt.Errorf("failed to read data at path %s: \n%w", path, err)
	}
	return yaml.Unmarshal(raw, data)
}
// Exists reports whether the entry's resolved file is present on disk.
// Note: any stat error other than "not exist" (e.g. permission denied) also
// reports true.
func (e Entry) Exists() bool {
	if _, err := os.Stat(e.realPath); os.IsNotExist(err) {
		return false
	}
	return true
}
// CalcMD5 streams the entry's file through an MD5 hash and returns the digest
// as a lowercase hex string.
func (e Entry) CalcMD5() (string, error) {
	file, err := os.Open(e.realPath)
	if err != nil {
		return "", err
	}
	defer file.Close()

	hasher := md5.New()
	if _, copyErr := io.Copy(hasher, file); copyErr != nil {
		return "", copyErr
	}
	return fmt.Sprintf("%x", hasher.Sum(nil)), nil
}
|
package main
import "fmt"
// ***************************************************
// by default channels are unbuffered
// i.e., they will only accept sends (chan <-)
// only if there is receive (<- chan) ready to
// receive the send value.
// Buffered channels accept a limited number of values
// without a corresponding receiver for those values
// ***************************************************
// main demonstrates a buffered channel: with capacity 2, both sends complete
// before any receive happens, then the values are drained in FIFO order.
func main() {
	messages := make(chan string, 2)

	messages <- "India"
	messages <- "Israel"

	for i := 0; i < 2; i++ {
		fmt.Println(<-messages)
	}
}
|
package anime
import (
"context"
"reflect"
"testing"
"github.com/DATA-DOG/go-sqlmock"
)
// TestRepository exercises mySQLRepository against a sqlmock-backed database:
// two paginated GetAnimes queries and a single-row GetAnime lookup, each
// verifying both the decoded result and that every expectation was consumed.
func TestRepository(t *testing.T) {
	t.Run("GetAnimes(limit = 2, offset = 0) returns []Anime{{...}, {...}}", func(t *testing.T) {
		db, mock, err := sqlmock.New()
		if err != nil {
			t.Errorf("An error '%s' was not expected when opening a stub database connection", err)
		}
		defer db.Close()
		// Fixture rows mirrored into the mocked result set below.
		animes := []Anime{
			{ID: 1, Title: "Tokyo Ghoul", Description: "Testing...", Plot: "Testing..."},
			{ID: 2, Title: "Death Parade", Description: "Testing...", Plot: "Testing..."},
		}
		rows := sqlmock.NewRows([]string{"id", "title", "description", "plot"}).
			AddRow(animes[0].ID, animes[0].Title, animes[0].Description, animes[0].Plot).
			AddRow(animes[1].ID, animes[1].Title, animes[1].Description, animes[1].Plot)
		mock.ExpectQuery("^SELECT (.+) FROM animes LIMIT (.+) OFFSET (.+)$").
			WithArgs(2, 0).
			WillReturnRows(rows)
		repo := mySQLRepository{db}
		got, err := repo.GetAnimes(context.Background(), 2, 0)
		if err != nil {
			t.Fatalf("unable to process your request %v", err)
		}
		want := animes
		if !reflect.DeepEqual(got, want) {
			t.Errorf("got %v want %v", got, want)
		}
		if err := mock.ExpectationsWereMet(); err != nil {
			t.Errorf("there were unfulfilled expectations: %s", err)
		}
	})
	t.Run("GetAnimes(limit = 2, offset = 1) returns []Anime{{...}, {...}}", func(t *testing.T) {
		db, mock, err := sqlmock.New()
		if err != nil {
			t.Errorf("An error '%s' was not expected when opening a stub database connection", err)
		}
		defer db.Close()
		// Offset 1 is mocked to return a single remaining row.
		animes := []Anime{
			{ID: 1, Title: "Tokyo Ghoul", Description: "Testing...", Plot: "Testing..."},
		}
		rows := sqlmock.NewRows([]string{"id", "title", "description", "plot"}).
			AddRow(animes[0].ID, animes[0].Title, animes[0].Description, animes[0].Plot)
		mock.ExpectQuery("^SELECT (.+) FROM animes LIMIT (.+) OFFSET (.+)$").
			WithArgs(2, 1).
			WillReturnRows(rows)
		repo := mySQLRepository{db}
		got, err := repo.GetAnimes(context.Background(), 2, 1)
		if err != nil {
			t.Fatalf("unable to process your request %v", err)
		}
		want := animes
		if !reflect.DeepEqual(got, want) {
			t.Errorf("got %v want %v", got, want)
		}
		if err := mock.ExpectationsWereMet(); err != nil {
			t.Errorf("there were unfulfilled expectations: %s", err)
		}
	})
	t.Run("GetAnime(id = 1) returns &Anime{}", func(t *testing.T) {
		db, mock, err := sqlmock.New()
		if err != nil {
			t.Errorf("An error '%s' was not expected when opening a stub database connection", err)
		}
		defer db.Close()
		anime := &Anime{
			ID:          1,
			Title:       "Tokyo Ghoul",
			Description: "Testing...",
			Plot:        "Testing...",
		}
		rows := sqlmock.NewRows([]string{"id", "title", "description", "plot"}).
			AddRow(anime.ID, anime.Title, anime.Description, anime.Plot)
		mock.ExpectQuery("^SELECT (.+) FROM animes WHERE id = (.+)$").
			WithArgs(1).
			WillReturnRows(rows)
		repo := mySQLRepository{db}
		got, err := repo.GetAnime(context.Background(), 1)
		if err != nil {
			t.Fatalf("unable to process your request %v", err)
		}
		want := anime
		if !reflect.DeepEqual(got, want) {
			t.Errorf("got %v want %v", got, want)
		}
		if err := mock.ExpectationsWereMet(); err != nil {
			t.Errorf("there were unfulfilled expectations: %s", err)
		}
	})
}
|
package main
//55. 跳跃游戏
//给定一个非负整数数组,你最初位于数组的第一个位置。
//
//数组中的每个元素代表你在该位置可以跳跃的最大长度。
//
//判断你是否能够到达最后一个位置。
//
//示例1:
//
//输入: [2,3,1,1,4]
//输出: true
//解释: 我们可以先跳 1 步,从位置 0 到达 位置 1, 然后再从位置 1 跳 3 步到达最后一个位置。
//示例2:
//
//输入: [3,2,1,0,4]
//输出: false
//解释: 无论怎样,你总会到达索引为 3 的位置。但该位置的最大跳跃长度是 0 , 所以你永远不可能到达最后一个位置。
//思路 动态规划
// canJumpDP reports whether the last index is reachable using O(n^2) dynamic
// programming: dp[i] is true when the end is reachable from index i.
//
// BUG FIX: the file declared two functions named canJump, which does not
// compile ("canJump redeclared in this block"). This DP variant is renamed;
// the greedy variant keeps the original name because main calls it.
func canJumpDP(nums []int) bool {
	n := len(nums)
	// Robustness: an empty array has no last position to reach.
	if n == 0 {
		return false
	}
	dp := make([]bool, n)
	dp[n-1] = true
	for i := n - 2; i >= 0; i-- {
		for j := 1; j <= nums[i] && i+j < n; j++ {
			if dp[i+j] {
				dp[i] = true
				break // no need to scan further once reachable
			}
		}
	}
	return dp[0]
}
// canJump reports whether the last index is reachable using a greedy scan
// (原思路: 贪心算法): furthest tracks the rightmost index reachable so far.
func canJump(nums []int) bool {
	furthest := 0
	last := len(nums) - 1
	for i, step := range nums {
		if i > furthest {
			// Nothing beyond furthest is reachable; later indices can't help.
			break
		}
		furthest = max(furthest, i+step)
		if furthest >= last {
			return true
		}
	}
	return false
}
// max returns the larger of x and y.
func max(x, y int) int {
	if y > x {
		return y
	}
	return x
}
// main runs the greedy canJump on the first sample input ([2,3,1,1,4] → true).
func main() {
	println(canJump([]int{2, 3, 1, 1, 4}))
}
|
// Copyright (c) 2014 ZeroStack Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package datastore
import (
"errors"
"fmt"
"reflect"
"strings"
"time"
"github.com/zerostackinc/dbconn"
"github.com/zerostackinc/util"
)
// NewDeleteQuery returns a DeleteQuery instance given a typ. Typ needs to be
// a struct with column definitions with CQL tags. Refer to datastore_test
// for examples.
func NewDeleteQuery(typ reflect.Type) (*DeleteQuery, error) {
	codec, codecErr := getStructCodec(typ)
	if codecErr != nil {
		return nil, codecErr
	}
	q := &DeleteQuery{
		codec:      codec,
		deleteCols: []string{},
	}
	return q, nil
}
// DeleteQuery represents a CQL delete query.
// (Original comment said "update query", which was inaccurate.)
type DeleteQuery struct {
	filter     []filter     // AND'ed WHERE conditions added via Filter
	deleteCols []string     // columns to delete; empty means delete whole rows
	codec      *structCodec // column-family metadata for the struct type
	err        error        // construction error recorded by Filter; NOTE(review): not consulted in toCQL — confirm Run checks it
}
// clone returns a shallow copy of q with the filter and deleteCols slices
// duplicated, so derivative queries never share mutable state with q.
func (q *DeleteQuery) clone() *DeleteQuery {
	cp := *q
	if n := len(q.filter); n > 0 {
		cp.filter = append(make([]filter, 0, n), q.filter...)
	}
	if n := len(q.deleteCols); n > 0 {
		cp.deleteCols = append(make([]string, 0, n), q.deleteCols...)
	}
	return &cp
}
// Filter returns a derivative query with a field-based filter.
// Args : The field name (with optional space), Operator, and value.
// Fields are compared against the provided value using the operator.
// Multiple filters are AND'ed together.
func (q *DeleteQuery) Filter(field string, op Operator,
	value interface{}) *DeleteQuery {
	q = q.clone()
	field = strings.TrimSpace(field)
	if field == "" {
		q.err = errors.New("datastore: invalid filter: " + field)
		return q
	}
	// IN expects values as Slice.
	if op == IN {
		if kind := reflect.TypeOf(value).Kind(); kind != reflect.Slice {
			q.err = errors.New("datastore: invalid IN :: " + "filter " + field +
				", expects Slice got " + kind.String())
			return q
		}
	}
	q.filter = append(q.filter, filter{
		FieldName: field,
		Op:        op,
		Value:     value,
	})
	return q
}
// AddColumnsToDelete adds given fieldName to the list of columns that will be
// deleted when query is executed. Empty fieldnames will delete the entire row.
func (q *DeleteQuery) AddColumnsToDelete(fieldNames ...string) *DeleteQuery {
	q = q.clone()
	// NOTE(review): this REPLACES the column list with a fresh copy of
	// fieldNames rather than appending, despite the "Add" name — confirm
	// whether repeated calls are meant to accumulate.
	q.deleteCols = append([]string(nil), fieldNames...)
	return q
}
// toCQL renders the query as a CQL DELETE statement plus the bound
// arguments produced by the WHERE clause.
func (q *DeleteQuery) toCQL() (cql string, args []interface{}, err error) {
	codec := q.codec
	columnStr := ""
	if len(q.deleteCols) > 0 {
		columnStr = strings.Join(q.deleteCols, ",")
	}
	// With no explicit columns this yields "DELETE  FROM <cf>" (double
	// space), i.e. a whole-row delete — presumably harmless whitespace,
	// but verify against the CQL grammar in use.
	cql = fmt.Sprintf("DELETE %s FROM %s", columnStr, codec.columnFamily)
	whereClause, whereArgs, err := getWhereClause(q.codec, q.filter)
	if err != nil {
		return "", whereArgs, err
	}
	cql = cql + whereClause
	args = append(args, whereArgs...)
	return cql, args, nil
}
// CQL returns the CQL query statement corresponding to the delete query.
func (q *DeleteQuery) CQL() (string, error) {
	cql, _, err := q.toCQL()
	return cql, err
}
// Run executes the DeleteQuery.
func (q *DeleteQuery) Run(dbConn dbconn.DBConn) error {
	// Emit a timing log entry when the query finishes.
	defer util.LogExecutionTime(1, time.Now(), "datastore:delete_query:run")
	session := dbConn.GetSession()
	if session == nil {
		return fmt.Errorf("invalid session")
	}
	defer dbConn.ReleaseSession(session)
	// NOTE(review): q.err recorded by Filter is never consulted here or in
	// toCQL — confirm whether Run should fail fast on a malformed filter.
	cql, args, err := q.toCQL()
	if err != nil {
		return err
	}
	cqlQ := session.Query(cql, args...)
	return cqlQ.Exec()
}
|
/*
* OFAC API
*
* OFAC (Office of Foreign Assets Control) API is designed to facilitate the enforcement of US government economic sanctions programs required by federal law. This project implements a modern REST HTTP API for companies and organizations to obey federal law and use OFAC data in their applications.
*
* API version: v1
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package openapi
// Dpl is a BIS Denied Persons List item. All date fields are plain strings
// as published in the source data — format not specified here; confirm
// against the upstream DPL feed before parsing.
type Dpl struct {
	// Name of the Denied Person
	Name string `json:"name,omitempty"`
	// Denied Person's street address
	StreetAddress string `json:"streetAddress,omitempty"`
	// Denied Person's city
	City string `json:"city,omitempty"`
	// Denied Person's state
	State string `json:"state,omitempty"`
	// Denied Person's country
	Country string `json:"country,omitempty"`
	// Denied Person's postal code
	PostalCode string `json:"postalCode,omitempty"`
	// Date when denial came into effect
	EffectiveDate string `json:"effectiveDate,omitempty"`
	// Date when denial expires; if blank the denial never expires
	ExpirationDate string `json:"expirationDate,omitempty"`
	// Denotes whether the Denied Person was added by a standard order
	StandardOrder string `json:"standardOrder,omitempty"`
	// Most recent date when the Denied Person record was updated
	LastUpdate string `json:"lastUpdate,omitempty"`
	// Most recent action taken regarding the denial
	Action string `json:"action,omitempty"`
	// Reference to the order's citation in the Federal Register
	FrCitation string `json:"frCitation,omitempty"`
	// Match score assigned by the search API
	Match float32 `json:"match,omitempty"`
}
|
package main
import "fmt"
//map _,ok:= m[V]
// main demonstrates the two dedup helpers on the same sample slice.
// Note: deleteEmptyStringByMap's output order follows Go map iteration and
// therefore varies between runs.
func main() {
	srcArr := []string{"red", "black", "red", "pink", "blue", "pink", "blue"}
	dst := deleteRepeatElement(srcArr)
	fmt.Println(dst)
	dst2 := deleteEmptyStringByMap(srcArr)
	fmt.Println(dst2)
}
func deleteEmptyStringByMap(src []string)(dst []string) {
m:=make(map[string]int)
for _,v:= range src{
m[v]=0
}
for k:=range m{
dst= append(dst,k)
}
return
}
func deleteRepeatElement(src []string)(dst[]string) {
for _,v:=range src{
flag:=false
//先判断输出的slice里面有没有
for _,str:=range dst{
//设置一个flag
if str==v {
flag=true
}
}
//没有就append
if !flag {
dst = append(dst,v)
}
}
return
}
|
package store
import "github.com/skoltai/limithandling/domain"
// AppRepository specifies the possible interactions with Apps.
type AppRepository interface {
	// Create stores app and returns the new record's ID.
	Create(app App) int
	// Get retrieves an App by ID.
	Get(id int) (App, error)
	// Update replaces an existing App, reporting success.
	Update(app App) bool
	// All returns every stored App.
	All() []App
	// LimitOverrides returns the limits overriding the defaults for an app.
	LimitOverrides(id int) []domain.Limit
}
// SimpleAppRepository implements a simple, in-memory AppRepository.
type SimpleAppRepository struct {
	store *MemoryStore // shared in-memory backing store
}
// NewSimpleAppRepository is a constructor for SimpleAppRepository.
func NewSimpleAppRepository(store *MemoryStore) AppRepository {
	return &SimpleAppRepository{store: store}
}
// Create creates an App record and returns with the ID.
func (r *SimpleAppRepository) Create(app App) int {
	return r.store.Apps.create(app)
}
// Get retrieves an App by ID or returns an empty App and an error if it
// can't be found.
func (r *SimpleAppRepository) Get(id int) (App, error) {
	return r.store.Apps.get(id)
}
// Update updates an App and returns whether the update was successful.
func (r *SimpleAppRepository) Update(app App) bool {
	return r.store.Apps.update(app)
}
// All returns all stored Apps for testing purposes. Ordering follows map
// iteration and is therefore unspecified.
func (r *SimpleAppRepository) All() []App {
	apps := make([]App, 0, len(r.store.Apps.items))
	for _, app := range r.store.Apps.items {
		apps = append(apps, app)
	}
	return apps
}
// LimitOverrides retrieves LimitOverrides by app ID.
func (r *SimpleAppRepository) LimitOverrides(id int) []domain.Limit {
	matching := r.store.LimitOverrides.filter(func(o LimitOverride) bool {
		return o.AppID == id
	})
	res := make([]domain.Limit, 0, len(matching))
	for _, o := range matching {
		res = append(res, o.Limit)
	}
	return res
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package hypervisor
import (
"context"
"chromiumos/tast/remote/hypervisor"
"chromiumos/tast/testing"
)
// init registers the test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func:         IsManatee,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Verify that manatee detection is accurate",
		Contacts:     []string{"psoberoi@google.com", "manateam@google.com"},
		Attr:         []string{"group:mainline"},
		// Each parameter pins the expected detection result via a software
		// dependency on the corresponding build flavor.
		Params: []testing.Param{{
			Name:              "without_manatee",
			ExtraSoftwareDeps: []string{"no_manatee"},
			Val:               false,
		}, {
			Name:              "with_manatee",
			ExtraSoftwareDeps: []string{"manatee"},
			Val:               true,
		}},
	})
}
// IsManatee checks that hypervisor.IsManatee reports the value expected for
// the current build flavor, as selected by the test parameter.
func IsManatee(ctx context.Context, s *testing.State) {
	manateeExpected := s.Param().(bool)
	d := s.DUT()
	manatee, err := hypervisor.IsManatee(ctx, d)
	if err != nil {
		// Fix: dropped the misleading "WARNING: " prefix — s.Fatal aborts
		// the test, so this is a fatal error, not a warning.
		s.Fatal("Failed to check for ManaTEE: ", err)
	}
	if manatee != manateeExpected {
		s.Errorf("Unexpected result from IsManatee: %v (expected %v)", manatee, manateeExpected)
	}
}
|
package glutton
import (
"context"
"fmt"
"io"
"net"
"net/url"
"strings"
"github.com/reiver/go-telnet"
log "go.uber.org/zap"
)
// telnetProxy proxies telnet traffic between a honeypot connection and the
// real destination host configured in config.yaml.
type telnetProxy struct {
	logger *log.Logger
	// NOTE(review): curConn, proxyclient and hostconn are never assigned in
	// this file — confirm they are used elsewhere before removing.
	curConn     net.Conn
	proxyclient *telnet.Client
	hostconn    *telnet.Conn
	glutton     *Glutton
	host        string // destination in host:port form, taken from the URL
}
// NewTelnetProxy creates a new telnet proxy session targeting the host part
// of destinationURL and registers it on the Glutton instance.
func (g *Glutton) NewTelnetProxy(destinationURL string) (err error) {
	t := &telnetProxy{
		logger:  g.logger,
		glutton: g,
	}
	dest, err := url.Parse(destinationURL)
	if err != nil {
		t.logger.Error("[telnet.prxy] failed to parse destination address, check config.yaml")
		return err
	}
	t.logger.Info(fmt.Sprintf("[telnet proxy] %v", dest.Host))
	t.host = dest.Host
	g.telnetProxy = t
	return
}
// handle proxies data bidirectionally between conn (the honeypot client)
// and the configured destination host, logging traffic in both directions.
// One goroutine pumps each direction; when either side fails or EOFs, both
// connections are closed so the other pump winds down.
func (t *telnetProxy) handle(ctx context.Context, conn net.Conn) (err error) {
	// NOTE(review): ended is read and written from two goroutines without
	// synchronization — a data race; consider an atomic.Bool or a shared
	// cancellation instead.
	ended := false
	g := t.glutton
	tcpAddr, err := net.ResolveTCPAddr("tcp", t.host)
	if err != nil {
		t.logger.Error(fmt.Sprintf("ResolveTCPAddr failed: %v", err.Error()))
		return
	}
	hconn, err := net.DialTCP("tcp", nil, tcpAddr)
	if err != nil {
		t.logger.Error(fmt.Sprintf("[telnet proxy ] Connection error: %v", err))
		return
	}
	// Server -> client pump.
	go func() {
		defer func() {
			ended = true
			conn.Close()
			hconn.Close()
		}()
		// Reuse one buffer instead of allocating 8 KiB per iteration.
		reply := make([]byte, 8*1024)
		for {
			if ended || hconn == nil || conn == nil {
				break
			}
			// Bug fix: honor the byte count returned by Read. The previous
			// code forwarded and logged the entire 8 KiB buffer (including
			// stale bytes and trailing NULs) regardless of how much was read.
			n, rerr := hconn.Read(reply)
			if rerr != nil {
				if rerr == io.EOF {
					t.logger.Error("[telnet proxy ] Connection closed by Server")
					break
				}
				t.logger.Error(fmt.Sprintf("[telnet proxy ] error: %v", rerr))
				break
			}
			data := string(reply[:n])
			t.logger.Info(fmt.Sprintf("[telnet proxy ] Info: Received: %d bytes(s) from Server. Bytes: %s", n, data))
			if werr := writeMsg(conn, data, g); werr != nil {
				t.logger.Error(fmt.Sprintf("[telnet proxy] Error: %v", werr))
				break
			}
		}
	}()
	// Client -> server pump.
	go func() {
		defer func() {
			ended = true
			conn.Close()
			hconn.Close()
		}()
		for {
			if ended || hconn == nil || conn == nil {
				return
			}
			msg, merr := readMsg(conn, g)
			if merr != nil {
				t.logger.Error(fmt.Sprintf("[telnet proxy] Error: %v", merr))
				break
			}
			if _, werr := hconn.Write([]byte(msg)); werr != nil {
				if werr == io.EOF {
					t.logger.Error("[telnet proxy ] Connection closed by Server")
					break
				}
				t.logger.Error(fmt.Sprintf("[telnet proxy ] Error: %v", werr))
				break
			}
			// "^C" from the client terminates the session.
			if msg == "^C" {
				break
			}
			if len(strings.Trim(msg, " ")) > 0 {
				t.logger.Info(fmt.Sprintf("[telnet proxy ] Info: Sending: %d bytes(s) to Server, Bytes:\n %s", len(msg), msg))
			}
		}
	}()
	return
}
|
package panicdemo
import (
"fmt"
"testing"
"time"
)
// Test_Panic drives the panic/recover demonstration. Note: the goroutine
// spawned inside panicfunc panics without a recover on that goroutine, so
// running this crashes the test binary by design.
func Test_Panic(t *testing.T) {
	panicDemo()
}
func panicDemo() {
	// 1. A panic terminates the program.
	// 2. A panic propagates backwards along the call chain until the
	//    process exits or the panic is recovered.
	// 3. However, a panic does not skip deferred calls.
	// 4. Therefore recover inside a defer can catch the panic and stop the
	//    propagation.
	// 5. Only defers on the call chain within the SAME goroutine can catch
	//    the panic, at any level of that chain.
	// 6. Per 5., if panicfunc_2 is started with `go`, this parent cannot
	//    recover a panic raised inside panicfunc_2.
	defer func() {
		fmt.Printf("defer of panicDemo: ")
		if err := recover(); err != nil {
			fmt.Println("catch panic in panicDemo")
		}
		fmt.Println("")
		println()
	}()
	panicfunc()
	time.Sleep(10 * time.Second)
}
// panicfunc recovers panics raised on its own goroutine, then sleeps while
// the goroutine it spawned panics; that panic cannot be recovered here and
// brings the whole process down.
func panicfunc() {
	defer func() {
		fmt.Printf("defer of panicfunc: ")
		if err := recover(); err != nil {
			fmt.Println("catch panic in panicfunc")
		}
		fmt.Println("")
	}()
	go panicfunc_2()
	time.Sleep(3 * time.Second)
	fmt.Println("end panicfunc")
}
// panicfunc_2 runs on its own goroutine. Both defers execute when
// panicfunc_3 panics, after which the still-unrecovered panic terminates
// the entire process.
func panicfunc_2() {
	defer func() {
		// NOTE(review): the message says "panicfunc" but this defer belongs
		// to panicfunc_2 — likely a copy/paste slip in the runtime string,
		// left unchanged here.
		fmt.Println("header of panicfunc")
	}()
	defer func() {
		// Defers run LIFO: this one fires first, demonstrating that
		// deferred work (even a sleep) completes before the panic continues.
		fmt.Println(time.Now())
		time.Sleep(1 * time.Second)
		fmt.Println(time.Now())
	}()
	panicfunc_3()
	println("end panicfunc_2")
}
// panicfunc_3 unconditionally panics; recovery behavior is observed in its
// (grand)parent callers.
func panicfunc_3() {
	panic("panic in panicfunc_3")
}
|
package sshttp
import (
"fmt"
"io"
"net/http"
"net/url"
"os"
"path/filepath"
"sort"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
)
// File implements http.File using remote files over SFTP, and is returned
// by FileSystem's Open method.
type File struct {
	// Embed for interface implementation
	*sftp.File
	// Client for use with File.Readdir
	sftpc *sftp.Client
	// Name of file in remote filesystem (directories carry a trailing slash,
	// see FileSystem.Open)
	name string
	// Number of directory entries already returned by File.Readdir
	offset int
	// EOF on next Readdir loop
	eofNext bool
}
// Readdir is used to implement http.File for remote files over SFTP.
// It behaves in the same manner as os.File.Readdir:
// https://godoc.org/os#File.Readdir.
func (f *File) Readdir(count int) ([]os.FileInfo, error) {
	// Return and signal end of files
	if f.eofNext {
		return nil, io.EOF
	}
	// Gather other files in the same directory
	fis, err := f.sftpc.ReadDir(filepath.Dir(f.name))
	if err != nil {
		return nil, err
	}
	sort.Sort(byBaseName(fis))
	// If a non-positive count is requested, or no more entries remain than
	// requested, return everything not yet returned and EOF next.
	// Bug fix: the old `len(fis) <= count` branch returned the ENTIRE
	// listing, ignoring f.offset, so entries already handed out in a prior
	// call could be returned again.
	if count <= 0 || len(fis)-f.offset <= count {
		f.eofNext = true
		return fis[f.offset:], nil
	}
	// More files exist than requested: return the requested number and
	// advance the offset.
	out := make([]os.FileInfo, count)
	copy(out, fis[f.offset:f.offset+count])
	f.offset += count
	return out, nil
}
// FileSystem implements http.FileSystem for remote files over SFTP.
type FileSystem struct {
	pair *clientPair // paired SSH and SFTP clients (see dialSSHSFTP)
	path string      // remote root directory; Open joins names under it
}
// NewFileSystem creates a new FileSystem which can access remote files over
// SFTP. The resulting FileSystem can be used by net/http to provide access
// to remote files over SFTP, as if they were local. The host parameter
// specifies the URI to dial and access, and the configuration parameter is
// used to configure the underlying SSH connection.
//
// A host must be a complete URI, including a protocol segment. For example,
// sftp://127.0.0.1:22/home/foo dials 127.0.0.1 on port 22, and accesses the
// /home/foo directory on the host.
func NewFileSystem(host string, config *ssh.ClientConfig) (*FileSystem, error) {
	// Validate the URI and require the expected scheme.
	u, err := url.Parse(host)
	if err != nil {
		return nil, err
	}
	if u.Scheme != Protocol {
		return nil, fmt.Errorf("invalid URL scheme: %s", u.Scheme)
	}
	// Dial SSH and start an SFTP subsystem over it.
	pair, err := dialSSHSFTP(u.Host, config)
	if err != nil {
		return nil, err
	}
	fs := &FileSystem{
		pair: pair,
		path: u.Path,
	}
	return fs, nil
}
// Open attempts to access a file under the directory specified in NewFileSystem,
// and attempts to return a http.File for use with net/http.
func (fs *FileSystem) Open(name string) (http.File, error) {
	// Check for the requested file in the remote filesystem
	fpath := filepath.Join(fs.path, name)
	f, err := fs.pair.sftpc.Open(fpath)
	if err != nil {
		return nil, err
	}
	// Create output file.
	// Bug fix: name must be the opened file's full remote path (fpath), not
	// the filesystem root (fs.path). Readdir derives its directory listing
	// from filepath.Dir(f.name), which was wrong for nested files.
	file := &File{
		File:  f,
		sftpc: fs.pair.sftpc,
		name:  fpath,
	}
	// Check for a directory instead of a file, which requires
	// a slightly different name with a trailing slash
	stat, err := f.Stat()
	if err != nil {
		// Fix: don't leak the remote handle on the error path.
		f.Close()
		return nil, err
	}
	if stat.IsDir() {
		file.name = fpath + "/"
	}
	return file, nil
}
// Close closes open SFTP and SSH connections for this FileSystem. Both are
// closed even if the first Close fails; presumably stickyError keeps the
// first non-nil error — confirm its semantics.
func (fs *FileSystem) Close() error {
	var sErr stickyError
	sErr.Set(fs.pair.sftpc.Close())
	sErr.Set(fs.pair.sshc.Close())
	return sErr.Get()
}
// byBaseName implements sort.Interface to sort []os.FileInfo by file name.
type byBaseName []os.FileInfo

func (b byBaseName) Len() int               { return len(b) }
func (b byBaseName) Less(i int, j int) bool { return b[i].Name() < b[j].Name() }
func (b byBaseName) Swap(i int, j int)      { b[i], b[j] = b[j], b[i] }
|
package app
import (
"encoding/json"
"io"
"testing"
"github.com/cosmos/cosmos-sdk/simapp"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
dbm "github.com/tendermint/tm-db"
"github.com/tharsis/ethermint/encoding"
)
// BenchmarkEthermintApp_ExportAppStateAndValidators measures state export:
// the chain is initialized once, then each iteration constructs a fresh app
// over the same DB (so InitChain has not been called on it) and exports.
func BenchmarkEthermintApp_ExportAppStateAndValidators(b *testing.B) {
	db := dbm.NewMemDB()
	app := NewEthermintApp(log.NewTMLogger(io.Discard), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0, encoding.MakeConfig(ModuleBasics), simapp.EmptyAppOptions{})

	genesisState := NewDefaultGenesisState()
	stateBytes, err := json.MarshalIndent(genesisState, "", " ")
	if err != nil {
		b.Fatal(err)
	}

	// Initialize the chain
	app.InitChain(
		abci.RequestInitChain{
			ChainId:       "ethermint_9000-1",
			Validators:    []abci.ValidatorUpdate{},
			AppStateBytes: stateBytes,
		},
	)
	app.Commit()

	b.ResetTimer()
	b.ReportAllocs()
	for i := 0; i < b.N; i++ {
		// Making a new app object with the db, so that initchain hasn't been called
		// NOTE(review): this logger wraps io.Discard in NewSyncWriter while
		// the one above does not — presumably both should match; confirm.
		app2 := NewEthermintApp(log.NewTMLogger(log.NewSyncWriter(io.Discard)), db, nil, true, map[int64]bool{}, DefaultNodeHome, 0, encoding.MakeConfig(ModuleBasics), simapp.EmptyAppOptions{})
		if _, err := app2.ExportAppStateAndValidators(false, []string{}); err != nil {
			b.Fatal(err)
		}
	}
}
|
package bd
import (
"api-documentos/modelos"
)
// InserOrden inserts a purchase order via the usp_InsOrdenCompra stored
// procedure (61 bound parameters: 33 + 28).
// NOTE(review): the name is presumably a typo for "InsertOrden", but it is
// exported API and therefore left unchanged.
func InserOrden(orden modelos.OrdenCompra) (respuesta modelos.RespuestaBasica) {
	query, err := db.Prepare("call usp_InsOrdenCompra(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
	if revisarError(err) {
		respuesta = modelos.RespuestaBasica{Codigo: 400, Mensaje: "Hubo un error al conectar con la base de datos"}
		return
	}
	// Fix: release the prepared statement; it was previously leaked.
	defer query.Close()
	_, err = query.Exec(
		orden.Fecha,
		orden.NoOrden,
		orden.FechaEntrega,
		orden.Proveedor,
		orden.EjecutivoP,
		orden.TelefonoP,
		orden.EmailP,
		orden.ClienteNoP,
		orden.DireccionP,
		orden.ColoniaP,
		orden.CiudadP,
		orden.CpP,
		orden.TelefonoPe,
		orden.FacturaA,
		orden.RfcF,
		orden.DireccionF,
		orden.ColoniaF,
		orden.CiudadF,
		orden.CpF,
		orden.TelefonoF,
		orden.LugarEntrega,
		orden.ClaveProducto,
		orden.KGLT,
		orden.Descripcion,
		orden.Cantidad,
		orden.PrecioUnit,
		orden.Importe,
		orden.Descuento,
		orden.ClaveProducto2,
		orden.KGLT2,
		orden.Descripcion2,
		orden.Cantidad2,
		orden.PrecioUnit2,
		orden.Importe2,
		orden.Descuento2,
		orden.ClaveProducto3,
		orden.KGLT3,
		orden.Descripcion3,
		orden.Cantidad3,
		orden.PrecioUnit3,
		orden.Importe3,
		orden.Descuento3,
		orden.ClaveProducto4,
		orden.KGLT4,
		orden.Descripcion4,
		orden.Cantidad4,
		orden.PrecioUnit4,
		orden.Importe4,
		orden.Descuento4,
		orden.ClaveProducto5,
		orden.KGLT5,
		orden.Descripcion5,
		orden.Cantidad5,
		orden.PrecioUnit5,
		orden.Importe5,
		orden.Descuento5,
		orden.TotalKGLT,
		orden.Subtotal,
		orden.Descuentos,
		orden.IVA,
		orden.Total,
	)
	if revisarError(err) {
		respuesta = modelos.RespuestaBasica{Codigo: 400, Mensaje: "Hubo un error al ejecutar la sentencia en la Base de Datos"}
		return
	}
	respuesta = modelos.RespuestaBasica{Codigo: 200, Mensaje: "Inserción realizada correctamente"}
	return
}
// SelectOrdenes returns every row of ordencompramx.
func SelectOrdenes() (respuesta modelos.RespuestaGetOrdenes) {
	var (
		orden   modelos.OrdenCompra
		ordenes []modelos.OrdenCompra
	)
	// Err is the package-level connection error; bail out early if the DB
	// handle never came up.
	if Err != nil {
		respuesta = modelos.RespuestaGetOrdenes{Codigo: 400, Mensaje: "Hubo un error al conectar con la base de datos", Error: Err.Error()}
		return
	}
	query, err := db.Query("SELECT * FROM ordencompramx")
	if err != nil {
		// Fix: report the actual query error. The old code read the global
		// Err, which is nil on this path and would have panicked on
		// Err.Error().
		respuesta = modelos.RespuestaGetOrdenes{Codigo: 400, Mensaje: "Hubo un error en la consulta", Error: err.Error()}
		return
	}
	// Fix: release the result set; it was previously leaked.
	defer query.Close()
	for query.Next() {
		Err = query.Scan(
			&orden.IdOrden,
			&orden.Fecha,
			&orden.NoOrden,
			&orden.FechaEntrega,
			&orden.Proveedor,
			&orden.EjecutivoP,
			&orden.TelefonoP,
			&orden.EmailP,
			&orden.ClienteNoP,
			&orden.DireccionP,
			&orden.ColoniaP,
			&orden.CiudadP,
			&orden.CpP,
			&orden.TelefonoPe,
			&orden.FacturaA,
			&orden.RfcF,
			&orden.DireccionF,
			&orden.ColoniaF,
			&orden.CiudadF,
			&orden.CpF,
			&orden.TelefonoF,
			&orden.LugarEntrega,
			&orden.ClaveProducto,
			&orden.KGLT,
			&orden.Descripcion,
			&orden.Cantidad,
			&orden.PrecioUnit,
			&orden.Importe,
			&orden.Descuento,
			&orden.ClaveProducto2,
			&orden.KGLT2,
			&orden.Descripcion2,
			&orden.Cantidad2,
			&orden.PrecioUnit2,
			&orden.Importe2,
			&orden.Descuento2,
			&orden.ClaveProducto3,
			&orden.KGLT3,
			&orden.Descripcion3,
			&orden.Cantidad3,
			&orden.PrecioUnit3,
			&orden.Importe3,
			&orden.Descuento3,
			&orden.ClaveProducto4,
			&orden.KGLT4,
			&orden.Descripcion4,
			&orden.Cantidad4,
			&orden.PrecioUnit4,
			&orden.Importe4,
			&orden.Descuento4,
			&orden.ClaveProducto5,
			&orden.KGLT5,
			&orden.Descripcion5,
			&orden.Cantidad5,
			&orden.PrecioUnit5,
			&orden.Importe5,
			&orden.Descuento5,
			&orden.TotalKGLT,
			&orden.Subtotal,
			&orden.Descuentos,
			&orden.IVA,
			&orden.Total,
		)
		// Fix: check the scan error BEFORE keeping the row; a failed scan
		// previously appended a half-filled struct first.
		if Err != nil {
			respuesta = modelos.RespuestaGetOrdenes{Codigo: 400, Mensaje: "Hubo un error al recibir un dato", Error: Err.Error()}
			return
		}
		ordenes = append(ordenes, orden)
	}
	if len(ordenes) == 0 {
		// Fix: Err is nil on this path; calling Err.Error() used to panic
		// whenever the table was empty.
		respuesta = modelos.RespuestaGetOrdenes{Codigo: 201, Mensaje: "No se encontraron registros en la base de datos"}
		return
	}
	respuesta = modelos.RespuestaGetOrdenes{Codigo: 200, Mensaje: "Datos enviados correctamente", Datos: ordenes}
	return
}
|
package main
import (
"encoding/json"
"fmt"
"github.com/shawnwyckoff/go-utils/dsa/jsons"
)
// Notes on json omitempty (translated):
//   - Basic types (e.g. int) and types defined from them honor omitempty.
//   - Struct types (and types defined from structs) do NOT honor omitempty.
//   - The only way for a struct S to get omitempty behavior is for the
//     embedding struct P to declare a pointer member *S.

// InInfo is the raw payload; Info aliases it so Info can carry a custom
// MarshalJSON that delegates back to InInfo without infinite recursion.
type InInfo struct {
	Address string
}

type Info InInfo

// MarshalJSON delegates to the plain InInfo representation. (Returning an
// empty []byte here — as the Score experiment does for zero — still does
// not give struct values omitempty behavior.)
func (info Info) MarshalJSON() ([]byte, error) {
	return json.Marshal(InInfo(info))
}
// A named pointer type (type Score *int) cannot be a method receiver
// ("invalid receiver type"), so the omitempty experiment uses a named int.

// Score marshals as its decimal value; zero marshals to an empty byte
// slice, which encoding/json rejects as invalid JSON — that rejection is
// the point of the experiment.
type Score int

func (s Score) MarshalJSON() ([]byte, error) {
	if s == 0 {
		return []byte{}, nil
	}
	return []byte(fmt.Sprintf("%d", int(s))), nil
}
// main marshals a struct whose Score is zero to show which omitempty
// variants take effect; jsons.MarshalStringDefault is a third-party helper
// — presumably a thin wrapper over encoding/json; confirm its semantics.
func main() {
	type S struct {
		Name   string
		Score  Score `json:"Score,omitempty"`
		Info   Info  `json:"Info,omitempty"`
		InfoEx *Info `json:"InfoEx,omitempty"`
	}
	d := S{Name: "Bob", Score: 0}
	fmt.Println(jsons.MarshalStringDefault(d, false))
}
|
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package rpcpool_test
import (
"context"
"crypto/tls"
"fmt"
"log"
"time"
"storj.io/common/identity"
"storj.io/common/peertls/tlsopts"
"storj.io/common/rpc"
"storj.io/common/rpc/rpcpool"
"storj.io/common/rpc/rpctest"
"storj.io/drpc"
)
// Example shows how the wrapper can be used to wrap connection multiple times.
func Example() {
	ctx := context.Background()

	// Throwaway identity for the dialer (difficulty 0 keeps it fast).
	id, err := identity.NewFullIdentity(ctx, identity.NewCAOptions{
		Difficulty:  0,
		Concurrency: 1,
	})
	if err != nil {
		log.Printf("%+v\n", err)
	}

	tlsOptions, err := tlsopts.NewOptions(id, tlsopts.Config{}, nil)
	if err != nil {
		log.Printf("%+v\n", err)
	}

	d := rpc.NewDefaultDialer(tlsOptions)
	cr := rpctest.NewCallRecorder()
	// Install a dialer wrapper that layers a call recorder and an artificial
	// latency shim on top of each new connection.
	ctx = rpcpool.WithDialerWrapper(ctx, func(ctx context.Context, dialer rpcpool.Dialer) rpcpool.Dialer {
		return func(context.Context) (conn rpcpool.RawConn, state *tls.ConnectionState, err error) {
			// this is only for testing when connection is mocked
			stub := rpctest.NewStubConnection()
			state = &tls.ConnectionState{}
			conn = &stub

			// for real world example start with delegating the call to the original dialer
			// conn, state, err = dialer(ctx)

			// adding first wrapper (call recorder)
			conn = cr.Attach(conn)

			// add additional latency wrapper
			conn = rpctest.ConnectionWithLatency(conn, 10*time.Millisecond)
			return conn, state, err
		}
	})

	conn, err := d.DialAddressInsecure(ctx, "localhost:1234")
	if err != nil {
		log.Printf("%+v\n", err)
	}

	in := message{}
	out := message{}
	// NOTE(review): the recorder history is printed only when Invoke returns
	// an error (expected here since the connection is a stub) — confirm this
	// is the intended demonstration flow.
	err = conn.Invoke(ctx, "rpccall", messageEncoding{}, &in, &out)
	if err != nil {
		fmt.Println(cr.History())
	}
	// Output: [rpccall]
}
// message is a minimal drpc message carrying a raw string payload.
type message struct {
	content string
}
// messageEncoding encodes/decodes message values as their raw bytes.
type messageEncoding struct {
}

// Marshal returns the message content as bytes; panics if msg is not *message.
func (messageEncoding) Marshal(msg drpc.Message) ([]byte, error) {
	return []byte(msg.(*message).content), nil
}

// Unmarshal stores buf verbatim into msg; panics if msg is not *message.
func (messageEncoding) Unmarshal(buf []byte, msg drpc.Message) error {
	msg.(*message).content = string(buf)
	return nil
}
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package encryption
import (
"bytes"
"testing"
)
// TestIncrementBytes table-drives incrementBytes, which (as the comparisons
// below show) adds amount to the big-endian counter IN PLACE in inbuf and
// reports whether the addition wrapped (truncated).
// NOTE(review): the {nil, -1, ...} row appears twice — presumably one copy
// was meant to use a different amount.
func TestIncrementBytes(t *testing.T) {
	for i, test := range []struct {
		inbuf     []byte
		amount    int64
		err       bool
		outbuf    []byte
		truncated bool
	}{
		{nil, 10, false, nil, true},
		{nil, 0, false, nil, false},
		{nil, -1, true, nil, false},
		{nil, -1, true, nil, false},
		{nil, -457, true, nil, false},
		{[]byte{0}, 0, false, []byte{0}, false},
		{[]byte{0}, 1, false, []byte{1}, false},
		{[]byte{0}, 254, false, []byte{0xfe}, false},
		{[]byte{1}, 254, false, []byte{0xff}, false},
		{[]byte{0}, 255, false, []byte{0xff}, false},
		{[]byte{1, 0, 0}, 3, false, []byte{4, 0, 0}, false},
		{[]byte{0}, 256, false, []byte{0}, true},
		{[]byte{0}, 257, false, []byte{1}, true},
		{
			[]byte("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe"), 1, false,
			[]byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff"), false},
		{
			[]byte("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"), 1, false,
			[]byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"), true},
		{
			[]byte("\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xfe"), 1, false,
			[]byte("\x00\x00\x00\x00\x00\xff\xff\xff\xff\xfe"), false},
		{
			[]byte("\xfe\xff\x00\xff\xff\xfe\xff\xff\xff\xfe"), 0xff0001, false,
			[]byte("\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xfe"), false},
		{
			[]byte("\xfe\xff\x00\xff\xff\xff\xff\xff\xff\xff"), 0xff0002, false,
			[]byte("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"), true},
		{
			[]byte("\xfe\xff\x00\xff\xff\xff\xff\xff\xff\xff"), 0xff0003, false,
			[]byte("\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00"), true},
	} {
		trunc, err := incrementBytes(test.inbuf, test.amount)
		if err != nil {
			if !test.err {
				t.Fatalf("%d: unexpected err: %v", i, err)
			}
			continue
		}
		if test.err {
			t.Fatalf("%d: err expected but no err happened", i)
		}
		if trunc != test.truncated {
			t.Fatalf("%d: truncated rv mismatch", i)
		}
		// inbuf was mutated in place; it must now equal the expected buffer.
		if !bytes.Equal(test.outbuf, test.inbuf) {
			t.Fatalf("%d: result mismatch\n%v\n%v", i, test.inbuf, test.outbuf)
		}
	}
}
|
// +build !clustered,!gcloud
package datastore
import (
"reflect"
"testing"
"github.com/janelia-flyem/dvid/dvid"
)
// TestRepoGobEncoding round-trips a repo through GobEncode/GobDecode and
// verifies that alias, log and properties survive.
func TestRepoGobEncoding(t *testing.T) {
	uuid := dvid.UUID("19b87f38f873481b9f3ac688877dff0d")
	versionID := dvid.VersionID(23)
	repoID := dvid.RepoID(13)

	repo := newRepo(uuid, versionID, repoID, "foobar")
	repo.alias = "just some alias"
	repo.log = []string{
		"Did this",
		"Then that",
		"And the other thing",
	}
	repo.properties = map[string]interface{}{
		"foo": 42,
		"bar": "some string",
		"baz": []int{3, 9, 7},
	}

	encoding, err := repo.GobEncode()
	if err != nil {
		t.Fatalf("Could not encode repo: %v\n", err)
	}
	received := repoT{}
	if err = received.GobDecode(encoding); err != nil {
		t.Fatalf("Could not decode repo: %v\n", err)
	}

	// Did we serialize OK
	// dag is excluded from the comparison — presumably covered elsewhere;
	// confirm.
	repo.dag = nil
	received.dag = nil
	if len(received.properties) != 3 {
		t.Errorf("Repo Gob messed up properties: %v\n", received.properties)
	}
	foo, ok := received.properties["foo"]
	if !ok || foo != 42 {
		t.Errorf("Repo Gob messed up properties: %v\n", received.properties)
	}
	bar, ok := received.properties["bar"]
	if !ok || bar != "some string" {
		t.Errorf("Repo Gob messed up properties: %v\n", received.properties)
	}
	baz, ok := received.properties["baz"]
	if !ok || !reflect.DeepEqual(baz, []int{3, 9, 7}) {
		t.Errorf("Repo Gob messed up properties: %v\n", received.properties)
	}
	// Properties were checked individually above; exclude them from the
	// whole-struct comparison.
	repo.properties = nil
	received.properties = nil
	if !reflect.DeepEqual(*repo, received) {
		t.Fatalf("Repo Gob messed up:\nOriginal: %v\nReceived: %v\n", *repo, received)
	}
}
// makeTestVersions builds a small version tree: a committed root with two
// committed children (one with an assigned UUID) and one uncommitted child
// carrying node-level log entries.
func makeTestVersions(t *testing.T) {
	root, err := NewRepo("test repo", "test repo description", nil, "")
	if err != nil {
		t.Fatal(err)
	}

	if err := Commit(root, "root node", nil); err != nil {
		t.Fatal(err)
	}

	child1, err := NewVersion(root, "note describing child 1", nil)
	if err != nil {
		t.Fatal(err)
	}

	if err := Commit(child1, "child 1", nil); err != nil {
		t.Fatal(err)
	}

	// Test ability to set UUID of child
	assignedUUID := dvid.UUID("0c8bc973dba74729880dd1bdfd8d0c5e")
	child2, err := NewVersion(root, "note describing child 2", &assignedUUID)
	if err != nil {
		t.Fatal(err)
	}

	log2 := []string{"This is line 1 of log", "This is line 2 of log", "Last line for multiline log"}
	if err := Commit(child2, "child 2 assigned", log2); err != nil {
		t.Fatal(err)
	}

	// Make uncommitted child 3
	child3, err := NewVersion(root, "note describing child 3", nil)
	if err != nil {
		t.Fatal(err)
	}
	nodelog := []string{`My first node-level log line.!(;#)}`, "Second line is here!!!"}
	if err := AddToNodeLog(child3, nodelog); err != nil {
		t.Fatal(err)
	}
}
// TestRepoPersistence checks that repo metadata is identical before and
// after a datastore close/reopen cycle, compared via its JSON rendering.
func TestRepoPersistence(t *testing.T) {
	OpenTest()

	makeTestVersions(t)

	// Save this metadata
	jsonBytes, err := MarshalJSON()
	if err != nil {
		t.Fatal(err)
	}

	// Shutdown and restart.
	CloseReopenTest()
	defer CloseTest()

	// Check if metadata is same
	jsonBytes2, err := MarshalJSON()
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(jsonBytes, jsonBytes2) {
		t.Errorf("\nRepo metadata JSON changes on close/reopen:\n\nOld:\n%s\n\nNew:\n%s\n", string(jsonBytes), string(jsonBytes2))
	}
}
// Make sure each new repo has a different local ID.
func TestNewRepoDifferent(t *testing.T) {
	OpenTest()
	defer CloseTest()

	root1, err := NewRepo("test repo 1", "test repo 1 description", nil, "")
	if err != nil {
		t.Fatal(err)
	}

	root2, err := NewRepo("test repo 2", "test repo 2 description", nil, "")
	if err != nil {
		t.Fatal(err)
	}

	// Delve down into private methods to make sure internal IDs are different.
	repo1, err := manager.repoFromUUID(root1)
	if err != nil {
		t.Fatal(err)
	}
	repo2, err := manager.repoFromUUID(root2)
	if err != nil {
		t.Fatal(err)
	}
	// Public UUIDs and all internal identifiers must differ between repos.
	if root1 == root2 {
		t.Errorf("New repos share uuid: %d\n", root1)
	}
	if repo1.id == repo2.id {
		t.Errorf("New repos share repo id: %d\n", repo1.id)
	}
	if repo1.version == repo2.version {
		t.Errorf("New repos share version id: %d\n", repo1.version)
	}
	if repo1.alias == repo2.alias {
		t.Errorf("New repos share alias: %s\n", repo1.alias)
	}
}
// TestUUIDAssignment verifies that caller-assigned UUIDs are honored for
// both roots and child versions, and that assigned UUIDs can be looked up
// by prefix.
func TestUUIDAssignment(t *testing.T) {
	OpenTest()
	defer CloseTest()

	uuidStr1 := "de305d5475b4431badb2eb6b9e546014"
	myuuid := dvid.UUID(uuidStr1)
	root, err := NewRepo("test repo", "test repo description", &myuuid, "")
	if err != nil {
		t.Fatal(err)
	}
	if root != myuuid {
		t.Errorf("Assigned root UUID %q != created root UUID %q\n", myuuid, root)
	}

	// Check if branches can also have assigned UUIDs
	if err := Commit(root, "root node", nil); err != nil {
		t.Fatal(err)
	}
	uuidStr2 := "8fa05d5475b4431badb2eb6b9e0123014"
	myuuid2 := dvid.UUID(uuidStr2)
	child, err := NewVersion(myuuid, "note describing uuid2", &myuuid2)
	if err != nil {
		t.Fatal(err)
	}
	if child != myuuid2 {
		t.Errorf("Assigned child UUID %q != created child UUID %q\n", myuuid2, child)
	}

	// Make sure we can lookup assigned UUIDs
	uuid, _, err := MatchingUUID(uuidStr1[:10])
	if err != nil {
		t.Errorf("Error matching UUID fragment %s: %v\n", uuidStr1[:10], err)
	}
	if uuid != myuuid {
		t.Errorf("Error getting back correct UUID %s from %s\n", myuuid, uuid)
	}
}
|
package runtime
import (
"io/ioutil"
"os"
"path/filepath"
"github.com/kirsle/configdir"
"github.com/spf13/viper"
"gopkg.in/yaml.v2"
)
const (
oldConfigPath = "gscloud"
configPath = "gridscale"
)
// ProjectEntry represents a single project (formerly "account") in the
// config file.
type ProjectEntry struct {
	Name   string `yaml:"name" json:"name"`
	UserID string `yaml:"userId" json:"userId"`
	Token  string `yaml:"token" json:"token"`
	URL    string `yaml:"url" json:"url"`
}
// Config are all configuration settings parsed from a configuration file.
type Config struct {
	Projects []ProjectEntry `yaml:"projects"`
}
// OldConfig are all configuration settings parsed from an old (gscloud-era)
// configuration file, which used "accounts" instead of "projects".
type OldConfig struct {
	Accounts []ProjectEntry `yaml:"accounts"`
}
// ConfigPath constructs the platform specific path to the configuration file.
// - on Linux: $XDG_CONFIG_HOME or $HOME/.config
// - on macOS: $HOME/Library/Application Support
// - on Windows: %APPDATA% or "C:\\Users\\%USER%\\AppData\\Roaming"
func ConfigPath() string {
	p := filepath.Join(configdir.LocalConfig(), configPath)
	return p
}
// OldConfigPath is like ConfigPath but for the legacy "gscloud" directory.
func OldConfigPath() string {
	p := filepath.Join(configdir.LocalConfig(), oldConfigPath)
	return p
}
// ConfigPathWithoutUser is the same as ConfigPath but with environment variables not expanded.
// NOTE(review): localConfig is defined elsewhere in this package; the bare
// string concatenation suggests it must end with a path separator — confirm.
func ConfigPathWithoutUser() string {
	return localConfig + configPath
}
// OldConfigPathWithoutUser is like ConfigPathWithoutUser but for the legacy
// "gscloud" directory.
func OldConfigPathWithoutUser() string {
	return localConfig + oldConfigPath
}
// ParseConfig parses the viper config file. It first tries the current
// "projects" schema; if that yields nothing, it falls back to the legacy
// "accounts" schema from old gscloud configs.
func ParseConfig() (*Config, error) {
	conf := Config{}
	if err := viper.Unmarshal(&conf); err != nil {
		return nil, err
	}
	if conf.Projects == nil {
		// Fall back to the legacy layout. Fix: the unmarshal error was
		// silently discarded before; the fallback stays best-effort, but
		// the legacy accounts are only adopted when decoding succeeded.
		oldConf := OldConfig{}
		if err := viper.Unmarshal(&oldConf); err == nil {
			conf.Projects = oldConf.Accounts
		}
	}
	return &conf, nil
}
// WriteConfig serializes conf as YAML and writes it to filePath, creating
// the parent directory (mode 0700) if needed.
func WriteConfig(conf *Config, filePath string) error {
	if err := os.MkdirAll(filepath.Dir(filePath), os.FileMode(0700)); err != nil {
		return err
	}
	// Fix: the yaml.Marshal error was silently ignored before.
	c, err := yaml.Marshal(conf)
	if err != nil {
		return err
	}
	// 0600: the config holds API tokens, so keep it owner-readable only
	// (previously written world-readable 0644).
	return ioutil.WriteFile(filePath, c, 0600)
}
|
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package util
import (
"testing"
"time"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
)
// TestEveryN verifies that EveryN.ShouldProcess reports true only once per
// N-sized window of elapsed time.
func TestEveryN(t *testing.T) {
	start := timeutil.Now()
	every := EveryN{N: time.Minute}
	cases := []struct {
		elapsed  time.Duration // time since start
		expected bool
	}{
		{0, true}, // the first attempt to log should always succeed
		{0, false},
		{time.Second, false},
		{time.Minute - 1, false},
		{time.Minute, true},
		{time.Minute, false},
		{time.Minute + 30*time.Second, false},
		{10 * time.Minute, true},
		{10 * time.Minute, false},
		{10*time.Minute + 59*time.Second, false},
		{11 * time.Minute, true},
	}
	for _, c := range cases {
		if got := every.ShouldProcess(start.Add(c.elapsed)); got != c.expected {
			t.Errorf("ShouldProcess(%v) got %v, want %v", c.elapsed, got, c.expected)
		}
	}
}
|
/*
Execution prompting the user to enter an integer,
then it adds that number to an int slice, it sorts that slice
and prints it to screen. Execution stops when user types "x".
*/
package main
import (
"fmt"
"sort"
"strconv"
)
// main repeatedly prompts the user for an integer, appends it to a slice,
// keeps the slice sorted, and prints it. Typing "x" exits.
func main() {
	arr := make([]int, 0, 3)
	givenValue := ""
	fmt.Println("Please insert an integer. Press x to exit")
	fmt.Scan(&givenValue)
	for givenValue != "x" {
		num, err := strconv.Atoi(givenValue)
		if err != nil {
			// Previously the Atoi error was ignored, so any non-integer
			// input (other than "x") silently appended 0 to the slice.
			fmt.Println("Not an integer, try again.")
		} else {
			arr = append(arr, num)
			sort.SliceStable(arr, func(i, j int) bool { return arr[i] < arr[j] })
			fmt.Println(arr)
		}
		fmt.Println("Please insert an integer. Press x to exit")
		fmt.Scan(&givenValue)
	}
}
|
package main
import (
"flag"
"fmt"
"github.com/mit-dci/zkledger"
)
// Command-line flags controlling key generation.
var num = flag.Int("num", 2, "The number of banks you want generate keys for")
var loadKeys = flag.Bool("load", false, "Loads the keys if they already exist")
// main generates (or, with -load, loads existing) zkledger PKI keys for the
// requested number of banks and prints the resulting PKI.
func main() {
	flag.Parse()
	pki := zkledger.PKI{}
	switch {
	case *loadKeys:
		pki.MakeTestWithKeys(*num)
	default:
		pki.MakeTest(*num)
	}
	fmt.Println(pki)
}
|
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/andrew-d/isbinary"
"github.com/fatih/color"
"github.com/y-yagi/configure"
)
// config holds the user configuration loaded via the configure package.
type config struct {
	Home string `toml:"home"` // root directory searched for articles
}

// cmd is the name under which the configuration file is stored.
const cmd = "blogrep"

var (
	// warningColor highlights matched patterns in output.
	warningColor = color.New(color.FgYellow).SprintFunc()
	// filePathColor highlights file paths in output.
	filePathColor = color.New(color.FgGreen, color.Bold).SprintFunc()
)
// errorline prints s to stderr, followed by a newline.
func errorline(s string) {
	fmt.Fprintln(os.Stderr, s)
}
// usage prints a one-line usage message to stderr.
func usage() {
	prog := os.Args[0]
	fmt.Fprintf(os.Stderr, "usage: %s PATTERNS\n", prog)
}
func msg(err error) int {
if err != nil {
fmt.Fprintf(os.Stderr, "%s: %v\n", os.Args[0], err)
return 1
}
return 0
}
// cmdEdit opens the configuration file in $EDITOR, falling back to vim.
func cmdEdit() error {
	editor := os.Getenv("EDITOR")
	if editor == "" {
		editor = "vim"
	}
	return configure.Edit(cmd, editor)
}
// containsAllAndColorized reports whether article contains every pattern,
// compared case-insensitively. Only when all patterns match is each pattern
// occurrence rewritten in the warning color; on a failed match the article
// is now left unmodified (previously the patterns matched so far had
// already been rewritten before returning false).
// NOTE(review): matching is case-insensitive but the colorizing Replace is
// case-sensitive, so differently-cased occurrences match without being
// highlighted — confirm whether that is intended.
func containsAllAndColorized(article *string, patterns []string) bool {
	lower := strings.ToLower(*article)
	for _, pattern := range patterns {
		if !strings.Contains(lower, strings.ToLower(pattern)) {
			return false
		}
	}
	for _, pattern := range patterns {
		*article = strings.Replace(*article, pattern, warningColor(pattern), -1)
	}
	return true
}
// readAndGrep returns a filepath.WalkFunc that greps article files.
// For each regular (non-binary) file whose path contains filePattern (when
// non-empty), it splits the content into articles on the "\n***\n\n"
// separator and writes every article containing all patterns to writer,
// preceded by the colorized file path.
func readAndGrep(patterns []string, filePattern string, writer io.Writer) filepath.WalkFunc {
	return func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		var articles []string
		if info.IsDir() {
			return nil
		}
		if filePattern != "" && !strings.Contains(path, filePattern) {
			return nil
		}
		in, err := os.Open(path)
		if err != nil {
			return err
		}
		defer in.Close()
		// Peek at the content so binary files can be skipped.
		fileIsBinary, err := isbinary.TestReader(in)
		if err != nil {
			return err
		}
		if fileIsBinary {
			return nil
		}
		// Rewind so ReadAll sees the whole file again.
		// NOTE(review): the Seek error is ignored — confirm acceptable.
		in.Seek(0, 0)
		data, err := ioutil.ReadAll(in)
		if err != nil {
			return err
		}
		articles = strings.Split(string(data), "\n***\n\n")
		for _, article := range articles {
			// article is a per-iteration copy, so the colorizing mutation
			// does not touch the articles slice.
			if containsAllAndColorized(&article, patterns) {
				fmt.Fprintln(writer, filePathColor(path))
				fmt.Fprintln(writer, article)
			}
		}
		return nil
	}
}
// init creates an empty configuration file on first run so that later
// configure.Load calls succeed.
func init() {
	if !configure.Exist(cmd) {
		var cfg config
		cfg.Home = ""
		configure.Save(cmd, cfg)
	}
}
// main parses flags, loads the configuration, changes into the configured
// home directory, and greps every file below it for the given patterns.
func main() {
	var edit bool
	var filePattern string
	flag.BoolVar(&edit, "c", false, "edit config")
	flag.StringVar(&filePattern, "f", "", "file name pattern")
	flag.Parse()
	if edit {
		os.Exit(msg(cmdEdit()))
	}
	args := flag.Args()
	if len(args) < 1 {
		usage()
		os.Exit(2)
	}
	var cfg config
	err := configure.Load(cmd, &cfg)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
	if len(cfg.Home) == 0 {
		fmt.Fprintf(os.Stderr, "Please specify home to config file.\n")
		os.Exit(1)
	}
	err = os.Chdir(cfg.Home)
	if err != nil {
		errorline(err.Error())
		os.Exit(1)
	}
	// Walk from the (now current) home directory.
	cwd, _ := os.Getwd()
	err = filepath.Walk(cwd, readAndGrep(args, filePattern, os.Stdout))
	if err != nil {
		errorline(err.Error())
		os.Exit(1)
	}
}
|
package controls
import (
"github.com/upyun/go-sdk/upyun"
"sofuny/config"
"sofuny/models"
)
// db is the shared database connection for the controls package.
var db = models.Connection()

// up is the UpYun CDN client, configured from the application config.
var up = upyun.NewUpYun(&upyun.UpYunConfig{
	Bucket:   config.Config().Upyun.Bucket,
	Operator: config.Config().Upyun.Operator,
	Password: config.Config().Upyun.Password,
})
|
package geojson
// A list of the geojson types that are currently supported.
// The string values match the "type" member values used by GeoJSON
// geometry objects (see RFC 7946).
const (
	TypePoint           = "Point"
	TypeMultiPoint      = "MultiPoint"
	TypeLineString      = "LineString"
	TypeMultiLineString = "MultiLineString"
	TypePolygon         = "Polygon"
	TypeMultiPolygon    = "MultiPolygon"
)
|
//simulator bytom miner
package main
import (
"bufio"
"encoding/hex"
"encoding/json"
"flag"
"fmt"
"log"
"math/big"
"net"
"os"
"runtime"
"strconv"
"time"
//"github.com/bytom/consensus/difficulty"
"github.com/bytom/mining/tensority"
"github.com/bytom/protocol/bc"
"github.com/bytom/protocol/bc/types"
bytomutil "github.com/bytom/util"
)
// RpcRequest is a generic stratum JSON-RPC request sent to the pool.
type RpcRequest struct {
	Method string      `json:"method"`
	Params interface{} `json:"params"`
	ID     string      `json:"id"`
	Worker string      `json:"worker"`
}

// RpcResponse is a generic stratum JSON-RPC response; Result and Error are
// kept raw so callers can decode them per method.
type RpcResponse struct {
	ID         string          `json:"id"`
	Result     json.RawMessage `json:"result"`
	Error      json.RawMessage `json:"error"`
	RpcVersion string          `json:"jsonrpc"`
}

// Miner models one simulated miner and its pool session state.
type Miner struct {
	ID          string
	Pool        string // pool address (host:port)
	Status      bool   // set false once the session is torn down
	Address     string // address to receive mining profit
	LatestJobId string // most recent job id; Mine abandons older jobs
	MsgId       uint64
	Session     net.Conn // TCP connection to the pool
	//dataCh chan string
	QuitCh chan struct{} // closed to signal session shutdown
}

// PoolErr is the error object inside a stratum response.
type PoolErr struct {
	Code    uint64 `json:"code"`
	Message string `json:"message"`
}

// MineJob carries the block-header fields of one mining job, hex-encoded
// as sent by the pool.
type MineJob struct {
	Version    string `json:"version"`
	Height     string `json:"height"`
	PreBlckHsh string `json:"previous_block_hash"`
	Timestamp  string `json:"timestamp"`
	TxMkRt     string `json:"transactions_merkle_root"`
	TxSt       string `json:"transaction_status_hash"`
	Nonce      string `json:"nonce"`
	Bits       string `json:"bits"`
	JobId      string `json:"job_id"`
	Seed       string `json:"seed"`
	Target     string `json:"target"`
}

// Result is the payload of a stratum login/submit response.
type Result struct {
	Id     string  `json:"id"`
	Job    MineJob `json:"job"`
	Status string  `json:"status"`
}
// StratumResp is a stratum JSON-RPC response carrying a mining job.
// Tag fix: the tags previously read `json:"x, omitempty"`; the space makes
// encoding/json treat ", omitempty" as an unknown option, silently
// disabling omitempty. Decoding (this program's use) is unaffected.
type StratumResp struct {
	Id      string  `json:"id"`
	Jsonrpc string  `json:"jsonrpc,omitempty"`
	Result  Result  `json:"result,omitempty"`
	Error   PoolErr `json:"error,omitempty"`
}
// MineJobntf is a "job" notification pushed by the pool.
// Tag fix: `json:"x, omitempty"` (with a space) disables the omitempty
// option in encoding/json; decoding behavior is unchanged by the fix.
type MineJobntf struct {
	Jsonrpc string  `json:"jsonrpc,omitempty"`
	Method  string  `json:"method,omitempty"`
	Params  MineJob `json:"params,omitempty"`
}
// SubmitReq is the params object of a stratum "submit" request.
type SubmitReq struct {
	Id    string `json:"id"`
	JobId string `json:"job_id"`
	Nonce string `json:"nonce"` // 16-char zero-padded hex; see getNonceStr
}

// LoginReq is the params object of a stratum "login" request.
type LoginReq struct {
	Login    string `json:"login"`
	Password string `json:"pass"`
	Agent    string `json:"agent"`
}

// SubmitWorkReq is the request body for bytomd's /submit-work RPC.
type SubmitWorkReq struct {
	BlockHeader *types.BlockHeader `json:"block_header"`
}
var (
	// poolAddr and login are populated from the -pool and -user flags.
	poolAddr string
	login    string
	DEBUG    bool = false
	maxNonce      = ^uint64(0) // 2^64 - 1 = 18446744073709551615
	// Diff1 is the maximum (easiest) 256-bit target value.
	Diff1 = StringToBig("0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF")
)
// main parses flags, configures the runtime, and restarts the mining
// session whenever the previous one finishes.
func main() {
	pool := flag.String("pool", "stratum.btcc.com:9221", "Mining Pool stratum+tcp:// Addr")
	user := flag.String("user", "bm1qh7e8309j24faltn5auurwqawlza5ylwxkzudsc.1", "login user , bytomAddress.[rigname]")
	thread := flag.Int("thread", 2, "runtime max thread")
	flag.Parse()
	poolAddr = *pool
	login = *user
	os.Setenv("BYTOM_URL", "127.0.0.1:9888")
	if *thread > 1 {
		runtime.GOMAXPROCS(*thread)
	}
	// Restart mining whenever the previous session ends. The original
	// goto-based loop left its "Miner test finished" log statement
	// unreachable (it followed the goto inside the select case).
	for {
		done := make(chan struct{})
		log.Printf("Running with %v threads", *thread)
		startMining(done)
		<-done
		log.Printf("Miner test finished")
	}
}
// startMining launches one bytom mining session in a background goroutine;
// closeCh is closed when the session terminates (or fails to start).
func startMining(closeCh chan struct{}) error {
	log.Println("Miner start")
	go func(done chan struct{}) {
		defer close(done)
		miner, err := NewMiner(login, poolAddr)
		if err != nil {
			return
		}
		miner.Start()
	}(closeCh)
	return nil
}
// NewMiner builds a Miner for the given login (also used as the payout
// address) and pool address. No connection is established yet; see Login.
func NewMiner(login, pool string) (*Miner, error) {
	miner := &Miner{
		ID:          login,
		Address:     login, // the address to receive miner profit
		Pool:        pool,
		Status:      true,
		LatestJobId: "",
		MsgId:       0,
		QuitCh:      make(chan struct{}),
	}
	return miner, nil
}
// Login dials the pool, sends the stratum "login" request, reads the first
// response, and starts mining the job it carries.
func (m *Miner) Login() (err error) {
	conn, err := net.Dial("tcp", m.Pool)
	if err != nil {
		return
	}
	m.Session = conn
	m.Status = true
	req := RpcRequest{
		ID:     m.ID,
		Method: "login",
		Params: LoginReq{Login: m.Address, Password: "password", Agent: "bmminer/2.0.0"},
		Worker: m.ID,
	}
	if err := m.WriteStratumRequest(req, time.Now().Add(10*time.Second)); err != nil {
		log.Println("error in test miner login()")
		log.Println(err.Error())
		return err
	}
	reply, err := bufio.NewReader(m.Session).ReadBytes('\n')
	if len(reply) == 0 || err != nil {
		// Previously execution fell through and tried to unmarshal the
		// empty/partial reply after closing QuitCh; now we bail out.
		close(m.QuitCh)
		if err == nil {
			err = fmt.Errorf("empty login reply")
		}
		return err
	}
	var resp StratumResp
	err = json.Unmarshal(reply, &resp)
	if err != nil {
		return
	}
	m.LatestJobId = resp.Result.Job.JobId
	go func(job MineJob) {
		m.Mine(job)
	}(resp.Result.Job)
	return nil
}
// Start logs in to the pool, then consumes server messages until the
// session ends; "job" notifications each start a new mining goroutine.
// It returns after QuitCh is closed and the connection is torn down.
func (m *Miner) Start() error {
	//subscribe server login
	if err := m.Login(); err != nil {
		log.Println(err.Error())
		// NOTE(review): Login may already have closed QuitCh on a failed
		// read, and m.Session can be nil when the dial failed — both
		// pre-existing hazards on this path.
		close(m.QuitCh)
		//return err
	}
	//listen to the server message
	go func() {
		// One buffered reader for the whole session: the previous code
		// created a fresh bufio.Reader per message, which can silently
		// drop bytes the old reader had already buffered from the socket.
		reader := bufio.NewReader(m.Session)
		for {
			message, err := reader.ReadBytes('\n')
			if len(message) == 0 || err != nil {
				close(m.QuitCh)
				break
			}
			fmt.Println("Message from server: ", string(message))
			var jobntf MineJobntf
			json.Unmarshal(message, &jobntf)
			if jobntf.Method == "job" {
				log.Printf("----new job received----\n%s\n", message)
				m.LatestJobId = jobntf.Params.JobId
				go func(job MineJob) {
					m.Mine(job)
				}(jobntf.Params)
			} else {
				log.Printf("Received: %s\n", message)
			}
		}
	}()
	<-m.QuitCh
	log.Println("Miner ", m.ID, m.Address, "quit")
	m.Session.Close()
	m.Status = false
	return nil
}
// WriteStratumRequest marshals req, appends a terminating newline, and
// writes it to the pool connection before the given deadline.
func (m *Miner) WriteStratumRequest(req RpcRequest, deadline time.Time) error {
	payload, err := json.Marshal(req)
	if err != nil {
		log.Println("WriteStratumRequest marshal", err.Error())
		return err
	}
	payload = append(payload, '\n')
	if err = m.Session.SetWriteDeadline(deadline); err != nil {
		return err
	}
	_, err = m.Session.Write(payload)
	return err
}
// Mine scans the nonce space for the given job until the job expires (a
// newer job arrived or the miner stopped) or the nonce space is exhausted.
// Shares and blocks found along the way are submitted asynchronously.
// It always returns false (expired or exhausted).
func (m *Miner) Mine(job MineJob) bool {
	seedHash, err1 := DecodeHash(job.Seed)
	PreBlckHsh, err2 := DecodeHash(job.PreBlckHsh)
	TxMkRt, err3 := DecodeHash(job.TxMkRt)
	TxSt, err4 := DecodeHash(job.TxSt)
	if err1 != nil || err2 != nil || err3 != nil || err4 != nil {
		// Malformed job fields: nothing to mine.
		return false
	}
	// Rebuild the block header the pool asked us to solve.
	bh := &types.BlockHeader{
		Version:           str2ui64Bg(job.Version),
		Height:            str2ui64Bg(job.Height),
		PreviousBlockHash: PreBlckHsh,
		Timestamp:         str2ui64Bg(job.Timestamp),
		Bits:              str2ui64Bg(job.Bits),
		BlockCommitment: types.BlockCommitment{
			TransactionsMerkleRoot: TxMkRt,
			TransactionStatusHash:  TxSt,
		},
	}
	if DEBUG {
		viewParsing(bh, job)
	}
	log.Printf("Job %s: Mining at height: %d\n", job.JobId, bh.Height)
	log.Printf("Job %s: Old target: %v\n", job.JobId, CompactToBig(bh.Bits))
	newDiff := getNewTargetDiff(job.Target)
	log.Printf("Job %s: New target: %v\n", job.JobId, newDiff)
	nonce := str2ui64Li(job.Nonce)
	log.Printf("Job %s: Start from nonce:\t0x%016x = %d\n", job.JobId, nonce, nonce)
	for i := nonce; i <= maxNonce; i++ {
		// Abandon the loop as soon as a newer job supersedes this one.
		if job.JobId != m.LatestJobId || !m.Status {
			log.Printf("Job %s: Expired", job.JobId)
			return false
		} else {
			// log.Printf("Checking PoW with nonce: 0x%016x = %d\n", i, i)
			bh.Nonce = i
			headerHash := bh.Hash()
			if DEBUG {
				fmt.Printf("Job %s: HeaderHash: %v\n", job.JobId, headerHash.String())
			}
			// if difficulty.CheckProofOfWork(&headerHash, &seedHash, bh.Bits) {
			// t == 1: meets the pool share target; t == 2: meets the
			// network bits, i.e. a full block.
			if t := CheckPOW(&headerHash, &seedHash, BigToCompact(newDiff), bh.Bits); t > 0 {
				log.Printf("Job %s: Target found! Proof hash: 0x%v\n", job.JobId, headerHash.String())
				if t == 2 {
					log.Println("Block found!")
					go func(blockheader types.BlockHeader) {
						m.SubmitBlock(blockheader)
					}(*bh)
				} else if t == 1 {
					go func(jobid string, i uint64) {
						m.SubmitWork(jobid, i)
					}(job.JobId, i)
				}
				//m.SubmitWork(job.JobId, i)
			}
		}
	}
	log.Printf("Job %s: Stop at nonce:\t\t0x%016x = %d\n", job.JobId, bh.Nonce, bh.Nonce)
	return false
}
//{"id": "antminer_1", "job_id": "1285153", "nonce": "0000026f80000ab9"}
func (m *Miner) SubmitWork(jobId string, nonce uint64) (err error) {
req := RpcRequest{
ID: m.ID,
Method: "submit",
Params: SubmitReq{
Id: m.ID,
JobId: jobId,
Nonce: getNonceStr(nonce),
},
Worker: m.ID,
}
if err := m.WriteStratumRequest(req, time.Now().Add(10*time.Second)); err != nil {
log.Println("error in test miner submitwork()")
log.Println(err.Error())
return err
}
return nil
}
// SubmitBlock submits a solved block header to the local bytomd node via
// its /submit-work RPC endpoint.
func (m *Miner) SubmitBlock(bh types.BlockHeader) {
	_, success := bytomutil.ClientCall("/submit-work", &SubmitWorkReq{BlockHeader: &bh})
	// A zero second return value is treated as success here.
	if success == 0 {
		log.Println("Mined new Block!")
	}
}
// viewParsing dumps the parsed block header next to the raw job fields for
// debugging; only called when DEBUG is enabled.
func viewParsing(bh *types.BlockHeader, job MineJob) {
	log.Println("Printing parsing result:")
	fmt.Println("\tVersion:", bh.Version)
	fmt.Println("\tHeight:", bh.Height)
	fmt.Println("\tPreviousBlockHash:", bh.PreviousBlockHash.String())
	fmt.Println("\tTimestamp:", bh.Timestamp)
	fmt.Println("\tbits_str:", job.Bits)
	fmt.Println("\tBits:", bh.Bits)
	fmt.Println("\tTransactionsMerkleRoot:", bh.BlockCommitment.TransactionsMerkleRoot.String())
	fmt.Println("\tTransactionStatusHash:", bh.BlockCommitment.TransactionStatusHash.String())
	fmt.Println("\ttarget_str:", job.Target)
	fmt.Println("\ttarget_ui64Bg:", str2ui64Bg(job.Target))
}
// str2ui64Bg decodes a hex string whose byte order is the reverse of the
// machine-facing one: the string is byte-swapped via strSwitchEndian and
// then parsed as a 64-bit hex integer. Parse errors yield 0.
func str2ui64Bg(str string) uint64 {
	value, _ := strconv.ParseUint(strSwitchEndian(str), 16, 64)
	return value
}
// str2ui64Li decodes str directly as a base-16 unsigned 64-bit integer.
// Parse errors yield 0.
func str2ui64Li(str string) uint64 {
	value, _ := strconv.ParseUint(str, 16, 64)
	return value
}
// strSwitchEndian reverses the byte order of a hex string, treating every
// two characters as one byte: "0123abcd" -> "cdab2301". It panics when the
// length is odd. The previous implementation concatenated strings in a
// loop (quadratic); this one writes into a preallocated byte slice.
func strSwitchEndian(oldstr string) string {
	slen := len(oldstr)
	if slen%2 != 0 {
		panic("hex string format error")
	}
	out := make([]byte, slen)
	for i := 0; i < slen; i += 2 {
		copy(out[i:i+2], oldstr[slen-i-2:slen-i])
	}
	return string(out)
}
func StringToBig(h string) *big.Int {
n := new(big.Int)
n.SetString(h, 0)
return n
}
// reverse returns a new slice holding the bytes of src in reverse order;
// src itself is not modified.
func reverse(src []byte) []byte {
	dst := make([]byte, len(src))
	for i, b := range src {
		dst[len(src)-1-i] = b
	}
	return dst
}
// DecodeHash parses a hex-encoded hash string into a bc.Hash via its
// UnmarshalText implementation.
func DecodeHash(s string) (h bc.Hash, err error) {
	err = h.UnmarshalText([]byte(s))
	return h, err
}
// getNewTargetDiff converts the pool-supplied hex target into a big.Int.
// The decoded bytes are reversed (the pool sends them in the opposite
// order from what big.Int.SetBytes expects) and left-aligned in a 32-byte
// buffer before conversion.
// NOTE(review): the hex.DecodeString error is ignored — a malformed target
// silently yields a partial or zero value; confirm this is acceptable.
func getNewTargetDiff(target string) *big.Int {
	padded := make([]byte, 32)
	targetHex := target
	decoded, _ := hex.DecodeString(targetHex)
	decoded = reverse(decoded)
	copy(padded[:len(decoded)], decoded)
	newDiff := new(big.Int).SetBytes(padded)
	//newDiff = new(big.Int).Div(Diff1, newDiff)
	//log.Printf(" Old target: %v\n", difficulty.CompactToBig(blockheaderBits))
	//newDiff = new(big.Int).Mul(difficulty.CompactToBig(blockheaderBits), newDiff)
	//log.Printf("New target: %v\n", newDiff)
	return newDiff
}
// getNonceStr renders a nonce as a 16-character zero-padded lowercase hex
// string, e.g. 0x26f80000ab9 -> "0000026f80000ab9".
func getNonceStr(i uint64) string {
	return fmt.Sprintf("%016s", strconv.FormatUint(i, 16))
}
// HashToBig converts a bc.Hash into a big.Int, reversing the hash's byte
// order (little-endian) so it can be used with the big-endian math/big
// package.
func HashToBig(hash *bc.Hash) *big.Int {
	buf := hash.Byte32()
	// Reverse in place by swapping symmetric positions.
	for lo, hi := 0, len(buf)-1; lo < hi; lo, hi = lo+1, hi-1 {
		buf[lo], buf[hi] = buf[hi], buf[lo]
	}
	return new(big.Int).SetBytes(buf[:])
}
// CheckPOW evaluates a proof-of-work hash against both the pool share
// target and the network difficulty bits. Return values:
//   0 - does not meet the share target;
//   1 - meets the share target but not the network bits;
//   2 - meets the network bits (a new block).
func CheckPOW(hash, seed *bc.Hash, target, bits uint64) (ret int) {
	// tensority.AIHash is bytom's proof-of-work hash function.
	compareHash := tensority.AIHash.Hash(hash, seed)
	tmp := HashToBig(compareHash)
	if tmp.Cmp(CompactToBig(target)) <= 0 {
		ret = 1
	} else {
		ret = 0
		return
	}
	if tmp.Cmp(CompactToBig(bits)) <= 0 {
		ret = 2
	}
	return
}
func CompactToBig(compact uint64) *big.Int {
// Extract the mantissa, sign bit, and exponent.
mantissa := compact & 0x007fffffffffffff
isNegative := compact&0x0080000000000000 != 0
exponent := uint(compact >> 56)
var bn *big.Int
if exponent <= 3 {
mantissa >>= 8 * (3 - exponent)
bn = big.NewInt(int64(mantissa))
} else {
bn = big.NewInt(int64(mantissa))
bn.Lsh(bn, 8*(exponent-3))
}
if isNegative {
bn = bn.Neg(bn)
}
return bn
}
func BigToCompact(n *big.Int) uint64 {
if n.Sign() == 0 {
return 0
}
var mantissa uint64
// Bytes() returns the absolute value of n as a big-endian byte slice
exponent := uint(len(n.Bytes()))
// Bits() returns the absolute value of n as a little-endian uint64 slice
if exponent <= 3 {
mantissa = uint64(n.Bits()[0])
mantissa <<= 8 * (3 - exponent)
} else {
tn := new(big.Int).Set(n)
// Since the base for the exponent is 256, the exponent can be treated
// as the number of bytes to represent the full 256-bit number. And as
// the exponent is treated as the number of bytes, Rsh 8*(exponent-3)
// makes sure that the shifted tn won't occupy more than 8*3=24 bits,
// and can be read from Bits()[0], which is 64-bit
mantissa = uint64(tn.Rsh(tn, 8*(exponent-3)).Bits()[0])
}
if mantissa&0x0080000000000000 != 0 {
mantissa >>= 8
exponent++
}
compact := uint64(exponent)<<56 | mantissa
if n.Sign() < 0 {
compact |= 0x0080000000000000
}
return compact
}
|
// Package imagesort contains a single executable located in cmd/imagesort
package imagesort
|
package main
import (
"fmt"
"net/http"
"encoding/json"
"io/ioutil"
)
// main registers the /plus endpoint and serves on :9010, printing the
// server error (ListenAndServe always returns a non-nil error) on exit.
func main() {
	http.HandleFunc("/plus", plus)
	err := http.ListenAndServe(":9010", nil)
	fmt.Println(err)
}
type PlusInput struct {
Number1 int `json:"number1"`
Number2 int `json:"number2"`
}
func plus(w http.ResponseWriter, r *http.Request) {
body, _ := ioutil.ReadAll(r.Body)
var p PlusInput
json.Unmarshal(body, &p)
fmt.Fprint(w, map[string]interface{}{
"response": p.Number1+p.Number2,
})
return
} |
package web
import (
"context"
"math"
"net/http"
"strconv"
)
const (
	// recordsPerPage is the page size used by all paginated listings.
	recordsPerPage = 20
)
// GetExchangeTicks renders a paginated view of exchange ticks. The page is
// taken from the "page" form value; missing or invalid values fall back to
// page 1.
func (s *Server) GetExchangeTicks(res http.ResponseWriter, req *http.Request) {
	req.ParseForm()
	page := req.FormValue("page")
	pageToLoad, err := strconv.ParseInt(page, 10, 32)
	if err != nil || pageToLoad <= 0 {
		pageToLoad = 1
	}
	var txPerPage int = recordsPerPage
	offset := (int(pageToLoad) - 1) * txPerPage
	ctx := context.Background()
	allExhangeSlice, err := s.db.AllExchangeTicks(ctx, offset, recordsPerPage)
	if err != nil {
		panic(err)
	}
	totalCount, err := s.db.AllExchangeTicksCount(ctx)
	if err != nil {
		// Previously this error was ignored, leaving totalCount at zero
		// and producing a bogus totalPages value.
		panic(err)
	}
	data := map[string]interface{}{
		"exData":       allExhangeSlice,
		"currentPage":  int(pageToLoad),
		"previousPage": int(pageToLoad - 1),
		"totalPages":   int(math.Ceil(float64(totalCount) / float64(txPerPage))),
	}
	// Only offer a next page when more rows remain beyond those loaded.
	totalTxLoaded := int(offset) + len(allExhangeSlice)
	if int64(totalTxLoaded) < totalCount {
		data["nextPage"] = int(pageToLoad + 1)
	}
	s.render("exchange.html", data, res)
}
// GetVspTicks renders a paginated view of VSP ticks. The page is taken
// from the "page" form value; missing or invalid values fall back to
// page 1.
func (s *Server) GetVspTicks(res http.ResponseWriter, req *http.Request) {
	req.ParseForm()
	page := req.FormValue("page")
	pageToLoad, err := strconv.ParseInt(page, 10, 32)
	if err != nil || pageToLoad <= 0 {
		pageToLoad = 1
	}
	var txPerPage int = recordsPerPage
	offset := (int(pageToLoad) - 1) * txPerPage
	ctx := context.Background()
	allVSPSlice, err := s.db.AllVSPTicks(ctx, offset, recordsPerPage)
	if err != nil {
		panic(err)
	}
	totalCount, err := s.db.AllVSPTickCount(ctx)
	if err != nil {
		// Previously this error was ignored, leaving totalCount at zero
		// and producing a bogus totalPages value.
		panic(err)
	}
	data := map[string]interface{}{
		"vspData":      allVSPSlice,
		"currentPage":  int(pageToLoad),
		"previousPage": int(pageToLoad - 1),
		"totalPages":   int(math.Ceil(float64(totalCount) / float64(txPerPage))),
	}
	// Only offer a next page when more rows remain beyond those loaded.
	totalTxLoaded := int(offset) + len(allVSPSlice)
	if int64(totalTxLoaded) < totalCount {
		data["nextPage"] = int(pageToLoad + 1)
	}
	s.render("vsp.html", data, res)
}
|
package main
import (
"fmt"
)
// delta compares each table with its predecessor and returns one fileDelta
// per consecutive pair, recording changed values and column mismatches in
// both directions.
func (ts tables) delta() (ds filesDelta) {
	for i, _ := range ts {
		if i == 0 {
			continue
		}
		t1 := ts[i-1]
		t2 := ts[i]
		fdelta := fileDelta{}
		fdelta.oldFileName = t1.sourceFileName
		fdelta.newFileName = t2.sourceFileName
		fdelta.rowDelta = rowDelta{}
		for key, _ := range t1.rows { // for each key in t1, compare with t2
			//fmt.Println("key:", key)
			if _, ok := t2.rows[key]; !ok { //// see if the key exists in t2
				//TODO: report key missing in t2
				continue
			}
			//// if key exists,
			cvDelta := columnValueDelta{}
			coDelta := columnOtherDelta{}
			for col, _ := range t1.rows[key] { // for each column
				//fmt.Println("col:", col, t1.rows[key][col], t2.rows[key][col])
				if _, ok := t2.rows[key][col]; !ok { // see if the column exists in t2
					coDelta[col] = "column missing"
					//TODO: report column missing in t2
					continue
				}
				// if column exists, then compare
				if t1.rows[key][col] != t2.rows[key][col] {
					cvDelta[col] = valueDelta{
						oldValue: t1.rows[key][col],
						newValue: t2.rows[key][col],
					}
				}
			}
			if len(cvDelta) > 0 {
				columnDelta := fdelta.rowDelta[key]
				columnDelta.columnValueDelta = cvDelta
				fdelta.rowDelta[key] = columnDelta
			}
			if len(coDelta) > 0 {
				columnDelta := fdelta.rowDelta[key]
				// NOTE(review): coDelta (a columnOtherDelta) is stored into the
				// columnValueDelta field, overwriting the value deltas saved just
				// above when both are non-empty — it looks like it should go into
				// a dedicated field; confirm against the type definitions.
				columnDelta.columnValueDelta = coDelta
				fdelta.rowDelta[key] = columnDelta
			}
		}
		// fdelta is appended here, but since rowDelta is a map the second
		// pass below still mutates the appended element.
		ds = append(ds, fdelta)
		for key, _ := range t2.rows { // for each key in t2, compare with t1
			if _, ok := t1.rows[key]; !ok { //// see if the key exists in t1
				//TODO: report key missing in t1
				continue
			}
			//// if key exists,
			coDelta := columnOtherDelta{}
			for col, _ := range t2.rows[key] { // for each column
				if _, ok := t1.rows[key][col]; !ok { // see if the column exists in t1
					//TODO: report column missing in t1
					coDelta[col] = "new column"
					continue
				}
			}
			if len(coDelta) > 0 {
				columnDelta := fdelta.rowDelta[key]
				// NOTE(review): same field reuse as above — confirm intended.
				columnDelta.columnValueDelta = coDelta
				fdelta.rowDelta[key] = columnDelta
			}
		}
	}
	return
}
// print writes a human-readable, dash-indented report of the deltas to
// stdout.
func (ds filesDelta) print() {
	if len(ds) == 0 {
		fmt.Println("no files to compare")
		return
	}
	tab := ""
	for i, fd := range ds { //for each pair of files
		tab = "-"
		fmt.Println()
		fmt.Println(tab, i, "comparing file", fd.oldFileName, "with", fd.newFileName, ":", len(fd.rowDelta), "diff")
		if len(fd.rowDelta) == 0 {
			tab := "--"
			fmt.Println(tab, "no differences")
			continue
		}
		for key, cd := range fd.rowDelta {
			// NOTE(review): both operands of this && are the same expression;
			// the second was probably meant to test the "other" delta field.
			if len(cd.columnValueDelta) == 0 && len(cd.columnValueDelta) == 0 {
				continue
			}
			tab := "--"
			fmt.Println(tab, "differences for key", key)
			for col, d := range cd.columnValueDelta {
				tab := "---"
				fmt.Println(tab, "column", col, "changed from", d.oldValue, "to", d.newValue)
			}
		}
	}
}
|
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"github.com/emicklei/dot"
"github.com/emicklei/xconnect"
"gopkg.in/yaml.v2"
)
// read all xconnect config files
// start a webservice to display dot graphs in PNG
// master is the shared directed graph all configs are rendered into.
var master = dot.NewGraph(dot.Directed)

// networkIDtoNode maps a listen entry's network ID to its graph node so
// connect entries can resolve their edge endpoints.
var networkIDtoNode = map[string]dot.Node{}

// xconnect -dot | dot -Tpng > graph.png && open graph.png
// makeGraph loads every xconnect YAML document below the working
// directory, renders each service into the master graph, wires up their
// connect entries, and prints the graph in DOT format.
func makeGraph() {
	cfgs := []xconnect.XConnect{}
	for _, each := range collectYAMLnames() {
		d, err := loadDocument(each)
		if err != nil {
			// Log and continue; a broken document contributes a zero-value
			// config below.
			log.Println(err)
		}
		//fmt.Println("loaded", d.Config.Meta.Name)
		cfgs = append(cfgs, d.XConnect)
	}
	// First pass creates all nodes so the second pass can resolve edges.
	for _, each := range cfgs {
		addToGraph(each, master)
	}
	for _, each := range cfgs {
		connectInGraph(each, master)
	}
	fmt.Println(master.String())
}
// collectYAMLnames walks the current directory tree and returns the paths
// of all files ending in ".yaml". It panics when the walk fails.
func collectYAMLnames() (files []string) {
	root := "."
	err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
		// Propagate walk errors; previously they were swallowed, which also
		// hid unreadable directories from the caller.
		if err != nil {
			return err
		}
		if strings.HasSuffix(path, ".yaml") {
			files = append(files, path)
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
	return
}
// loadDocument reads and unmarshals one xconnect YAML document from disk.
func loadDocument(name string) (xconnect.Document, error) {
	var doc xconnect.Document
	content, err := ioutil.ReadFile(name)
	if err != nil {
		return xconnect.Document{}, err
	}
	if err := yaml.Unmarshal(content, &doc); err != nil {
		return xconnect.Document{}, err
	}
	return doc, nil
}
// addToGraph renders one service config as a cluster subgraph: one node
// per listen entry (registered in networkIDtoNode) and one plaintext node
// per connect entry.
func addToGraph(cfg xconnect.XConnect, g *dot.Graph) {
	s := g.Subgraph(cfg.Meta.Name, dot.ClusterOption{})
	s.Attr("style", "rounded")
	s.Attr("bgcolor", "#F5FDF2")
	// Optional UI overrides from the YAML's extra fields.
	if bg, ok := cfg.Meta.ExtraFields["ui-bgcolor"]; ok {
		s.Attr("bgcolor", bg)
	}
	for k, v := range cfg.Listen {
		id := fmt.Sprintf("%s/%s", cfg.Meta.Name, k)
		n := s.Node(id).Label(k)
		n.Attr("fillcolor", "#FFFFFF").Attr("style", "filled")
		if bg, ok := v.ExtraFields["ui-fillcolor"]; ok {
			n.Attr("fillcolor", bg).Attr("style", "filled")
		}
		// Register the node so connectInGraph can resolve edges to it.
		networkIDtoNode[v.NetworkID()] = n
	}
	for k := range cfg.Connect {
		id := fmt.Sprintf("%s/%s", cfg.Meta.Name, k)
		// https://graphviz.org/doc/info/shapes.html#polygon
		s.Node(id).Label(k).Attr("shape", "plaintext")
	}
}
// connectInGraph draws an edge from each connect entry of cfg to the
// listen node it targets. Unknown targets that declare a kind get a
// synthesized node; otherwise the dangling reference is reported on
// stderr and skipped.
func connectInGraph(cfg xconnect.XConnect, g *dot.Graph) {
	s, _ := g.FindSubgraph(cfg.Meta.Name)
	for k, v := range cfg.Connect {
		id := fmt.Sprintf("%s/%s", cfg.Meta.Name, k)
		from := s.Node(id)
		to, ok := networkIDtoNode[v.NetworkID()]
		if !ok {
			// if kind is set then create a node to represent the other end
			if v.Kind != "" {
				id := v.ResourceID()
				to = g.Node(id)
				// remember
				networkIDtoNode[id] = to
			} else {
				fmt.Fprintf(os.Stderr, "[xconnect] no listen entry found: %s\n", v.NetworkID())
				continue
			}
		}
		from.Edge(to).Attr("arrowtail", "dot").Attr("dir", "both")
	}
}
|
//package with commands
package commands
import (
"database/sql"
"log"
_ "github.com/mattn/go-sqlite3"
)
// GetImage returns the path to a book's cover image, looked up by book id
// in the given SQLite database. (original comment, in Russian: "returns
// the path to the book cover, if present")
func GetImage(database string, query string) string {
	var path1 string
	db, err := sql.Open("sqlite3", database)
	if err != nil {
		panic(err)
	}
	defer db.Close()
	// Look up the book's directory. Use a bound parameter: the previous
	// query concatenated the caller-supplied id into the SQL text, an
	// SQL-injection vector.
	rows, err := db.Query("select path from main.books where id=?", query)
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		if err := rows.Scan(&path1); err != nil {
			log.Fatal(err)
		}
	}
	// The cover file name is fixed, so append it to the book's directory.
	// (original comment: "append the picture name — always the same — and
	// return the full path to the picture")
	return path1 + "/cover.jpg"
}
|
package user
import (
"easyquery"
)
// RoleCrudService is the package-wide, ready-to-use role CRUD service;
// it is initialized in init below.
var RoleCrudService *RoleService

// RoleService exposes CRUD operations for roles by embedding a generic
// easyquery.Crud implementation.
type RoleService struct {
	easyquery.Crud
}
// init eagerly constructs the default role service so importers can use
// RoleCrudService directly.
func init() {
	RoleCrudService = NewDefaultRoleService()
}
// NewRoleService wraps the given Crud implementation in a RoleService.
func NewRoleService(crud easyquery.Crud) *RoleService {
	return &RoleService{crud}
}
// NewDefaultRoleService builds a RoleService backed by the default
// easyquery CRUD service, wired to the role search helpers and the gorm
// database accessor.
func NewDefaultRoleService() *RoleService {
	return NewRoleService(easyquery.NewCrudService(SearchRole, SearchRoles, retreiveGormDB))
}
|
package main
import (
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"time"

	"gorm.io/driver/postgres"
	"gorm.io/gorm"
	"gorm.io/gorm/clause"
)
// main fetches the DWD station forecast data and stores it in Postgres.
// NOTE(review): the DSN passed to dbInsert is the literal placeholder
// "TODO" — the program cannot connect until a real connection string is
// supplied.
func main() {
	data, err := request()
	if err != nil {
		log.Fatalln(err)
	}
	log.Printf("%+v\n", data)
	dbInsert("TODO", data)
}
// dbInsert connects to Postgres via gorm, migrates the schema, and upserts
// the station data (existing rows are fully updated on conflict).
func dbInsert(dsn string, data Stations) {
	db, err := gorm.Open(postgres.Open(dsn), &gorm.Config{})
	if err != nil {
		panic("failed to connect database")
	}
	// Migrate the schema
	if err := db.AutoMigrate(&Stations{}); err != nil {
		log.Fatalf("failed auto migration: %v\n", err)
	}
	// Create runs in its own implicit transaction; the previous
	// tx.Commit() call on the returned *gorm.DB was a no-op outside an
	// explicit Begin, and tx.Error went completely unchecked.
	tx := db.Clauses(clause.OnConflict{
		UpdateAll: true,
	}).Create(&data)
	if tx.Error != nil {
		log.Fatalf("failed insert: %v\n", tx.Error)
	}
}
// Stations maps a DWD station ID to its forecast payload.
type Stations map[string]Station

// Station mirrors one entry of the DWD stationOverviewExtended response.
// Forecast1/Forecast2 carry time-series arrays starting at Start with
// TimeStep spacing; Days carries per-day summaries.
type Station struct {
	Forecast1 struct {
		StationID                    string `json:"stationId"`
		Start                        int    `json:"start"`
		TimeStep                     int    `json:"timeStep"`
		Temperature                  []int  `json:"temperature"`
		TemperatureStd               []int  `json:"temperatureStd"`
		WindSpeed                    string `json:"windSpeed"`
		WindDirection                string `json:"windDirection"`
		WindGust                     string `json:"windGust"`
		Icon                         []int  `json:"icon"`
		PrecipitationTotal           []int  `json:"precipitationTotal"`
		PrecipitationProbablity      string `json:"precipitationProbablity"`
		PrecipitationProbablityIndex string `json:"precipitationProbablityIndex"`
	} `json:"forecast1"`
	Forecast2 struct {
		StationID                    string `json:"stationId"`
		Start                        int    `json:"start"`
		TimeStep                     int    `json:"timeStep"`
		Temperature                  []int  `json:"temperature"`
		TemperatureStd               []int  `json:"temperatureStd"`
		WindSpeed                    string `json:"windSpeed"`
		WindDirection                string `json:"windDirection"`
		WindGust                     string `json:"windGust"`
		Icon                         []int  `json:"icon"`
		PrecipitationTotal           []int  `json:"precipitationTotal"`
		PrecipitationProbablity      string `json:"precipitationProbablity"`
		PrecipitationProbablityIndex string `json:"precipitationProbablityIndex"`
	} `json:"forecast2"`
	ForecastStart string `json:"forecastStart"`
	Days          []struct {
		StationID      string `json:"stationId"`
		DayDate        string `json:"dayDate"`
		TemperatureMin int    `json:"temperatureMin"`
		TemperatureMax int    `json:"temperatureMax"`
		Icon           int    `json:"icon"`
		Icon1          string `json:"icon1"`
		Icon2          string `json:"icon2"`
		Precipitation  int    `json:"precipitation"`
		WindSpeed      int    `json:"windSpeed"`
		WindGust       int    `json:"windGust"`
		WindDirection  int    `json:"windDirection"`
		Sunshine       int    `json:"sunshine"`
	} `json:"days"`
	Warnings []struct {
	} `json:"warnings"`
	ThreeHourSummaries string `json:"threeHourSummaries"`
}
// request fetches the extended station overview for two hard-coded DWD
// station IDs and decodes it into Stations.
func request() (Stations, error) {
	// Generated by curl-to-Go: https://mholt.github.io/curl-to-go
	// curl -X 'GET' \
	//   'https://dwd.api.proxy.bund.dev/v30/stationOverviewExtended?stationIds=10865,G005' \
	//   -H 'accept: application/json'
	req, err := http.NewRequest(http.MethodGet, "https://dwd.api.proxy.bund.dev/v30/stationOverviewExtended?stationIds=13670,H419", nil)
	if err != nil {
		return Stations{}, fmt.Errorf("failed creating request: %v", err)
	}
	req.Header.Set("Accept", "application/json")
	// Use a bounded client: http.DefaultClient has no timeout, so a
	// stalled server would previously hang the program forever.
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Do(req)
	if err != nil {
		return Stations{}, fmt.Errorf("failed sending request: %v", err)
	}
	defer func() {
		if err := resp.Body.Close(); err != nil {
			log.Printf("failed closing body: %v\n", err)
		}
	}()
	result := Stations{}
	// result.StationID = make(map[string]StationID)
	// result.StationSubID = make(map[string]SubID)
	respBytes, err := io.ReadAll(resp.Body)
	if err != nil {
		return Stations{}, fmt.Errorf("failed reading response: %v", err)
	}
	if err := json.Unmarshal(respBytes, &result); err != nil {
		return Stations{}, fmt.Errorf("failed unmarshalling response: %v", err)
	}
	return result, nil
}
|
package azure
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"strings"
"time"
"github.com/Azure/go-autorest/autorest"
"github.com/protofire/polkadot-failover-mechanism/pkg/helpers/fanout"
"github.com/Azure/azure-sdk-for-go/profiles/2019-03-01/resources/mgmt/insights"
)
// getMetricsResourceURL builds the ARM resource ID of a VM scale set — the
// resource URI the insights metrics APIs expect.
func getMetricsResourceURL(subscriptionID, resourceGroup, vmScaleSetName string) string {
	const pattern = "/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s"
	return fmt.Sprintf(pattern, subscriptionID, resourceGroup, vmScaleSetName)
}
// GetValidatorMetricsForVMScaleSet queries the last 5 minutes of the named
// metric (1-minute granularity) for one VM scale set and returns the most
// recent value. A zero insights.Metric is returned when the query yields
// no data.
func GetValidatorMetricsForVMScaleSet(
	ctx context.Context,
	client *insights.MetricsClient,
	resourceGroup,
	vmScaleSetName,
	metricsName,
	metricNameSpace string,
	aggregationType insights.AggregationType,
) (insights.Metric, error) {
	// 1-minute buckets over the last 5 minutes (ISO-8601 duration/timespan).
	interval := "PT1M"
	timespan := fmt.Sprintf(
		"%s/%s",
		time.Now().UTC().Add(time.Duration(-5)*time.Minute).Format("2006-01-02T15:04:05"),
		time.Now().UTC().Format("2006-01-02T15:04:05"),
	)
	resourceURI := getMetricsResourceURL(client.SubscriptionID, resourceGroup, vmScaleSetName)
	result, err := client.List(
		ctx,
		resourceURI,
		timespan,
		&interval,
		metricsName,
		string(aggregationType),
		nil,
		string(aggregationType),
		"host eq '*'",
		"",
		metricNameSpace,
	)
	if err != nil {
		return insights.Metric{}, err
	}
	if result.Value == nil || len(*result.Value) == 0 {
		return insights.Metric{}, nil
	}
	// Return the most recent (last) entry.
	return (*result.Value)[len(*result.Value)-1], nil
}
// GetMetricsClient returns an insights metrics client for the given
// subscription, authorized via the package's getAuthorizer helper.
func GetMetricsClient(subscriptionID string) (insights.MetricsClient, error) {
	client := insights.NewMetricsClient(subscriptionID)
	authorizer, err := getAuthorizer()
	if err != nil {
		return client, fmt.Errorf("cannot get authorizer: %w", err)
	}
	client.Authorizer = authorizer
	return client, nil
}
// GetMetricsDefinitionsClient returns an insights metric-definitions
// client for the given subscription, authorized via getAuthorizer.
func GetMetricsDefinitionsClient(subscriptionID string) (insights.MetricDefinitionsClient, error) {
	client := insights.NewMetricDefinitionsClient(subscriptionID)
	authorizer, err := getAuthorizer()
	if err != nil {
		return client, fmt.Errorf("cannot get authorizer: %w", err)
	}
	client.Authorizer = authorizer
	return client, nil
}
// GetValidatorMetricsForVMScaleSets fans GetValidatorMetricsForVMScaleSet
// out over all scale sets concurrently and collects the results into a
// scale-set-name -> metric map.
func GetValidatorMetricsForVMScaleSets(
	ctx context.Context,
	client *insights.MetricsClient,
	vmScaleSetNames []string,
	resourceGroup,
	metricsName,
	metricNameSpace string,
	aggregationType insights.AggregationType,
) (map[string]insights.Metric, error) {
	result := make(map[string]insights.Metric, len(vmScaleSetNames))
	// metricItem pairs a fetched metric with its scale set for the fan-in.
	type metricItem struct {
		metric         insights.Metric
		vmScaleSetName string
	}
	var names []interface{}
	for _, name := range vmScaleSetNames {
		names = append(names, name)
	}
	out := fanout.ConcurrentResponseItems(ctx, func(ctx context.Context, value interface{}) (interface{}, error) {
		vmScaleSetName := value.(string)
		metric, err := GetValidatorMetricsForVMScaleSet(
			ctx,
			client,
			resourceGroup,
			vmScaleSetName,
			metricsName,
			metricNameSpace,
			aggregationType,
		)
		if err != nil {
			return metricItem{}, err
		}
		return metricItem{
			metric:         metric,
			vmScaleSetName: vmScaleSetName,
		}, nil
	}, names...)
	items, err := fanout.ReadItemChannel(out)
	if err != nil {
		// Return what was collected so far along with the error.
		return result, err
	}
	for _, item := range items {
		mi := item.(metricItem)
		result[mi.vmScaleSetName] = mi.metric
	}
	return result, err
}
// getDataAggregation compares the requested aggregation of a single metric
// data point against checkValue. It returns checkValue when the matching
// aggregation field is non-nil and, truncated to int, equals checkValue;
// otherwise it returns -1 (including for aggregation kinds not handled here).
func getDataAggregation(data insights.MetricValue, aggregationType insights.AggregationType, checkValue int) int {
	switch aggregationType {
	case insights.Maximum:
		if data.Maximum != nil && int(*data.Maximum) == checkValue {
			return checkValue
		}
	case insights.Minimum:
		if data.Minimum != nil && int(*data.Minimum) == checkValue {
			return checkValue
		}
	case insights.Average:
		if data.Average != nil && int(*data.Average) == checkValue {
			return checkValue
		}
	case insights.Count:
		if data.Count != nil && int(*data.Count) == checkValue {
			return checkValue
		}
	case insights.Total:
		if data.Total != nil && int(*data.Total) == checkValue {
			return checkValue
		}
	}
	// No match, nil aggregation, or unsupported aggregation type.
	return -1
}
// LogMetrics pretty-prints every metric in the map at the given log level,
// falling back to a raw %#v dump when a metric cannot be marshalled to JSON.
func LogMetrics(metrics map[string]insights.Metric, level string) {
	for vmScaleSetName, metric := range metrics {
		pretty, marshalErr := json.MarshalIndent(metric, "", " ")
		if marshalErr != nil {
			log.Printf("[%s]. Got metrics for vm scale set %s - %#v: %#v", level, vmScaleSetName, metric, marshalErr)
			continue
		}
		log.Printf("[%s]. Got metrics for vm scale set %s:\n%s", level, vmScaleSetName, string(pretty))
	}
}
// GetValidatorMetricNameForMetricNamespace lists the metric definitions of a
// VM scale set and returns the canonical name of the metric matching
// metricName case-insensitively in the given namespace, or "" when it is not
// defined (yet).
func GetValidatorMetricNameForMetricNamespace(
	ctx context.Context,
	client *insights.MetricDefinitionsClient,
	vmScaleSetName,
	resourceGroup,
	metricName,
	metricNameSpace string,
) (string, error) {
	resourceURI := getMetricsResourceURL(client.SubscriptionID, resourceGroup, vmScaleSetName)
	definitions, err := client.List(ctx, resourceURI, metricNameSpace)
	if err != nil {
		return "", err
	}
	if definitions.Value == nil {
		return "", nil
	}
	for _, definition := range *definitions.Value {
		if definition.Name == nil || definition.Name.Value == nil {
			continue
		}
		if strings.EqualFold(*definition.Name.Value, metricName) {
			return *definition.Name.Value, nil
		}
	}
	return "", nil
}
// WaitValidatorMetricNameForMetricNamespace polls every 5 seconds until the
// metric named metricName appears in the metric definitions of the given VM
// scale set's namespace, and returns its canonical name. 404 responses from
// the definitions API are treated as "not published yet" and retried; any
// other error aborts the wait. Cancelling ctx aborts the wait with an error.
func WaitValidatorMetricNameForMetricNamespace(
	ctx context.Context,
	client *insights.MetricDefinitionsClient,
	vmScaleSetName,
	resourceGroup,
	metricName,
	metricNameSpace string,
) (string, error) {
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return "", fmt.Errorf("cancelled waiting for metric of metric namespace %q. Instance scale set: %q", metricNameSpace, vmScaleSetName)
		case <-ticker.C:
			metric, err := GetValidatorMetricNameForMetricNamespace(
				ctx,
				client,
				vmScaleSetName,
				resourceGroup,
				metricName,
				metricNameSpace,
			)
			if err != nil {
				// A 404 means the definitions are not available yet; keep
				// polling. Anything else is a hard failure.
				dErr := &autorest.DetailedError{}
				if errors.As(err, dErr) && dErr.StatusCode == 404 {
					continue
				}
				return "", err
			}
			// An empty name means the metric is not defined yet; poll again.
			if metric != "" {
				return metric, nil
			}
		}
	}
}
// WaitValidatorMetricNamesForMetricNamespace polls the metric definitions of
// every given VM scale set concurrently until the metric named metricName is
// reported in metricNameSpace for `attempts` consecutive polls, and returns
// the resolved metric name per scale set. Each scale set is polled every
// `period` seconds. 404 responses are treated as "not published yet"; other
// errors abort a scale set's wait after `attempts` consecutive failures.
// Cancelling ctx aborts the wait.
func WaitValidatorMetricNamesForMetricNamespace(
	ctx context.Context,
	client *insights.MetricDefinitionsClient,
	vmScaleSetNames []string,
	resourceGroup,
	metricName,
	metricNameSpace string,
	period int,
	attempts int,
) (map[string]string, error) {
	// metricItem pairs a resolved metric name with its scale set so fan-out
	// results can be collated back into a map.
	type metricItem struct {
		metric         string
		vmScaleSetName string
	}
	names := make([]interface{}, 0, len(vmScaleSetNames))
	for _, name := range vmScaleSetNames {
		names = append(names, name)
	}
	out := fanout.ConcurrentResponseItems(ctx, func(ctx context.Context, value interface{}) (interface{}, error) {
		vmScaleSetName := value.(string)
		// Each worker owns its ticker: a single ticker channel shared across
		// all concurrent workers would distribute ticks among them, slowing
		// every scale set's polling down by the number of workers.
		ticker := time.NewTicker(time.Duration(period) * time.Second)
		defer ticker.Stop()
		successCount := 0
		errorsCount := 0
		for {
			select {
			case <-ctx.Done():
				return metricItem{}, fmt.Errorf("cancelled waiting for metric of metric namespace %q. Instance scale set: %q", metricNameSpace, vmScaleSetName)
			case <-ticker.C:
				metric, err := GetValidatorMetricNameForMetricNamespace(
					ctx,
					client,
					vmScaleSetName,
					resourceGroup,
					metricName,
					metricNameSpace,
				)
				if err != nil {
					// 404 means the definitions are not available yet; reset
					// the success streak and keep polling.
					dErr := &autorest.DetailedError{}
					if errors.As(err, dErr) && dErr.StatusCode == 404 {
						successCount = 0
						continue
					}
					errorsCount++
					log.Printf(
						"[DEBUG] failover: Metrics. error getting metric definitions for metric name %q, namespace %q, scale set %q: %v",
						metricName,
						metricNameSpace,
						vmScaleSetName,
						err,
					)
					if errorsCount >= attempts {
						return metricItem{}, fmt.Errorf(
							"error getting metric definitions for metric name %q, namespace %q, scale set %q. Errors occurred %d: %w", metricName,
							metricNameSpace,
							vmScaleSetName,
							errorsCount,
							err,
						)
					}
					continue
				}
				errorsCount = 0
				if metric == "" {
					successCount = 0
					continue
				}
				// Require `attempts` consecutive successful observations
				// before declaring the metric stable.
				successCount++
				if successCount >= attempts {
					return metricItem{
						metric:         metric,
						vmScaleSetName: vmScaleSetName,
					}, nil
				}
				log.Printf(
					"[DEBUG] failover: Metrics. successfully get metric definition %q for namespace %q and scale set %q. Retried: %d",
					metric,
					metricNameSpace,
					vmScaleSetName,
					successCount,
				)
			}
		}
	}, names...)
	result := make(map[string]string, len(vmScaleSetNames))
	items, err := fanout.ReadItemChannel(out)
	if err != nil {
		return result, err
	}
	for _, item := range items {
		mi := item.(metricItem)
		result[mi.vmScaleSetName] = mi.metric
	}
	return result, nil
}
|
package gorequest
import (
"bytes"
"encoding/json"
"fmt"
"github.com/moul/http2curl"
"github.com/pkg/errors"
"io"
"io/ioutil"
"log"
"mime/multipart"
"net/http"
"net/http/httputil"
"net/textproto"
"net/url"
"strconv"
"strings"
"time"
)
// SetDebug enables or disables dumping of the full outgoing request and
// incoming response to the agent's logger. It returns the agent for chaining.
func (s *Agent) SetDebug(enable bool) *Agent {
	s.Debug = enable
	return s
}
// SetCurlCommand enables or disables logging an equivalent curl command line
// for each request. It returns the agent for chaining.
func (s *Agent) SetCurlCommand(enable bool) *Agent {
	s.CurlCommand = enable
	return s
}
// SetDoNotClearAgent controls whether ClearAgent resets per-request state;
// when enabled, state is preserved between requests. It returns the agent for
// chaining.
func (s *Agent) SetDoNotClearAgent(enable bool) *Agent {
	s.DoNotClearAgent = enable
	return s
}
// SetLogger replaces the logger used for debug and curl output. It returns
// the agent for chaining.
func (s *Agent) SetLogger(logger log.Logger) *Agent {
	s.Logger = logger
	return s
}
// ClearAgent resets all per-request state (URL, method, headers, payloads,
// cookies and accumulated errors) so the agent can be reused. It is a no-op
// when the DoNotClearAgent flag is set.
func (s *Agent) ClearAgent() {
	if s.DoNotClearAgent {
		return
	}
	s.Url = ""
	s.Method = ""
	s.RawString = ""
	s.ForceType = ""
	s.TargetType = TypeJson
	s.BounceToRawString = false
	s.Header = http.Header{}
	s.Data = map[string]interface{}{}
	s.SliceData = []interface{}{}
	s.FormData = url.Values{}
	s.QueryData = url.Values{}
	s.FileData = []File{}
	s.Cookies = []*http.Cookie{}
	s.Errors = nil
}
// CustomMethod dispatches to the matching verb helper for the well-known HTTP
// methods; for any other method it resets the agent and records the method
// and target URL as given.
func (s *Agent) CustomMethod(method, targetUrl string) *Agent {
	verbs := map[string]func(string) *Agent{
		POST:    s.Post,
		GET:     s.Get,
		HEAD:    s.Head,
		PUT:     s.Put,
		DELETE:  s.Delete,
		PATCH:   s.Patch,
		OPTIONS: s.Options,
	}
	if verb, ok := verbs[method]; ok {
		return verb(targetUrl)
	}
	s.ClearAgent()
	s.Method = method
	s.Url = targetUrl
	s.Errors = nil
	return s
}
// resetTo clears the agent (honoring DoNotClearAgent) and primes it for a new
// request with the given HTTP method and target URL.
func (s *Agent) resetTo(method, targetUrl string) *Agent {
	s.ClearAgent()
	s.Method = method
	s.Url = targetUrl
	s.Errors = nil
	return s
}

// Get primes the agent for a GET request to targetUrl.
func (s *Agent) Get(targetUrl string) *Agent {
	return s.resetTo(GET, targetUrl)
}

// Post primes the agent for a POST request to targetUrl.
func (s *Agent) Post(targetUrl string) *Agent {
	return s.resetTo(POST, targetUrl)
}

// Head primes the agent for a HEAD request to targetUrl.
func (s *Agent) Head(targetUrl string) *Agent {
	return s.resetTo(HEAD, targetUrl)
}

// Put primes the agent for a PUT request to targetUrl.
func (s *Agent) Put(targetUrl string) *Agent {
	return s.resetTo(PUT, targetUrl)
}

// Delete primes the agent for a DELETE request to targetUrl.
func (s *Agent) Delete(targetUrl string) *Agent {
	return s.resetTo(DELETE, targetUrl)
}

// Patch primes the agent for a PATCH request to targetUrl.
func (s *Agent) Patch(targetUrl string) *Agent {
	return s.resetTo(PATCH, targetUrl)
}

// Options primes the agent for an OPTIONS request to targetUrl.
func (s *Agent) Options(targetUrl string) *Agent {
	return s.resetTo(OPTIONS, targetUrl)
}
// Set is useful for setting header fields,
// this will overwrite the existing values of Header set through AppendHeader().
// Example. To set `Accept` as `application/json`
// gorequest.New().Post("/gamelist").
// Set("Accept","application/json").End()
func (s *Agent) Set(param, value string) *Agent {
	s.Header.Set(param, value)
	return s
}
// AppendHeader is used for setting header fields with multiple values,
// unlike Set it does not overwrite values already present.
// Example. To set `Accept` as `application/json,text/plain`
// gorequest.New().Post("/gamelist").
// AppendHeader("Accept","application/json").
// AppendHeader("Accept","text/plain").End()
func (s *Agent) AppendHeader(param, value string) *Agent {
	s.Header.Add(param, value)
	return s
}
// Retry configures the retry policy: up to retryerCount extra attempts,
// waiting retryTime between attempts, retrying whenever the response status
// is one of statusCode. Unknown status codes are recorded as errors.
// Example. To set a retry policy with 5 seconds between each attempt,
// 3 max attempts,
// and StatusBadRequest and StatusInternalServerError as retryable statuses:
// gorequest.New().Post("/gamelist").
// Retry(3, 5*time.Second, http.StatusBadRequest, http.StatusInternalServerError).End()
func (s *Agent) Retry(retryerCount int, retryTime time.Duration, statusCode ...int) *Agent {
	for _, code := range statusCode {
		// Reject status codes that Go's http package does not know about.
		statusText := http.StatusText(code)
		if len(statusText) == 0 {
			s.Errors = append(s.Errors, errors.New("StatusCode '"+strconv.Itoa(code)+"' doesn't exist in http package"))
		}
	}
	s.RetryAble = struct {
		RetryAbleStatus []int
		RetryTime       time.Duration
		RetryCount      int
		Attempt         int
		Enable          bool
	}{RetryAbleStatus: statusCode, RetryTime: retryTime, RetryCount: retryerCount, Attempt: 0, Enable: true}
	return s
}
// SetBasicAuth sets the basic authentication header
// Example. To set the header for username "myuser" and password "mypass"
// gorequest.New().Post("/gamelist").
// SetBasicAuth("myuser","mypass").End()
func (s *Agent) SetBasicAuth(username, password string) *Agent {
	s.BasicAuth = struct{ Username, Password string }{Username: username, Password: password}
	return s
}
// AddCookie adds a cookie to the request. The behavior is the same as AddCookie on Request from net/http
func (s *Agent) AddCookie(c *http.Cookie) *Agent {
	s.Cookies = append(s.Cookies, c)
	return s
}
// AddCookies is a convenient method to add multiple cookies at once.
func (s *Agent) AddCookies(cookies []*http.Cookie) *Agent {
	s.Cookies = append(s.Cookies, cookies...)
	return s
}
// RedirectPolicy accepts a function to define how to handle redirects. If the
// policy function returns an error, the next Request is not made and the
// previous request is returned. The policy function's arguments are the
// Request about to be made and the past requests in order of oldest first.
func (s *Agent) RedirectPolicy(policy func(req Request, via []Request) error) *Agent {
	s.safeModifyTransport()
	s.Client.CheckRedirect = func(next *http.Request, previous []*http.Request) error {
		history := make([]Request, len(previous))
		for i := range previous {
			history[i] = Request(previous[i])
		}
		return policy(Request(next), history)
	}
	return s
}
// changeMapToURLValues converts a generic data map into url.Values for form
// encoding. Strings, bools, numbers (including json.Number, which protects
// against json's default float64 conversion when Decoder.UseNumber is used)
// and homogeneous slices of those types are supported; values of any other
// type are silently skipped.
func changeMapToURLValues(data map[string]interface{}) url.Values {
	var newUrlValues = url.Values{}
	for k, v := range data {
		switch val := v.(type) {
		case string:
			newUrlValues.Add(k, val)
		case bool:
			newUrlValues.Add(k, strconv.FormatBool(val))
		case json.Number:
			newUrlValues.Add(k, string(val))
		case int:
			newUrlValues.Add(k, strconv.FormatInt(int64(val), 10))
		// todo add all other int-types (int8,int16...)
		case float64:
			newUrlValues.Add(k, strconv.FormatFloat(val, 'f', -1, 64))
		case float32:
			// bitSize 32 yields the shortest string that round-trips a
			// float32 (e.g. "0.1" rather than "0.10000000149011612", which a
			// 64-bit conversion of float32(0.1) would produce).
			newUrlValues.Add(k, strconv.FormatFloat(float64(val), 'f', -1, 32))
		// following slices are mostly needed for tests
		case []string:
			for _, element := range val {
				newUrlValues.Add(k, element)
			}
		case []int:
			for _, element := range val {
				newUrlValues.Add(k, strconv.FormatInt(int64(element), 10))
			}
		case []bool:
			for _, element := range val {
				newUrlValues.Add(k, strconv.FormatBool(element))
			}
		case []float64:
			for _, element := range val {
				newUrlValues.Add(k, strconv.FormatFloat(element, 'f', -1, 64))
			}
		case []float32:
			for _, element := range val {
				// Same float32 shortest-representation rule as above.
				newUrlValues.Add(k, strconv.FormatFloat(float64(element), 'f', -1, 32))
			}
		// these slices are used in practice e.g. when sending a struct
		case []interface{}:
			if len(val) <= 0 {
				continue
			}
			switch val[0].(type) {
			case string:
				for _, element := range val {
					newUrlValues.Add(k, element.(string))
				}
			case bool:
				for _, element := range val {
					newUrlValues.Add(k, strconv.FormatBool(element.(bool)))
				}
			case json.Number:
				for _, element := range val {
					newUrlValues.Add(k, string(element.(json.Number)))
				}
			}
		default:
			// todo add ptr, arrays
		}
	}
	return newUrlValues
}
// End terminates the call chain and fires the request. It returns the
// Response (body already consumed and rewound), the body as a string, and a
// slice of any errors encountered (nil on success). An optional callback may
// be supplied instead of inspecting the return values:
//
//	resp, body, errs := gorequest.New().Get("http://www.google.com").End()
//	gorequest.New().Get("http://www.google.com").End(printBody)
func (s *Agent) End(callback ...func(response Response, body string, errs []error)) (Response, string, []error) {
	// Adapt the optional string callback to the byte-oriented EndBytes API.
	var byteCallbacks []func(Response, []byte, []error)
	if len(callback) > 0 {
		stringCallback := callback[0]
		byteCallbacks = append(byteCallbacks, func(resp Response, body []byte, errs []error) {
			stringCallback(resp, string(body), errs)
		})
	}
	resp, body, errs := s.EndBytes(byteCallbacks...)
	return resp, string(body), errs
}
// EndBytes should be used when you want the body as bytes. The callbacks work the same way as with `End`, except that a byte array is used instead of a string
func (s *Agent) EndBytes(callback ...func(response Response, body []byte, errs []error)) (Response, []byte, []error) {
	var (
		errs []error
		resp Response
		body []byte
	)
	// Re-issue the request until the retry policy is exhausted or the
	// response is not retryable; isRetryAbleRequest sleeps between attempts
	// and counts them.
	for {
		resp, body, errs = s.getResponseBytes()
		if errs != nil {
			return nil, nil, errs
		}
		if s.isRetryAbleRequest(resp) {
			// NOTE(review): this header is set even when retrying was never
			// enabled (it will then read "0") — confirm that is intended.
			resp.Header.Set("Retry-Count", strconv.Itoa(s.RetryAble.Attempt))
			break
		}
	}
	// Hand the callback its own shallow copy so it cannot mutate the
	// response returned to the caller.
	respCallback := *resp
	if len(callback) != 0 {
		callback[0](&respCallback, body, s.Errors)
	}
	return resp, body, nil
}
// isRetryAbleRequest reports whether the response should be returned as-is.
// When retrying is enabled, attempts remain, and the status code is in the
// retryable set, it sleeps for the configured interval, consumes an attempt,
// and returns false so the caller issues the request again.
func (s *Agent) isRetryAbleRequest(resp Response) bool {
	retryNeeded := s.RetryAble.Enable &&
		s.RetryAble.Attempt < s.RetryAble.RetryCount &&
		contains(resp.StatusCode, s.RetryAble.RetryAbleStatus)
	if !retryNeeded {
		return true
	}
	time.Sleep(s.RetryAble.RetryTime)
	s.RetryAble.Attempt++
	return false
}
// contains reports whether respStatus appears in statuses.
func contains(respStatus int, statuses []int) bool {
	for i := range statuses {
		if statuses[i] == respStatus {
			return true
		}
	}
	return false
}
// EndStruct finishes the chain like End but additionally unmarshals the JSON
// response body into v. The optional callback also receives the decoded
// value. Unmarshalling errors are appended to the agent's errors and
// returned.
func (s *Agent) EndStruct(v interface{}, callback ...func(response Response, v interface{}, body []byte, errs []error)) (Response, []byte, []error) {
	resp, body, errs := s.EndBytes()
	if errs != nil {
		return nil, body, errs
	}
	if unmarshalErr := json.Unmarshal(body, &v); unmarshalErr != nil {
		s.Errors = append(s.Errors, unmarshalErr)
		return resp, body, s.Errors
	}
	respCopy := *resp
	if len(callback) != 0 {
		callback[0](&respCopy, v, body, s.Errors)
	}
	return resp, body, nil
}
// getResponseBytes performs the configured request once and returns the
// response together with its fully-read body. The body is re-wrapped in
// resp.Body so callers can read it again. Any previously accumulated agent
// errors abort the call immediately.
func (s *Agent) getResponseBytes() (Response, []byte, []error) {
	var (
		req  *http.Request
		err  error
		resp Response
	)
	// check whether there is an error. if yes, return all errors
	if len(s.Errors) != 0 {
		return nil, nil, s.Errors
	}
	// check if there is forced type
	switch s.ForceType {
	case TypeJson, TypeForm, TypeXML, TypeText, TypeMultipart:
		s.TargetType = s.ForceType
	// if forceType is not set, check whether user set Content-Type header.
	// If yes, also bounce to the correct supported TargetType automatically.
	default:
		contentType := s.Header.Get("Content-Type")
		for k, v := range Types {
			if contentType == v {
				s.TargetType = k
			}
		}
	}
	// if slice and map get mixed, let's bounce to rawstring
	if len(s.Data) != 0 && len(s.SliceData) != 0 {
		s.BounceToRawString = true
	}
	// make Request
	req, err = s.MakeRequest()
	if err != nil {
		s.Errors = append(s.Errors, err)
		return nil, nil, s.Errors
	}
	// Set Transport
	if !DisableTransportSwap {
		s.Client.Transport = s.Transport
	}
	// Log details of this request
	if s.Debug {
		dump, err := httputil.DumpRequestOut(req, true)
		s.Logger.SetPrefix("[http] ")
		if err != nil {
			s.Logger.Fatal("Error:", err)
		} else {
			// Printf (not Println) so the %s verb is actually interpolated.
			s.Logger.Printf("HTTP Request: %s", string(dump))
		}
	}
	// Display CURL command line
	if s.CurlCommand {
		curl, err := http2curl.GetCurlCommand(req)
		s.Logger.SetPrefix("[curl] ")
		if err != nil {
			s.Logger.Fatal("Error:", err)
		} else {
			s.Logger.Printf("CURL command:%s", curl)
		}
	}
	// Send request
	resp, err = s.Client.Do(req)
	if err != nil {
		s.Errors = append(s.Errors, err)
		return nil, nil, s.Errors
	}
	defer resp.Body.Close()
	// Log details of this response
	if s.Debug {
		dump, err := httputil.DumpResponse(resp, true)
		if err != nil {
			s.Logger.Fatal("Error:", err)
		} else {
			s.Logger.Printf("HTTP Response:%s", string(dump))
		}
	}
	body, err := ioutil.ReadAll(resp.Body)
	// Reset resp.Body so it can be used again
	resp.Body = ioutil.NopCloser(bytes.NewBuffer(body))
	if err != nil {
		return nil, nil, []error{err}
	}
	return resp, body, nil
}
// MakeRequest assembles the *http.Request described by the agent's current
// state without sending it: it encodes the payload according to TargetType,
// then applies headers (Host is special-cased), the inferred Content-Type,
// query parameters, basic auth, and cookies. It returns an error when no
// method is set, when the target type cannot be determined, or when
// http.NewRequest rejects the method/URL.
func (s *Agent) MakeRequest() (*http.Request, error) {
	var (
		req           *http.Request
		contentType   string
		contentReader io.Reader
		err           error
	)
	if s.Method == "" {
		return nil, errors.New("No method specified")
	}
	// ===========Important===============
	// Throughout this region, contentReader and contentType are only set when the contents will be non-empty.
	// This is done to avoid ever sending a non-nil request body with nil contents to http.NewRequest,
	// because it contains logic which depends on whether or not the body is "nil".
	switch s.TargetType {
	case TypeJson:
		// If-case to give support to join array. we check if
		// 1) Map only: send it as json map from s.Data
		// 2) Array or Mix of map && array or others: send it as rawstring from s.RawString
		var contentJson []byte
		if s.BounceToRawString {
			contentJson = []byte(s.RawString)
		} else if len(s.Data) != 0 {
			contentJson, _ = json.Marshal(s.Data)
		} else if len(s.SliceData) != 0 {
			contentJson, _ = json.Marshal(s.SliceData)
		}
		if contentJson != nil {
			contentReader = bytes.NewReader(contentJson)
			contentType = "application/json"
		}
	case TypeForm, TypeFormData, TypeUrlencoded:
		var contentForm []byte
		if s.BounceToRawString || len(s.SliceData) != 0 {
			contentForm = []byte(s.RawString)
		} else {
			formData := changeMapToURLValues(s.Data)
			contentForm = []byte(formData.Encode())
		}
		if len(contentForm) != 0 {
			contentReader = bytes.NewReader(contentForm)
			contentType = "application/x-www-form-urlencoded"
		}
	case TypeText:
		if len(s.RawString) != 0 {
			contentReader = strings.NewReader(s.RawString)
			contentType = "text/plain"
		}
	case TypeXML:
		if len(s.RawString) != 0 {
			contentReader = strings.NewReader(s.RawString)
			contentType = "application/xml"
		}
	case TypeHTML:
		if len(s.RawString) != 0 {
			contentReader = strings.NewReader(s.RawString)
			contentType = "text/html"
		}
	case TypeMultipart:
		var (
			buf = &bytes.Buffer{}
			mw  = multipart.NewWriter(buf)
		)
		if s.BounceToRawString {
			fieldName := s.Header.Get("data_fieldname")
			if fieldName == "" {
				fieldName = "data"
			}
			fw, _ := mw.CreateFormField(fieldName)
			fw.Write([]byte(s.RawString))
			contentReader = buf
		}
		if len(s.Data) != 0 {
			formData := changeMapToURLValues(s.Data)
			for key, values := range formData {
				for _, value := range values {
					fw, _ := mw.CreateFormField(key)
					fw.Write([]byte(value))
				}
			}
			contentReader = buf
		}
		if len(s.SliceData) != 0 {
			fieldName := s.Header.Get("json_fieldname")
			if fieldName == "" {
				fieldName = "data"
			}
			h := make(textproto.MIMEHeader)
			// Escape backslashes as `\\` and double quotes as `\"` so the
			// field name stays a valid quoted-string inside the
			// Content-Disposition header. (Previously quotes were replaced
			// with `\\`, corrupting the header for names containing quotes.)
			fieldName = strings.Replace(strings.Replace(fieldName, "\\", "\\\\", -1), `"`, "\\\"", -1)
			h.Set("Content-Disposition", fmt.Sprintf(`form-data;name="%s"`, fieldName))
			h.Set("Content-Type", "application/json")
			fw, _ := mw.CreatePart(h)
			contentJson, err := json.Marshal(s.SliceData)
			if err != nil {
				return nil, err
			}
			fw.Write(contentJson)
			contentReader = buf
		}
		// add the files
		if len(s.FileData) != 0 {
			for _, file := range s.FileData {
				fw, _ := mw.CreateFormFile(file.FieldName, file.FileName)
				fw.Write(file.Data)
			}
			contentReader = buf
		}
		// close before call to FormDataContentType ! otherwise its not valid multipart
		mw.Close()
		if contentReader != nil {
			contentType = mw.FormDataContentType()
		}
	default:
		// return an error instead of an nil pointer exception here
		return nil, errors.New("TargetType '" + s.TargetType + "' could not be determined")
	}
	if req, err = http.NewRequest(s.Method, s.Url, contentReader); err != nil {
		return nil, err
	}
	for k, vals := range s.Header {
		for _, v := range vals {
			req.Header.Add(k, v)
		}
		// Setting the host header is a special case
		if strings.EqualFold(k, "Host") {
			req.Host = vals[0]
		}
	}
	// Don't infer the content type header if an override is already provided
	if len(contentType) != 0 && req.Header.Get("Content-Type") == "" {
		req.Header.Set("Content-Type", contentType)
	}
	// Add all queryString from Query func
	q := req.URL.Query()
	for k, v := range s.QueryData {
		for _, vv := range v {
			q.Add(k, vv)
		}
	}
	req.URL.RawQuery = q.Encode()
	// Add basic auth
	if s.BasicAuth != struct {
		Username, Password string
	}{} {
		req.SetBasicAuth(s.BasicAuth.Username, s.BasicAuth.Password)
	}
	// Add cookies
	for _, cookie := range s.Cookies {
		req.AddCookie(cookie)
	}
	return req, nil
}
// AsCurlCommand renders the request the agent would send as an equivalent
// curl command line, without executing it.
func (s *Agent) AsCurlCommand() (string, error) {
	req, err := s.MakeRequest()
	if err != nil {
		return "", err
	}
	command, err := http2curl.GetCurlCommand(req)
	if err != nil {
		return "", err
	}
	return command.String(), nil
}
|
package main
import (
"log"
adm "github.com/appcelerator/amp/cluster/ampadmin"
"github.com/spf13/cobra"
)
// CheckOptions selects which cluster verification checks to run.
type CheckOptions struct {
	version    bool // run the Docker version check
	scheduling bool // run the service scheduling check
	all        bool // run every check
}

// checksOpts holds the options used by the checks command.
var checksOpts = &CheckOptions{}
// checks runs the selected cluster verification checks (Docker version,
// service scheduling, or both when the all option is set). A failing check
// terminates the process via log.Fatal; successful output is logged.
func checks(cmd *cobra.Command, args []string) {
	report := func(verify func() (string, error)) {
		out, err := verify()
		if err != nil {
			log.Fatal(err)
		}
		log.Println(out)
	}
	if checksOpts.version || checksOpts.all {
		report(adm.VerifyDockerVersion)
	}
	if checksOpts.scheduling || checksOpts.all {
		report(adm.VerifyServiceScheduling)
	}
}
|
package ssh
import (
"fmt"
"io"
"reflect"
"strings"
"time"
validation "github.com/go-ozzo/ozzo-validation"
"golang.org/x/crypto/ssh"
"github.com/cyberark/secretless-broker/pkg/secretless/log"
"github.com/cyberark/secretless-broker/pkg/secretless/plugin/connector"
)
// ServerConfig is the configuration info for the target server
type ServerConfig struct {
	Network      string           // network to dial (set to "tcp" by serverConfig)
	Address      string           // host:port of the backend SSH server
	ClientConfig ssh.ClientConfig // auth and host-key settings used for the dial
}
// ServiceConnector contains the configuration and channels
type ServiceConnector struct {
	channels <-chan ssh.NewChannel // incoming client channels to be proxied
	logger   log.Logger            // logger for debug/warn/error output
}
// serverConfig builds the target server configuration from the connector's
// credential values. Recognized keys: "address" (":22" is appended when no
// port is present), "user" (defaults to "root"), "hostKey" (wire-format SSH
// public key; when absent ANY backend host key is accepted), and
// "privateKey" (used for public-key authentication).
func (h *ServiceConnector) serverConfig(values map[string][]byte) (config ServerConfig, err error) {
	keys := reflect.ValueOf(values).MapKeys()
	h.logger.Debugf("SSH backend connection parameters: %s", keys)
	config.Network = "tcp"
	if address, ok := values["address"]; ok {
		config.Address = string(address)
		// Default to the standard SSH port when no ":port" is given.
		if !strings.Contains(config.Address, ":") {
			config.Address = config.Address + ":22"
		}
	}
	// XXX: Should this be the user that the client was trying to connect as?
	config.ClientConfig.User = "root"
	if user, ok := values["user"]; ok {
		config.ClientConfig.User = string(user)
	}
	h.logger.Debugf("Trying to connect with user: %s", config.ClientConfig.User)
	if hostKeyStr, ok := values["hostKey"]; ok {
		var hostKey ssh.PublicKey
		if hostKey, err = ssh.ParsePublicKey([]byte(hostKeyStr)); err != nil {
			h.logger.Debugf("Unable to parse public key: %v", err)
			return
		}
		config.ClientConfig.HostKeyCallback = ssh.FixedHostKey(hostKey)
	} else {
		// Security-relevant fallback: without a pinned host key the backend
		// is not authenticated.
		h.logger.Warnf("No SSH hostKey specified. Secretless will accept any backend host key!")
		config.ClientConfig.HostKeyCallback = ssh.InsecureIgnoreHostKey()
	}
	if privateKeyStr, ok := values["privateKey"]; ok {
		var signer ssh.Signer
		if signer, err = ssh.ParsePrivateKey([]byte(privateKeyStr)); err != nil {
			h.logger.Debugf("Unable to parse private key: %v", err)
			return
		}
		config.ClientConfig.Auth = []ssh.AuthMethod{
			ssh.PublicKeys(signer),
		}
	}
	return
}
// Connect opens the connection to the target server and proxies requests.
// It requires the "address" and "privateKey" credentials, dials the backend,
// and then, for each incoming client channel, forwards out-of-band requests
// and channel data in both directions using dedicated goroutines.
func (h *ServiceConnector) Connect(
	credentialValuesByID connector.CredentialValuesByID,
) error {
	var err error
	var serverConfig ServerConfig
	var server ssh.Conn
	// NOTE(review): this local shadows the standard "errors" package within
	// this function; renaming it would aid readability.
	errors := validation.Errors{}
	for _, credential := range [...]string{"address", "privateKey"} {
		if _, hasCredential := credentialValuesByID[credential]; !hasCredential {
			errors[credential] = fmt.Errorf("must have credential '%s'", credential)
		}
	}
	if err := errors.Filter(); err != nil {
		return err
	}
	if serverConfig, err = h.serverConfig(credentialValuesByID); err != nil {
		return fmt.Errorf("could not resolve server config: '%s'", err)
	}
	if server, err = ssh.Dial(serverConfig.Network, serverConfig.Address, &serverConfig.ClientConfig); err != nil {
		return fmt.Errorf("failed to dial SSH backend '%s': %s", serverConfig.Address, err)
	}
	// Service the incoming Channel channel.
	for newChannel := range h.channels {
		// Open the matching channel on the backend; on failure, mirror the
		// rejection back to the client.
		serverChannel, serverRequests, err := server.OpenChannel(newChannel.ChannelType(), newChannel.ExtraData())
		if err != nil {
			// NOTE(review): this type assertion panics if OpenChannel returns
			// an error that is not *ssh.OpenChannelError — confirm that is
			// the only error type possible here.
			sshError := err.(*ssh.OpenChannelError)
			if err := newChannel.Reject(sshError.Reason, sshError.Message); err != nil {
				h.logger.Errorf("Failed to send new channel rejection : %s", err)
			}
			return err
		}
		clientChannel, clientRequests, err := newChannel.Accept()
		if err != nil {
			h.logger.Errorf("Failed to accept client channel : %s", err)
			serverChannel.Close()
			return err
		}
		// Forward out-of-band requests client -> server.
		go func() {
			for clientRequest := range clientRequests {
				h.logger.Debugf("Client request : %s", clientRequest.Type)
				ok, err := serverChannel.SendRequest(clientRequest.Type, clientRequest.WantReply, clientRequest.Payload)
				if err != nil {
					h.logger.Warnf("Failed to send client request to server channel : %s", err)
				}
				if clientRequest.WantReply {
					h.logger.Debugf("Server reply is %v", ok)
				}
			}
		}()
		// Forward out-of-band requests server -> client.
		go func() {
			for serverRequest := range serverRequests {
				h.logger.Debugf("Server request : %s", serverRequest.Type)
				ok, err := clientChannel.SendRequest(serverRequest.Type, serverRequest.WantReply, serverRequest.Payload)
				if err != nil {
					h.logger.Debugf("WARN: Failed to send server request to client channel : %s", err)
				}
				if serverRequest.WantReply {
					h.logger.Debugf("Client reply is %v", ok)
				}
			}
		}()
		// This delay is to prevent closing of channels on the other side
		// too early when we receive an EOF but have not had the chance to
		// pass that on to the client/server.
		// TODO: Maybe use a better logic for handling EOF conditions
		softDelay := time.Second * 2
		// Pump channel data client -> server until EOF.
		go func() {
			for {
				data := make([]byte, 1024)
				// NOTE(review): "len" shadows the builtin; renaming it would
				// aid readability.
				len, err := clientChannel.Read(data)
				if err == io.EOF {
					h.logger.Debugf("Client channel is closed")
					time.Sleep(softDelay)
					serverChannel.Close()
					return
				}
				// NOTE(review): non-EOF read errors are not handled, so this
				// loop may spin on a persistent error — confirm intended.
				_, err = serverChannel.Write(data[0:len])
				if err != nil {
					h.logger.Debugf("Error writing %d bytes to server channel : %s", len, err)
				}
			}
		}()
		// Pump channel data server -> client until EOF.
		go func() {
			for {
				data := make([]byte, 1024)
				len, err := serverChannel.Read(data)
				if err == io.EOF {
					h.logger.Debugf("Server channel is closed")
					time.Sleep(softDelay)
					clientChannel.Close()
					return
				}
				_, err = clientChannel.Write(data[0:len])
				if err != nil {
					h.logger.Debugf("Error writing %d bytes to client channel : %s", len, err)
				}
			}
		}()
	}
	return nil
}
|
package node
import (
"fmt"
"log"
"net/http"
"github.com/josetom/go-chain/common"
"github.com/josetom/go-chain/core"
)
// HTTP route paths served by the node API, plus the query parameter used by
// the sync endpoint.
const (
	RequestBalances     = "/balances"
	RequestTransactions = "/transactions"
	RequestAddPeers     = "/node/peers"
	RequestNodeStatus   = "/node/status"
	RequestNodeSync     = "/node/sync"
	QueryParamFromBlock = "fromBlock"
)
// HandlerFunc is an http handler that additionally receives the owning Node.
type HandlerFunc func(rw http.ResponseWriter, r *http.Request, n *Node)

// ErrRes is the generic error payload returned by all handlers.
type ErrRes struct {
	Error string `json:"error"`
}

// BalancesRes is the response of GET /balances.
type BalancesRes struct {
	Balances map[common.Address]uint `json:"balances"`
	Hash     common.Hash             `json:"block_hash"`
}

// NodeStatusRes is the response of GET /node/status.
type NodeStatusRes struct {
	Hash        common.Hash         `json:"block_hash"`
	Number      uint64              `json:"block_number"`
	Timestamp   uint64              `json:"block_timestamp"`
	KnownPeers  map[string]PeerNode `json:"peers_known"`
	PendingTxns []core.Transaction  `json:"pending_txns"`
}

// NodeSyncRes is the response of GET /node/sync.
type NodeSyncRes struct {
	Blocks []core.Block `json:"blocks"`
}

// NodeAddPeerReq is the request body of POST /node/peers.
type NodeAddPeerReq struct {
	Host        string `json:"host"`
	IsBootstrap bool   `json:"isBootstrap"`
}

// NodeAddPeerRes is the response of POST /node/peers.
type NodeAddPeerRes struct {
	Success bool   `json:"success"`
	Error   string `json:"error"`
}
// balancesHandler serves GET /balances with the current balance map and the
// hash of the latest block. Any other method yields an error response.
func balancesHandler(w http.ResponseWriter, r *http.Request, node *Node) {
	if r.Method != http.MethodGet {
		writeErrRes(w, fmt.Errorf("only GET is supported"))
		return
	}
	writeRes(w, BalancesRes{
		Balances: node.state.Balances,
		Hash:     node.state.LatestBlockHash(),
	})
}
// transactionsHandler accepts POST /transactions, builds a transaction from
// the request body and queues it on the node. Any other method yields an
// error response.
func transactionsHandler(w http.ResponseWriter, r *http.Request, node *Node) {
	if r.Method != http.MethodPost {
		writeErrRes(w, fmt.Errorf("only POST is supported"))
		return
	}
	var reqObject core.TransactionData
	if err := readReqBody(r, &reqObject); err != nil {
		writeErrRes(w, err)
		return
	}
	txn := core.NewTransaction(
		reqObject.From,
		reqObject.To,
		reqObject.Value,
		reqObject.Data,
	)
	// TODO : validate txn for balance and throw error
	if err := node.AddTransaction(txn); err != nil {
		writeErrRes(w, err)
		return
	}
	writeRes(w, txn)
}
// nodeStatusHandler serves GET /node/status with the latest block info, the
// node's known peers and its pending transactions. Any other method yields an
// error response.
func nodeStatusHandler(w http.ResponseWriter, r *http.Request, n *Node) {
	if r.Method != http.MethodGet {
		writeErrRes(w, fmt.Errorf("only GET is supported"))
		return
	}
	latest := n.state.LatestBlock()
	writeRes(w, NodeStatusRes{
		Hash:        n.state.LatestBlockHash(),
		Number:      latest.Header.Number,
		Timestamp:   latest.Header.Timestamp,
		KnownPeers:  n.knownPeers,
		PendingTxns: txnMapToArray(n.miner.pendingTxns),
	})
}
// nodeSyncHandler serves GET /node/sync, returning every block after the
// block hash supplied in the fromBlock query parameter. Any other method
// yields an error response.
func nodeSyncHandler(w http.ResponseWriter, r *http.Request, node *Node) {
	if r.Method != http.MethodGet {
		writeErrRes(w, fmt.Errorf("only GET is supported"))
		return
	}
	var hash common.Hash
	if err := hash.UnmarshalText([]byte(r.URL.Query().Get(QueryParamFromBlock))); err != nil {
		writeErrRes(w, err)
		return
	}
	blocks, err := node.state.GetBlocksAfter(hash)
	if err != nil {
		writeErrRes(w, err)
		return
	}
	writeRes(w, NodeSyncRes{Blocks: blocks})
}
// nodePeersHandler accepts POST /node/peers, registering the peer described
// in the request body with this node. Any other method yields an error
// response.
func nodePeersHandler(w http.ResponseWriter, r *http.Request, n *Node) {
	if r.Method != http.MethodPost {
		writeErrRes(w, fmt.Errorf("only POST is supported"))
		return
	}
	var napq NodeAddPeerReq
	if err := readReqBody(r, &napq); err != nil {
		writeErrRes(w, err)
		return
	}
	peer := NewPeerNode(napq.Host, napq.IsBootstrap, true)
	n.AddPeer(peer)
	log.Println("Added new peer", peer.Host)
	writeRes(w, NodeAddPeerRes{Success: true})
}
|
package exec
import (
"context"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/athena"
"github.com/aws/aws-sdk-go/service/athena/athenaiface"
"github.com/skatsuta/athenai/internal/stub"
"github.com/stretchr/testify/assert"
)
// testWaitInterval shortens the poll interval so Wait-based tests finish fast.
const testWaitInterval = 10 * time.Millisecond

// cfg is a fully-populated query configuration shared by most tests.
var cfg = &QueryConfig{
	Database: "sampledb",
	Location: "s3://bucket/prefix/",
	Encrypt:  "SSE_KMS",
	KMS:      "test-kms-key",
}
// newQuery builds a Query like NewQuery but with the short wait interval used
// throughout these tests so polling loops finish quickly.
func newQuery(client athenaiface.AthenaAPI, cfg *QueryConfig, query string) *Query {
	return NewQuery(client, cfg, query).WithWaitInterval(testWaitInterval)
}
// TestStart verifies that Start submits the query and records the execution
// ID returned by the stubbed Athena client on the Query.
func TestStart(t *testing.T) {
	tests := []struct {
		id    string
		query string
		want  string // expected execution ID stored on the Query
	}{
		{
			id:    "TestStart1",
			query: "SELECT * FROM elb_logs",
			want:  "TestStart1",
		},
	}
	for _, tt := range tests {
		client := stub.NewStartQueryExecutionStub(&stub.Result{ID: tt.id, Query: tt.query})
		q := NewQuery(client, cfg, tt.query)
		err := q.Start(context.Background())
		assert.NoError(t, err)
		assert.Equal(t, tt.want, q.id, "Query: %q", tt.query)
	}
}
// TestStartError verifies that Start surfaces Athena's
// InvalidRequestException for empty or invalid queries.
func TestStartError(t *testing.T) {
	tests := []struct {
		query string
		want  string // error-code substring expected in the returned error
	}{
		{
			query: "",
			want:  athena.ErrCodeInvalidRequestException,
		},
		{
			query: "SELET * FROM test",
			want:  athena.ErrCodeInvalidRequestException,
		},
		{
			query: "CREATE INDEX",
			want:  athena.ErrCodeInvalidRequestException,
		},
	}
	for _, tt := range tests {
		client := stub.NewStartQueryExecutionStub(&stub.Result{Query: tt.query})
		q := NewQuery(client, cfg, tt.query)
		err := q.Start(context.Background())
		if assert.Error(t, err) {
			assert.Contains(t, err.Error(), tt.want, "Query: %q", tt.query)
		}
	}
}
// TestStartKMSKeyNotProvidedError verifies that starting a query configured
// for SSE_KMS encryption without a KMS key ID fails with a descriptive error.
func TestStartKMSKeyNotProvidedError(t *testing.T) {
	const (
		wantMsg = "KMS Customer Master Key ID is null or empty"
		query   = "SELECT * FROM test"
	)
	cfg := &QueryConfig{Encrypt: "SSE_KMS"}
	stubClient := stub.NewStartQueryExecutionStub(&stub.Result{Query: query})
	err := NewQuery(stubClient, cfg, query).Start(context.Background())
	if assert.Error(t, err) {
		assert.Contains(t, err.Error(), wantMsg, "Cfg: %#v, Query: %q", cfg, query)
	}
}
// TestWait verifies that Wait polls the query execution until it reaches the
// SUCCEEDED state and populates the execution info.
func TestWait(t *testing.T) {
	tests := []struct {
		id     string
		query  string
		status string
	}{
		{
			id:     "TestWait_SELECT",
			query:  "SELECT * FROM cloudfront_logs",
			status: athena.QueryExecutionStateSucceeded,
		},
		{
			id:     "TestWait_SHOW_TABLES",
			query:  "SHOW TABLES",
			status: athena.QueryExecutionStateSucceeded,
		},
	}
	for _, tt := range tests {
		client := stub.NewGetQueryExecutionStub(&stub.Result{ID: tt.id, Query: tt.query})
		// newQuery uses a short wait interval so polling completes quickly.
		q := newQuery(client, cfg, tt.query)
		q.id = tt.id
		err := q.Wait(context.Background())
		assert.NoError(t, err)
		assert.NotNil(t, q.Info(), "ID: %s, Query: %s", tt.id, tt.query)
		got := aws.StringValue(q.Info().Status.State)
		assert.Equal(t, tt.status, got, "ID: %s, Query: %s", tt.id, tt.query)
	}
}
// TestWaitFailedError verifies that Wait reports an error when the query
// reaches a FAILED final state or when the API call itself errors out.
func TestWaitFailedError(t *testing.T) {
	tests := []struct {
		id     string
		query  string
		errMsg string
		want   string
	}{
		{
			id:    "",
			query: "",
			want:  "", // Just any error is ok
		},
		{
			id:     "TestWaitFailedError_APIError",
			query:  "SELECT * FROM test_wait_error_table",
			errMsg: athena.ErrCodeInternalServerException,
			want:   athena.ErrCodeInternalServerException,
		},
		{
			id:    "TestWaitFailedError_QueryFailed",
			query: "SELECT * FROM test_wait_error_table",
			want:  "failed",
		},
	}
	for _, tt := range tests {
		// FinalState forces the stub to report the query as failed.
		client := stub.NewGetQueryExecutionStub(&stub.Result{
			ID:         tt.id,
			Query:      tt.query,
			FinalState: stub.Failed,
			ErrMsg:     tt.errMsg,
		})
		q := newQuery(client, cfg, tt.query)
		q.id = tt.id
		err := q.Wait(context.Background())
		assert.Error(t, err)
		assert.Contains(t, err.Error(), tt.want, "ID: %s, Query: %q, ErrMsg: %q", tt.id, tt.query, tt.errMsg)
	}
}
// TestGetResults verifies that GetResults accumulates rows across result
// pages: the stub returns 5 rows per page and MaxPages pages, so the expected
// total is 5 * maxPages rows.
func TestGetResults(t *testing.T) {
	tests := []struct {
		id       string
		query    string
		info     *athena.QueryExecution
		maxPages int
		numRows  int
	}{
		{
			id:    "TestGetResults1",
			query: "SELECT * FROM cloudfront_logs LIMIT 10",
			info: &athena.QueryExecution{
				Status: &athena.QueryExecutionStatus{
					State: aws.String(athena.QueryExecutionStateSucceeded),
				},
			},
			maxPages: 2,
			numRows:  10,
		},
	}
	for _, tt := range tests {
		client := stub.NewGetQueryResultsStub(&stub.Result{
			ID:    tt.id,
			Query: tt.query,
			ResultSet: athena.ResultSet{
				ResultSetMetadata: &athena.ResultSetMetadata{},
				Rows:              []*athena.Row{{}, {}, {}, {}, {}}, // 5 empty rows per page
			},
		})
		client.MaxPages = tt.maxPages
		q := newQuery(client, cfg, tt.query)
		q.id = tt.id
		q.info = tt.info
		err := q.GetResults(context.Background())
		assert.NoError(t, err)
		assert.Len(t, q.rs.Rows, tt.numRows, "Query: %s, Id: %s", tt.query, tt.id)
	}
}
// TestFromQxGetResults verifies that a Query constructed from an existing
// QueryExecution (NewQueryFromQx) fetches paginated results correctly,
// accumulating 5 rows per page across maxPages pages.
func TestFromQxGetResults(t *testing.T) {
	tests := []struct {
		qx       *athena.QueryExecution
		maxPages int
		numRows  int
	}{
		{
			qx: &athena.QueryExecution{
				QueryExecutionId: aws.String("TestFromQxGetResults1"),
				Query:            aws.String("SELECT * FROM cloudfront_logs LIMIT 10"),
				Status: &athena.QueryExecutionStatus{
					State: aws.String(athena.QueryExecutionStateSucceeded),
				},
			},
			maxPages: 2,
			numRows:  10,
		},
	}
	for _, tt := range tests {
		client := stub.NewGetQueryResultsStub(&stub.Result{
			ID:    aws.StringValue(tt.qx.QueryExecutionId),
			Query: aws.StringValue(tt.qx.Query),
			ResultSet: athena.ResultSet{
				ResultSetMetadata: &athena.ResultSetMetadata{},
				Rows:              []*athena.Row{{}, {}, {}, {}, {}}, // 5 empty rows per page
			},
		})
		client.MaxPages = tt.maxPages
		q := NewQueryFromQx(client, cfg, tt.qx).WithWaitInterval(testWaitInterval)
		err := q.GetResults(context.Background())
		assert.NoError(t, err)
		assert.Len(t, q.rs.Rows, tt.numRows, "Qx: %#v", tt.qx)
	}
}
// TestGetResultsError ensures GetResults propagates API errors, such as an
// InvalidRequestException for an unknown query execution ID.
func TestGetResultsError(t *testing.T) {
	testCases := []struct {
		id     string
		query  string
		errMsg string
	}{
		{
			id:     "no_existent_id",
			query:  "SELECT * FROM test_get_result_errors",
			errMsg: athena.ErrCodeInvalidRequestException,
		},
	}
	for _, tc := range testCases {
		client := stub.NewGetQueryResultsStub(&stub.Result{
			ID:     tc.id,
			Query:  tc.query,
			ErrMsg: tc.errMsg,
		})
		q := newQuery(client, cfg, tc.query)
		q.id = tc.id
		assert.Error(t, q.GetResults(context.Background()))
	}
}
// TestRun exercises the full Start -> Wait -> GetResults pipeline via Run,
// checking that rows from all result pages are accumulated (5 rows per page
// across maxPages pages).
func TestRun(t *testing.T) {
	tests := []struct {
		id          string
		query       string
		rs          athena.ResultSet
		maxPages    int
		wantNumRows int
	}{
		{
			id:    "TestRun1",
			query: "SELECT * FROM cloudfront_logs LIMIT 5",
			rs: athena.ResultSet{
				ResultSetMetadata: &athena.ResultSetMetadata{},
				Rows:              []*athena.Row{{}, {}, {}, {}, {}}, // 5 empty rows per page
			},
			maxPages:    2,
			wantNumRows: 10,
		},
	}
	for _, tt := range tests {
		client := stub.NewClient(&stub.Result{
			ID:        tt.id,
			Query:     tt.query,
			ResultSet: tt.rs,
		})
		client.MaxPages = tt.maxPages
		q := newQuery(client, cfg, tt.query)
		r, err := q.Run(context.Background())
		assert.NoError(t, err)
		assert.Len(t, r.rs.Rows, tt.wantNumRows, "Query: %#v, Id: %#v", tt.query, tt.id)
	}
}
// TestRunCanceledError verifies that Run aborts with a cancellation error and
// a nil result when the supplied context is already canceled.
func TestRunCanceledError(t *testing.T) {
	testCases := []struct {
		id    string
		query string
		want  string
	}{
		{
			id:    "TestRunCanceledError",
			query: "SELECT * FROM test_run_canceled_error_table",
			want:  "canceled",
		},
	}
	for _, tc := range testCases {
		client := stub.NewClient(&stub.Result{ID: tc.id, Query: tc.query})
		q := newQuery(client, cfg, tc.query)
		q.id = tc.id
		// Cancel the context up front so Run fails immediately.
		ctx, cancel := context.WithCancel(context.Background())
		cancel()
		r, err := q.Run(ctx)
		assert.Error(t, err)
		assert.Nil(t, r)
		assert.Contains(t, err.Error(), tc.want, "ID: %s, Query: %q", tc.id, tc.query)
	}
}
|
/*
Copyright 2019 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cache
import (
"context"
"os"
"testing"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/config"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/graph"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/platform"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/schema/latest"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/util"
"github.com/GoogleContainerTools/skaffold/v2/testutil"
)
// stubDependencyLister returns a DependencyLister that always reports the
// given fixed dependency list, regardless of the artifact.
func stubDependencyLister(dependencies []string) DependencyLister {
	return func(_ context.Context, _ *latest.Artifact) ([]string, error) {
		return dependencies, nil
	}
}
// mockCacheHasher is a deterministic stand-in for the file hasher: it echoes
// the input back as its "hash", and reports os.ErrNotExist for the sentinel
// value "not-found" so tests can simulate a missing file.
var mockCacheHasher = func(s string) (string, error) {
	switch s {
	case "not-found":
		return "", os.ErrNotExist
	default:
		return s, nil
	}
}
// fakeArtifactConfig derives a synthetic config string for an artifact:
// docker artifacts are keyed by their target stage, everything else maps to
// the empty string.
var fakeArtifactConfig = func(a *latest.Artifact) (string, error) {
	docker := a.ArtifactType.DockerArtifact
	if docker == nil {
		return "", nil
	}
	return "docker/target=" + docker.Target, nil
}
const Dockerfile = "Dockerfile"
// TestGetHashForArtifact checks artifact hashing against golden hash values:
// identical inputs (including dependencies listed in a different order) hash
// the same, while different docker targets, build args, or run modes produce
// distinct hashes. Files reported as missing by the hasher are skipped.
func TestGetHashForArtifact(t *testing.T) {
	tests := []struct {
		description  string
		dependencies []string
		artifact     *latest.Artifact
		mode         config.RunMode
		platforms    platform.Resolver
		expected     string
	}{
		{
			description:  "hash for artifact",
			dependencies: []string{"a", "b"},
			artifact:     &latest.Artifact{},
			mode:         config.RunModes.Dev,
			expected:     "d99ab295a682897269b4db0fe7c136ea1ecd542150fa224ee03155b4e3e995d9",
		},
		{
			// "not-found" is dropped by mockCacheHasher, so the hash matches
			// the {"a", "b"} case above.
			description:  "ignore file not found",
			dependencies: []string{"a", "b", "not-found"},
			artifact:     &latest.Artifact{},
			mode:         config.RunModes.Dev,
			expected:     "d99ab295a682897269b4db0fe7c136ea1ecd542150fa224ee03155b4e3e995d9",
		},
		{
			// Dependency order must not affect the hash.
			description:  "dependencies in different orders",
			dependencies: []string{"b", "a"},
			artifact:     &latest.Artifact{},
			mode:         config.RunModes.Dev,
			expected:     "d99ab295a682897269b4db0fe7c136ea1ecd542150fa224ee03155b4e3e995d9",
		},
		{
			description: "no dependencies",
			artifact:    &latest.Artifact{},
			mode:        config.RunModes.Dev,
			expected:    "7c077ca2308714493d07163e1033c4282bd869ff6d477b3e77408587f95e2930",
		},
		{
			description: "docker target",
			artifact: &latest.Artifact{
				ArtifactType: latest.ArtifactType{
					DockerArtifact: &latest.DockerArtifact{
						Target: "target",
					},
				},
			},
			mode:     config.RunModes.Dev,
			expected: "f947b5aad32734914aa2dea0ec95bceff257037e6c2a529007183c3f21547eae",
		},
		{
			description: "different docker target",
			artifact: &latest.Artifact{
				ArtifactType: latest.ArtifactType{
					DockerArtifact: &latest.DockerArtifact{
						Target: "other",
					},
				},
			},
			mode:     config.RunModes.Dev,
			expected: "09b366c764d0e39f942283cc081d5522b9dde52e725376661808054e3ed0177f",
		},
		{
			description:  "build args",
			dependencies: []string{"a", "b"},
			artifact: &latest.Artifact{
				ArtifactType: latest.ArtifactType{
					DockerArtifact: &latest.DockerArtifact{
						BuildArgs: map[string]*string{
							"key": util.Ptr("value"),
						},
					},
				},
			},
			mode:     config.RunModes.Dev,
			expected: "f3f710a4ec1d1bfb2a9b8ef2b4b7cc5f254102d17095a71872821b396953a4ce",
		},
		{
			description:  "buildpack in dev mode",
			dependencies: []string{"a", "b"},
			artifact: &latest.Artifact{
				ArtifactType: latest.ArtifactType{
					BuildpackArtifact: &latest.BuildpackArtifact{},
				},
			},
			mode:     config.RunModes.Dev,
			expected: "d99ab295a682897269b4db0fe7c136ea1ecd542150fa224ee03155b4e3e995d9",
		},
		{
			// Debug mode injects debug env/args, so the hash differs from dev.
			description:  "buildpack in debug mode",
			dependencies: []string{"a", "b"},
			artifact: &latest.Artifact{
				ArtifactType: latest.ArtifactType{
					BuildpackArtifact: &latest.BuildpackArtifact{},
				},
			},
			mode:     config.RunModes.Debug,
			expected: "c3a878f799b2a6532db71683a09771af4f9d20ef5884c57642a272934e5c93ea",
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			// Swap in deterministic hash and config functions.
			t.Override(&fileHasherFunc, mockCacheHasher)
			t.Override(&artifactConfigFunc, fakeArtifactConfig)
			if test.artifact.DockerArtifact != nil {
				// Docker artifacts need a real Dockerfile on disk.
				tmpDir := t.NewTempDir()
				tmpDir.Write("./Dockerfile", "ARG SKAFFOLD_GO_GCFLAGS\nFROM foo")
				test.artifact.Workspace = tmpDir.Path(".")
				test.artifact.DockerArtifact.DockerfilePath = Dockerfile
			}
			depLister := stubDependencyLister(test.dependencies)
			actual, err := newArtifactHasher(nil, depLister, test.mode).hash(context.Background(), test.artifact, test.platforms)
			t.CheckNoError(err)
			t.CheckDeepEqual(test.expected, actual)
		})
	}
}
// TestGetHashForArtifactWithDependencies checks that an artifact's hash
// incorporates its transitive artifact dependencies: reordering declared
// dependencies keeps the hash stable, while changing a transitive
// dependency's builder or file set changes the root artifact's hash.
func TestGetHashForArtifactWithDependencies(t *testing.T) {
	tests := []struct {
		description string
		artifacts   []*latest.Artifact
		fileDeps    map[string][]string // keyed on artifact ImageName, returns a list of mock file dependencies.
		mode        config.RunMode
		expected    string
	}{
		{
			description: "hash for artifact with two dependencies",
			artifacts: []*latest.Artifact{
				{ImageName: "img1", Dependencies: []*latest.ArtifactDependency{{ImageName: "img2"}, {ImageName: "img3"}}},
				{ImageName: "img2", Dependencies: []*latest.ArtifactDependency{{ImageName: "img4"}}, ArtifactType: latest.ArtifactType{DockerArtifact: &latest.DockerArtifact{Target: "target2"}}},
				{ImageName: "img3", ArtifactType: latest.ArtifactType{DockerArtifact: &latest.DockerArtifact{Target: "target3"}}},
				{ImageName: "img4", ArtifactType: latest.ArtifactType{DockerArtifact: &latest.DockerArtifact{Target: "target4"}}},
			},
			fileDeps: map[string][]string{"img1": {"a"}, "img2": {"b"}, "img3": {"c"}, "img4": {"d"}},
			mode:     config.RunModes.Dev,
			expected: "ccd159a9a50853f89ab6784530b58d658a0b349c92828eba335f1074f9a63bb3",
		},
		{
			// Same graph with img1's dependency list reordered: hash unchanged.
			description: "hash for artifact with two dependencies in different order",
			artifacts: []*latest.Artifact{
				{ImageName: "img1", Dependencies: []*latest.ArtifactDependency{{ImageName: "img3"}, {ImageName: "img2"}}},
				{ImageName: "img2", Dependencies: []*latest.ArtifactDependency{{ImageName: "img4"}}, ArtifactType: latest.ArtifactType{DockerArtifact: &latest.DockerArtifact{Target: "target2"}}},
				{ImageName: "img3", ArtifactType: latest.ArtifactType{DockerArtifact: &latest.DockerArtifact{Target: "target3"}}},
				{ImageName: "img4", ArtifactType: latest.ArtifactType{DockerArtifact: &latest.DockerArtifact{Target: "target4"}}},
			},
			fileDeps: map[string][]string{"img1": {"a"}, "img2": {"b"}, "img3": {"c"}, "img4": {"d"}},
			mode:     config.RunModes.Dev,
			expected: "ccd159a9a50853f89ab6784530b58d658a0b349c92828eba335f1074f9a63bb3",
		},
		{
			description: "hash for artifact with different dependencies (img4 builder changed)",
			artifacts: []*latest.Artifact{
				{ImageName: "img1", Dependencies: []*latest.ArtifactDependency{{ImageName: "img2"}, {ImageName: "img3"}}},
				{ImageName: "img2", Dependencies: []*latest.ArtifactDependency{{ImageName: "img4"}}, ArtifactType: latest.ArtifactType{DockerArtifact: &latest.DockerArtifact{Target: "target2"}}},
				{ImageName: "img3", ArtifactType: latest.ArtifactType{DockerArtifact: &latest.DockerArtifact{Target: "target3"}}},
				{ImageName: "img4", ArtifactType: latest.ArtifactType{BuildpackArtifact: &latest.BuildpackArtifact{Builder: "builder"}}},
			},
			fileDeps: map[string][]string{"img1": {"a"}, "img2": {"b"}, "img3": {"c"}, "img4": {"d"}},
			mode:     config.RunModes.Dev,
			expected: "26defaa1291289f40b756b83824f0549a3a9c03cca5471bd268f0ac6e499aba6",
		},
		{
			description: "hash for artifact with different dependencies (img4 files changed)",
			artifacts: []*latest.Artifact{
				{ImageName: "img1", Dependencies: []*latest.ArtifactDependency{{ImageName: "img2"}, {ImageName: "img3"}}},
				{ImageName: "img2", Dependencies: []*latest.ArtifactDependency{{ImageName: "img4"}}, ArtifactType: latest.ArtifactType{DockerArtifact: &latest.DockerArtifact{Target: "target2"}}},
				{ImageName: "img3", ArtifactType: latest.ArtifactType{DockerArtifact: &latest.DockerArtifact{Target: "target3"}}},
				{ImageName: "img4", ArtifactType: latest.ArtifactType{BuildpackArtifact: &latest.BuildpackArtifact{}}},
			},
			fileDeps: map[string][]string{"img1": {"a"}, "img2": {"b"}, "img3": {"c"}, "img4": {"e"}},
			mode:     config.RunModes.Dev,
			expected: "bab56a88d483fa97ae072b027a46681177628156839b7e390842e6243b1ac6aa",
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			// Deterministic hash/config functions for reproducible hashes.
			t.Override(&fileHasherFunc, mockCacheHasher)
			t.Override(&artifactConfigFunc, fakeArtifactConfig)
			g := graph.ToArtifactGraph(test.artifacts)
			for _, a := range test.artifacts {
				if a.DockerArtifact != nil {
					// Each docker artifact needs a real Dockerfile on disk.
					tmpDir := t.NewTempDir()
					tmpDir.Write("./Dockerfile", "ARG SKAFFOLD_GO_GCFLAGS\nFROM foo")
					a.Workspace = tmpDir.Path(".")
					a.DockerArtifact.DockerfilePath = Dockerfile
				}
			}
			depLister := func(_ context.Context, a *latest.Artifact) ([]string, error) {
				return test.fileDeps[a.ImageName], nil
			}
			// Hash the root artifact (index 0) against the full graph.
			actual, err := newArtifactHasher(g, depLister, test.mode).hash(context.Background(), test.artifacts[0], platform.Resolver{})
			t.CheckNoError(err)
			t.CheckDeepEqual(test.expected, actual)
		})
	}
}
// TestArtifactConfig verifies that two docker artifacts with different build
// targets produce different config strings from artifactConfig.
func TestArtifactConfig(t *testing.T) {
	testutil.Run(t, "", func(t *testutil.T) {
		config1, err := artifactConfig(&latest.Artifact{
			ArtifactType: latest.ArtifactType{
				DockerArtifact: &latest.DockerArtifact{
					Target: "target",
				},
			},
		})
		t.CheckNoError(err)
		config2, err := artifactConfig(&latest.Artifact{
			ArtifactType: latest.ArtifactType{
				DockerArtifact: &latest.DockerArtifact{
					Target: "other",
				},
			},
		})
		t.CheckNoError(err)
		// The target is part of the config, so the two must differ.
		if config1 == config2 {
			t.Errorf("configs should be different: [%s] [%s]", config1, config2)
		}
	})
}
// TestBuildArgs verifies that docker build args affect the artifact hash:
// the hash is stable under build-arg map reordering, differs between run
// modes, and changes when the set of build args changes.
func TestBuildArgs(t *testing.T) {
	tests := []struct {
		mode     config.RunMode
		expected string
	}{
		{
			mode:     config.RunModes.Debug,
			expected: "a8544410acafce64325abfffcb21e75efdcd575bd9f8d3be2a516125ec547651",
		},
		{
			mode:     config.RunModes.Dev,
			expected: "f5b610f4fea07461411b2ea0e2cddfd2ffc28d1baed49180f5d3acee5a18f9e7",
		},
	}
	for _, test := range tests {
		testutil.Run(t, "", func(t *testutil.T) {
			tmpDir := t.NewTempDir()
			tmpDir.Write("./Dockerfile", "ARG SKAFFOLD_GO_GCFLAGS\nFROM foo")
			artifact := &latest.Artifact{
				Workspace: tmpDir.Path("."),
				ArtifactType: latest.ArtifactType{
					DockerArtifact: &latest.DockerArtifact{
						DockerfilePath: Dockerfile,
						BuildArgs:      map[string]*string{"one": util.Ptr("1"), "two": util.Ptr("2")},
					},
				},
			}
			t.Override(&fileHasherFunc, mockCacheHasher)
			t.Override(&artifactConfigFunc, fakeArtifactConfig)
			actual, err := newArtifactHasher(nil, stubDependencyLister(nil), test.mode).hash(context.Background(), artifact, platform.Resolver{})
			t.CheckNoError(err)
			t.CheckDeepEqual(test.expected, actual)
			// Change order of buildargs
			artifact.ArtifactType.DockerArtifact.BuildArgs = map[string]*string{"two": util.Ptr("2"), "one": util.Ptr("1")}
			actual, err = newArtifactHasher(nil, stubDependencyLister(nil), test.mode).hash(context.Background(), artifact, platform.Resolver{})
			t.CheckNoError(err)
			t.CheckDeepEqual(test.expected, actual)
			// Change build args, get different hash
			artifact.ArtifactType.DockerArtifact.BuildArgs = map[string]*string{"one": util.Ptr("1")}
			actual, err = newArtifactHasher(nil, stubDependencyLister(nil), test.mode).hash(context.Background(), artifact, platform.Resolver{})
			t.CheckNoError(err)
			if actual == test.expected {
				t.Fatal("got same hash as different artifact; expected different hashes.")
			}
		})
	}
}
// TestBuildArgsEnvSubstitution verifies that environment variables expanded
// inside build-arg templates participate in the hash: changing FOO in the
// (stubbed) process environment changes the artifact hash.
func TestBuildArgsEnvSubstitution(t *testing.T) {
	testutil.Run(t, "", func(t *testutil.T) {
		// Stub the process environment and restore it when the test ends.
		original := util.OSEnviron
		defer func() { util.OSEnviron = original }()
		util.OSEnviron = func() []string {
			return []string{"FOO=bar"}
		}
		tmpDir := t.NewTempDir()
		tmpDir.Write("./Dockerfile", "ARG SKAFFOLD_GO_GCFLAGS\nFROM foo")
		artifact := &latest.Artifact{
			Workspace: tmpDir.Path("."),
			ArtifactType: latest.ArtifactType{
				DockerArtifact: &latest.DockerArtifact{
					BuildArgs:      map[string]*string{"env": util.Ptr("${{.FOO}}")},
					DockerfilePath: Dockerfile,
				},
			},
		}
		t.Override(&fileHasherFunc, mockCacheHasher)
		t.Override(&artifactConfigFunc, fakeArtifactConfig)
		depLister := stubDependencyLister([]string{"graph"})
		hash1, err := newArtifactHasher(nil, depLister, config.RunModes.Build).hash(context.Background(), artifact, platform.Resolver{})
		t.CheckNoError(err)
		// Make sure hash is different with a new env
		util.OSEnviron = func() []string {
			return []string{"FOO=baz"}
		}
		hash2, err := newArtifactHasher(nil, depLister, config.RunModes.Build).hash(context.Background(), artifact, platform.Resolver{})
		t.CheckNoError(err)
		if hash1 == hash2 {
			t.Fatal("hashes are the same even though build arg changed")
		}
	})
}
// TestCacheHasher verifies that the artifact hash changes when a dependency
// file's name or contents change, and stays the same when nothing changes.
func TestCacheHasher(t *testing.T) {
	tests := []struct {
		description   string
		differentHash bool // whether the mutation should change the hash
		newFilename   string
		update        func(oldFile string, folder *testutil.TempDir)
	}{
		{
			description:   "change filename",
			differentHash: true,
			newFilename:   "newfoo",
			update: func(oldFile string, folder *testutil.TempDir) {
				folder.Rename(oldFile, "newfoo")
			},
		},
		{
			description:   "change file contents",
			differentHash: true,
			update: func(oldFile string, folder *testutil.TempDir) {
				folder.Write(oldFile, "newcontents")
			},
		},
		{
			description:   "change both",
			differentHash: true,
			newFilename:   "newfoo",
			update: func(oldFile string, folder *testutil.TempDir) {
				folder.Rename(oldFile, "newfoo")
				folder.Write(oldFile, "newcontents")
			},
		},
		{
			description:   "change nothing",
			differentHash: false,
			update:        func(oldFile string, folder *testutil.TempDir) {},
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			originalFile := "foo"
			originalContents := "contents"
			tmpDir := t.NewTempDir().
				Write(originalFile, originalContents)
			path := originalFile
			depLister := stubDependencyLister([]string{tmpDir.Path(originalFile)})
			// Hash before the mutation...
			oldHash, err := newArtifactHasher(nil, depLister, config.RunModes.Build).hash(context.Background(), &latest.Artifact{}, platform.Resolver{})
			t.CheckNoError(err)
			test.update(originalFile, tmpDir)
			if test.newFilename != "" {
				path = test.newFilename
			}
			// ...and after, then compare per the expectation.
			depLister = stubDependencyLister([]string{tmpDir.Path(path)})
			newHash, err := newArtifactHasher(nil, depLister, config.RunModes.Build).hash(context.Background(), &latest.Artifact{}, platform.Resolver{})
			t.CheckNoError(err)
			t.CheckFalse(test.differentHash && oldHash == newHash)
			t.CheckFalse(!test.differentHash && oldHash != newHash)
		})
	}
}
// TestHashBuildArgs verifies the build-arg/env extraction that feeds the
// artifact hash, across builder types (docker, kaniko, buildpacks, custom)
// and run modes. Debug mode is expected to inject the Go debug flags
// (SKAFFOLD_GO_GCFLAGS for docker, GOOGLE_GOGCFLAGS for buildpacks).
func TestHashBuildArgs(t *testing.T) {
	tests := []struct {
		description  string
		artifactType latest.ArtifactType
		expected     []string
		mode         config.RunMode
	}{
		{
			description: "docker artifact with build args for dev",
			artifactType: latest.ArtifactType{
				DockerArtifact: &latest.DockerArtifact{
					BuildArgs: map[string]*string{
						"foo": util.Ptr("bar"),
					},
				},
			},
			mode:     config.RunModes.Dev,
			expected: []string{"foo=bar"},
		}, {
			description: "docker artifact with build args for debug",
			artifactType: latest.ArtifactType{
				DockerArtifact: &latest.DockerArtifact{
					BuildArgs: map[string]*string{
						"foo": util.Ptr("bar"),
					},
				},
			},
			mode:     config.RunModes.Debug,
			expected: []string{"SKAFFOLD_GO_GCFLAGS=all=-N -l", "foo=bar"},
		}, {
			description: "docker artifact without build args for debug",
			artifactType: latest.ArtifactType{
				DockerArtifact: &latest.DockerArtifact{},
			},
			mode:     config.RunModes.Debug,
			expected: []string{"SKAFFOLD_GO_GCFLAGS=all=-N -l"},
		}, {
			// No build args and dev mode: nothing to hash.
			description: "docker artifact without build args for dev",
			artifactType: latest.ArtifactType{
				DockerArtifact: &latest.DockerArtifact{},
			},
			mode: config.RunModes.Dev,
		}, {
			description: "kaniko artifact with build args",
			artifactType: latest.ArtifactType{
				KanikoArtifact: &latest.KanikoArtifact{
					BuildArgs: map[string]*string{},
				},
			},
			expected: nil,
		}, {
			description: "kaniko artifact without build args",
			artifactType: latest.ArtifactType{
				KanikoArtifact: &latest.KanikoArtifact{},
			},
		}, {
			description: "buildpacks artifact with env for dev",
			artifactType: latest.ArtifactType{
				BuildpackArtifact: &latest.BuildpackArtifact{
					Env: []string{"foo=bar"},
				},
			},
			mode:     config.RunModes.Dev,
			expected: []string{"foo=bar"},
		}, {
			description: "buildpacks artifact without env for dev",
			artifactType: latest.ArtifactType{
				BuildpackArtifact: &latest.BuildpackArtifact{},
			},
			mode: config.RunModes.Dev,
		}, {
			description: "buildpacks artifact with env for debug",
			artifactType: latest.ArtifactType{
				BuildpackArtifact: &latest.BuildpackArtifact{
					Env: []string{"foo=bar"},
				},
			},
			mode:     config.RunModes.Debug,
			expected: []string{"GOOGLE_GOGCFLAGS=all=-N -l", "foo=bar"},
		}, {
			description: "buildpacks artifact without env for debug",
			artifactType: latest.ArtifactType{
				BuildpackArtifact: &latest.BuildpackArtifact{},
			},
			mode:     config.RunModes.Debug,
			expected: []string{"GOOGLE_GOGCFLAGS=all=-N -l"},
		}, {
			description: "custom artifact, dockerfile dependency, with build args",
			artifactType: latest.ArtifactType{
				CustomArtifact: &latest.CustomArtifact{
					Dependencies: &latest.CustomDependencies{
						Dockerfile: &latest.DockerfileDependency{
							BuildArgs: map[string]*string{},
						},
					},
				},
			},
			expected: nil,
		}, {
			description: "custom artifact, no dockerfile dependency",
			artifactType: latest.ArtifactType{
				CustomArtifact: &latest.CustomArtifact{
					Dependencies: &latest.CustomDependencies{},
				},
			},
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			a := &latest.Artifact{
				ArtifactType: test.artifactType,
			}
			// Docker and kaniko artifacts need a real Dockerfile on disk so
			// hashBuildArgs can evaluate the ARG declarations.
			if test.artifactType.DockerArtifact != nil {
				tmpDir := t.NewTempDir()
				tmpDir.Write("./Dockerfile", "ARG SKAFFOLD_GO_GCFLAGS\nFROM foo")
				a.Workspace = tmpDir.Path(".")
				a.ArtifactType.DockerArtifact.DockerfilePath = Dockerfile
			}
			if test.artifactType.KanikoArtifact != nil {
				tmpDir := t.NewTempDir()
				tmpDir.Write("./Dockerfile", "ARG SKAFFOLD_GO_GCFLAGS\nFROM foo")
				a.Workspace = tmpDir.Path(".")
				a.ArtifactType.KanikoArtifact.DockerfilePath = Dockerfile
			}
			actual, err := hashBuildArgs(a, test.mode)
			t.CheckNoError(err)
			t.CheckDeepEqual(test.expected, actual)
		})
	}
}
|
package seaweed
import (
//"strconv"
//"path"
"fmt"
"github.com/qiaogw/pkg/config"
//"path"
"testing"
"github.com/stretchr/testify/assert"
)
// TestSeaweedfs exercises the seaweedfs filer client against a live server:
// it loads the application configuration, builds a client for the `test`
// section, and deletes a bucket directory. The remaining filer operations
// are kept below, commented out, as a manual smoke-test script.
func TestSeaweedfs(t *testing.T) {
	err := config.LoadConfig()
	// Fail early if the configuration cannot be loaded — the client below
	// depends on it. (Previously this error was silently overwritten.)
	assert.Nil(t, err)
	r := NewSeaweedfs(`test`)
	fmt.Println(r.baseURL)
	err = r.DeleteDir(`/buckets/buck1/s3data`)
	assert.Nil(t, err)
	//
	//filename := "config.go"
	//fpath := `/js11/test1/1config.go`
	////
	//f, err := os.Open(filename)
	//assert.Nil(t, err)
	//
	//defer f.Close()
	//
	//fi, err := f.Stat()
	//assert.Nil(t, err)
	//
	//_, _, err = r.Put(fpath, f, fi.Size())
	//assert.Nil(t, err)
	//
	//_, err = r.PathInfo(`/js11`)
	//assert.Nil(t, err)
	////fmt.Println(pi)
	//
	//err = r.Rename(fpath, `/github/config.go`)
	//assert.Nil(t, err)
	////
	//fr, _, err := r.Get(fpath)
	//assert.Nil(t, err)
	//assert.NotZero(t, fr)
	//
	//info, err := r.FilerInfo(fpath)
	//assert.Nil(t, err)
	//fmt.Println(info)
	//url := r.FileDir(fpath)
	////fmt.Println(url)
	//
	//url = r.URLDir(fpath)
	//fmt.Println(url)
	//err = r.Delete(fpath)
	//assert.Nil(t, err)
	//
	//err = r.DeleteDir(`/js11/test1`)
	//assert.Nil(t, err)
	//err = r.DeleteDir(`/js11`)
	//assert.Nil(t, err)
}
|
package protocol
// Machine defines on which machine "Job" is gonna be handled.
// Machine defines on which machine a "Job" is going to be handled.
type Machine struct {
	// Provider specifies the machine provider: one of [local, ec2, gce, k8s].
	Provider string `json:"provider" yaml:"provider"`
	// CPU specifies how many CPU cores are required for the "Job".
	CPU int `json:"cpu,omitempty" yaml:"cpu,omitempty"`
	// Memory specifies how much memory is required for the "Job".
	// NOTE(review): the value is a free-form string; the expected format
	// (e.g. "512Mi" vs "512MB") is presumably interpreted by the provider —
	// confirm against the consumers of this struct.
	Memory string `json:"memory,omitempty" yaml:"memory,omitempty"`
}
|
package server
import (
"fmt"
"io"
"net"
"testing"
"time"
)
// TestNewNetServer starts the TCP server and drives it with a client that
// sends a "set" command followed by a "get" command, printing each framed
// response.
//
// The client runs in a goroutine (after a short delay so the server can bind
// to :8789) and reports failures with t.Errorf, which — unlike t.Fatalf — is
// safe to call from a non-test goroutine. Note that s.Start() blocks; the
// test relies on the test binary's overall timeout to terminate.
func TestNewNetServer(t *testing.T) {
	// roundTrip packs and sends one command over conn, then reads back and
	// prints the server's framed response (4-byte length header + payload).
	// Errors are reported instead of being silently swallowed, so a broken
	// server no longer lets the test pass vacuously.
	roundTrip := func(conn net.Conn, cmd string) {
		message := &Message{
			Data: []byte(cmd),
		}
		message.DataLen = uint32(len(message.Data))
		sendData, err := Pack(message)
		if err != nil {
			t.Errorf("pack error: %v", err)
			return
		}
		if _, err = conn.Write(sendData); err != nil {
			t.Errorf("send error: %v", err)
			return
		}
		// Read the fixed-size length header first.
		headData := make([]byte, 4)
		if _, err := io.ReadFull(conn, headData); err != nil {
			t.Errorf("read header error: %v", err)
			return
		}
		// Unpack the header to learn the payload length.
		msg, err := Unpack(headData)
		if err != nil {
			t.Errorf("unpack error: %v", err)
			return
		}
		var data []byte
		if msg.GetDataLen() > 0 {
			data = make([]byte, msg.GetDataLen())
			if _, err = io.ReadFull(conn, data); err != nil {
				t.Errorf("read payload error: %v", err)
				return
			}
		}
		msg.SetData(data)
		fmt.Println(string(msg.Data))
	}
	go func() {
		// Give the server a moment to start listening.
		time.Sleep(1 * time.Second)
		conn, err := net.Dial("tcp", "localhost:8789")
		if err != nil {
			t.Errorf("error dial: %v", err)
			return
		}
		defer conn.Close()
		roundTrip(conn, "set fwt llh")
		roundTrip(conn, "get fwt")
	}()
	s := NewServer()
	s.Start()
}
|
package internal
import (
"context"
"errors"
"io"
"io/ioutil"
"strings"
"testing"
"github.com/docker/docker/api/types"
"github.com/packethost/pkg/log"
"github.com/stretchr/testify/assert"
)
// setupTestLogger initializes the packethost log package for a test and
// aborts the test immediately if the logger cannot be constructed.
func setupTestLogger(t *testing.T) log.Logger {
	t.Helper()
	logger, err := log.Init("github.com/tinkerbell/tink")
	if err != nil {
		t.Fatal(err)
	}
	return logger
}
// imagePullerMock satisfies the image-pull interface consumed by pullImage,
// returning a canned reader and error instead of talking to a Docker daemon.
type imagePullerMock struct {
	stringReadCloser io.ReadCloser // body returned from ImagePull
	imagePullErr error // error returned from ImagePull
}

// ImagePull returns the mock's canned reader and error, ignoring all arguments.
func (d *imagePullerMock) ImagePull(_ context.Context, _ string, _ types.ImagePullOptions) (io.ReadCloser, error) {
	return d.stringReadCloser, d.imagePullErr
}
// TestPullImageAnyFailure covers pullImage against a mocked Docker client:
// a clean pull, a hard failure from ImagePull itself, and a pull whose
// streamed status JSON carries an embedded error message.
func TestPullImageAnyFailure(t *testing.T) {
	testCases := []struct {
		testName         string
		testString       string
		testImagePullErr error
		testErr          error
	}{
		{
			testName:   "success",
			testString: `{"status": "hello","error":""}{"status":"world","error":""}`,
		},
		{
			testName:         "fail",
			testString:       `{"error": ""}`,
			testImagePullErr: errors.New("Tested, failure of the image pull"),
			testErr:          errors.New("DOCKER PULL: Tested, failure of the image pull"),
		},
		{
			testName:   "fail_partial",
			testString: `{"status": "hello","error":""}{"status":"world","error":"Tested, failure of No space left on device"}`,
			testErr:    errors.New("DOCKER PULL: Tested, failure of No space left on device"),
		},
	}
	for _, tc := range testCases {
		t.Run(tc.testName, func(t *testing.T) {
			rcon := NewRegistryConnDetails("test", "testUser", "testPwd", setupTestLogger(t))
			cli := &imagePullerMock{
				stringReadCloser: ioutil.NopCloser(strings.NewReader(tc.testString)),
				imagePullErr:     tc.testImagePullErr,
			}
			err := rcon.pullImage(context.Background(), cli, tc.testName)
			if tc.testErr == nil {
				assert.NoError(t, err)
			} else {
				assert.Equal(t, err.Error(), tc.testErr.Error())
			}
		})
	}
}
|
package connectioninfo
import (
"context"
"testing"
dynatracev1beta1 "github.com/Dynatrace/dynatrace-operator/src/api/v1beta1"
"github.com/Dynatrace/dynatrace-operator/src/dtclient"
"github.com/Dynatrace/dynatrace-operator/src/scheme"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
// Shared fixture values for the connection-info reconciler tests.
const (
	testName = "test-name"
	testNamespace = "test-namespace"
	testTenantToken = "test-token"
	testTenantUUID = "test-uuid"
	testTenantEndpoints = "test-endpoints"
	// testOutdated marks pre-existing secret/configmap data that should be
	// replaced once the API cache timestamps are reset.
	testOutdated = "outdated"
)
// TestReconcile_ActivegateSecret checks that the reconciler creates, updates,
// and caches the ActiveGate tenant secret.
//
// NOTE: the subtests share one dynakube object and mutate its status
// (ResetCachedTimestamps), so the subtest order is significant — do not
// reorder them.
func TestReconcile_ActivegateSecret(t *testing.T) {
	dynakube := &dynatracev1beta1.DynaKube{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: testNamespace,
			Name: testName,
		}}
	dtc := &dtclient.MockDynatraceClient{}
	dtc.On("GetActiveGateConnectionInfo").Return(getTestActiveGateConnectionInfo(), nil)
	dtc.On("GetOneAgentConnectionInfo").Return(getTestOneAgentConnectionInfo(), nil)
	t.Run(`create activegate secret`, func(t *testing.T) {
		fakeClient := fake.NewClientBuilder().Build()
		r := NewReconciler(context.TODO(), fakeClient, fakeClient, scheme.Scheme, dynakube, dtc)
		err := r.Reconcile()
		require.NoError(t, err)
		var actualSecret corev1.Secret
		err = fakeClient.Get(context.TODO(), client.ObjectKey{Name: dynakube.ActivegateTenantSecret(), Namespace: testNamespace}, &actualSecret)
		require.NoError(t, err)
		assert.Equal(t, []byte(testTenantToken), actualSecret.Data[TenantTokenName])
	})
	t.Run(`update activegate secret`, func(t *testing.T) {
		fakeClient := fake.NewClientBuilder().WithObjects(buildActiveGateSecret(*dynakube, testOutdated)).Build()
		// Resetting the cached timestamps forces a refresh from the API.
		dynakube.Status.DynatraceApi.ResetCachedTimestamps()
		r := NewReconciler(context.TODO(), fakeClient, fakeClient, scheme.Scheme, dynakube, dtc)
		err := r.Reconcile()
		require.NoError(t, err)
		var actualSecret corev1.Secret
		err = fakeClient.Get(context.TODO(), client.ObjectKey{Name: dynakube.ActivegateTenantSecret(), Namespace: testNamespace}, &actualSecret)
		require.NoError(t, err)
		assert.Equal(t, []byte(testTenantToken), actualSecret.Data[TenantTokenName])
	})
	t.Run(`check activegate secret caches`, func(t *testing.T) {
		// Timestamps are still fresh from the previous subtest, so the
		// outdated value must be left untouched.
		fakeClient := fake.NewClientBuilder().WithObjects(buildActiveGateSecret(*dynakube, testOutdated)).Build()
		r := NewReconciler(context.TODO(), fakeClient, fakeClient, scheme.Scheme, dynakube, dtc)
		err := r.Reconcile()
		require.NoError(t, err)
		var actualSecret corev1.Secret
		err = fakeClient.Get(context.TODO(), client.ObjectKey{Name: dynakube.ActivegateTenantSecret(), Namespace: testNamespace}, &actualSecret)
		require.NoError(t, err)
		assert.Equal(t, []byte(testOutdated), actualSecret.Data[TenantTokenName])
	})
	t.Run(`up to date activegate secret`, func(t *testing.T) {
		fakeClient := fake.NewClientBuilder().WithObjects(buildActiveGateSecret(*dynakube, testTenantToken)).Build()
		r := NewReconciler(context.TODO(), fakeClient, fakeClient, scheme.Scheme, dynakube, dtc)
		err := r.Reconcile()
		require.NoError(t, err)
	})
}
// buildActiveGateSecret constructs the ActiveGate tenant secret for the given
// DynaKube, holding the supplied token value.
func buildActiveGateSecret(dynakube dynatracev1beta1.DynaKube, token string) *corev1.Secret {
	secret := corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      dynakube.ActivegateTenantSecret(),
			Namespace: testNamespace,
		},
	}
	secret.Data = map[string][]byte{TenantTokenName: []byte(token)}
	return &secret
}
// TestReconcile_ActivegateConfigMap checks that the reconciler creates,
// caches, and updates the ActiveGate connection-info ConfigMap.
//
// NOTE: the subtests share one dynakube object and mutate its status
// (ResetCachedTimestamps), so the subtest order is significant — do not
// reorder them.
func TestReconcile_ActivegateConfigMap(t *testing.T) {
	dynakube := &dynatracev1beta1.DynaKube{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: testNamespace,
			Name: testName,
		}}
	dtc := &dtclient.MockDynatraceClient{}
	dtc.On("GetActiveGateConnectionInfo").Return(getTestActiveGateConnectionInfo(), nil)
	dtc.On("GetOneAgentConnectionInfo").Return(getTestOneAgentConnectionInfo(), nil)
	t.Run(`create activegate ConfigMap`, func(t *testing.T) {
		fakeClient := fake.NewClientBuilder().Build()
		r := NewReconciler(context.TODO(), fakeClient, fakeClient, scheme.Scheme, dynakube, dtc)
		err := r.Reconcile()
		require.NoError(t, err)
		var actual corev1.ConfigMap
		err = fakeClient.Get(context.TODO(), client.ObjectKey{Name: dynakube.ActiveGateConnectionInfoConfigMapName(), Namespace: testNamespace}, &actual)
		require.NoError(t, err)
		assert.Equal(t, testTenantUUID, actual.Data[TenantUUIDName])
		assert.Equal(t, testTenantEndpoints, actual.Data[CommunicationEndpointsName])
	})
	t.Run(`test activegate ConfigMap cache is used`, func(t *testing.T) {
		// Timestamps are still fresh from the previous subtest, so the
		// outdated values must be left untouched.
		fakeClient := fake.NewClientBuilder().WithObjects(buildActiveGateConfigMap(*dynakube, testOutdated, testOutdated)).Build()
		r := NewReconciler(context.TODO(), fakeClient, fakeClient, scheme.Scheme, dynakube, dtc)
		err := r.Reconcile()
		require.NoError(t, err)
		var actual corev1.ConfigMap
		err = fakeClient.Get(context.TODO(), client.ObjectKey{Name: dynakube.ActiveGateConnectionInfoConfigMapName(), Namespace: testNamespace}, &actual)
		require.NoError(t, err)
		assert.Equal(t, testOutdated, actual.Data[TenantUUIDName])
		assert.Equal(t, testOutdated, actual.Data[CommunicationEndpointsName])
	})
	t.Run(`update activegate ConfigMap`, func(t *testing.T) {
		fakeClient := fake.NewClientBuilder().WithObjects(buildActiveGateConfigMap(*dynakube, testOutdated, testOutdated)).Build()
		// Resetting the cached timestamps forces a refresh from the API.
		dynakube.Status.DynatraceApi.ResetCachedTimestamps()
		r := NewReconciler(context.TODO(), fakeClient, fakeClient, scheme.Scheme, dynakube, dtc)
		err := r.Reconcile()
		require.NoError(t, err)
		var actual corev1.ConfigMap
		err = fakeClient.Get(context.TODO(), client.ObjectKey{Name: dynakube.ActiveGateConnectionInfoConfigMapName(), Namespace: testNamespace}, &actual)
		require.NoError(t, err)
		assert.Equal(t, testTenantUUID, actual.Data[TenantUUIDName])
		assert.Equal(t, testTenantEndpoints, actual.Data[CommunicationEndpointsName])
	})
	t.Run(`up to date activegate secret`, func(t *testing.T) {
		fakeClient := fake.NewClientBuilder().WithObjects(buildActiveGateConfigMap(*dynakube, testTenantUUID, testTenantEndpoints)).Build()
		r := NewReconciler(context.TODO(), fakeClient, fakeClient, scheme.Scheme, dynakube, dtc)
		err := r.Reconcile()
		require.NoError(t, err)
	})
}
// buildActiveGateConfigMap constructs the ActiveGate connection-info ConfigMap
// the reconciler is expected to manage, seeded with the given values.
func buildActiveGateConfigMap(dynakube dynatracev1beta1.DynaKube, tenantUUID, endpoints string) *corev1.ConfigMap {
	configMap := corev1.ConfigMap{}
	configMap.Name = dynakube.ActiveGateConnectionInfoConfigMapName()
	configMap.Namespace = testNamespace
	configMap.Data = map[string]string{
		TenantUUIDName:             tenantUUID,
		CommunicationEndpointsName: endpoints,
	}
	return &configMap
}
// TestReconcile_OneagentSecret verifies that Reconcile creates the OneAgent
// tenant-token Secret, updates it after the API cache expires, and leaves it
// untouched while the cache is fresh.
//
// NOTE(review): subtests share one dynakube instance, so cached API
// timestamps carry over between them; ordering is significant.
func TestReconcile_OneagentSecret(t *testing.T) {
	dynakube := &dynatracev1beta1.DynaKube{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: testNamespace,
			Name:      testName,
			Annotations: map[string]string{
				dynatracev1beta1.AnnotationFeatureActiveGateRawImage: "false",
			},
		}}
	dtc := &dtclient.MockDynatraceClient{}
	dtc.On("GetOneAgentConnectionInfo").Return(getTestOneAgentConnectionInfo(), nil)
	t.Run(`create oneagent secret`, func(t *testing.T) {
		fakeClient := fake.NewClientBuilder().Build()
		r := NewReconciler(context.TODO(), fakeClient, fakeClient, scheme.Scheme, dynakube, dtc)
		err := r.Reconcile()
		require.NoError(t, err)
		var actualSecret corev1.Secret
		err = fakeClient.Get(context.TODO(), client.ObjectKey{Name: dynakube.OneagentTenantSecret(), Namespace: testNamespace}, &actualSecret)
		require.NoError(t, err)
		assert.Equal(t, []byte(testTenantToken), actualSecret.Data[TenantTokenName])
	})
	t.Run(`update oneagent secret`, func(t *testing.T) {
		fakeClient := fake.NewClientBuilder().WithObjects(buildOneAgentTenantSecret(*dynakube, testOutdated)).Build()
		// responses from the Dynatrace API are cached for 15 minutes, so we need to reset the cache here and assume
		// we traveled 15 minutes into the future
		dynakube.Status.DynatraceApi.ResetCachedTimestamps()
		r := NewReconciler(context.TODO(), fakeClient, fakeClient, scheme.Scheme, dynakube, dtc)
		err := r.Reconcile()
		require.NoError(t, err)
		var actualSecret corev1.Secret
		err = fakeClient.Get(context.TODO(), client.ObjectKey{Name: dynakube.OneagentTenantSecret(), Namespace: testNamespace}, &actualSecret)
		require.NoError(t, err)
		assert.Equal(t, []byte(testTenantToken), actualSecret.Data[TenantTokenName])
	})
	t.Run(`update oneagent secret, check if caches are used`, func(t *testing.T) {
		// The cache was refreshed by the previous subtest, so the outdated
		// secret value must survive this Reconcile.
		fakeClient := fake.NewClientBuilder().WithObjects(buildOneAgentTenantSecret(*dynakube, testOutdated)).Build()
		r := NewReconciler(context.TODO(), fakeClient, fakeClient, scheme.Scheme, dynakube, dtc)
		err := r.Reconcile()
		require.NoError(t, err)
		var actualSecret corev1.Secret
		err = fakeClient.Get(context.TODO(), client.ObjectKey{Name: dynakube.OneagentTenantSecret(), Namespace: testNamespace}, &actualSecret)
		require.NoError(t, err)
		assert.Equal(t, []byte(testOutdated), actualSecret.Data[TenantTokenName])
	})
	t.Run(`up to date oneagent secret`, func(t *testing.T) {
		// Already up to date: Reconcile should simply succeed.
		fakeClient := fake.NewClientBuilder().WithObjects(buildOneAgentTenantSecret(*dynakube, testTenantToken)).Build()
		r := NewReconciler(context.TODO(), fakeClient, fakeClient, scheme.Scheme, dynakube, dtc)
		err := r.Reconcile()
		require.NoError(t, err)
	})
}
// buildOneAgentTenantSecret constructs the OneAgent tenant-token Secret the
// reconciler is expected to manage, carrying the given token value.
func buildOneAgentTenantSecret(dynakube dynatracev1beta1.DynaKube, token string) *corev1.Secret {
	secret := corev1.Secret{}
	secret.Name = dynakube.OneagentTenantSecret()
	secret.Namespace = testNamespace
	secret.Data = map[string][]byte{
		TenantTokenName: []byte(token),
	}
	return &secret
}
// TestReconcile_OneagentConfigMap verifies that Reconcile creates the OneAgent
// connection-info ConfigMap, refreshes it after the API cache expires, and
// leaves it untouched while the cache is fresh.
//
// NOTE(review): subtests share one dynakube instance, so cached API
// timestamps carry over between them; ordering is significant.
func TestReconcile_OneagentConfigMap(t *testing.T) {
	dynakube := &dynatracev1beta1.DynaKube{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: testNamespace,
			Name:      testName,
			Annotations: map[string]string{
				dynatracev1beta1.AnnotationFeatureActiveGateRawImage: "false",
			},
		}}
	dtc := &dtclient.MockDynatraceClient{}
	dtc.On("GetOneAgentConnectionInfo").Return(getTestOneAgentConnectionInfo(), nil)
	t.Run(`create oneagent ConfigMap`, func(t *testing.T) {
		fakeClient := fake.NewClientBuilder().Build()
		r := NewReconciler(context.TODO(), fakeClient, fakeClient, scheme.Scheme, dynakube, dtc)
		err := r.Reconcile()
		require.NoError(t, err)
		var actual corev1.ConfigMap
		err = fakeClient.Get(context.TODO(), client.ObjectKey{Name: dynakube.OneAgentConnectionInfoConfigMapName(), Namespace: testNamespace}, &actual)
		require.NoError(t, err)
		assert.Equal(t, testTenantUUID, actual.Data[TenantUUIDName])
		assert.Equal(t, testTenantEndpoints, actual.Data[CommunicationEndpointsName])
	})
	t.Run(`update oneagent ConfigMap`, func(t *testing.T) {
		fakeClient := fake.NewClientBuilder().WithObjects(buildOneAgentConfigMap(*dynakube, testOutdated, testOutdated)).Build()
		// Expire the cached API responses so Reconcile fetches fresh data.
		dynakube.Status.DynatraceApi.ResetCachedTimestamps()
		r := NewReconciler(context.TODO(), fakeClient, fakeClient, scheme.Scheme, dynakube, dtc)
		err := r.Reconcile()
		require.NoError(t, err)
		var actual corev1.ConfigMap
		err = fakeClient.Get(context.TODO(), client.ObjectKey{Name: dynakube.OneAgentConnectionInfoConfigMapName(), Namespace: testNamespace}, &actual)
		require.NoError(t, err)
		assert.Equal(t, testTenantUUID, actual.Data[TenantUUIDName])
		assert.Equal(t, testTenantEndpoints, actual.Data[CommunicationEndpointsName])
	})
	t.Run(`update oneagent ConfigMap, check if caches are used`, func(t *testing.T) {
		// Cache is fresh again after the previous subtest, so the outdated
		// values must survive this Reconcile.
		fakeClient := fake.NewClientBuilder().WithObjects(buildOneAgentConfigMap(*dynakube, testOutdated, testOutdated)).Build()
		r := NewReconciler(context.TODO(), fakeClient, fakeClient, scheme.Scheme, dynakube, dtc)
		err := r.Reconcile()
		require.NoError(t, err)
		var actual corev1.ConfigMap
		err = fakeClient.Get(context.TODO(), client.ObjectKey{Name: dynakube.OneAgentConnectionInfoConfigMapName(), Namespace: testNamespace}, &actual)
		require.NoError(t, err)
		assert.Equal(t, testOutdated, actual.Data[TenantUUIDName])
		assert.Equal(t, testOutdated, actual.Data[CommunicationEndpointsName])
	})
	t.Run(`up to date oneagent ConfigMap`, func(t *testing.T) {
		// Already up to date: Reconcile should simply succeed.
		fakeClient := fake.NewClientBuilder().WithObjects(buildOneAgentConfigMap(*dynakube, testTenantUUID, testTenantEndpoints)).Build()
		r := NewReconciler(context.TODO(), fakeClient, fakeClient, scheme.Scheme, dynakube, dtc)
		err := r.Reconcile()
		require.NoError(t, err)
	})
}
// buildOneAgentConfigMap constructs the OneAgent connection-info ConfigMap the
// reconciler is expected to manage, seeded with the given values.
func buildOneAgentConfigMap(dynakube dynatracev1beta1.DynaKube, tenantUUID, tenantEndpoints string) *corev1.ConfigMap {
	configMap := corev1.ConfigMap{}
	configMap.Name = dynakube.OneAgentConnectionInfoConfigMapName()
	configMap.Namespace = testNamespace
	configMap.Data = map[string]string{
		TenantUUIDName:             tenantUUID,
		CommunicationEndpointsName: tenantEndpoints,
	}
	return &configMap
}
// getTestOneAgentConnectionInfo returns the canned OneAgent connection info
// served by the mocked Dynatrace client in these tests.
func getTestOneAgentConnectionInfo() dtclient.OneAgentConnectionInfo {
	info := dtclient.OneAgentConnectionInfo{}
	info.TenantUUID = testTenantUUID
	info.TenantToken = testTenantToken
	info.Endpoints = testTenantEndpoints
	return info
}
// getTestActiveGateConnectionInfo returns the canned ActiveGate connection
// info served by the mocked Dynatrace client in these tests.
func getTestActiveGateConnectionInfo() *dtclient.ActiveGateConnectionInfo {
	info := dtclient.ActiveGateConnectionInfo{}
	info.TenantUUID = testTenantUUID
	info.TenantToken = testTenantToken
	info.Endpoints = testTenantEndpoints
	return &info
}
|
package statefulset
import (
"context"
"encoding/json"
"net/http"
"reflect"
"go.uber.org/zap"
"k8s.io/api/admission/v1beta1"
admissionregistration "k8s.io/api/admissionregistration/v1beta1"
appsv1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
"code.cloudfoundry.org/quarks-operator/pkg/kube/util/monitorednamespace"
wh "code.cloudfoundry.org/quarks-operator/pkg/kube/util/webhook"
"code.cloudfoundry.org/quarks-utils/pkg/config"
"code.cloudfoundry.org/quarks-utils/pkg/logger"
"code.cloudfoundry.org/quarks-utils/pkg/names"
)
// Mutator changes statefulset definitions
type Mutator struct {
	log    *zap.SugaredLogger // named sub-logger, set by NewMutator
	config *config.Config
	// decoder is injected by the controller runtime via InjectDecoder.
	decoder *admission.Decoder
}

// Implement admission.Handler so the controller can handle admission request.
var _ admission.Handler = &Mutator{}
// NewMutator returns a new admission handler that mutates StatefulSet
// definitions for controlled rollouts.
func NewMutator(log *zap.SugaredLogger, config *config.Config) admission.Handler {
	namedLog := log.Named("statefulset-rollout-mutator")
	namedLog.Info("Creating a StatefulSet rollout mutator")
	mutator := Mutator{}
	mutator.log = namedLog
	mutator.config = config
	return &mutator
}
// isControlledRolloutStatefulSet reports whether the statefulset opted into
// canary rollouts via the AnnotationCanaryRolloutEnabled annotation.
func isControlledRolloutStatefulSet(statefulset *appsv1.StatefulSet) bool {
	if enabled, found := statefulset.GetAnnotations()[AnnotationCanaryRolloutEnabled]; found {
		return enabled == "true"
	}
	return false
}
// Handle sets the partition for StatefulSets that opted into controlled
// rollouts: newly created objects are configured for an initial rollout,
// while updates whose pod template changed are configured for a fresh
// rollout. All other objects pass through unmodified.
func (m *Mutator) Handle(ctx context.Context, req admission.Request) admission.Response {
	statefulset := &appsv1.StatefulSet{}
	oldStatefulset := &appsv1.StatefulSet{}
	err := m.decoder.Decode(req, statefulset)
	if err != nil {
		return admission.Errored(http.StatusBadRequest, err)
	}
	if isControlledRolloutStatefulSet(statefulset) {
		if req.Operation == v1beta1.Create {
			ConfigureStatefulSetForInitialRollout(statefulset)
		} else {
			// On update, compare against the previous object to detect a
			// template change that warrants a new rollout.
			err = m.decoder.DecodeRaw(req.OldObject, oldStatefulset)
			if err != nil {
				return admission.Errored(http.StatusBadRequest, err)
			}
			m.log.Debugf("Mutator handler ran for statefulset '%s/%s'", statefulset.Namespace, statefulset.Name)
			if !reflect.DeepEqual(statefulset.Spec.Template, oldStatefulset.Spec.Template) {
				m.log.Debugf("StatefulSet '%s/%s' has changed", statefulset.Namespace, statefulset.Name)
				ConfigureStatefulSetForRollout(statefulset)
			}
		}
	}
	// Emit a JSON patch describing the difference between the incoming raw
	// object and the (possibly) mutated one.
	marshaledStatefulset, err := json.Marshal(statefulset)
	if err != nil {
		return admission.Errored(http.StatusInternalServerError, err)
	}
	return admission.PatchResponseFromRaw(req.Object.Raw, marshaledStatefulset)
}
// NewStatefulSetRolloutMutator creates a statefulset mutator webhook for
// setting the partition. The webhook intercepts CREATE and UPDATE of
// apps/v1 StatefulSets in namespaces labeled as monitored by this operator.
func NewStatefulSetRolloutMutator(log *zap.SugaredLogger, config *config.Config) *wh.OperatorWebhook {
	log = logger.Unskip(log, "sts-rollout-mutator")
	log.Info("Setting up mutator for statefulsets")
	mutator := NewMutator(log, config)
	// Renamed from the misleading `globalScopeType`: the value is the
	// *namespaced* scope, restricting the rule to namespaced resources.
	namespacedScopeType := admissionregistration.NamespacedScope
	return &wh.OperatorWebhook{
		FailurePolicy: admissionregistration.Fail,
		Rules: []admissionregistration.RuleWithOperations{
			{
				Rule: admissionregistration.Rule{
					APIGroups:   []string{"apps"},
					APIVersions: []string{"v1"},
					Resources:   []string{"statefulsets"},
					Scope:       &namespacedScopeType,
				},
				Operations: []admissionregistration.OperationType{
					"CREATE",
					"UPDATE",
				},
			},
		},
		Path: "/mutate-statefulsets",
		Name: "mutate-statefulsets." + names.GroupName,
		// Only act on namespaces this operator instance monitors.
		NamespaceSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{
				monitorednamespace.LabelNamespace: config.MonitoredID,
			},
		},
		Webhook: &admission.Webhook{
			Handler: mutator,
		},
	}
}
// Mutator implements admission.DecoderInjector.
// A decoder will be automatically injected.
var _ admission.DecoderInjector = &Mutator{}

// InjectDecoder injects the decoder.
func (m *Mutator) InjectDecoder(d *admission.Decoder) error {
	m.decoder = d
	return nil
}
|
// Copyright (c) KwanJunWen
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
package estemplate
import "fmt"
// DatatypeByte Core Datatype for numeric value.
// A signed 8-bit integer with a minimum value of -128 and a maximum value of 127.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/number.html
// for details.
type DatatypeByte struct {
	Datatype
	name   string   // mapping field key emitted by Source(true)
	copyTo []string // target fields for the "copy_to" option

	// fields specific to byte datatype; each is a pointer so that an unset
	// option is omitted from the serialized mapping entirely.
	coerce          *bool
	boost           *float32
	docValues       *bool
	ignoreMalformed *bool
	index           *bool
	nullValue       *int
	store           *bool
}
// NewDatatypeByte initializes a new DatatypeByte with the given field name.
func NewDatatypeByte(name string) *DatatypeByte {
	datatype := DatatypeByte{}
	datatype.name = name
	return &datatype
}
// Name returns field key for the Datatype.
func (b *DatatypeByte) Name() string {
	return b.name
}
// CopyTo appends target field(s) for the "copy_to" option, which allows the
// values of multiple fields to be queried as a single field.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/copy-to.html
// for details.
func (b *DatatypeByte) CopyTo(copyTo ...string) *DatatypeByte {
	for _, field := range copyTo {
		b.copyTo = append(b.copyTo, field)
	}
	return b
}
// Coerce sets whether the field should be coerced, attempting to clean up
// dirty values to fit the datatype. Defaults to true.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/coerce.html
// for details.
func (b *DatatypeByte) Coerce(coerce bool) *DatatypeByte {
	value := coerce
	b.coerce = &value
	return b
}
// Boost sets mapping field-level query time boosting. Defaults to 1.0.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/mapping-boost.html
// for details.
func (b *DatatypeByte) Boost(boost float32) *DatatypeByte {
	value := boost
	b.boost = &value
	return b
}
// DocValues sets whether the field should be stored on disk in a
// column-stride fashion so that it can later be used for sorting,
// aggregations, or scripting. Defaults to true.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/doc-values.html
// for details.
func (b *DatatypeByte) DocValues(docValues bool) *DatatypeByte {
	value := docValues
	b.docValues = &value
	return b
}
// IgnoreMalformed sets whether the field should ignore malformed numbers.
// Defaults to false.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/ignore-malformed.html
// for details.
func (b *DatatypeByte) IgnoreMalformed(ignoreMalformed bool) *DatatypeByte {
	value := ignoreMalformed
	b.ignoreMalformed = &value
	return b
}
// Index sets whether the field should be searchable. Defaults to true.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/mapping-index.html
// for details.
func (b *DatatypeByte) Index(index bool) *DatatypeByte {
	value := index
	b.index = &value
	return b
}
// NullValue sets a numeric value which is substituted for any explicit null
// values. Defaults to null.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/null-value.html
// for details.
func (b *DatatypeByte) NullValue(nullValue int) *DatatypeByte {
	value := nullValue
	b.nullValue = &value
	return b
}
// Store sets whether the field value should be stored and retrievable
// separately from the `_source` field. Defaults to false.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.5/mapping-store.html
// for details.
func (b *DatatypeByte) Store(store bool) *DatatypeByte {
	value := store
	b.store = &value
	return b
}
// Validate validates DatatypeByte, returning an error listing missing
// required fields. The name is only required when includeName is true.
func (b *DatatypeByte) Validate(includeName bool) error {
	var invalid []string
	if includeName && b.name == "" {
		invalid = append(invalid, "Name")
	}
	if len(invalid) == 0 {
		return nil
	}
	return fmt.Errorf("missing required fields: %v", invalid)
}
// Source returns the serializable JSON for the source builder.
//
// Only explicitly-set options are emitted. When includeName is true the
// options are wrapped under the field name, e.g.:
//
//	{
//	  "test": {
//	    "type": "byte",
//	    "copy_to": ["field_1", "field_2"],
//	    "coerce": true,
//	    "boost": 2,
//	    "doc_values": true,
//	    "ignore_malformed": true,
//	    "index": true,
//	    "null_value": 0,
//	    "store": true
//	  }
//	}
func (b *DatatypeByte) Source(includeName bool) (interface{}, error) {
	options := make(map[string]interface{})
	options["type"] = "byte"

	if len(b.copyTo) > 0 {
		// A single target serializes as a scalar, several as an array.
		// (The original switch carried redundant `break` statements —
		// Go switch cases never fall through — and an unreachable
		// `default` arm behind the len > 0 guard; both removed.)
		var copyTo interface{}
		if len(b.copyTo) > 1 {
			copyTo = b.copyTo
		} else {
			copyTo = b.copyTo[0]
		}
		options["copy_to"] = copyTo
	}
	if b.coerce != nil {
		options["coerce"] = b.coerce
	}
	if b.boost != nil {
		options["boost"] = b.boost
	}
	if b.docValues != nil {
		options["doc_values"] = b.docValues
	}
	if b.ignoreMalformed != nil {
		options["ignore_malformed"] = b.ignoreMalformed
	}
	if b.index != nil {
		options["index"] = b.index
	}
	if b.nullValue != nil {
		options["null_value"] = b.nullValue
	}
	if b.store != nil {
		options["store"] = b.store
	}

	if !includeName {
		return options, nil
	}
	source := make(map[string]interface{})
	source[b.name] = options
	return source, nil
}
|
package word
import (
"strings"
"unicode"
)
// Word represents a word
type Word struct {
	runes []rune // the word's characters, stored as runes for Unicode safety
}
// New instantiates a Word from the given string.
func New(str string) Word {
	word := Word{}
	word.runes = []rune(str)
	return word
}
// Cleanup returns a copy of the word with every rune that is neither a
// letter nor a number removed.
func (r Word) Cleanup() Word {
	parts := strings.FieldsFunc(string(r.runes), isLetterOrNumber)
	cleaned := strings.Join(parts, "")
	return Word{runes: []rune(cleaned)}
}
// String implements fmt.Stringer, returning the word's characters.
func (r Word) String() string {
	return string(r.runes)
}
// Runes returns the word's underlying runes.
// NOTE(review): the slice is returned without copying, so mutations by the
// caller are visible in the Word.
func (r Word) Runes() []rune {
	return r.runes
}
func isLetterOrNumber(c rune) bool { return !unicode.IsLetter(c) && !unicode.IsNumber(c) }
|
package console
import "testing"
// TestParse checks Parse against one well-formed geometry string and a set
// of malformed inputs that must all yield an error.
func TestParse(t *testing.T) {
	x, y, err := Parse("23x14")
	if err != nil {
		t.Fatal(err)
	}
	if x != 23 || y != 14 {
		t.Fatalf("bad coordinates: wanted %dx%d have %dx%d", 23, 14, x, y)
	}
	invalid := []string{
		"asdfg",
		"123x",
		"x123",
		"132x143x43",
		"x8xx6x5x",
	}
	for _, input := range invalid {
		if _, _, err := Parse(input); err == nil {
			t.Fatal("invalid input must return an error:", input)
		}
	}
}
|
package port
import (
"context"
"github.com/shandysiswandi/echo-service/internal/domain"
)
// TodoUsecase is the inbound port describing the todo business operations
// exposed to transport handlers.
type TodoUsecase interface {
	FetchTodos(context.Context) ([]*domain.Todo, error)
	GetTodoByID(context.Context, string) (*domain.Todo, error)
	CreateTodo(context.Context, domain.TodoCreatePayload) error
	UpdateTodoByID(context.Context, domain.TodoUpdatePayload) error
	ReplaceTodoByID(context.Context, domain.TodoReplacePayload) error
	DeleteTodoByID(context.Context, string) error
}
// TodoRepository is the outbound port describing todo persistence,
// mirroring TodoUsecase at the storage level.
type TodoRepository interface {
	Fetch(context.Context) ([]*domain.Todo, error)
	GetByID(context.Context, string) (*domain.Todo, error)
	Create(context.Context, domain.Todo) error
	UpdateByID(context.Context, domain.Todo) error
	ReplaceByID(context.Context, domain.Todo) error
	DeleteByID(context.Context, string) error
}
|
package requests
import (
"fmt"
"net/url"
"strings"
"time"
"github.com/google/go-querystring/query"
"github.com/atomicjolt/canvasapi"
"github.com/atomicjolt/string_utils"
)
// ListPlannerItemsPlanner Retrieve the paginated list of objects to be shown on the planner for the
// current user with the associated planner override to override an item's
// visibility if set.
//
// Planner items for a student may also be retrieved by a linked observer. Use
// the path that accepts a user_id and supply the student's id.
// https://canvas.instructure.com/doc/api/planner.html
//
// Query Parameters:
// # Query.StartDate (Optional) Only return items starting from the given date.
// The value should be formatted as: yyyy-mm-dd or ISO 8601 YYYY-MM-DDTHH:MM:SSZ.
// # Query.EndDate (Optional) Only return items up to the given date.
// The value should be formatted as: yyyy-mm-dd or ISO 8601 YYYY-MM-DDTHH:MM:SSZ.
// # Query.ContextCodes (Optional) List of context codes of courses and/or groups whose items you want to see.
// If not specified, defaults to all contexts associated to the current user.
// Note that concluded courses will be ignored unless specified in the includes[]
// parameter. The format of this field is the context type, followed by an underscore,
// followed by the context id. For example: course_42, group_123
// # Query.Filter (Optional) . Must be one of new_activityOnly return items that have new or unread activity
//
type ListPlannerItemsPlanner struct {
	// Query holds the request's query-string parameters; url tags drive
	// serialization in GetQuery.
	Query struct {
		StartDate    time.Time `json:"start_date" url:"start_date,omitempty"`       // (Optional)
		EndDate      time.Time `json:"end_date" url:"end_date,omitempty"`           // (Optional)
		ContextCodes []string  `json:"context_codes" url:"context_codes,omitempty"` // (Optional)
		Filter       string    `json:"filter" url:"filter,omitempty"`               // (Optional) . Must be one of new_activity
	} `json:"query"`
}
// GetMethod returns the HTTP method for this request.
func (t *ListPlannerItemsPlanner) GetMethod() string {
	return "GET"
}
// GetURLPath returns the URL path; empty because this endpoint has no
// path parameters to interpolate.
func (t *ListPlannerItemsPlanner) GetURLPath() string {
	return ""
}
// GetQuery serializes the Query struct into an encoded query string using
// the struct's url tags.
func (t *ListPlannerItemsPlanner) GetQuery() (string, error) {
	v, err := query.Values(t.Query)
	if err != nil {
		return "", err
	}
	return v.Encode(), nil
}
// GetBody returns no form body; this is a GET request.
func (t *ListPlannerItemsPlanner) GetBody() (url.Values, error) {
	return nil, nil
}
// GetJSON returns no JSON body; this is a GET request.
func (t *ListPlannerItemsPlanner) GetJSON() ([]byte, error) {
	return nil, nil
}
// HasErrors validates the query parameters, returning a single error joining
// every violation, or nil when the request is well formed.
func (t *ListPlannerItemsPlanner) HasErrors() error {
	errs := []string{}
	if t.Query.Filter != "" && !string_utils.Include([]string{"new_activity"}, t.Query.Filter) {
		errs = append(errs, "Filter must be one of new_activity")
	}
	if len(errs) > 0 {
		// Use an explicit %s verb: the original passed the joined string as
		// the format argument, which go vet flags (non-constant format) and
		// which would misinterpret any '%' contained in a message.
		return fmt.Errorf("%s", strings.Join(errs, ", "))
	}
	return nil
}
// Do executes the request against the given Canvas client, discarding the
// response body and reporting only transport/validation errors.
func (t *ListPlannerItemsPlanner) Do(c *canvasapi.Canvas) error {
	if _, err := c.SendRequest(t); err != nil {
		return err
	}
	return nil
}
|
package candishared
import "math"
// Result common output
type Result struct {
	Data  interface{} // payload; nil when Error is set
	Error error
}
// SliceResult include meta
type SliceResult struct {
	Data interface{} // slice payload
	Meta Meta        // pagination metadata for Data
}
// Meta model holding pagination information for a slice of records.
type Meta struct {
	Page         int `json:"page"`
	Limit        int `json:"limit"` // records per page
	TotalRecords int `json:"totalRecords"`
	TotalPages   int `json:"totalPages"` // derived via CalculatePages
}
// NewMeta creates new pagination meta for slice data, deriving TotalPages
// from the supplied limit and record count.
func NewMeta(page, limit, totalRecords int) *Meta {
	meta := Meta{
		Page:         page,
		Limit:        limit,
		TotalRecords: totalRecords,
	}
	meta.CalculatePages()
	return &meta
}
// CalculatePages recomputes TotalPages from TotalRecords and Limit.
func (m *Meta) CalculatePages() {
	// Guard against a non-positive limit: float64 division by zero yields
	// ±Inf/NaN, whose conversion to int is implementation-defined in Go.
	if m.Limit <= 0 {
		m.TotalPages = 0
		return
	}
	m.TotalPages = int(math.Ceil(float64(m.TotalRecords) / float64(m.Limit)))
}
// ToResolver converts the meta to its GraphQL resolver representation,
// narrowing each counter to int32.
func (m *Meta) ToResolver() *MetaResolver {
	resolver := MetaResolver{}
	resolver.Page = int32(m.Page)
	resolver.Limit = int32(m.Limit)
	resolver.TotalRecords = int32(m.TotalRecords)
	resolver.TotalPages = int32(m.TotalPages)
	return &resolver
}
// MetaResolver model for graphql resolver, graphql doesn't support int64 data type (https://github.com/graphql/graphql-spec/issues/73)
type MetaResolver struct {
	Page         int32 `json:"page"`
	Limit        int32 `json:"limit"`
	TotalRecords int32 `json:"totalRecords"`
	TotalPages   int32 `json:"totalPages"`
}
|
package pipelinerun
import (
"testing"
"k8s.io/apimachinery/pkg/util/diff"
)
// TestDashboardURL verifies that dashboardURL renders the status-target-URL
// annotation: literal URLs pass through unchanged, while Go-template
// placeholders ({{ .Namespace }}, {{ .Name }}, {{ .Group }}, {{ .Version }},
// {{ .Kind }}) are substituted from the PipelineRun.
func TestDashboardURL(t *testing.T) {
	for _, tc := range []struct {
		detailsURLAnnotation string
		wantDetailsURL       string
	}{
		{
			// No placeholders: annotation is used verbatim.
			detailsURLAnnotation: "https://tekton.dev",
			wantDetailsURL:       "https://tekton.dev",
		},
		{
			detailsURLAnnotation: "https://dashboard.dogfooding.tekton.dev/#/namespaces/{{ .Namespace }}/pipelineruns/{{ .Name }}",
			wantDetailsURL:       "https://dashboard.dogfooding.tekton.dev/#/namespaces/test-namespace/pipelineruns/test-pipeline-run",
		},
		{
			detailsURLAnnotation: "https://console-openshift-console.apps-crc.testing/k8s/ns/{{ .Namespace }}/{{ .Group }}~{{ .Version }}~{{ .Kind }}/{{ .Name }}",
			wantDetailsURL:       "https://console-openshift-console.apps-crc.testing/k8s/ns/test-namespace/tekton.dev~v1beta1~PipelineRun/test-pipeline-run",
		},
	} {
		t.Run(tc.detailsURLAnnotation, func(t *testing.T) {
			pr := makePipelineRunWithResources()
			pr.Annotations = make(map[string]string)
			pr.Annotations[statusTargetURLName] = tc.detailsURLAnnotation
			url := dashboardURL(pr)
			if tc.wantDetailsURL != url {
				t.Errorf("-want,+got:\n%s", diff.StringDiff(tc.wantDetailsURL, url))
			}
		})
	}
}
|
// Copyright 2009 The Go Authors. All rights reserved.
// Copyright 2019 The gVisor Authors.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This is mostly copied from the standard library's sync/rwmutex.go.
//
// Happens-before relationships indicated to the race detector:
// - Unlock -> Lock (via writerSem)
// - Unlock -> RLock (via readerSem)
// - RUnlock -> Lock (via writerSem)
// - DowngradeLock -> RLock (via readerSem)
package sync
import (
"sync/atomic"
"unsafe"
)
// CrossGoroutineRWMutex is equivalent to RWMutex, but it need not be unlocked
// by the same goroutine that locked the mutex.
type CrossGoroutineRWMutex struct {
	// w is held if there are pending writers
	//
	// We use CrossGoroutineMutex rather than Mutex because the lock
	// annotation instrumentation in Mutex will trigger false positives in
	// the race detector when called inside of RaceDisable.
	w           CrossGoroutineMutex
	writerSem   uint32 // semaphore for writers to wait for completing readers
	readerSem   uint32 // semaphore for readers to wait for completing writers
	readerCount int32  // number of pending readers; negative while a writer is pending
	readerWait  int32  // number of departing readers
}
const rwmutexMaxReaders = 1 << 30
// TryRLock locks rw for reading. It returns true if it succeeds and false
// otherwise. It does not block.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) TryRLock() bool {
	if RaceEnabled {
		RaceDisable()
	}
	// CAS-loop: retry only when another reader raced us; bail out as soon
	// as a writer is pending (readerCount < 0).
	for {
		rc := atomic.LoadInt32(&rw.readerCount)
		if rc < 0 {
			if RaceEnabled {
				RaceEnable()
			}
			return false
		}
		if !atomic.CompareAndSwapInt32(&rw.readerCount, rc, rc+1) {
			continue
		}
		if RaceEnabled {
			RaceEnable()
			RaceAcquire(unsafe.Pointer(&rw.readerSem))
		}
		return true
	}
}
// RLock locks rw for reading.
//
// It should not be used for recursive read locking; a blocked Lock call
// excludes new readers from acquiring the lock. See the documentation on the
// RWMutex type.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) RLock() {
	if RaceEnabled {
		RaceDisable()
	}
	// A negative count after the increment means a writer announced itself
	// (see Lock); block on the reader semaphore until it finishes.
	if atomic.AddInt32(&rw.readerCount, 1) < 0 {
		// A writer is pending, wait for it.
		semacquire(&rw.readerSem)
	}
	if RaceEnabled {
		RaceEnable()
		RaceAcquire(unsafe.Pointer(&rw.readerSem))
	}
}
// RUnlock undoes a single RLock call.
//
// Preconditions:
//   - rw is locked for reading.
//
// +checklocksignore
func (rw *CrossGoroutineRWMutex) RUnlock() {
	if RaceEnabled {
		RaceReleaseMerge(unsafe.Pointer(&rw.writerSem))
		RaceDisable()
	}
	if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
		// r+1 == 0: RUnlock with no readers; r+1 == -rwmutexMaxReaders:
		// RUnlock while a writer holds the lock with no readers.
		if r+1 == 0 || r+1 == -rwmutexMaxReaders {
			panic("RUnlock of unlocked RWMutex")
		}
		// A writer is pending.
		if atomic.AddInt32(&rw.readerWait, -1) == 0 {
			// The last reader unblocks the writer.
			semrelease(&rw.writerSem, false, 0)
		}
	}
	if RaceEnabled {
		RaceEnable()
	}
}
// TryLock locks rw for writing. It returns true if it succeeds and false
// otherwise. It does not block.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) TryLock() bool {
	if RaceEnabled {
		RaceDisable()
	}
	// First, resolve competition with other writers.
	if !rw.w.TryLock() {
		if RaceEnabled {
			RaceEnable()
		}
		return false
	}
	// Only proceed if there are no readers. The CAS simultaneously checks
	// for zero readers and announces the writer (negative readerCount).
	if !atomic.CompareAndSwapInt32(&rw.readerCount, 0, -rwmutexMaxReaders) {
		rw.w.Unlock()
		if RaceEnabled {
			RaceEnable()
		}
		return false
	}
	if RaceEnabled {
		RaceEnable()
		RaceAcquire(unsafe.Pointer(&rw.writerSem))
	}
	return true
}
// Lock locks rw for writing. If the lock is already locked for reading or
// writing, Lock blocks until the lock is available.
// +checklocksignore
func (rw *CrossGoroutineRWMutex) Lock() {
	if RaceEnabled {
		RaceDisable()
	}
	// First, resolve competition with other writers.
	rw.w.Lock()
	// Announce to readers there is a pending writer.
	// r recovers the number of readers active at announcement time.
	r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
	// Wait for active readers.
	if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
		semacquire(&rw.writerSem)
	}
	if RaceEnabled {
		RaceEnable()
		RaceAcquire(unsafe.Pointer(&rw.writerSem))
	}
}
// Unlock unlocks rw for writing.
//
// Preconditions:
//   - rw is locked for writing.
//
// +checklocksignore
func (rw *CrossGoroutineRWMutex) Unlock() {
	if RaceEnabled {
		RaceRelease(unsafe.Pointer(&rw.writerSem))
		RaceRelease(unsafe.Pointer(&rw.readerSem))
		RaceDisable()
	}
	// Announce to readers there is no active writer.
	// r is the number of readers that queued up while we held the lock.
	r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
	if r >= rwmutexMaxReaders {
		panic("Unlock of unlocked RWMutex")
	}
	// Unblock blocked readers, if any.
	for i := 0; i < int(r); i++ {
		semrelease(&rw.readerSem, false, 0)
	}
	// Allow other writers to proceed.
	rw.w.Unlock()
	if RaceEnabled {
		RaceEnable()
	}
}
// DowngradeLock atomically unlocks rw for writing and locks it for reading.
//
// Preconditions:
//   - rw is locked for writing.
//
// +checklocksignore
func (rw *CrossGoroutineRWMutex) DowngradeLock() {
	if RaceEnabled {
		RaceRelease(unsafe.Pointer(&rw.readerSem))
		RaceDisable()
	}
	// Announce to readers there is no active writer and one additional reader.
	r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders+1)
	if r >= rwmutexMaxReaders+1 {
		panic("DowngradeLock of unlocked RWMutex")
	}
	// Unblock blocked readers, if any. Note that this loop starts as 1 since r
	// includes this goroutine.
	for i := 1; i < int(r); i++ {
		semrelease(&rw.readerSem, false, 0)
	}
	// Allow other writers to proceed to rw.w.Lock(). Note that they will still
	// block on rw.writerSem since at least this reader exists, such that
	// DowngradeLock() is atomic with the previous write lock.
	rw.w.Unlock()
	if RaceEnabled {
		RaceEnable()
	}
}
// A RWMutex is a reader/writer mutual exclusion lock. The lock can be held by
// an arbitrary number of readers or a single writer. The zero value for a
// RWMutex is an unlocked mutex.
//
// A RWMutex must not be copied after first use.
//
// If a goroutine holds a RWMutex for reading and another goroutine might call
// Lock, no goroutine should expect to be able to acquire a read lock until the
// initial read lock is released. In particular, this prohibits recursive read
// locking. This is to ensure that the lock eventually becomes available; a
// blocked Lock call excludes new readers from acquiring the lock.
//
// An RWMutex must be unlocked by the same goroutine that locked it. This
// invariant is enforced with the 'checklocks' build tag.
type RWMutex struct {
	m CrossGoroutineRWMutex // underlying lock; this wrapper adds ownership tracking
}
// TryRLock locks rw for reading. It returns true if it succeeds and false
// otherwise. It does not block.
// +checklocksignore
func (rw *RWMutex) TryRLock() bool {
	// Note lock first to enforce proper locking even if unsuccessful.
	noteLock(unsafe.Pointer(rw))
	locked := rw.m.TryRLock()
	if !locked {
		// Roll back the ownership note on failure.
		noteUnlock(unsafe.Pointer(rw))
	}
	return locked
}
// RLock locks rw for reading.
//
// It should not be used for recursive read locking; a blocked Lock call
// excludes new readers from acquiring the lock. See the documentation on the
// RWMutex type.
// +checklocksignore
func (rw *RWMutex) RLock() {
	// Record ownership before blocking so violations are attributable.
	noteLock(unsafe.Pointer(rw))
	rw.m.RLock()
}
// RUnlock undoes a single RLock call.
//
// Preconditions:
//   - rw is locked for reading.
//   - rw was locked by this goroutine.
//
// +checklocksignore
func (rw *RWMutex) RUnlock() {
	rw.m.RUnlock()
	noteUnlock(unsafe.Pointer(rw))
}
// TryLock locks rw for writing. It returns true if it succeeds and false
// otherwise. It does not block.
// +checklocksignore
func (rw *RWMutex) TryLock() bool {
	// Note lock first to enforce proper locking even if unsuccessful.
	noteLock(unsafe.Pointer(rw))
	locked := rw.m.TryLock()
	if !locked {
		// Roll back the ownership note on failure.
		noteUnlock(unsafe.Pointer(rw))
	}
	return locked
}
// Lock locks rw for writing. If the lock is already locked for reading or
// writing, Lock blocks until the lock is available.
// +checklocksignore
func (rw *RWMutex) Lock() {
	// Record ownership before blocking so violations are attributable.
	noteLock(unsafe.Pointer(rw))
	rw.m.Lock()
}
// Unlock unlocks rw for writing.
//
// Preconditions:
//   - rw is locked for writing.
//   - rw was locked by this goroutine.
//
// +checklocksignore
func (rw *RWMutex) Unlock() {
	rw.m.Unlock()
	noteUnlock(unsafe.Pointer(rw))
}
// DowngradeLock atomically unlocks rw for writing and locks it for reading.
//
// Preconditions:
//   - rw is locked for writing.
//
// +checklocksignore
func (rw *RWMutex) DowngradeLock() {
	// No note change for DowngradeLock: the goroutine still holds the lock,
	// only its mode changes.
	rw.m.DowngradeLock()
}
|
package mapreduce
import "fmt"
// schedule starts and waits for all tasks in the given phase (Map or Reduce).
//
// Workers are pulled from mr.registerChannel one at a time; a worker that
// completes its task is handed back to the channel for reuse, while a failed
// task is retried with the next available worker.
//
// NOTE(review): dispatch is serial — each RPC completes before the next task
// is handed out — so workers never run concurrently here.
func (mr *Master) schedule(phase jobPhase) {
	var ntasks int
	var nios int // number of inputs (for reduce) or outputs (for map)
	switch phase {
	case mapPhase:
		ntasks = len(mr.files)
		nios = mr.nReduce
	case reducePhase:
		ntasks = mr.nReduce
		nios = len(mr.files)
	}
	for i := 0; i < ntasks; i++ {
		worker := <-mr.registerChannel
		task := new(DoTaskArgs)
		task.Phase = phase
		task.JobName = mr.jobName
		task.NumOtherPhase = nios
		task.TaskNumber = i
		if phase == mapPhase {
			// Only map tasks consume an input file.
			task.File = mr.files[i]
		}
		if call(worker, "Worker.DoTask", task, new(struct{})) {
			// Re-register the worker asynchronously so this loop is not
			// blocked when nobody is currently reading the channel.
			go func() {
				mr.registerChannel <- worker
			}()
		} else {
			// The RPC failed (e.g. worker crash): retry the same task
			// number with a different worker; the failed worker is dropped.
			i = i - 1
			if phase == mapPhase {
				fmt.Printf("doMap ERROR\n")
			} else {
				fmt.Printf("doReduce ERROR!\n")
			}
		}
	}
	fmt.Printf("Schedule: %v %v tasks (%d I/Os)\n", ntasks, phase, nios)
	fmt.Printf("Schedule: %v phase done\n", phase)
}
|
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
// +build !windows,!freebsd,!dragonfly,!darwin
package server
import "golang.org/x/sys/unix"
// setRlimitNoFile sets the process's open-file-descriptor limit
// (RLIMIT_NOFILE) to the given soft/hard values.
func setRlimitNoFile(limits *rlimit) error {
	return unix.Setrlimit(unix.RLIMIT_NOFILE, (*unix.Rlimit)(limits))
}
// getRlimitNoFile reads the process's current open-file-descriptor limit
// (RLIMIT_NOFILE) into limits.
func getRlimitNoFile(limits *rlimit) error {
	return unix.Getrlimit(unix.RLIMIT_NOFILE, (*unix.Rlimit)(limits))
}
|
package leetcode_go
// reverseList reverses a singly linked list in place and returns the new
// head (the original tail).
func reverseList(head *ListNode) *ListNode {
	var prev *ListNode
	node := head
	for node != nil {
		next := node.Next
		node.Next = prev
		prev, node = node, next
	}
	return prev
}
|
package main
import "fmt"
// main demonstrates a string switch with a multi-value case.
func main() {
	name := "Medhi"
	switch name {
	case "Medhi", "Daniel":
		fmt.Println("Sup Medhi and Daniel")
	case "Jacob":
		fmt.Println("Sup Jacob")
	default:
		fmt.Println("No matches")
	}
}
|
package api
import (
"net"
"time"
"github.com/kklin/quilt-dev/db"
"github.com/kklin/quilt-dev/api/pb"
"github.com/kklin/quilt-dev/api/util"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
// Client provides methods to interact with the Quilt daemon.
type Client struct {
	// pbClient is the generated gRPC client used for all daemon RPCs.
	pbClient pb.APIClient
}
// NewClient creates a new Quilt client connected to `lAddr`.
func NewClient(lAddr string) (Client, error) {
	proto, addr, err := util.ParseListenAddress(lAddr)
	if err != nil {
		return Client{}, err
	}
	// Custom dialer so unix-socket listen addresses work as well as TCP.
	dial := func(target string, timeout time.Duration) (net.Conn, error) {
		return net.DialTimeout(proto, target, timeout)
	}
	cc, err := grpc.Dial(addr, grpc.WithDialer(dial), grpc.WithInsecure())
	if err != nil {
		return Client{}, err
	}
	return Client{pbClient: pb.NewAPIClient(cc)}, nil
}
// QueryMachines retrieves the machines tracked by the Quilt daemon.
func (c Client) QueryMachines() ([]db.Machine, error) {
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
reply, err := c.pbClient.QueryMachines(ctx, &pb.DBQuery{})
if err != nil {
return []db.Machine{}, err
}
var dbMachines []db.Machine
for _, pbMachine := range reply.Machines {
dbMachines = append(dbMachines, convertMachine(*pbMachine))
}
return dbMachines, nil
}
// convertMachine translates a protobuf Machine into its database
// representation, casting the numeric and enum-like fields.
func convertMachine(m pb.Machine) db.Machine {
	machine := db.Machine{
		ID:       int(m.ID),
		DiskSize: int(m.DiskSize),
		Role:     db.Role(m.Role),
		Provider: db.Provider(m.Provider),
	}
	machine.Region = m.Region
	machine.Size = m.Size
	machine.SSHKeys = m.SSHKeys
	machine.CloudID = m.CloudID
	machine.PublicIP = m.PublicIP
	machine.PrivateIP = m.PrivateIP
	machine.Connected = m.Connected
	return machine
}
// QueryContainers retrieves the containers tracked by the Quilt daemon.
func (c Client) QueryContainers() ([]db.Container, error) {
	// Bound the RPC and release the timeout's resources when done
	// (dropping the CancelFunc leaks the context until the timer fires).
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	reply, err := c.pbClient.QueryContainers(ctx, &pb.DBQuery{})
	if err != nil {
		return []db.Container{}, err
	}
	var dbContainers []db.Container
	for _, pbContainer := range reply.Containers {
		dbContainers = append(dbContainers, convertContainer(*pbContainer))
	}
	return dbContainers, nil
}
// convertContainer translates a protobuf Container into its database
// representation.
func convertContainer(c pb.Container) db.Container {
	container := db.Container{ID: int(c.ID)}
	container.DockerID = c.DockerID
	container.Image = c.Image
	container.Command = c.Command
	container.Labels = c.Labels
	return container
}
// RunStitch makes a request to the Quilt daemon to execute the given stitch.
func (c Client) RunStitch(stitch string) error {
	// Bound the RPC and release the timeout's resources when done
	// (dropping the CancelFunc leaks the context until the timer fires).
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if _, err := c.pbClient.Run(ctx, &pb.RunRequest{Stitch: stitch}); err != nil {
		return err
	}
	return nil
}
|
package service
import (
"context"
"google.golang.org/grpc/status"
"login/model"
"shared/utility/errors"
"shared/utility/glog"
"shared/protobuf/pb"
)
// RPCLoginHandler implements the Login gRPC service; embedding the
// generated Unimplemented server keeps it forward-compatible.
type RPCLoginHandler struct {
	pb.UnimplementedLoginServer
}
// CheckToken reports whether the token supplied in the request matches the
// token currently stored for the user.
func (RPCLoginHandler) CheckToken(ctx context.Context, req *pb.CheckTokenReq) (*pb.CheckTokenResp, error) {
	stored, err := model.GetToken(ctx, req.UserID)
	if err != nil {
		return nil, err
	}
	resp := &pb.CheckTokenResp{OK: stored == req.Token}
	return resp, nil
}
// Test exercises the custom errors package's interaction with gRPC status
// codes; presumably a manual-verification endpoint rather than production
// traffic — confirm before exposing it.
func (RPCLoginHandler) Test(ctx context.Context, req *pb.LoginTestReq) (*pb.LoginTestResp, error) {
	if req.Id == 0 {
		return nil, status.Errorf(2100, "test error!")
	}
	if req.Id == 1 {
		// Build a coded error and check it exposes a gRPC status both via
		// the GRPCStatus interface and via status.FromError.
		errors.SetDefaultCode(1000)
		err := errors.NewCode(2200, "test error2!")
		if s, ok := err.(interface {
			GRPCStatus() *status.Status
		}); ok {
			glog.Infof("code :%d", s.GRPCStatus().Code())
		}
		s, ok := status.FromError(err)
		if !ok {
			glog.Errorf("xxxxxxxxxx!ok!ok!ok")
		}
		glog.Infof("code :%d, msg: %s", s.Code(), s.Message())
		// Stack several wrappers to verify trace/text wrapping round-trips
		// through the error chain.
		err = errors.WrapTrace(err)
		err = errors.WrapText(err, "123")
		err = errors.WrapTrace(err)
		err = errors.Wrap(err, "456")
		err = errors.WrapTrace(err)
		return nil, err
	}
	return &pb.LoginTestResp{
		Msg: req.Msg,
		Id:  req.Id,
	}, nil
}
|
package main
import (
"fmt"
"github.com/kataras/iris"
"github.com/iris-contrib/middleware/logger"
)
// main wires up the user-management HTTP API and serves it on :8080.
// NOTE(review): route registration order is preserved as-is; in some
// routers a literal path like /user/login must be registered relative to
// the /user/:id wildcard carefully — confirm before reordering.
func main() {
	fmt.Print("USER MANAGEMENT START")
	api := iris.New()
	// Request logging middleware for every route.
	api.Use(logger.New())
	// Root health/status endpoint.
	api.Get("/", func(ctx *iris.Context) {
		ctx.JSON(iris.StatusOK,iris.Map{"status":true})
	})
	// Routes for the authenticated end user.
	user := api.Party("/user")
	user.Get("/",getMyProfile)
	user.Get("/:id",getUser)
	user.Post("/login",login)
	user.Post("/forgot",forgot)
	user.Post("/register",register)
	user.Post("/update/password",updatePassword)
	user.Post("/update/profile",updateProfile)
	// Administrative routes.
	admin := api.Party("/admin")
	admin.Post("/create",createUser)
	admin.Get("/list/:page",listUser)
	admin.Get("/:id",getUserAdmin)
	admin.Post("/delete",deleteUser)
	admin.Post("/update/field",updateField)
	api.Listen(":8080")
}
|
package util
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/cyberark/secretless-broker/bin/juxtaposer/timing"
)
// TestGetStandardDeviation verifies the weighted standard deviation over a
// histogram map of value -> occurrence count.
func TestGetStandardDeviation(t *testing.T) {
	t.Run("nil input", func(t *testing.T) {
		// Pass an actual nil pointer so this case is distinct from "empty
		// input" (the original passed an empty map here, duplicating the
		// next subtest). Assumes GetStandardDeviation is nil-safe, like
		// getMappedDataPointCount — confirm.
		res := GetStandardDeviation(nil)
		assert.Equal(t, 0.0, res)
	})
	t.Run("empty input", func(t *testing.T) {
		input := &map[int]int{}
		res := GetStandardDeviation(input)
		// assert.Equal takes (t, expected, actual) in that order.
		assert.Equal(t, 0.0, res)
	})
	t.Run("valid input", func(t *testing.T) {
		// Keys are data values; map values are occurrence counts.
		input := &map[int]int{
			1: 2,
			2: 3,
			3: 4,
		}
		res := GetStandardDeviation(input)
		// ((1 - 2.22...)^2 * 2 + (2 - 2.22...)^2 * 3 + (3 - 2.22...)^2 * 4) / (2 + 3 + 4 - 1))^0.5 = 0.833...
		assert.InDelta(t, 0.833, res, 0.01)
	})
}
// TestGetMean verifies the weighted mean over a histogram map of
// value -> occurrence count.
func TestGetMean(t *testing.T) {
	t.Run("nil input", func(t *testing.T) {
		// Pass an actual nil pointer so this case is distinct from "empty
		// input" (the original passed an empty map here). Assumes GetMean
		// is nil-safe, like getMappedDataPointCount — confirm.
		res := GetMean(nil)
		assert.Equal(t, 0.0, res)
	})
	t.Run("empty input", func(t *testing.T) {
		input := &map[int]int{}
		res := GetMean(input)
		// assert.Equal takes (t, expected, actual) in that order.
		assert.Equal(t, 0.0, res)
	})
	t.Run("valid input", func(t *testing.T) {
		input := &map[int]int{
			1: 2,
			2: 3,
			3: 4,
		}
		res := GetMean(input)
		// (1 * 2 + 2 * 3 + 3 * 4) / (2 + 3 + 4) = 2.22...
		assert.InDelta(t, 2.22, res, 0.01)
	})
}
// TestGetAverageDuration verifies average run duration, which excludes
// errored runs from the denominator and truncates toward zero.
func TestGetAverageDuration(t *testing.T) {
	t.Run("empty input", func(t *testing.T) {
		input := &timing.BackendTiming{}
		res := GetAverageDuration(input)
		// assert.Equal takes (t, expected, actual) in that order.
		assert.Equal(t, time.Duration(0), res)
	})
	t.Run("zero-value input", func(t *testing.T) {
		// NOTE(review): the original labeled this "nil input" but passed a
		// zero-value struct, duplicating the case above. A true nil case
		// needs GetAverageDuration to be nil-safe — confirm before passing nil.
		input := &timing.BackendTiming{}
		res := GetAverageDuration(input)
		assert.Equal(t, time.Duration(0), res)
	})
	t.Run("valid input result is rounded down", func(t *testing.T) {
		input := &timing.BackendTiming{
			Count:    20,
			Duration: time.Duration(50),
			Errors:   make([]timing.TestRunError, 4),
		}
		res := GetAverageDuration(input)
		// 50 / (20 - 4) = 3.125, truncated to 3.
		assert.Equal(t, time.Duration(3), res)
	})
}
// Test_getMappedDataPointCount verifies the total occurrence count of a
// histogram map, including nil and empty inputs.
func Test_getMappedDataPointCount(t *testing.T) {
	t.Run("empty input", func(t *testing.T) {
		input := &map[int]int{}
		res := getMappedDataPointCount(input)
		// assert.Equal takes (t, expected, actual) in that order.
		assert.Equal(t, 0, res)
	})
	t.Run("nil input", func(t *testing.T) {
		res := getMappedDataPointCount(nil)
		assert.Equal(t, 0, res)
	})
	t.Run("valid input", func(t *testing.T) {
		input := &map[int]int{
			0: 1,
			5: 2,
			6: 3,
		}
		res := getMappedDataPointCount(input)
		// 1 + 2 + 3 = 6
		assert.Equal(t, 6, res)
	})
}
|
package rcl
import (
dlog "github.com/dyweb/gommon/log"
)
const (
	// Extension is file extension for RCL without dot
	Extension = "rcl"
	// DotExtension is '.' + Extension
	DotExtension = ".rcl"
)
// Package-level logger registered with gommon's registry so callers can
// discover and configure this package's logging.
var (
	logReg = dlog.NewRegistry()
	log = logReg.Logger()
)
|
package main
import (
"bytes"
"crypto/tls"
"encoding/gob"
"fmt"
"log"
"net"
"os"
"sync"
"time"
)
// serve spawns TCP and UDP listeners for every configured address and
// blocks until all listener goroutines exit. TLS is disabled up front when
// either the key or the certificate file is missing.
func serve(app *config) {
	if app.tls && !fileExists(app.tlsKey) {
		log.Printf("key file not found: %s - disabling TLS", app.tlsKey)
		app.tls = false
	}
	if app.tls && !fileExists(app.tlsCert) {
		log.Printf("cert file not found: %s - disabling TLS", app.tlsCert)
		app.tls = false
	}
	var wg sync.WaitGroup
	for _, listenAddr := range app.listeners {
		addr := appendPortIfMissing(listenAddr, app.defaultPort)
		listenTCP(app, &wg, addr)
		listenUDP(app, &wg, addr)
	}
	wg.Wait()
}
// fileExists reports whether path can be stat'ed. Any Stat error
// (including permission errors) is treated as "does not exist".
func fileExists(path string) bool {
	if _, err := os.Stat(path); err != nil {
		return false
	}
	return true
}
// listenTCP starts a TCP listener on h. When TLS is enabled it is tried
// first; if the TLS listener cannot be created, the function falls back to
// a plain TCP listener.
func listenTCP(app *config, wg *sync.WaitGroup, h string) {
	log.Printf("listenTCP: TLS=%v spawning TCP listener: %s", app.tls, h)
	// first try TLS
	if app.tls {
		listener, errTLS := listenTLS(app, h)
		if errTLS == nil {
			spawnAcceptLoopTCP(app, wg, listener, true)
			return
		}
		log.Printf("listenTLS: %v", errTLS)
		// TLS failed, try plain TCP
	}
	listener, errListen := net.Listen("tcp", h)
	if errListen != nil {
		log.Printf("listenTCP: TLS=%v %s: %v", app.tls, h, errListen)
		return
	}
	spawnAcceptLoopTCP(app, wg, listener, false)
}
// spawnAcceptLoopTCP registers the listener with the WaitGroup and runs
// its accept loop in a new goroutine.
func spawnAcceptLoopTCP(app *config, wg *sync.WaitGroup, listener net.Listener, isTLS bool) {
	wg.Add(1)
	go handleTCP(app, wg, listener, isTLS)
}
// listenTLS creates a TLS listener on h using the configured key pair.
// If the key pair cannot be loaded, TLS is disabled on the config and the
// load error is returned.
func listenTLS(app *config, h string) (net.Listener, error) {
	cert, err := tls.LoadX509KeyPair(app.tlsCert, app.tlsKey)
	if err != nil {
		log.Printf("listenTLS: failure loading TLS key pair: %v", err)
		app.tls = false // disable TLS
		return nil, err
	}
	tlsConfig := &tls.Config{Certificates: []tls.Certificate{cert}}
	return tls.Listen("tcp", h, tlsConfig)
}
// listenUDP resolves h, binds a UDP socket on it, and starts the shared
// UDP handler goroutine tracked by wg.
func listenUDP(app *config, wg *sync.WaitGroup, h string) {
	log.Printf("serve: spawning UDP listener: %s", h)
	udpAddr, errAddr := net.ResolveUDPAddr("udp", h)
	if errAddr != nil {
		log.Printf("listenUDP: bad address: %s: %v", h, errAddr)
		return
	}
	conn, errListen := net.ListenUDP("udp", udpAddr)
	if errListen != nil {
		log.Printf("net.ListenUDP: %s: %v", h, errListen)
		return
	}
	wg.Add(1)
	go handleUDP(app, wg, conn)
}
// appendPortIfMissing returns host with port (a ":NNNN" suffix) appended
// unless host already carries an explicit port. Bracketed IPv6 literals
// such as "[::1]" are handled: a ']' seen before any ':' means no port.
func appendPortIfMissing(host, port string) string {
	for i := len(host) - 1; i >= 0; i-- {
		switch host[i] {
		case ']':
			// End of an IPv6 literal with no port after it.
			return host + port
		case ':':
			// A port separator already exists.
			return host
		}
	}
	return host + port
}
// handleTCP accepts connections on listener until Accept fails, handing
// each connection to its own goroutine with a unique sequential id.
func handleTCP(app *config, wg *sync.WaitGroup, listener net.Listener, isTLS bool) {
	defer wg.Done()
	var id int
	// Shared accounting aggregates across all connections of this listener.
	var aggReader aggregate
	var aggWriter aggregate
	for {
		conn, errAccept := listener.Accept()
		if errAccept != nil {
			log.Printf("handle: accept: %v", errAccept)
			break
		}
		go handleConnection(conn, id, 0, isTLS, &aggReader, &aggWriter)
		id++
	}
}
// udpInfo tracks one remote UDP peer: its address, the options it sent in
// its first datagram, traffic accounting, first-seen time, and a unique id.
type udpInfo struct {
	remote *net.UDPAddr
	opt options
	acc *account
	start time.Time
	id int
}
// handleUDP services the single UDP socket shared by all remote peers.
// The first datagram from a new peer is decoded as gob-encoded options;
// subsequent datagrams are accounted as payload. Runs until process exit.
func handleUDP(app *config, wg *sync.WaitGroup, conn *net.UDPConn) {
	defer wg.Done()
	// Per-peer state, keyed by the remote address string.
	tab := map[string]*udpInfo{}
	buf := make([]byte, app.opt.UDPReadSize)
	var aggReader aggregate
	var aggWriter aggregate
	var idCount int
	for {
		var info *udpInfo
		n, src, errRead := conn.ReadFromUDP(buf)
		if src == nil {
			log.Printf("handleUDP: read nil src: error: %v", errRead)
			continue
		}
		var found bool
		info, found = tab[src.String()]
		if !found {
			// First datagram from this peer: register it, then decode its
			// options payload.
			log.Printf("handleUDP: incoming: %v", src)
			info = &udpInfo{
				remote: src,
				acc: &account{},
				start: time.Now(),
				id: idCount,
			}
			idCount++
			info.acc.prevTime = info.start
			tab[src.String()] = info
			dec := gob.NewDecoder(bytes.NewBuffer(buf[:n]))
			if errOpt := dec.Decode(&info.opt); errOpt != nil {
				log.Printf("handleUDP: options failure: %v", errOpt)
				continue
			}
			log.Printf("handleUDP: options received: %v", info.opt)
			if !info.opt.PassiveServer {
				opt := info.opt // copy for the goroutine below
				go serverWriterTo(conn, opt, src, info.acc, info.id, 0, &aggWriter)
			}
			continue
		}
		connIndex := fmt.Sprintf("%d/%d", info.id, 0)
		if errRead != nil {
			log.Printf("handleUDP: %s read error: %s: %v", connIndex, src, errRead)
			continue
		}
		if time.Since(info.start) > info.opt.TotalDuration {
			// Peer exceeded its configured duration: report its average and
			// leave the (now idle) table entry in place.
			log.Printf("handleUDP: total duration %s timer: %s", info.opt.TotalDuration, src)
			info.acc.average(info.start, connIndex, "handleUDP", "rcv/s", &aggReader)
			log.Printf("handleUDP: FIXME: remove idle udp entry from udp table")
			continue
		}
		// account read from UDP socket
		info.acc.update(n, info.opt.ReportInterval, connIndex, "handleUDP", "rcv/s", nil)
	}
}
// handleConnection drives one TCP/TLS client: it receives gob-encoded
// options, sends an ack, spawns reader (and optionally writer) goroutines,
// and closes the connection once opt.TotalDuration has elapsed.
func handleConnection(conn net.Conn, c, connections int, isTLS bool, aggReader, aggWriter *aggregate) {
	defer conn.Close()
	log.Printf("handleConnection: incoming: %s %v", protoLabel(isTLS), conn.RemoteAddr())
	// receive options
	var opt options
	dec := gob.NewDecoder(conn)
	if errOpt := dec.Decode(&opt); errOpt != nil {
		if isTLS {
			log.Printf("handleConnection: options failure: %v", errOpt)
		} else {
			log.Printf("handleConnection: options failure - it might be client attempting our (disabled) TLS first: %v", errOpt)
		}
		return
	}
	log.Printf("handleConnection: options received: %v", opt)
	if clientVersion, ok := opt.Table["clientVersion"]; ok {
		log.Printf("handleConnection: clientVersion=%s", clientVersion)
	}
	// send ack
	a := newAck()
	if errAck := ackSend(false, conn, a); errAck != nil {
		log.Printf("handleConnection: sending ack: %v", errAck)
		return
	}
	go serverReader(conn, opt, c, connections, isTLS, aggReader)
	if !opt.PassiveServer {
		go serverWriter(conn, opt, c, connections, isTLS, aggWriter)
	}
	// Wait out the test duration, then let the deferred Close tear down the
	// reader/writer goroutines.
	tickerPeriod := time.NewTimer(opt.TotalDuration)
	<-tickerPeriod.C
	log.Printf("handleConnection: %v timer", opt.TotalDuration)
	tickerPeriod.Stop()
	log.Printf("handleConnection: closing: %v", conn.RemoteAddr())
}
// serverReader consumes client data from conn, accounting receive
// throughput into agg until the connection closes.
func serverReader(conn net.Conn, opt options, c, connections int, isTLS bool, agg *aggregate) {
	log.Printf("serverReader: starting: %s %v", protoLabel(isTLS), conn.RemoteAddr())
	connIndex := fmt.Sprintf("%d/%d", c, connections)
	buf := make([]byte, opt.TCPReadSize)
	workLoop(connIndex, "serverReader", "rcv/s", conn.Read, buf, opt.ReportInterval, 0, nil, agg)
	log.Printf("serverReader: exiting: %v", conn.RemoteAddr())
}
// protoLabel returns a human-readable protocol name for log messages.
func protoLabel(isTLS bool) string {
	if !isTLS {
		return "TCP"
	}
	return "TLS"
}
// serverWriter pushes random payload to the client over conn, accounting
// send throughput into agg until the connection closes.
func serverWriter(conn net.Conn, opt options, c, connections int, isTLS bool, agg *aggregate) {
	log.Printf("serverWriter: starting: %s %v", protoLabel(isTLS), conn.RemoteAddr())
	connIndex := fmt.Sprintf("%d/%d", c, connections)
	buf := randBuf(opt.TCPWriteSize)
	workLoop(connIndex, "serverWriter", "snd/s", conn.Write, buf, opt.ReportInterval, opt.MaxSpeed, nil, agg)
	log.Printf("serverWriter: exiting: %v", conn.RemoteAddr())
}
// serverWriterTo pushes random payload to UDP peer dst until the peer's
// TotalDuration elapses, accounting send throughput into agg.
func serverWriterTo(conn *net.UDPConn, opt options, dst net.Addr, acc *account, c, connections int, agg *aggregate) {
	log.Printf("serverWriterTo: starting: UDP %v", dst)
	// start is when the peer was first seen (set by handleUDP).
	start := acc.prevTime
	udpWriteTo := func(b []byte) (int, error) {
		// Returning an error terminates workLoop once the duration expires.
		if time.Since(start) > opt.TotalDuration {
			return -1, fmt.Errorf("udpWriteTo: total duration %s timer", opt.TotalDuration)
		}
		return conn.WriteTo(b, dst)
	}
	connIndex := fmt.Sprintf("%d/%d", c, connections)
	buf := randBuf(opt.UDPWriteSize)
	workLoop(connIndex, "serverWriterTo", "snd/s", udpWriteTo, buf, opt.ReportInterval, opt.MaxSpeed, nil, agg)
	log.Printf("serverWriterTo: exiting: %v", dst)
}
|
// Copyright 2018 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package cat
// Object is implemented by all objects in the catalog.
type Object interface {
	// ID is the unique, stable identifier for this object. See the comment for
	// StableID for more detail.
	ID() StableID
	// PostgresDescriptorID is the descriptor ID for this object. This can
	// differ from the ID of this object in some cases. This is only important
	// for reporting the Postgres-compatible identifiers for objects in the
	// various object catalogs. In the vast majority of cases, you should use
	// ID() instead.
	PostgresDescriptorID() StableID
	// Equals returns true if this object is identical to the given Object.
	//
	// Two objects are identical if they have the same identifier and there were
	// no changes to schema or table statistics between the times the two objects
	// were resolved.
	//
	// Used for invalidating cached plans.
	Equals(other Object) bool
}
|
package parser
import (
"github.com/google/go-cmp/cmp"
"github.com/jdormit/logr/timeseries"
"log"
"testing"
"time"
)
// parseTime parses a Common Log Format timestamp such as
// "09/May/2018:16:00:39 +0000" and terminates the process on failure
// (acceptable here because it is only used to build test fixtures).
func parseTime(timeStr string) time.Time {
	// Use a local name that does not shadow the time package.
	t, err := time.Parse("02/Jan/2006:15:04:05 -0700", timeStr)
	if err != nil {
		log.Fatal(err)
	}
	return t
}
// TestParseLogLine table-drives ParseLogLine over a valid line, malformed
// lines, and a valid line with trailing extra fields (which are ignored).
func TestParseLogLine(t *testing.T) {
	testCases := []struct {
		inputLine string
		expectedOutput timeseries.LogLine
		expectedError error
	}{
		{
			inputLine: `127.0.0.1 - james [09/May/2018:16:00:39 +0000] "GET /report HTTP/1.0" 200 123`,
			// Positional fields: host, ident, user, timestamp, method,
			// path, status, size — must match timeseries.LogLine's order.
			expectedOutput: timeseries.LogLine{
				"127.0.0.1",
				"-",
				"james",
				parseTime("09/May/2018:16:00:39 +0000"),
				"GET",
				"/report",
				200,
				123,
			},
		},
		{
			inputLine: `Not a real log line`,
			expectedError: ParseError,
		},
		{
			inputLine: ``,
			expectedError: ParseError,
		},
		{
			// Trailing junk after the standard fields should be ignored.
			inputLine: `127.0.0.1 - james [09/May/2018:16:00:39 +0000] "GET /report HTTP/1.0" 200 123 foo bar baz some more stuff [with brackets]`,
			expectedOutput: timeseries.LogLine{
				"127.0.0.1",
				"-",
				"james",
				parseTime("09/May/2018:16:00:39 +0000"),
				"GET",
				"/report",
				200,
				123,
			},
		},
	}
	for caseIdx, testCase := range testCases {
		logLine, err := ParseLogLine(testCase.inputLine)
		if testCase.expectedError != nil {
			// ParseError is presumably a sentinel compared with == — if it
			// ever becomes wrapped, switch to errors.Is.
			if err != testCase.expectedError {
				t.Errorf("Error on case %d.\nExpected: %#v\nActual: %#v",
					caseIdx, testCase.expectedError, err)
			}
			continue
		}
		if !cmp.Equal(testCase.expectedOutput, logLine) {
			t.Errorf("Error on case %d.\nExpected: %#v\nActual: %#v",
				caseIdx, testCase.expectedOutput, logLine)
		}
	}
}
|
package Integer
import (
"testing"
)
// TestAdd verifies that Integer.Add mutates the receiver: 1 + 2 == 3.
func TestAdd(t *testing.T) {
	var a Integer = 1
	var b Integer = 2
	c := &a
	c.Add(b)
	if *c != 3 {
		// Message previously said "Less()" — a copy-paste slip.
		t.Error("Integer Add() failed.Got ", *c, "Expected 3")
	}
}
// TestLess verifies that Integer.Less reports 1 < 2.
func TestLess(t *testing.T) {
	var a Integer = 1
	var b Integer = 2
	if !a.Less(b) {
		t.Error("Integer Less() failed.Got false ,Expected true")
	}
}
|
package main
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"path"
)
// URL is the OMDb poster API endpoint.
const URL = "http://img.omdbapi.com/"
// Movie holds the subset of OMDb response fields used by this program.
type Movie struct {
	Title  string
	Year   string
	Poster string
}
// generateFilename builds a local filename of the form "Title_Year.ext",
// reusing the extension of the movie's poster URL.
func generateFilename(movie *Movie) string {
	return fmt.Sprintf("%s_%s%s", movie.Title, movie.Year, path.Ext(movie.Poster))
}
// getMovie queries the OMDb API for the movie titled query and decodes the
// JSON response into a Movie.
func getMovie(query string) (*Movie, error) {
	q := url.QueryEscape(query)
	resp, err := http.Get(URL + "?t=" + q)
	if err != nil {
		// resp is nil on error; deferring Close before this check (as the
		// original did) panics on any request failure.
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("search query failed: %s", resp.Status)
	}
	var movie Movie
	if err := json.NewDecoder(resp.Body).Decode(&movie); err != nil {
		return nil, err
	}
	return &movie, nil
}
// savePoster downloads the movie's poster image and writes it to a file
// named by generateFilename. Failures are reported on stderr; the function
// is best-effort and never panics the program.
func savePoster(movie *Movie) {
	resp, err := http.Get(movie.Poster)
	if err != nil {
		// resp is nil on error; deferring Close before this check (as the
		// original did) panics on any request failure.
		fmt.Fprintf(os.Stderr, "Cant get poster: %v\n", err)
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		fmt.Fprintf(os.Stderr, "getting poster failed: %s", resp.Status)
		return
	}
	filename := generateFilename(movie)
	file, err := os.Create(filename)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Cant create file: %s", err)
		return
	}
	defer file.Close()
	// Stream the body straight to disk instead of buffering it in memory.
	if _, err = io.Copy(file, resp.Body); err != nil {
		fmt.Fprintf(os.Stderr, "Cant save to file: %s", err)
		return
	}
}
// main fetches metadata for the movie named on the command line and saves
// its poster image to the current directory.
func main() {
	// Guard the positional argument; indexing os.Args[1] blind panics when
	// the program is run without arguments.
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: poster <movie title>")
		os.Exit(1)
	}
	movie, err := getMovie(os.Args[1])
	if err != nil {
		// Original format string was "Error: v\n" — missing the % verb.
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
	savePoster(movie)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.