text stringlengths 11 4.05M |
|---|
package controllers
import (
"github.com/w2hhda/candy/models"
"github.com/astaxie/beego"
"encoding/json"
)
// CandyController serves the candy-related HTTP endpoints. It embeds
// BaseController, which is expected to provide the RetError/RetSuccess
// response helpers used by the handlers below.
type CandyController struct {
	BaseController
}
// URLMapping registers this controller's handler methods with beego's
// router by name.
func (c *CandyController) URLMapping() {
	c.Mapping("ListAllCandyCountAndGame", c.ListAllCandyCountAndGame)
	c.Mapping("ListCandyPage", c.ListCandyPage)
}
// ListCandyPage decodes a paging request from the JSON body and
// responds with the requested page of candy records, or an error code
// when parsing or the database lookup fails.
// @router /api/candy/list [*]
func (c *CandyController) ListCandyPage() {
	// Reject malformed request bodies early.
	var req RequestData
	if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil {
		c.RetError(errParse)
		return
	}
	beego.Info("request=", req)
	// Fetch the requested page from the models layer.
	candy := models.Candy{}
	page, err := candy.ListCandyPage(req.PageNumber)
	if err != nil {
		beego.Warn(err)
		c.RetError(errDB)
		return
	}
	c.RetSuccess(page)
}
// ListAllCandyCountAndGame returns the content shown on the wallet
// app's application tab: the aggregate index info from the models layer.
// @router /api/candy/index [*]
func (c *CandyController) ListAllCandyCountAndGame() {
	info, err := models.ListIndex()
	if err != nil {
		c.RetError(errDB)
		return
	}
	beego.Info(info)
	c.RetSuccess(info)
}
|
package life
import (
"math/rand"
)
// RandomValue returns a pseudo-random cell value of 0 or 1.
// NOTE(review): the x and y coordinates are accepted but unused — the
// result does not depend on position; confirm this is intentional.
func RandomValue(x, y int) int {
	return rand.Intn(2)
}
|
package sandbox
// Square stores an int32 width/height pair and can compute its area.
type Square struct {
	width  int32 // stored width
	height int32 // stored height
}

// NewSquare builds a Square from the given width and height.
func NewSquare(width int32, height int32) *Square {
	sq := Square{width: width, height: height}
	return &sq
}

// Width reports the stored width value.
func (sq *Square) Width() int32 { return sq.width }

// Height reports the stored height value.
func (sq *Square) Height() int32 { return sq.height }

// Area returns width times height, widened to int64 so the product of
// two large int32 values cannot overflow.
func (sq *Square) Area() int64 {
	return int64(sq.width) * int64(sq.height)
}
|
package 一维数组
import "fmt"
// ------------------------------------------ 1. 滑动窗口(开始) ------------------------------------------
// INF is the "no answer yet" sentinel for window lengths; it is far
// larger than any possible subarray length.
const INF = 10000000000

// minSubArrayLen returns the length of the shortest contiguous
// subarray of nums whose sum is >= target, or 0 when no such subarray
// exists. It panics on an empty slice, matching the problem contract.
func minSubArrayLen(target int, nums []int) int {
	if len(nums) == 0 {
		panic("题目出错")
	}
	best := INF
	sum := 0
	lo := 0
	// Grow the window on the right; shrink from the left while the
	// window still satisfies the target.
	for hi := 0; hi < len(nums); hi++ {
		sum += nums[hi]
		for lo <= hi && sum >= target {
			best = min(best, hi-lo+1)
			sum -= nums[lo]
			lo++
		}
	}
	// best untouched means no window ever reached the target.
	if best == INF {
		return 0
	}
	return best
}

// min returns the smaller of a and b.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}
// ------------------------------------------ 1. 滑动窗口(结束) ------------------------------------------
// ------------------------------------------ 2. 前缀和 + 二分(开始) ------------------------------------------
// minSubArrayLenPrefixSum is the prefix-sum + binary-search variant of
// minSubArrayLen. It was previously also named minSubArrayLen, which
// redeclared the sliding-window version in the same package and broke
// compilation; it is renamed so both solutions can coexist.
// It returns the length of the shortest contiguous subarray of nums
// whose sum is >= target, or 0 when none exists.
func minSubArrayLenPrefixSum(target int, nums []int) int {
	// 1. Build the prefix-sum array (prefix index - 1 = nums index).
	prefixSumArray := getPrefixSumArray(nums)
	// 2. For each start index, binary-search the first prefix position
	// whose window sum reaches target.
	minLength := INF
	for i := 0; i < len(nums); i++ {
		left := i + 1
		right := getFirstGreaterOrEqual(
			prefixSumArray, target+prefixSumArray[left-1], left, len(prefixSumArray)-1)
		// An out-of-range right bound means no valid window starts at i.
		if right <= len(prefixSumArray)-1 {
			minLength = min(minLength, right-left+1)
		}
	}
	// 3. INF still in place means no window ever satisfied the target.
	if minLength == INF {
		return 0
	}
	return minLength
}
// getPrefixSumArray returns the prefix sums of nums with a leading
// zero: result[i] is the sum of nums[0:i], so len(result) == len(nums)+1.
func getPrefixSumArray(nums []int) []int {
	prefix := make([]int, len(nums)+1)
	for i, v := range nums {
		prefix[i+1] = prefix[i] + v
	}
	return prefix
}
// getFirstGreaterOrEqual binary-searches nums[left..right] (sorted
// ascending) for the index of the first element >= target. When every
// element in the range is smaller, it returns right+1.
func getFirstGreaterOrEqual(nums []int, target int, left, right int) int {
	for left <= right {
		mid := (left + right) / 2
		// >= collapses the original ==/> branches: both moved right down.
		if nums[mid] >= target {
			right = mid - 1
		} else {
			left = mid + 1
		}
	}
	return left
}
// ------------------------------------------ 2. 前缀和 + 二分(结束) ------------------------------------------
|
package use
import (
"github.com/devspace-cloud/devspace/cmd/flags"
"github.com/devspace-cloud/devspace/pkg/devspace/config/loader"
"github.com/devspace-cloud/devspace/pkg/util/factory"
"github.com/devspace-cloud/devspace/pkg/util/survey"
"github.com/mgutz/ansi"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
// ContextCmd holds the command-line flags for "devspace use context".
type ContextCmd struct {
	*flags.GlobalFlags
}
// newContextCmd wires up the cobra command for "devspace use context":
// it accepts at most one positional argument (the context name) and
// delegates execution to RunUseContext.
func newContextCmd(f factory.Factory, globalFlags *flags.GlobalFlags) *cobra.Command {
	cmd := &ContextCmd{GlobalFlags: globalFlags}
	useContext := &cobra.Command{
		Use:   "context",
		Short: "Tells DevSpace which kube context to use",
		Long: `
#######################################################
############### devspace use context ##################
#######################################################
Switch the current kube context
Example:
devspace use context my-context
#######################################################
`,
		Args: cobra.MaximumNArgs(1),
		RunE: func(cobraCmd *cobra.Command, args []string) error {
			return cmd.RunUseContext(f, cobraCmd, args)
		},
	}
	return useContext
}
// RunUseContext executes the functionality "devspace use context": it
// switches the current kube context (taken from args, or chosen
// interactively), persists the kube config, and clears the project's
// cached kube context.
func (cmd *ContextCmd) RunUseContext(f factory.Factory, cobraCmd *cobra.Command, args []string) error {
	// Load kube-config
	log := f.GetLog()
	kubeLoader := f.NewKubeConfigLoader()
	kubeConfig, err := kubeLoader.LoadRawConfig()
	if err != nil {
		return errors.Wrap(err, "load kube config")
	}
	var context string
	if len(args) > 0 {
		// First arg is context name
		context = args[0]
	} else {
		// No argument given: let the user pick from the contexts
		// present in the kube config.
		contexts := []string{}
		for ctx := range kubeConfig.Contexts {
			contexts = append(contexts, ctx)
		}
		context, err = log.Question(&survey.QuestionOptions{
			Question:     "Which context do you want to use?",
			DefaultValue: kubeConfig.CurrentContext,
			Options:      contexts,
			Sort:         true,
		})
		if err != nil {
			return err
		}
	}
	// Save old context
	oldContext := kubeConfig.CurrentContext
	// Set current kube-context
	kubeConfig.CurrentContext = context
	if oldContext != context {
		// Save updated kube-config. Previously this error was silently
		// dropped, which could report success without persisting anything.
		if err := kubeLoader.SaveConfig(kubeConfig); err != nil {
			return errors.Wrap(err, "save kube config")
		}
		log.Infof("Your kube-context has been updated to '%s'", ansi.Color(kubeConfig.CurrentContext, "white+b"))
		log.Infof("\r To revert this operation, run: %s\n", ansi.Color("devspace use context "+oldContext, "white+b"))
	}
	// clear project kube context
	err = ClearProjectKubeContext(f.NewConfigLoader(cmd.ToConfigOptions(), f.GetLog()))
	if err != nil {
		return errors.Wrap(err, "clear generated kube context")
	}
	log.Donef("Successfully set kube-context to '%s'", ansi.Color(context, "white+b"))
	return nil
}
// ClearProjectKubeContext wipes the cached last-used kube context from
// the project's generated config, when a project config exists.
func ClearProjectKubeContext(configLoader loader.ConfigLoader) error {
	// No project root or an error: nothing to clear (err is nil in the
	// "no config" case, so this returns nil then).
	configExists, err := configLoader.SetDevSpaceRoot()
	if err != nil || !configExists {
		return err
	}
	// Load the generated config for the project.
	generatedConfig, err := configLoader.Generated()
	if err != nil {
		return err
	}
	// Forget the last context and persist the change.
	generatedConfig.GetActive().LastContext = nil
	return configLoader.SaveGenerated()
}
|
package main
import (
"fmt"
"os"
"strconv"
"github.com/fjukstad/scratch"
)
// main fetches one Scratch project by id, then collects every project
// tagged kodeklubbentromso or tromso and appends their ids (one per
// line) to the "ids" file.
func main() {
	id := "86536890"
	p, err := scratch.GetProject(id)
	fmt.Println(p, err)
	tags := []string{"kodeklubbentromso", "tromso"}
	var all []*scratch.Project
	for _, tag := range tags {
		tagged, err := scratch.GetProjects(tag)
		if err != nil {
			fmt.Println(err)
			return
		}
		all = append(all, tagged...)
	}
	filename := "ids"
	out, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
	if err != nil {
		panic(err)
	}
	defer out.Close()
	for _, proj := range all {
		fmt.Println(proj)
		if _, err = out.WriteString(strconv.Itoa(proj.ID) + "\n"); err != nil {
			fmt.Println(err)
			return
		}
	}
	fmt.Println(all, err)
}
|
package proto
import (
"github.com/qianguozheng/goadmin/model"
)
// ConfigRead is the envelope of a config-read message: command name,
// sequence id, result code, and the configuration payload itself.
type ConfigRead struct {
	Cmd   string         `json:"cmd"`
	SeqId string         `json:"seqId"`
	Code  string         `json:"code"`
	Data  ConfigReadCore `json:"data"`
}
// ConfigReadCore is the device configuration payload: operating mode,
// cloud connection settings, and the network sections (WAN/LAN/RF/
// DHCP/SSID/QoS).
type ConfigReadCore struct {
	Mode int          `json:"mode"`
	CC   CloudConfig  `json:"cc"`
	Wans []model.Wan  `json:"wans"`
	Lan  Lan          `json:"lan"`
	Rfs  []Rfs        `json:"rfs"`
	Dhcp Dhcp         `json:"dhcp"`
	Ssid []model.Ssid `json:"ssids"`
	Qos  Qos          `json:"qos"`
}
// CloudConfig holds the cloud endpoint (host/port) and the access token.
type CloudConfig struct {
	Host  string `json:"host"`
	Port  int    `json:"port"`
	Token string `json:"token"`
}
|
package main
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"time"
"github.com/mmcdole/gofeed"
)
// Pushover API endpoint and the environment variables this program reads.
const pushoverURL = "https://api.pushover.net/1/messages.json"
const envToken = "PUSHOVER_RSS_TOKEN"
const envUser = "PUSHOVER_USER"
const envData = "FEED_DATA"

// defaultData is the fallback feed-data filename when FEED_DATA is unset.
const defaultData = "feeds.json"

// Credentials and data-file path, read once from the environment.
var (
	pushoverToken = os.Getenv(envToken)
	pushoverUser  = os.Getenv(envUser)
	dataFile      = os.Getenv(envData)
)

// feedConfig is one entry of the JSON data file: a named feed URL plus
// the publish time of the newest item already notified.
type feedConfig struct {
	Name     string    `json:"name"`
	URL      string    `json:"url"`
	LastSeen time.Time `json:"lastSeen"`
}
// main reads the feed configuration, checks each feed for items newer
// than its lastSeen timestamp, pushes one notification per new item,
// and writes the updated timestamps back to the data file.
func main() {
	flag.Parse()
	if dataFile == "" {
		dataFile = defaultData
	}
	if _, err := os.Stat(dataFile); err != nil {
		fmt.Fprintf(os.Stderr, "Could not open configuration file: %s", dataFile)
		os.Exit(1)
	}
	data, err := ioutil.ReadFile(dataFile)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Could not open configuration file: %s", err)
		os.Exit(1)
	}
	configs := []feedConfig{}
	if err = json.Unmarshal(data, &configs); err != nil {
		fmt.Fprintf(os.Stderr, "Could not read data file: %s", err)
		os.Exit(1)
	}
	for i, feed := range configs {
		fp := gofeed.NewParser()
		// A feed that fails to parse used to cause a nil-pointer panic
		// (the error was discarded); report it and move on instead.
		f, err := fp.ParseURL(feed.URL)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Could not parse feed %s: %s", feed.Name, err)
			continue
		}
		for _, item := range f.Items {
			// Items without a parseable publish date cannot be compared;
			// skip them instead of dereferencing a nil time.
			if item.PublishedParsed == nil {
				continue
			}
			if item.PublishedParsed.After(feed.LastSeen) {
				err = push(fmt.Sprintf("New post to %s: %s", feed.Name, item.Title), item.Link, item.Title)
				if err != nil {
					fmt.Fprintf(os.Stderr, "Error sending pushover: %s", err)
					continue
				}
				configs[i].LastSeen = *item.PublishedParsed
			}
		}
	}
	// Persist the advanced LastSeen stamps; an encode failure used to be
	// silently ignored.
	data, err = json.Marshal(configs)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error encoding data file: %s", err)
		os.Exit(1)
	}
	if err = ioutil.WriteFile(dataFile, data, 0666); err != nil {
		fmt.Fprintf(os.Stderr, "Error saving data file: %s", err)
		os.Exit(1)
	}
}
// push sends msg to the Pushover message API using the package-level
// token/user credentials. When linkURL is non-empty it is attached as a
// supplementary URL with linkTitle as its label. It returns an error
// for transport failures or any HTTP status above 399.
func push(msg, linkURL, linkTitle string) error {
	values := url.Values{}
	values.Set("token", pushoverToken)
	values.Set("user", pushoverUser)
	values.Set("message", msg)
	if linkURL != "" {
		values.Set("url", linkURL)
		values.Set("url_title", linkTitle)
	}
	resp, err := http.PostForm(pushoverURL, values)
	if err != nil {
		return err
	}
	// The body was never closed before, leaking the connection; it must
	// be drained/closed so the transport can reuse it.
	defer resp.Body.Close()
	if resp.StatusCode > 399 {
		return fmt.Errorf("pushover returned status %d", resp.StatusCode)
	}
	return nil
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//451. Sort Characters By Frequency
//Given a string, sort it in decreasing order based on the frequency of characters.
//Example 1:
//Input:
//"tree"
//Output:
//"eert"
//Explanation:
//'e' appears twice while 'r' and 't' both appear once.
//So 'e' must appear before both 'r' and 't'. Therefore "eetr" is also a valid answer.
//Example 2:
//Input:
//"cccaaa"
//Output:
//"cccaaa"
//Explanation:
//Both 'c' and 'a' appear three times, so "aaaccc" is also a valid answer.
//Note that "cacaca" is incorrect, as the same characters must be together.
//Example 3:
//Input:
//"Aabb"
//Output:
//"bbAa"
//Explanation:
//"bbaA" is also a valid answer, but "Aabb" is incorrect.
//Note that 'A' and 'a' are treated as two different characters.
//func frequencySort(s string) string {
//}
// Time Is Money |
package field
import (
"encoding/binary"
"fmt"
"io"
)
// Header describes the field that follows it inside the ADAT chunk of a
// Serato session file: a numeric identifier plus the field's byte length.
type Header struct {
	Identifier uint32
	Length     uint32
}

// String renders the header as "field (<id>) length (<len>)".
func (h *Header) String() string {
	return fmt.Sprintf("field (%d) length (%d)", h.Identifier, h.Length)
}

// NewHeader decodes one big-endian Header from r and returns it.
func NewHeader(r io.Reader) (*Header, error) {
	hdr := new(Header)
	if err := binary.Read(r, binary.BigEndian, hdr); err != nil {
		return nil, err
	}
	return hdr, nil
}
|
package mergetwosortedlists
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestMergeTwoLists verifies that mergeTwoLists interleaves two sorted
// linked lists into one sorted list, using the classic 1-2-4 / 1-3-4
// example.
func TestMergeTwoLists(t *testing.T) {
	must := assert.New(t)
	cases := []struct {
		NumNode1 *ListNode
		NumNode2 *ListNode
		Expect   *ListNode
	}{
		{
			NumNode1: &ListNode{Val: 1, Next: &ListNode{Val: 2, Next: &ListNode{Val: 4}}},
			NumNode2: &ListNode{Val: 1, Next: &ListNode{Val: 3, Next: &ListNode{Val: 4}}},
			Expect:   &ListNode{Val: 1, Next: &ListNode{Val: 1, Next: &ListNode{Val: 2, Next: &ListNode{Val: 3, Next: &ListNode{Val: 4, Next: &ListNode{Val: 4}}}}}},
		},
	}
	for _, v := range cases {
		actual := mergeTwoLists(v.NumNode1, v.NumNode2)
		must.Equal(v.Expect, actual)
	}
}
|
package blockchain
import (
"github.com/stretchr/testify/assert"
"golang-blockchain/testutils"
"testing"
)
// TestGenesisBlockHasHeightOfZero checks that a genesis block built
// from a coinbase transaction starts at height 0.
func TestGenesisBlockHasHeightOfZero(t *testing.T) {
	cbtx := CoinbaseTx(testutils.RandomAddress(), genesisData)
	block := Genesis(cbtx)
	assert.Equal(t, block.Height, 0)
}
// TestGenesisBlockContainsNoPreviousHash checks that the genesis block
// carries an empty previous-hash field.
func TestGenesisBlockContainsNoPreviousHash(t *testing.T) {
	cbtx := CoinbaseTx(testutils.RandomAddress(), genesisData)
	block := Genesis(cbtx)
	assert.Equal(t, len(block.PrevHash), 0)
}
|
package main
import (
"github.com/FourLineCode/financer/internal/config"
"github.com/FourLineCode/financer/pkg/server"
)
// main loads the configuration and runs the HTTP server on the
// configured port. Locals are named cfg/srv so they no longer shadow
// the config and server packages.
func main() {
	cfg := config.GetConfig()
	srv := server.New(cfg)
	srv.Run(cfg.Port)
}
|
package psql
import "github.com/Mrcampbell/pgo2/protorepo/pokemon"
// Filter returns the breed moves in bm for which f reports true,
// preserving order; the input slice is left untouched.
func Filter(bm []*pokemon.BreedMove, f func(*pokemon.BreedMove) bool) []*pokemon.BreedMove {
	kept := make([]*pokemon.BreedMove, 0)
	for _, move := range bm {
		if f(move) {
			kept = append(kept, move)
		}
	}
	return kept
}
|
package main
import (
"fmt"
)
// main runs the channel demo.
func main() {
	chanDemo2()
}
// worker1 pairs a work channel with a completion channel: values go in
// on in, and one signal per processed value comes back on done.
type worker1 struct {
	in   chan int
	done chan bool
}
// chanDemo2 creates ten workers, feeds each of them two values, and
// waits for one done signal per value.
//
// The original version sent the second round of values before draining
// any done signals. Both channels are unbuffered, so after round one
// every worker goroutine was blocked sending its first done; the second
// send then blocked main forever and the program panicked with
// "all goroutines are asleep - deadlock!". Draining done after each
// round of sends fixes that while still delivering two values per worker.
func chanDemo2() {
	var workers [10]worker1
	for i := 0; i < 10; i++ {
		workers[i] = createWorker1(i)
	}
	// Round one: one value per worker, then collect one done each.
	for i, worker := range workers {
		worker.in <- 'A' + i
	}
	for _, worker := range workers {
		<-worker.done
	}
	// Round two: same again.
	for i, worker := range workers {
		worker.in <- 'A' + i
	}
	for _, worker := range workers {
		<-worker.done
	}
}
// createWorker1 builds a worker1 with fresh unbuffered channels and
// starts its processing goroutine.
func createWorker1(id int) worker1 {
	w := worker1{in: make(chan int), done: make(chan bool)}
	go doWork(id, w.in, w.done)
	return w
}
// doWork consumes values from c until it is closed, logging each one
// and signalling done after every value processed.
func doWork(id int, c chan int, done chan bool) {
	for v := range c {
		fmt.Printf("worker %d, received %d\n", id, v)
		done <- true
	}
}
|
package main
import "fmt"
/*
The obligatory Hello World example.
run this with
> go run 01_helloworld.go
or, compile it and run the binary:
>go build 01_helloworld.go
> 01_helloworld
*/
// main prints a greeting (including CJK characters, demonstrating that
// UTF-8 output works).
func main() {
	fmt.Println("Hello Gareth, you little 壹 壱")
}
/*
the package main is special. It defines a standalone executable program, rather than a library
the function main() is also special - it's where the execution of the program begins
*/
|
package main
import "fmt"
// main counts down from 10 to liftoff, then counts back up to 10.
func main() {
	count := 10
	for count > 0 {
		fmt.Println("T minus", count)
		count--
	}
	fmt.Println("Liftoff!")
	for count = 1; count <= 10; count++ {
		fmt.Println("T plus", count)
	}
}
|
package main
import (
"fmt"
"github.com/astaxie/beego/config"
)
// main loads the log-agent INI configuration and prints the configured
// server port and log level.
func main() {
	conf, err := config.NewConfig("ini", "./Web/logrecord/config/logagent.conf")
	if err != nil {
		fmt.Println("new config err", err)
		return
	}
	port, err := conf.Int("server::listen_port")
	if err != nil {
		fmt.Println("conf int err", err)
		return
	}
	fmt.Println("server port", port)
	level := conf.String("logs::log_level")
	fmt.Println("logs level:", level)
}
|
// SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2020 Intel Corporation
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// VF identifies a single virtual function by PCI address together with
// its bound driver and device ID.
type VF struct {
	PCIAddress string `json:"pciAddress"`
	Driver     string `json:"driver"`
	DeviceID   string `json:"deviceID"`
}
// SriovAccelerator describes one SR-IOV capable accelerator device: its
// PCI identity and driver, the maximum number of virtual functions it
// supports, and the VFs currently present.
type SriovAccelerator struct {
	VendorID   string `json:"vendorID"`
	DeviceID   string `json:"deviceID"`
	PCIAddress string `json:"pciAddress"`
	Driver     string `json:"driver"`
	MaxVFs     int    `json:"maxVirtualFunctions"`
	VFs        []VF   `json:"virtualFunctions"`
}
// NodeInventory is the set of SR-IOV accelerators discovered on a node.
type NodeInventory struct {
	SriovAccelerators []SriovAccelerator `json:"sriovAccelerators,omitempty"`
}
// SriovFecNodeConfigSpec defines the desired state of SriovFecNodeConfig
// SriovFecNodeConfigSpec defines the desired state of SriovFecNodeConfig
type SriovFecNodeConfigSpec struct {
	// +kubebuilder:validation:Required
	// List of PhysicalFunctions configs
	PhysicalFunctions []PhysicalFunctionConfig `json:"physicalFunctions"`
	// +kubebuilder:validation:Optional
	// DrainSkip presumably skips draining the node before applying the
	// configuration — confirm against the controller logic.
	DrainSkip bool `json:"drainSkip,omitempty"`
}
// SriovFecNodeConfigStatus defines the observed state of SriovFecNodeConfig
// SriovFecNodeConfigStatus defines the observed state of
// SriovFecNodeConfig: standard status conditions plus the discovered
// hardware inventory.
type SriovFecNodeConfigStatus struct {
	Conditions []metav1.Condition `json:"conditions,omitempty"`
	Inventory  NodeInventory      `json:"inventory,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Configured",type=string,JSONPath=`.status.conditions[?(@.type=="Configured")].status`
// SriovFecNodeConfig is the Schema for the sriovfecnodeconfigs API
type SriovFecNodeConfig struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec is the desired configuration; Status reflects what the
	// operator observed.
	Spec   SriovFecNodeConfigSpec   `json:"spec,omitempty"`
	Status SriovFecNodeConfigStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// SriovFecNodeConfigList contains a list of SriovFecNodeConfig
type SriovFecNodeConfigList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []SriovFecNodeConfig `json:"items"`
}
// init registers both CRD types with the scheme builder so the client
// machinery can serialize them.
func init() {
	SchemeBuilder.Register(&SriovFecNodeConfig{}, &SriovFecNodeConfigList{})
}
|
package common
// InputMsg is the common request envelope carrying the caller's
// enterprise code.
type InputMsg struct {
	EnterpriseCode string `json:"enterpriseCode"`
}
|
// Fibonacci example to measure performance between different calls
//go:generate dllcall -fast -keep fibon_if.go fibonlib/fibon_if.h
package main
import (
"errors"
"flag"
"fmt"
"log"
"os"
"strconv"
"strings"
"time"
)
// count is the number of benchmark iterations, set via -count.
var count int

// main parses "method value" arguments, loads the fibonacci DLL, runs
// the selected implementation count times, and reports the result plus
// the total duration when more than one iteration was requested.
func main() {
	flag.IntVar(&count, "count", 1, "Number of iterations")
	flag.Parse()
	if flag.NArg() < 2 {
		usage()
	}
	value, err := strconv.ParseInt(flag.Arg(1), 10, 64)
	if err != nil {
		log.Fatal("Invalid value: ", err)
	}
	// Guard against -count 0 (or negative): the loop below would never
	// run and *result would be a nil-pointer dereference.
	if count < 1 {
		log.Fatal("count must be at least 1")
	}
	var result *int64
	start := time.Now()
	dllName := "fibonlib.dll"
	err = load_fibon_if(dllName)
	if err != nil {
		log.Fatal("Failed to load ", dllName, ": ", err)
	}
	// Select the implementation by name; unknown methods show usage.
	var fn func(int64) (*int64, error)
	switch strings.ToLower(flag.Arg(0)) {
	case "go":
		fn = goFibon
	case "syscall":
		fn = stdFibon
	case "fastcall":
		fn = fastFibon
	default:
		usage()
	}
	for idx := 0; idx < count; idx++ {
		result, err = fn(value)
		if err != nil {
			log.Fatal("Calculation failure: ", err)
		}
	}
	fmt.Print("Result: ", *result)
	if count > 1 {
		// time.Since is the idiomatic form of time.Now().Sub(start).
		fmt.Println(" durations ", time.Since(start).Seconds()*1000, " ms")
	} else {
		fmt.Println()
	}
}
// goFibon validates n and computes Fibonacci in pure Go, returning a
// pointer to the result so its signature matches the DLL-backed variants.
func goFibon(n int64) (*int64, error) {
	if n < 1 {
		return nil, errors.New("Value must be at least 1")
	}
	result := fibon(n)
	return &result, nil
}
// stdFibon computes Fibonacci through the DLL using the standard
// syscall path.
func stdFibon(n int64) (*int64, error) {
	cl := &calcFibonacci{n: n, result: new(int64)}
	if err := cl.calc(); err != nil {
		return nil, err
	}
	return cl.result, nil
}
// fastFibon computes Fibonacci through the DLL using the fast-call path.
func fastFibon(n int64) (*int64, error) {
	var res int64
	cl := &calcFibonacci{n: n, result: &res}
	if err := cl.fastCalc(); err != nil {
		return nil, err
	}
	return cl.result, nil
}
// fibon computes the n-th Fibonacci number (fibon(1) == fibon(2) == 1)
// by naive double recursion — deliberately exponential, as the
// benchmark workload.
func fibon(n int64) int64 {
	if n <= 2 {
		return 1
	}
	return fibon(n-1) + fibon(n-2)
}
// usage prints the command synopsis and exits with status 1.
func usage() {
	for _, line := range []string{
		"fibon -count n method value",
		" value - Fibonacci to calculate",
		" count - Number of iterations (is set, total runtime will be shown)",
		" methods: go - go fibonacci",
		" syscall - c fibonacci using standard call",
		" fastcall - c fibonacci using fast call",
	} {
		fmt.Println(line)
	}
	os.Exit(1)
}
|
package main
import "fmt"
// main advances count by two per iteration — the original did one
// increment in the body and one in the post statement — and prints the
// value it lands on (101).
func main() {
	count := 1
	for count < 100 {
		count++ // increment from the loop body
		count++ // increment from the post statement
	}
	fmt.Println("count is", count)
}
|
package main
import (
"fmt"
"github.com/gomodule/redigo/redis"
"github.com/qingwenjie/goredis"
)
// main demonstrates the goredis wrapper: connect to a local Redis on
// database 1, then exercise SELECT, HSET and HGET, printing each reply.
func main() {
	options := &goredis.Options{
		Protocol: "tcp",
		Addr:     "127.0.0.1:6379",
		Password: "123456",
		Database: 1,
	}
	conn := goredis.Connect(options)
	// NOTE(review): SELECT 1 is presumably redundant with Database: 1
	// above — confirm goredis applies Database on connect.
	reply, err := conn.Do("SELECT", 1)
	fmt.Println(reply, err)
	reply1, err := redis.Int(conn.Do("HSET", "myhash", "field1", "Hello"))
	fmt.Println(reply1, err)
	reply2, err := redis.String(conn.Do("HGET", "myhash", "field1"))
	fmt.Println(reply2, err)
}
|
package http_handlers
import (
"fmt"
"github.com/go-martini/martini"
"net/http"
)
// GetCaches returns a martini handler that, for authenticated requests,
// responds with the session's full cache collection.
func GetCaches() func(
	martini.Context,
	martini.Params,
	http.ResponseWriter,
	*http.Request,
) {
	return HttpHandler(
		[]string{
			AUTH_REQUIRED,
		},
		func(h *Http) {
			h.SetResponse(
				h.session.Caches,
			)
		},
	)
}
// GetCache returns a handler that looks up the cache identified by the
// cache_id route variable and responds with it, or records a 404 error
// when the session has no such cache.
func GetCache() func(
	martini.Context,
	martini.Params,
	http.ResponseWriter,
	*http.Request,
) {
	return HttpHandler(
		[]string{
			AUTH_REQUIRED,
			CACHE_REQUIRED,
		},
		func(h *Http) {
			if c := h.session.GetCache(
				h.vars["cache_id"],
			); c != nil {
				h.SetResponse(
					c,
				)
			} else {
				h.AddError(
					fmt.Errorf(
						`Cache not found`,
					),
					404,
				)
			}
		},
	)
}
// RegisterCache returns a handler that creates a new cache in the
// session and responds with the created object.
func RegisterCache() func(
	martini.Context,
	martini.Params,
	http.ResponseWriter,
	*http.Request,
) {
	return HttpHandler(
		[]string{
			AUTH_REQUIRED,
		},
		func(h *Http) {
			h.SetResponseCreatedObject(
				h.session.CreateCache(),
			)
		},
	)
}
|
// DRUNKWATER TEMPLATE(add description and prototypes)
// Question Title and Description on leetcode.com
// Function Declaration and Function Prototypes on leetcode.com
//114. Flatten Binary Tree to Linked List
//Given a binary tree, flatten it to a linked list in-place.
//For example, given the following tree:
// 1
// / \
// 2 5
// / \ \
//3 4 6
//The flattened tree should look like:
//1
// \
// 2
// \
// 3
// \
// 4
// \
// 5
// \
// 6
///**
// * Definition for a binary tree node.
// * type TreeNode struct {
// * Val int
// * Left *TreeNode
// * Right *TreeNode
// * }
// */
//func flatten(root *TreeNode) {
//}
// Time Is Money |
package memory
import (
"github.com/DoraALin/go-config/source"
"context"
)
// dataKey is the private context key under which WithData stores the
// source payload.
type dataKey struct{}

// WithData returns a source.Option that stashes d in the options'
// context (creating the context first when it is nil) under dataKey.
func WithData(d []byte) source.Option {
	return func(o *source.Options) {
		ctx := o.Context
		if ctx == nil {
			ctx = context.Background()
		}
		o.Context = context.WithValue(ctx, dataKey{}, d)
	}
}
|
package controllers
import (
"encoding/json"
"fmt"
"github.com/astaxie/beego"
"io/ioutil"
"onework/models"
)
// MainController handles the demo GET/POST/DELETE endpoints below on
// top of beego's base controller.
type MainController struct {
	beego.Controller
}
// Get validates the name/age query parameters against fixed expected
// values, writes a success or failure message, and renders index.tpl.
func (c *MainController) Get() {
	// 1. Read the request data from the query string.
	name := c.Ctx.Input.Query("name")
	// (c.GetString() would work as well)
	age := c.Ctx.Input.Query("age")
	// 2. Validate against the fixed expected values.
	if name != "l" && age != "19" {
		// Validation failed: report and stop.
		c.Ctx.ResponseWriter.Write([]byte("对不起,数据校验错误"))
		return
	} else {
		c.Ctx.ResponseWriter.Write([]byte("恭喜,数据校验正确"))
	}
	c.Data["Website"] = "Tom-ly.me"
	c.Data["Email"] = "3419572132@gmail.com"
	c.TplName = "index.tpl"
}
//func (c*MainController)Post() {
//1.解析post请求的参数,from表单格式
// name :=c.Ctx.Request.FormValue("name")
// age := c.Ctx.Request.FormValue("age")
// fmt.Println(name,age)
//2.进行数据校验
// if name!="adam"&&age!="19"{
// c.Ctx.ResponseWriter.Write([]byte("数据校验失败"))
// return
// }else {
// c.Ctx.WriteString("数据校验成功")
// }
//}
//该方法用于处理post类型请求
// Post handles POST requests carrying a JSON body: it decodes the body
// into a models.Person, logs the fields, and reports the parse result.
func (c *MainController) Post() {
	// 1. Read the raw JSON body of the POST request.
	var person models.Person
	dataBytes, err := ioutil.ReadAll(c.Ctx.Request.Body)
	if err != nil {
		c.Ctx.WriteString("数据接收错误")
		return
	}
	// 2. Decode into the Person model.
	err = json.Unmarshal(dataBytes, &person)
	if err != nil {
		c.Ctx.WriteString("数据解析失败")
		return
	}
	fmt.Println("姓名:", person.Name)
	fmt.Println("年龄:", person.Age)
	fmt.Println("性别:", person.Sex)
	c.Ctx.WriteString("数据解析成功")
}
// Delete handles DELETE requests; intentionally a no-op for now.
func (c *MainController) Delete() {
}
package main
import (
"flag"
"fmt"
"net/http"
"os"
"runtime"
"file-upload-srv/config"
"file-upload-srv/router"
"github.com/facebookgo/grace/gracehttp"
"github.com/gin-gonic/gin"
"github.com/go-xweb/log"
"github.com/spf13/viper"
)
var (
c string
)
// init registers the -c flag (config profile, default "dev") and sizes
// GOMAXPROCS to the machine's CPU count.
func init() {
	flag.StringVar(&c, "c", "dev", "配置文件")
	runtime.GOMAXPROCS(runtime.NumCPU())
}
// main boots the file-upload service: parse flags, load configuration,
// set the gin mode from config, mount the routes, and serve with
// graceful restarts via gracehttp.
func main() {
	flag.Parse()
	// init config
	if err := config.Init(c); err != nil {
		panic(err)
	}
	// init db (currently disabled)
	//models.DB.Init()
	//defer models.DB.Close()
	// init redis (currently disabled)
	//cache.RedisDB.Init()
	// set gin mode
	gin.SetMode(viper.GetString("runmode"))
	g := gin.New()
	// Routes
	router.Load(g)
	fmt.Printf("pid: %d addr: %s\n", os.Getpid(), viper.GetString("addr"))
	log.Infof("Start to listening the incoming requests on http address: %s", viper.GetString("addr"))
	// NOTE(review): the error returned by gracehttp.Serve is ignored.
	gracehttp.Serve(&http.Server{Addr: viper.GetString("addr"), Handler: g})
}
|
package main
import (
"fmt"
"log"
"os"
)
// main prints file metadata twice — via os.Stat (which resolves links)
// and os.Lstat (which does not) — to contrast the two calls.
func main() {
	stat()
	fmt.Println("=======================")
	lstat()
}
// stat prints the metadata that os.Stat reports for a hard-coded
// Windows shortcut path; it exits via log.Fatalln when the path does
// not exist.
func stat() {
	//fInfo, err := os.Stat("D:\\workspace\\备忘.txt") // original file
	info, err := os.Stat("C:\\Users\\kc\\Desktop\\备忘.lnk") // shortcut
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Printf("Name: %s\n", info.Name())
	fmt.Printf("Size: %d\n", info.Size())
	fmt.Printf("Mode: %s\n", info.Mode())
	fmt.Printf("ModTime: %s\n", info.ModTime())
	fmt.Printf("IsDir: %t\n", info.IsDir())
	fmt.Printf("Sys: %v\n", info.Sys())
}
// lstat prints the metadata that os.Lstat reports for the same
// hard-coded shortcut path; unlike os.Stat, Lstat describes the link
// itself rather than its target. Exits via log.Fatalln on error.
func lstat() {
	//fInfo, err := os.Lstat("D:\\workspace\\备忘.txt") // original file
	info, err := os.Lstat("C:\\Users\\kc\\Desktop\\备忘.lnk") // shortcut
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Printf("Name: %s\n", info.Name())
	fmt.Printf("Size: %d\n", info.Size())
	fmt.Printf("Mode: %s\n", info.Mode())
	fmt.Printf("ModTime: %s\n", info.ModTime())
	fmt.Printf("IsDir: %t\n", info.IsDir())
	fmt.Printf("Sys: %v\n", info.Sys())
}
|
package events
import (
"context"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TestNewReceiver checks that a fresh receiver accepts a message on its
// input channel and that cancelling the context closes that channel.
func TestNewReceiver(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	user, err := NewReceiver(ctx, "mock", "cloud")
	require.NoError(t, err)
	user.input <- alertMessage{
		fingerprint: "azxcqewrpad",
		title:       "test task",
		message:     "this is a test message, value is 23%",
	}
	// Give the receiver goroutine time to react before cancelling.
	time.Sleep(time.Second)
	cancel()
	_, ok := <-user.input
	require.False(t, ok, "input was closed, should get false")
}
// TestReceiverActivate verifies that under a rapid stream of events the
// sending logic stays correct: with sendIntervalLong at 3s, even very
// frequent events must produce output every 3 seconds, so an 18-event
// burst over ~9s observed for 10s should yield exactly 3 sends.
func TestReceiverActivate(t *testing.T) {
	sendIntervalShort = time.Second
	sendIntervalLong = time.Second * 3 // however frequent the events, output at least every 3s
	*mockSender = MockSender{}
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	rcv, err := NewReceiver(ctx, "mock", "abc")
	if err != nil {
		panic(err)
	}
	eac := alertMessage{
		fingerprint: "azxcqewrpad",
		title:       "test task",
		message:     "this is a test message, value is 23%",
	}
	go func() {
		// Feed one event every half second.
		for i := 0; i < 18; i++ {
			rcv.input <- eac // print
			time.Sleep(time.Second / 2)
		}
	}()
	fmt.Println(">>> alerting messages, waiting for 10 seconds...")
	time.Sleep(time.Second * 10)
	assert.Equal(t, 3, len(mockSender.data))
}
|
package metadata
import (
"strconv"
"incognito-chain/common"
)
// UnStakingMetadata : unstaking metadata
// UnStakingMetadata : unstaking metadata — the signed metadata base
// plus the committee public key being unstaked.
type UnStakingMetadata struct {
	MetadataBaseWithSignature
	CommitteePublicKey string
}
// Hash derives the metadata hash from the metadata type number alone:
// the type is stringified, byte-encoded, and run through common.HashH.
func (meta *UnStakingMetadata) Hash() *common.Hash {
	digest := common.HashH([]byte(strconv.Itoa(meta.Type)))
	return &digest
}
// HashWithoutSig delegates to the embedded MetadataBaseWithSignature's
// Hash, which is expected to exclude the signature from the digest.
func (meta *UnStakingMetadata) HashWithoutSig() *common.Hash {
	return meta.MetadataBaseWithSignature.Hash()
}
// GetType reports the metadata type number. The value receiver is kept
// (changing it would alter the type's method set), but renamed to match
// the short receiver names used by the other methods.
func (meta UnStakingMetadata) GetType() int {
	return meta.Type
}
|
/*
This is "programming" at its most fundamental.
Build a diagram of (two-wire) NAND logic gates that will take the input wires A1, A2, A4, A8, B1, B2, B4, B8,
representing two binary numbers A and B from 0 to 15, and return values on the output wires C1, C2, C4, and C8 representing C, which is the sum of A and B modulo 16.
Your score is determined by the number of NAND gates (1 point per gate). To simplify things, you may use AND, OR, NOT, and XOR gates in your diagram, with the following corresponding scores:
NOT: 1
AND: 2
OR: 3
XOR: 4
Each of these scores corresponds to the number of NAND gates that it takes to construct the corresponding gate.
Lowest score wins.
*/
package main
func main() {
for a := 0; a < 16; a++ {
for b := 0; b < 16; b++ {
x := add4(a, b)
y := (a + b) % 16
assert(x == y)
}
}
}
// assert panics when x is false; a poor man's test assertion.
func assert(x bool) {
	if x {
		return
	}
	panic("assertion failed")
}
/*
https://en.wikipedia.org/wiki/NAND_logic
https://www.electronics-tutorials.ws/combination/comb_7.html
*/
// nand is the only primitive; every other gate below is composed from
// it (see the NAND-logic references above). Operands are treated as
// single bits: only the low bit of each value matters.
func nand(a, b int) int {
	return ^(a & b) & 1
}

// and = NOT(NAND): two NAND gates.
func and(a, b int) int {
	n := nand(a, b)
	return nand(n, n)
}

// or via De Morgan: three NAND gates.
func or(a, b int) int {
	return nand(nand(a, a), nand(b, b))
}

// xor uses the classic four-NAND construction.
func xor(a, b int) int {
	n := nand(a, b)
	return nand(nand(a, n), nand(b, n))
}

// hadd is a half adder: the sum and carry bits of a + b.
func hadd(a, b int) (int, int) {
	return xor(a, b), and(a, b)
}

// fadd is a full adder — two half adders plus an OR — producing the
// sum and carry of a + b + c.
func fadd(a, b, c int) (int, int) {
	s1, c1 := hadd(a, b)
	s2, c2 := hadd(s1, c)
	return s2, or(c1, c2)
}

// add4 is a 4-bit ripple-carry adder: it splits a and b into bits,
// chains four full adders, drops the final carry (mod-16 semantics),
// and reassembles the sum bits.
func add4(a, b int) int {
	var sum [4]int
	carry := 0
	for i := 0; i < 4; i++ {
		sum[i], carry = fadd((a>>i)&1, (b>>i)&1, carry)
	}
	return sum[0] | sum[1]<<1 | sum[2]<<2 | sum[3]<<3
}
|
package main
import (
"context"
"fmt"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
"github.com/serverless/better/lib/db"
"github.com/serverless/better/lib/model"
)
// QueryMapping maps the user value onto the :u expression-attribute
// placeholder used in the DynamoDB key condition below.
type QueryMapping struct {
	User string `json:":u"`
}

// ListInput is the Lambda event payload: the user whose goals to list.
type ListInput struct {
	User string `json:"user"`
}

// deps carries the handler's injectable dependencies (the DynamoDB
// client), so tests can substitute a mock.
type deps struct {
	ddb dynamodbiface.DynamoDBAPI
}
// HandleRequest is the Lambda entry point: it validates the input,
// queries the Goals table for every goal belonging to the given user,
// and returns them. Errors are reported as model.ResponseError with an
// HTTP-style code.
func (d *deps) HandleRequest(ctx context.Context, listInput ListInput) ([]model.Goal, error) {
	// validate input
	if listInput.User == "" {
		return []model.Goal{}, model.ResponseError{
			Code:    400,
			Message: "You must include a user in your request",
		}
	}
	// get local dynamodb session (lazily, so tests can inject a mock)
	if d.ddb == nil {
		d.ddb = db.GetDbSession()
	}
	// create query mapping for the :u placeholder
	query, err := dynamodbattribute.MarshalMap(QueryMapping{
		User: listInput.User,
	})
	if err != nil {
		fmt.Println(err)
		return []model.Goal{}, model.ResponseError{
			Code:    500,
			Message: "Problem creating query attribute map",
		}
	}
	// #U aliases the reserved word "user" in the key condition.
	input := &dynamodb.QueryInput{
		TableName:                 aws.String("Goals"),
		ExpressionAttributeNames:  aws.StringMap(map[string]string{"#U": "user"}),
		KeyConditionExpression:    aws.String("#U = :u"),
		ExpressionAttributeValues: query,
	}
	result, err := d.ddb.Query(input)
	// handle possible errors, mapping known AWS error codes to statuses
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			switch aerr.Code() {
			case dynamodb.ErrCodeProvisionedThroughputExceededException:
				return []model.Goal{}, model.ResponseError{
					Code:    500,
					Message: "Dynamodb provisioned throughput limit reached",
				}
			case dynamodb.ErrCodeResourceNotFoundException:
				return []model.Goal{}, model.ResponseError{
					Code:    404,
					Message: "User not found",
				}
			case dynamodb.ErrCodeRequestLimitExceeded:
				return []model.Goal{}, model.ResponseError{
					Code:    500,
					Message: "Reached dynamodb request limit",
				}
			default:
				fmt.Println(aerr.Error())
				return []model.Goal{}, model.ResponseError{
					Code:    500,
					Message: "Problem getting all goals for the user",
				}
			}
		} else {
			fmt.Println(err.Error())
			return []model.Goal{}, model.ResponseError{
				Code:    500,
				Message: "Problem getting all goals for the user",
			}
		}
	}
	// Decode the items into goals. Previously the error returned by
	// UnmarshalListOfMaps was discarded and a stale err was checked, so
	// unmarshalling failures went unnoticed.
	goals := []model.Goal{}
	err = dynamodbattribute.UnmarshalListOfMaps(result.Items, &goals)
	if err != nil {
		fmt.Println(err.Error())
		return []model.Goal{}, model.ResponseError{
			Code:    500,
			Message: "Problem unmarshalling response from dynamodb",
		}
	}
	return goals, nil
}
// main wires the handler (with its lazily-initialised DynamoDB
// dependency) into the Lambda runtime.
func main() {
	d := deps{}
	lambda.Start(d.HandleRequest)
}
|
package main
import "fmt"
// perbandingan reports which argument is smaller: "a lebih kecil dari b"
// when a < b, otherwise "b lebih kecil dari a" (including when equal).
func perbandingan(a, b int) string {
	if a >= b {
		return "b lebih kecil dari a"
	}
	return "a lebih kecil dari b"
}
// main prints the comparison result for the sample pair (10, 5).
func main() {
	fmt.Println(perbandingan(10, 5))
}
|
package lambdacalculus
import (
"testing"
)
// TestIsEmptyWithEmpty checks that IsEmpty on EmptyList selects the
// true branch (the encoded boolean is applied as a selector).
func TestIsEmptyWithEmpty(t *testing.T) {
	res := IsEmpty(EmptyList)(true)(false)
	if !res.(bool) {
		t.Errorf("The empty list should be empty instead is %v", res)
	}
}
// TestValueOfEmpty checks that the value slot of the empty list is the
// empty list itself.
func TestValueOfEmpty(t *testing.T) {
	val := ListElementVal(EmptyList)
	res := IsEmpty(val.(List))(true)(false)
	if !res.(bool) {
		t.Errorf("The value of empty list should be the empty list instead is %v", res)
	}
}
// TestNextOfEmpty checks that stepping past the empty list yields the
// empty list again.
func TestNextOfEmpty(t *testing.T) {
	nextEl := NextListElement(EmptyList)
	res := IsEmpty(nextEl)(true)(false)
	if !res.(bool) {
		t.Errorf("The next element of empty list should be empty list instead is %v", res)
	}
}
// TestIsEmptyWithNotEmpty checks that a one-element list is not empty.
func TestIsEmptyWithNotEmpty(t *testing.T) {
	list := InsertOnTop(one)(EmptyList)
	res := IsEmpty(list)(true)(false)
	if res.(bool) {
		t.Errorf("IsEmpty applied to a non empty list should be false instead is %v", res)
	}
}
// TestValueWithNotEmpty: build the list 2 -> 1; the head must decode to
// the Church numeral 2 when applied to the shared f/x test fixtures.
func TestValueWithNotEmpty(t *testing.T) {
	lst := InsertOnTop(two)(InsertOnTop(one)(EmptyList))
	head := ListElementVal(lst).(ChurchNumber)
	decoded := head(f)(x)
	if decoded != 2 {
		t.Errorf("The current value of the list should be 2 instead is %v", decoded)
	}
}
// TestNextOfNotEmpty: build the list 2 -> 1 and check that the element
// after the head decodes to the Church numeral 1.
//
// Fix: corrected the "elemenet" typo in the failure message.
func TestNextOfNotEmpty(t *testing.T) {
	list := InsertOnTop(one)(EmptyList)
	list = InsertOnTop(two)(list)
	nextEl := NextListElement(list)
	val := ListElementVal(nextEl).(ChurchNumber)
	res := val(f)(x)
	if res != 1 {
		t.Errorf("The value of the next element should be 1 instead is %v", res)
	}
}
// TestLastNextOfNotEmpty verifies that walking past the last element of a
// three-element list reaches the empty list.
//
// Fix: renamed from TestLsatNextOfNotEmpty ("Lsat" typo); test functions
// are only invoked by the framework, so the rename is safe.
func TestLastNextOfNotEmpty(t *testing.T) {
	// build a list with 3 elements: 3, 2 and 1
	list := InsertOnTop(three)(InsertOnTop(two)(InsertOnTop(one)(EmptyList)))
	// get the next element starting from 3 for three times: the first time you get 2, the second time you get 1 and the third time
	// you should get the empty list, which represents the end of the list
	nextEl := NextListElement(NextListElement(NextListElement(list)))
	res := IsEmpty(nextEl)(true)(false)
	if !res.(bool) {
		t.Errorf("IsEmpty applied to the end element of a list should be true instead is %v", res)
	}
}
|
package main
import (
"testing"
)
// TestRandomBalancer exercises Select() with zero, one and three backends.
//
// Fix: the three-backend case reused the "one backend" failure message
// (copy-paste), and the messages were grammatically broken ("should found").
func TestRandomBalancer(t *testing.T) {
	b1 := NewBackend("192.168.1.1:80", 5)
	b2 := NewBackend("192.168.1.2:80", 1)
	b3 := NewBackend("192.168.1.3:80", 1)
	// no backends: Select must report nothing available
	balancer := NewRdm()
	_, found := balancer.Select()
	if found {
		t.Error("no backend should be found!")
	}
	// one backend
	balancer = NewRdm(b1)
	_, found = balancer.Select()
	if !found {
		t.Error("one backend should be found!")
	}
	// three backends
	balancer = NewRdm(b1, b2, b3)
	_, found = balancer.Select()
	if !found {
		t.Error("a backend should be found among three backends!")
	}
}
// BenchmarkRdmSelect measures Select() on a random balancer holding three
// weighted backends.
//
// Fix: the setup was inside the timed region (no ResetTimer), and the
// comment claimed "no backends" while three are created.
func BenchmarkRdmSelect(b *testing.B) {
	b1 := NewBackend("192.168.1.1:80", 5)
	b2 := NewBackend("192.168.1.2:80", 1)
	b3 := NewBackend("192.168.1.3:80", 1)
	// three backends
	balancer := NewRdm(b1, b2, b3)
	// Exclude backend construction from the measurement.
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		balancer.Select()
	}
}
|
package aiven
import (
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
"github.com/stretchr/testify/assert"
"os"
"reflect"
"testing"
"time"
)
// Shared acceptance-test fixtures: the provider instance under test and the
// provider map handed to the Terraform test harness. Both are populated in
// init below.
var (
	testAccProviders map[string]terraform.ResourceProvider
	testAccProvider *schema.Provider
)
func init() {
testAccProvider = Provider().(*schema.Provider)
testAccProviders = map[string]terraform.ResourceProvider{
"aiven": testAccProvider,
}
}
// TestProvider runs the SDK's internal schema validation over the provider.
func TestProvider(t *testing.T) {
	provider := Provider().(*schema.Provider)
	if err := provider.InternalValidate(); err != nil {
		t.Fatalf("err: %s", err)
	}
}
// TestProviderImpl is a compile-time assertion that Provider() satisfies
// the terraform.ResourceProvider interface; the test body never fails at
// runtime.
func TestProviderImpl(t *testing.T) {
	var _ terraform.ResourceProvider = Provider()
}
// testAccPreCheck aborts an acceptance-test run unless the required
// environment variables are present.
//
// Fix: removed the t.Log(v) call — on that branch v is always "", so the
// log line carried no information.
func testAccPreCheck(t *testing.T) {
	if v := os.Getenv("AIVEN_TOKEN"); v == "" {
		t.Fatal("AIVEN_TOKEN must be set for acceptance tests")
	}
	if v := os.Getenv("AIVEN_CARD_ID"); v == "" {
		t.Fatal("AIVEN_CARD_ID must be set for acceptance tests")
	}
}
// Test_validateDurationString checks that a well-formed Go duration string
// passes validation and a malformed one produces errors.
func Test_validateDurationString(t *testing.T) {
	type args struct {
		v interface{}
		k string
	}
	cases := []struct {
		name       string
		args       args
		wantWs     []string
		wantErrors bool
	}{
		{name: "basic", args: args{v: "2m", k: ""}, wantWs: nil, wantErrors: false},
		{name: "wrong-duration", args: args{v: "123qweert", k: ""}, wantWs: nil, wantErrors: true},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			gotWs, gotErrors := validateDurationString(tc.args.v, tc.args.k)
			if !reflect.DeepEqual(gotWs, tc.wantWs) {
				t.Errorf("validateDurationString() gotWs = %v, want %v", gotWs, tc.wantWs)
			}
			if tc.wantErrors != (len(gotErrors) > 0) {
				t.Errorf("validateDurationString() gotErrors = %v", gotErrors)
			}
		})
	}
}
// Test_generateClientTimeoutsSchema verifies the generated "client timeouts"
// schema block: outer set attributes plus, per timeout name, the inner string
// field's type/description/optionality.
func Test_generateClientTimeoutsSchema(t *testing.T) {
	type args struct {
		timeouts map[string]time.Duration
	}
	tests := []struct {
		name string
		args args
		want *schema.Schema
	}{
		{
			"basic",
			args{map[string]time.Duration{"create": 1 * time.Minute}},
			&schema.Schema{
				Type: schema.TypeSet,
				MaxItems: 1,
				Description: "Custom Terraform Client timeouts",
				Optional: true,
				ForceNew: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"create": {
							Type: schema.TypeString,
							Description: "create timeout",
							Optional: true,
							ValidateFunc: validateDurationString,
						},
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := generateClientTimeoutsSchema(tt.args.timeouts)
			// Compare field-by-field rather than DeepEqual because the schema
			// holds a func value (ValidateFunc), which is not comparable.
			assert.Equal(t, tt.want.Type, got.Type)
			assert.Equal(t, tt.want.MaxItems, got.MaxItems)
			assert.Equal(t, tt.want.Description, got.Description)
			assert.Equal(t, tt.want.Optional, got.Optional)
			for name, s := range got.Elem.(*schema.Resource).Schema {
				want := tt.want.Elem.(*schema.Resource).Schema[name]
				assert.Equal(t, want.Type, s.Type)
				assert.Equal(t, want.Description, s.Description)
				assert.Equal(t, want.Optional, s.Optional)
			}
		})
	}
}
// Test_getTimeoutHelper checks that a resource with no timeout attributes
// set yields the zero duration without error.
func Test_getTimeoutHelper(t *testing.T) {
	type args struct {
		d *schema.ResourceData
		name string
		defaultDuration time.Duration
	}
	tests := []struct {
		name string
		args args
		want time.Duration
		wantErr bool
	}{
		{
			name: "basic",
			args: args{
				// Empty instance state: no timeouts configured on the resource.
				d: resourceService().Data(&terraform.InstanceState{
					ID: "",
					Attributes: nil,
					Ephemeral: terraform.EphemeralState{},
					Meta: nil,
					Tainted: false,
				}),
				name: "create",
				defaultDuration: 1 * time.Minute,
			},
			want: 0,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// NOTE(review): args.defaultDuration is declared but never passed to
			// getTimeoutHelper below — confirm whether the helper takes a
			// default or the field is vestigial.
			got, err := getTimeoutHelper(tt.args.d, tt.args.name)
			if (err != nil) != tt.wantErr {
				t.Errorf("getTimeoutHelper() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("getTimeoutHelper() got = %v, want %v", got, tt.want)
			}
		})
	}
}
|
package main
import (
jwt "github.com/dgrijalva/jwt-go"
"go.mongodb.org/mongo-driver/bson/primitive"
)
// Assignment is an embedded document describing one piece of coursework
// attached to a batch.
type Assignment struct {
	Name string `bson:"name,omitempty"`
	Filename string `bson:"filename,omitempty"`
	Deadline string `bson:"deadline,omitempty"`
}
// Batch represents the schema for the "Batches" collection; assignments are
// embedded rather than referenced.
type Batch struct {
	ID primitive.ObjectID `bson:"_id,omitempty"`
	Name string `bson:"name,omitempty"`
	Timeslot string `bson:"timeslot,omitempty"`
	Assignments []Assignment `bson:"assignments,omitempty"`
}
// Instructor represents the schema for the "Instructors" collection
type Instructor struct {
	ID primitive.ObjectID `bson:"_id,omitempty"`
	Username string `bson:"username,omitempty"`
	Password string `bson:"password,omitempty"`
	Name string `bson:"name,omitempty"`
	Email string `bson:"email,omitempty"`
	// NOTE(review): the bson tag is misspelled ("qualifcations"); existing
	// documents will have been stored under that key, so correcting it is a
	// data migration — confirm before changing.
	Qualification []string `bson:"qualifcations,omitempty"`
	Experience []string `bson:"experience,omitempty"`
	Fees float64 `bson:"fees,omitempty"`
	Assignments []string `bson:"assignments,omitempty"`
	// Batches holds references (ObjectIDs) into the "Batches" collection.
	Batches []primitive.ObjectID `bson:"batches,omitempty"`
}
// Student represents the schema for the "Students" collection
type Student struct {
	ID primitive.ObjectID `bson:"_id,omitempty"`
	Username string `bson:"username,omitempty"`
	Password string `bson:"password,omitempty"`
	Name string `bson:"name,omitempty"`
	Email string `bson:"email,omitempty"`
	Std string `bson:"std,omitempty"`
	Balance float64 `bson:"balance,omitempty"`
	Location string `bson:"location,omitempty"`
	// Batches holds references (ObjectIDs) into the "Batches" collection.
	Batches []primitive.ObjectID `bson:"batches,omitempty"`
}
// StudentDetail represents schema for joining "Students" with "Batches" collection
type StudentDetail struct {
	ID primitive.ObjectID `bson:"_id,omitempty"`
	Name string `bson:"name,omitempty"`
	Email string `bson:"email,omitempty"`
	Std string `bson:"std,omitempty"`
	Balance float64 `bson:"balance,omitempty"`
	Location string `bson:"location,omitempty"`
	Batches []primitive.ObjectID `bson:"batches,omitempty"`
	// BatchDetails carries the joined batch documents (e.g. from $lookup).
	BatchDetails []Batch `bson:"batch_details,omitempty"`
}
// IDRequest is a request payload carrying a single document id.
type IDRequest struct {
	ID string
}
// AssignmentRequest is the payload for creating an assignment in a batch.
type AssignmentRequest struct {
	Name string
	Filename string
	Deadline string
	BatchID string
}
// DownloadRequest names the stored file to fetch.
type DownloadRequest struct {
	Filename string
}
// LoginRequest carries the credentials for authentication.
type LoginRequest struct {
	Username string
	Password string
}
// InstructorProfileRequest is the editable subset of an instructor profile.
type InstructorProfileRequest struct {
	Name string
	Email string
	Qualification []string
	Experience []string
	Fees float64
}
// StudentProfileRequest is the editable subset of a student profile.
type StudentProfileRequest struct {
	Name string
	Email string
	Location string
}
// Token is the JWT claims payload; the embedded StandardClaims contributes
// the registered claims (expiry, issuer, ...).
type Token struct {
	ID string
	Username string
	Role string
	*jwt.StandardClaims
}
// Exception is the JSON error envelope returned to API clients.
type Exception struct {
	Message string `json:"message"`
}
|
package decompress_run_length_encoded_list
// decompressRLElist expands a run-length encoded list: each pair
// (nums[2i], nums[2i+1]) means "repeat the value nums[2i+1] exactly
// nums[2i] times". The concatenation of all expanded runs is returned;
// a nil/empty input yields nil, matching the original behavior.
//
// Improvements: the output slice is allocated once with the exact final
// capacity, and a trailing unpaired count can no longer index past the end.
func decompressRLElist(nums []int) []int {
	if len(nums) == 0 {
		return nil
	}
	// First pass: total output length is the sum of the even-index counts.
	total := 0
	for i := 0; i+1 < len(nums); i += 2 {
		total += nums[i]
	}
	values := make([]int, 0, total)
	for i := 0; i+1 < len(nums); i += 2 {
		for j := nums[i]; j > 0; j-- {
			values = append(values, nums[i+1])
		}
	}
	return values
}
|
package bench
import (
"database/sql"
"log"
_ "github.com/mattn/go-sqlite3"
)
// Text is the filler payload written into every text column; the trailing
// comment is leftover fragments of longer sample strings.
const Text = "N769r32BAkaQj6uzZQA6IsFICROqZEA3OOXFhn8" //rnhFluvqPAl4L7VHr0yFk0O3DWSY7k //4d7QwVbA8Sim6ZijqeEPdMr71XXRVpAi6amGvfT3HayxcZK UflRnZw1xShTJwyn395RU92dpgKO9Nl3IJCdpJogUww06j18QWeZJ8NmiSXxhMldc28mXjbx0TsL6agsfT4c0Fny6fEDwZMv38Y7hOyJAJ6ftu7hgee"
// Row "category" labels used for table1's sample1 column.
const sales = "sales"
const bank = "bank"
const auto = "auto"
const card = "card"
const misc = "misc"
// WithpkClone opens an in-memory SQLite database and runs the primary-key
// benchmark body against it. (The clone step is intentionally disabled here,
// mirroring the original.)
//
// Fix: the error returned by Withpk was silently discarded; it is now
// propagated through the named result.
func WithpkClone(table1Rows, table2Rows int) (err error) {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		return
	}
	defer db.Close()
	err = Withpk(db, table1Rows, table2Rows)
	//CloneTable1(db)
	return
}
// WithnopkClone opens an in-memory SQLite database, runs the no-primary-key
// benchmark body, then clones and re-joins table1.
//
// Fix: the errors returned by Withnopk and CloneTable1 were silently
// discarded; they are now propagated through the named result.
func WithnopkClone(table1Rows, table2Rows int) (err error) {
	db, err := sql.Open("sqlite3", ":memory:")
	if err != nil {
		return
	}
	defer db.Close()
	if err = Withnopk(db, table1Rows, table2Rows); err != nil {
		return
	}
	err = CloneTable1(db)
	return
}
// Withpk creates two 36-column tables keyed by an integer PRIMARY KEY,
// bulk-loads each inside a single transaction, then executes an inner join
// on the key and drains the cursor. Prepare/insert errors are returned via
// the named result; table-creation failures are fatal.
func Withpk(db *sql.DB, table1Rows, table2Rows int) (err error) {
	//totTime := time.Now()
	//start := time.Now()
	//Table1
	_, err = db.Exec("create table table1(id int primary key, sample1 Text,sample2 integer,sample3 integer,sample4 integer,sample5 integer,sample6 Text,sample7 Text,sample8 Text,sample9 Text,sample10 Text,sample11 Text,sample12 Text,sample13 Text,sample14 Text,sample15 Text,sample16 Text,sample17 Text,sample18 Text,sample19 Text,sample20 Text,sample21 Text,sample22 Text,sample23 Text,sample24 Text,sample25 Text,sample26 Text,sample27 Text,sample28 Text,sample29 Text,sample30 Text,sample31 Text,sample32 Text,sample33 Text,sample34 Text,sample35 Text);")
	if err != nil {
		log.Fatal("Failed to create table:", err)
	}
	table1TX, err := db.Begin()
	if err != nil {
		return
	}
	table1Stmt, err := table1TX.Prepare("insert into table1 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
	if err != nil {
		return
	}
	defer table1Stmt.Close()
	// The divisibility ladder (7, 5, 3, 2, otherwise) picks a category label
	// and varying integer columns so the table carries skewed value
	// distributions for the join benchmark.
	for i := 0; i < table1Rows; i++ {
		if i%7 == 0 {
			if i%5 != 0 && i%3 != 0 && i%2 != 0 {
				_, err = table1Stmt.Exec(i, sales, 7, 7, 7, 10, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text)
			} else {
				_, err = table1Stmt.Exec(i, sales, 7, 0, 7, 10, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text)
			}
		} else if i%5 == 0 {
			if i%3 != 0 && i%2 != 0 {
				_, err = table1Stmt.Exec(i, bank, 5, 5, 5, 10, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text)
			} else {
				_, err = table1Stmt.Exec(i, bank, 5, 0, 5, 10, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text)
			}
		} else if i%3 == 0 {
			if i%2 != 0 {
				_, err = table1Stmt.Exec(i, auto, 3, 3, 3, 10, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text)
			} else {
				_, err = table1Stmt.Exec(i, auto, 3, 0, 3, 10, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text)
			}
		} else if i%2 == 0 {
			_, err = table1Stmt.Exec(i, card, 2, 2, 2, 10, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text)
		} else {
			_, err = table1Stmt.Exec(i, misc, 1, 1, 1, 10, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text)
		}
		if err != nil {
			return
		}
	}
	// NOTE(review): the Commit error is discarded — confirm whether it should
	// be propagated like the insert errors.
	table1TX.Commit()
	//fmt.Println("Time to create table1 ", time.Since(//start))
	//start = time.Now()
	//Table2
	_, err = db.Exec("create table table2(id int primary key, sample1 Text,sample2 integer,sample3 Text,sample4 Text,sample5 Text,sample6 Text,sample7 Text,sample8 Text,sample9 Text,sample10 Text,sample11 Text,sample12 Text,sample13 Text,sample14 Text,sample15 Text,sample16 Text,sample17 Text,sample18 Text,sample19 Text,sample20 Text,sample21 Text,sample22 Text,sample23 Text,sample24 Text,sample25 Text,sample26 Text,sample27 Text,sample28 Text,sample29 Text,sample30 Text,sample31 Text,sample32 Text,sample33 Text,sample34 Text,sample35 Text);")
	if err != nil {
		log.Fatal("Failed to create table:", err)
	}
	table2TX, err := db.Begin()
	if err != nil {
		return
	}
	table2Stmt, err := table2TX.Prepare("insert into table2 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
	if err != nil {
		return
	}
	defer table2Stmt.Close()
	for i := 0; i < table2Rows; i++ {
		_, err = table2Stmt.Exec(i, Text, 100, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text)
		if err != nil {
			return
		}
	}
	table2TX.Commit()
	//fmt.Println("Time to create table2 ", time.Since(//start))
	//start = time.Now()
	//Join Table1 & Table2
	resultRowNum := 0
	rows, err := db.Query("select one.sample1,one.sample2,one.sample3,one.sample4,one.sample5,one.sample6,one.sample7,one.sample8,one.sample9,one.sample10,one.sample11,one.sample12,one.sample13,one.sample14,one.sample15,one.sample16,one.sample17,one.sample18,one.sample19,one.sample20,one.sample21,one.sample22,one.sample23,one.sample24,one.sample25,one.sample26,one.sample27,one.sample28,one.sample29,one.sample30,one.sample31,one.sample32,one.sample33,one.sample34,one.sample35, two.sample1,two.sample2,two.sample3,two.sample4,two.sample5,two.sample6,two.sample7,two.sample8,two.sample9,two.sample10,two.sample11,two.sample12,two.sample13,two.sample14,two.sample15,two.sample16,two.sample17,two.sample18,two.sample19,two.sample20,two.sample21,two.sample22,two.sample23,two.sample24,two.sample25,two.sample26,two.sample27,two.sample28,two.sample29,two.sample30,two.sample31,two.sample32,two.sample33,two.sample34,two.sample35 from table1 as one inner join table2 as two on one.id=two.id; ")
	if err != nil {
		return
	}
	defer rows.Close()
	//retrievedColumns, err := rows.Columns()
	// Drain the cursor so the join actually executes end-to-end.
	for rows.Next() {
		resultRowNum++
	}
	//fmt.Println("Time to select from join ", time.Since(//start))
	//fmt.Println("NumColumns: ", len(//retrievedColumns))
	//fmt.Println("NumRows: ", resultRowNum)
	//fmt.Println("Total Time in WithPk ", time.Since(//totTime))
	return
}
// Withnopk mirrors Withpk but creates both tables WITHOUT a primary key
// (plain "id int"), so the subsequent join cannot use a key index. All text
// columns are filled with the same payload.
func Withnopk(db *sql.DB, table1Rows, table2Rows int) (err error) {
	//totTime := time.Now()
	//start := time.Now()
	//Table1
	_, err = db.Exec("create table table1(id int , sample1 Text,sample2 Text,sample3 Text,sample4 Text,sample5 Text,sample6 Text,sample7 Text,sample8 Text,sample9 Text,sample10 Text,sample11 Text,sample12 Text,sample13 Text,sample14 Text,sample15 Text,sample16 Text,sample17 Text,sample18 Text,sample19 Text,sample20 Text,sample21 Text,sample22 Text,sample23 Text,sample24 Text,sample25 Text,sample26 Text,sample27 Text,sample28 Text,sample29 Text,sample30 Text,sample31 Text,sample32 Text,sample33 Text,sample34 Text,sample35 Text);")
	if err != nil {
		log.Fatal("Failed to create table:", err)
	}
	table1TX, err := db.Begin()
	if err != nil {
		return
	}
	table1Stmt, err := table1TX.Prepare("insert into table1 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
	if err != nil {
		return
	}
	defer table1Stmt.Close()
	for i := 0; i < table1Rows; i++ {
		_, err = table1Stmt.Exec(i, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text)
		if err != nil {
			return
		}
	}
	// NOTE(review): Commit errors are discarded in this file — confirm intent.
	table1TX.Commit()
	//fmt.Println("Time to create table1 ", time.Since(//start))
	//start = time.Now()
	//Table2
	_, err = db.Exec("create table table2(id int , sample1 Text,sample2 Text,sample3 Text,sample4 Text,sample5 Text,sample6 Text,sample7 Text,sample8 Text,sample9 Text,sample10 Text,sample11 Text,sample12 Text,sample13 Text,sample14 Text,sample15 Text,sample16 Text,sample17 Text,sample18 Text,sample19 Text,sample20 Text,sample21 Text,sample22 Text,sample23 Text,sample24 Text,sample25 Text,sample26 Text,sample27 Text,sample28 Text,sample29 Text,sample30 Text,sample31 Text,sample32 Text,sample33 Text,sample34 Text,sample35 Text);")
	if err != nil {
		log.Fatal("Failed to create table:", err)
	}
	table2TX, err := db.Begin()
	if err != nil {
		return
	}
	table2Stmt, err := table2TX.Prepare("insert into table2 values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
	if err != nil {
		return
	}
	defer table2Stmt.Close()
	for i := 0; i < table2Rows; i++ {
		_, err = table2Stmt.Exec(i, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text, Text)
		if err != nil {
			return
		}
	}
	table2TX.Commit()
	//fmt.Println("Time to create table2 ", time.Since(//start))
	//start = time.Now()
	//Join Table1 & Table2
	resultRowNum := 0
	rows, err := db.Query("select one.sample1,one.sample2,one.sample3,one.sample4,one.sample5,one.sample6,one.sample7,one.sample8,one.sample9,one.sample10,one.sample11,one.sample12,one.sample13,one.sample14,one.sample15,one.sample16,one.sample17,one.sample18,one.sample19,one.sample20,one.sample21,one.sample22,one.sample23,one.sample24,one.sample25,one.sample26,one.sample27,one.sample28,one.sample29,one.sample30,one.sample31,one.sample32,one.sample33,one.sample34,one.sample35, two.sample1,two.sample2,two.sample3,two.sample4,two.sample5,two.sample6,two.sample7,two.sample8,two.sample9,two.sample10,two.sample11,two.sample12,two.sample13,two.sample14,two.sample15,two.sample16,two.sample17,two.sample18,two.sample19,two.sample20,two.sample21,two.sample22,two.sample23,two.sample24,two.sample25,two.sample26,two.sample27,two.sample28,two.sample29,two.sample30,two.sample31,two.sample32,two.sample33,two.sample34,two.sample35 from table1 as one inner join table2 as two on one.id=two.id; ")
	if err != nil {
		return
	}
	defer rows.Close()
	//retrievedColumns, err := rows.Columns()
	// Drain the cursor so the join actually executes end-to-end.
	for rows.Next() {
		resultRowNum++
	}
	//fmt.Println("Time to select from join ", time.Since(//start))
	//fmt.Println("NumColumns: ", len(//retrievedColumns))
	//fmt.Println("NumRows: ", resultRowNum)
	//fmt.Println("Total Time is NoPK", time.Since(//totTime))
	return
}
// CloneTable1 copies table1 into a new table3 via CREATE TABLE ... AS SELECT
// (note: this copies data but not the primary key), then joins table1 with
// the clone on id and drains the result set.
func CloneTable1(db *sql.DB) (err error) {
	//totTime := time.Now()
	//start := time.Now()
	//Table3
	_, err = db.Exec("create table table3 as select * from table1;")
	if err != nil {
		log.Fatal("Failed to create table:", err)
	}
	//fmt.Println("Time to clone table1 ", time.Since(//start))
	//start = time.Now()
	//Join Table1 & Table3
	resultRowNum := 0
	rows, err := db.Query("select one.sample1,one.sample2,one.sample3,one.sample4,one.sample5,one.sample6,one.sample7,one.sample8,one.sample9,one.sample10,one.sample11,one.sample12,one.sample13,one.sample14,one.sample15,one.sample16,one.sample17,one.sample18,one.sample19,one.sample20,one.sample21,one.sample22,one.sample23,one.sample24,one.sample25,one.sample26,one.sample27,one.sample28,one.sample29,one.sample30,one.sample31,one.sample32,one.sample33,one.sample34,one.sample35, two.sample1,two.sample2,two.sample3,two.sample4,two.sample5,two.sample6,two.sample7,two.sample8,two.sample9,two.sample10,two.sample11,two.sample12,two.sample13,two.sample14,two.sample15,two.sample16,two.sample17,two.sample18,two.sample19,two.sample20,two.sample21,two.sample22,two.sample23,two.sample24,two.sample25,two.sample26,two.sample27,two.sample28,two.sample29,two.sample30,two.sample31,two.sample32,two.sample33,two.sample34,two.sample35 from table1 as one inner join table3 as two on one.id=two.id; ")
	if err != nil {
		return
	}
	defer rows.Close()
	//retrievedColumns, err := rows.Columns()
	// Drain the cursor so the join actually executes end-to-end.
	for rows.Next() {
		resultRowNum++
	}
	//fmt.Println("Time to select from join of table1 and table3 ", time.Since(//start))
	//fmt.Println("NumColumns: ", len(//retrievedColumns))
	//fmt.Println("NumRows: ", resultRowNum)
	//fmt.Println("Total Time in CloneTable1 is ", time.Since(//totTime))
	return
}
|
package server
import (
"reflect"
"testing"
)
// TestRouteConfig parses configs/cluster.conf and compares every resulting
// option against a hand-built golden Options value.
func TestRouteConfig(t *testing.T) {
	opts, err := ProcessConfigFile("./configs/cluster.conf")
	if err != nil {
		t.Fatalf("Received an error reading route config file: %v\n", err)
	}
	golden := &Options{
		Host:        "0.0.0.0",
		Port:        4222,
		Username:    "nats",
		Password:    "nats",
		ZkPath:      "/gnatsd",
		AuthTimeout: 1.0,
		LogFile:     "/export/home/jae/gnatsd.log",
		PidFile:     "/export/home/jae/gnatsd.pid",
		Trace:       true,
		Debug:       true,
		Logtime:     true,
		HTTPPort:    4224,
		RouteHost:   "0.0.0.0",
		RoutePort:   4223,
		ZkTimeout:   10,
		MaxProcs:    8,
	}
	for _, addr := range []string{"127.0.0.1:2181", "127.0.0.1:2182", "127.0.0.1:2183"} {
		golden.ZkAddrs = append(golden.ZkAddrs, addr)
	}
	if !reflect.DeepEqual(golden, opts) {
		t.Fatalf("Options are incorrect.\nexpected: %+v\ngot: %+v",
			golden, opts)
	}
}
|
package main
import (
"fmt"
"github.com/YarkoL/GoTraining/04_scope/vis"
)
// x is package-scoped state: visible to every function in this file and
// mutated by increment.
var x int = 42
// main drives the scope/closure demonstrations in a fixed order; the printed
// sequence of values is the point of the exercise.
func main() {
	fmt.Println(x)
	foo()
	bar()
	bar()
	// inc is a fresh closure, so its counter starts from zero here.
	inc := wrapper()
	fmt.Println(inc())
	fmt.Println(inc())
	//fmt.Println(wrapper()) //prints the address
	//fmt.Println(inc) //same as above
	baz()
}
// foo demonstrates package scope: it reads x, calls into the imported vis
// package, and shows that increment mutates the shared x.
func foo() {
	fmt.Println(x) //package scope
	vis.PrintVar() //from import
	fmt.Println(increment())
	fmt.Println(x)
}
// increment mutates the package-level x and returns its new value — a
// deliberate demonstration of shared package state.
func increment() int {
	fmt.Println("in increment func block")
	x++
	return x
}
// bar demonstrates a closure over a function-local counter: each call to
// bar starts the count over at zero, so it always prints 1 then 2.
func bar() {
	fmt.Println("call bar")
	counter := 0
	bump := func() int {
		counter++
		return counter
	}
	fmt.Println(bump())
	fmt.Println(bump())
	//fmt.Println(bump) // would print the function value's address
}
// wrapper returns a closure that increments and returns its own captured
// counter; every call to wrapper yields an independent counter.
func wrapper() func() int {
	fmt.Println("call wrapper")
	count := 0
	bump := func() int {
		count++
		return count
	}
	return bump
}
// baz obtains a brand-new closure from wrapper, so its counter restarts
// at 1 regardless of earlier calls elsewhere.
func baz() {
	fmt.Println("call baz")
	counter := wrapper()
	fmt.Println(counter())
	fmt.Println(counter())
}
|
package main
import (
"fmt"
"github.com/askovpen/goated/pkg/areasconfig"
"github.com/askovpen/goated/pkg/config"
"github.com/askovpen/goated/pkg/ui"
"github.com/askovpen/gocui"
"log"
"os"
"time"
)
// main boots goated: it loads the main and area configs, redirects logging
// to the configured file, builds the gocui UI, starts a one-second clock
// ticker for the status bar, and runs the main loop until quit.
//
// Fixes: the os.OpenFile error was discarded with "_" (a failed open could
// hand a broken writer to log.SetOutput); the ticker was never stopped.
func main() {
	log.Printf("%s started", config.LongPID)
	if len(os.Args) == 1 {
		log.Printf("Usage: %s <config.yml>", os.Args[0])
		return
	}
	err := config.Read()
	if err != nil {
		log.Print(err)
		return
	}
	// Report log-file failures instead of silently ignoring them.
	f, err := os.OpenFile(config.Config.Log, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		log.Print(err)
		return
	}
	defer f.Close()
	log.SetOutput(f)
	log.SetFlags(log.LstdFlags | log.Lmicroseconds)
	err = areasconfig.Read()
	if err != nil {
		log.Print(err)
		return
	}
	ui.App, err = gocui.NewGui(gocui.OutputNormal)
	if err != nil {
		log.Panicln(err)
	}
	defer ui.App.Close()
	ui.App.InputEsc = true
	ui.App.SetManagerFunc(ui.Layout)
	ui.App.BgColor = gocui.ColorBlack
	ui.App.FgColor = gocui.ColorWhite
	ui.ActiveWindow = "AreaList"
	if err := ui.App.SetKeybinding("", gocui.KeyCtrlC, gocui.ModNone, ui.Quit); err != nil {
		log.Panicln(err)
	}
	if err := ui.Keybindings(ui.App); err != nil {
		log.Panicln(err)
	}
	// Status-bar clock: refresh once per second; stop the ticker so the
	// goroutine's source is released when main returns.
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()
	go func() {
		for t := range ticker.C {
			ui.StatusTime = fmt.Sprintf("│ %s ", t.Format("15:04:05"))
			ui.App.Update(func(*gocui.Gui) error {
				return nil
			})
		}
	}()
	if err := ui.App.MainLoop(); err != nil && err != gocui.ErrQuit {
		log.Panicln(err)
	}
}
|
package client
import (
"errors"
"fmt"
"github.com/slince/spike-go/event"
"github.com/slince/spike-go/log"
"github.com/slince/spike-go/protol"
"github.com/slince/spike-go/tunnel"
"net"
"runtime"
)
// Client version and the names of the events fired on the dispatcher.
const (
	Version = "0.0.1"
	EventClientInit = "init"
	EventClientStart = "start"
	EventMessage = "message"
	EventUnknownMessage = "unknownMessage"
)
// Client is the tunnel client: it keeps one control connection to the
// server and dispatches protocol messages through an event dispatcher.
type Client struct {
	// client id (assigned by the server after auth; empty until then)
	Id string
	// Server Address
	ServerAddress string
	// Logger
	Logger *log.Logger
	// authentication credentials sent during the handshake
	Auth map[string]string
	// Tunnels
	Tunnels []tunnel.Tunnel
	// control connection to the server
	ControlConn net.Conn
	// event dispatcher
	Dispatcher *event.Dispatcher
}
// Start registers the listeners, dials the server over TCP and then blocks
// processing the control connection. It panics if the dial fails.
func (client *Client) Start() {
	client.registerListeners()
	controlConn, dialErr := net.Dial("tcp", client.ServerAddress)
	if dialErr != nil {
		panic(dialErr)
	}
	client.Logger.Info("the client has been connected to the server")
	client.ControlConn = controlConn
	client.handleControlConnection()
}
// Close the client
// NOTE: currently a no-op stub — no connection teardown is performed yet.
func (client *Client) Close() {
}
// SendMessage writes a protocol message to the control connection, first
// stamping it with the "client-id" header once an id has been assigned.
func (client *Client) SendMessage(message *protol.Protocol) (int, error) {
	if client.Id != "" {
		headers := message.Headers
		if headers == nil {
			headers = make(map[string]string, 1)
			message.Headers = headers
		}
		headers["client-id"] = client.Id
	}
	return client.ControlConn.Write(message.ToBytes())
}
// Register all listeners
// registerListeners attaches the built-in system listeners to the
// client's event dispatcher.
func (client *Client) registerListeners() {
	// register the system listeners
	RegisterSystemListener(client.Dispatcher)
}
// handleControlConnection drives the control connection: it sends the auth
// handshake, then reads and dispatches protocol messages until reading fails.
//
// Fix: the original logged a read error and kept looping, which spins
// forever once the connection is closed (e.g. persistent EOF); the loop now
// exits on error.
func (client *Client) handleControlConnection() {
	// First obtain authorization from the server.
	client.sendAuthRequest()
	reader := protol.NewReader(client.ControlConn)
	for {
		messages, err := reader.Read()
		if err != nil {
			client.Logger.Error(err)
			return
		}
		for _, message := range messages {
			client.Logger.Info("Received a message:\r\n" + message.ToString())
			client.handleMessage(message)
		}
	}
}
// handleMessage fires the message event so a listener can attach a handler,
// then runs that handler. An error is returned when no handler was attached
// or when the handler itself fails.
//
// Fixes: grammar in the diagnostics ("a unknown" -> "an unknown"); the
// EventUnknownMessage constant is used instead of a duplicate literal; a
// handler failure is now propagated instead of being logged and swallowed.
func (client *Client) handleMessage(message *protol.Protocol) error {
	// fire event
	ev := event.NewEvent(EventMessage, map[string]interface{}{
		"message": message,
		"client":  client,
	})
	client.Dispatcher.Fire(ev)
	// Listeners are expected to place a handler into the event parameters.
	hd, ok := ev.Parameters["handler"]
	if !ok {
		// Nobody recognised the message.
		ev = event.NewEvent(EventUnknownMessage, map[string]interface{}{"message": message})
		client.Dispatcher.Fire(ev)
		client.Logger.Warn("received an unknown message")
		return fmt.Errorf("received an unknown message")
	}
	// Run the handler.
	err := hd.(MessageHandler).Handle(message)
	if err != nil {
		client.Logger.Warn("message handle error:", err)
		return err
	}
	return nil
}
// findTunnelById returns the tunnel whose id matches, or an error that
// names the missing id.
//
// Fix: the error message now includes the id so a lookup failure is
// diagnosable.
func (client *Client) findTunnelById(id string) (tunnel.Tunnel, error) {
	for _, tn := range client.Tunnels {
		if tn.GetId() == id {
			return tn, nil
		}
	}
	return nil, fmt.Errorf("the tunnel is missing with id %q", id)
}
// sendAuthRequest pushes the auth handshake message (OS, client version and
// credentials) to the server over the control connection.
//
// Fix: the Write error was silently discarded; it is now logged.
func (client *Client) sendAuthRequest() {
	message := &protol.Protocol{
		Action: "auth",
		Body: map[string]interface{}{
			"os":      runtime.GOOS + runtime.GOARCH,
			"version": Version,
			"auth":    client.Auth,
		},
	}
	if _, err := client.ControlConn.Write([]byte(message.ToString())); err != nil {
		client.Logger.Error(err)
	}
}
// NewClient builds a Client from the configuration: it wires up the file +
// console logger, materializes the tunnels, and attaches a fresh event
// dispatcher. The id stays empty until the server assigns one.
func NewClient(configuration *Configuration) *Client {
	logger := log.NewLogger()
	logger.SetLogFile(configuration.Log["file"]).EnableConsole()
	return &Client{
		Id:            "",
		ServerAddress: configuration.ServerAddress,
		Logger:        logger,
		Auth:          configuration.Auth,
		Tunnels:       createTunnelsWithTunnelConfiguration(configuration.Tunnels),
		Dispatcher:    event.NewDispatcher(),
	}
}
// 创建tunnel
func createTunnelsWithTunnelConfiguration(configurations []TunnelConfiguration) []tunnel.Tunnel{
var details []map[string]interface{}
for _, config := range configurations {
details = append(details, map[string]interface{}{
"protocol": config.Protocol,
"local_port": config.LocalPort,
"server_port": config.ServerPort,
"host": config.Host,
"proxy_hosts": config.ProxyHosts,
})
}
return tunnel.NewManyTunnels(details)
} |
package plndrcp
import (
"context"
"fmt"
"github.com/plunder-app/plndr-cloud-provider/pkg/ipam"
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/klog"
)
// plndrServices is the JSON document cached in the kube-vip configMap: the
// full list of load-balanced services for a namespace.
type plndrServices struct {
	Services []services `json:"services"`
}
// services describes one load-balanced service entry (VIP, first port,
// protocol, and the owning Kubernetes service's UID/name).
type services struct {
	Vip string `json:"vip"`
	Port int `json:"port"`
	Type string `json:"type"`
	UID string `json:"uid"`
	ServiceName string `json:"serviceName"`
}
//PlndrLoadBalancer -
// plndrLoadBalancerManager implements cloudprovider.LoadBalancer on top of
// configMaps: per-namespace service caches plus a cloud-level ipam config.
type plndrLoadBalancerManager struct {
	kubeClient *kubernetes.Clientset
	nameSpace string
	cloudConfigMap string
}
// newLoadBalancer constructs the configMap-backed load-balancer manager.
// NOTE(review): the serviceCidr parameter is accepted but never used —
// confirm whether it should seed the ipam configuration.
func newLoadBalancer(kubeClient *kubernetes.Clientset, ns, cm, serviceCidr string) cloudprovider.LoadBalancer {
	return &plndrLoadBalancerManager{
		kubeClient: kubeClient,
		nameSpace: ns,
		cloudConfigMap: cm,
	}
}
// EnsureLoadBalancer creates or reconciles the load balancer for the
// service; the nodes argument is not used by this implementation.
func (plb *plndrLoadBalancerManager) EnsureLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (lbs *v1.LoadBalancerStatus, err error) {
	return plb.syncLoadBalancer(service)
}
// UpdateLoadBalancer re-runs the same reconciliation as EnsureLoadBalancer,
// discarding the returned status.
func (plb *plndrLoadBalancerManager) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (err error) {
	_, err = plb.syncLoadBalancer(service)
	return err
}
// EnsureLoadBalancerDeleted removes the service's entry from the configMap
// cache and releases its address.
func (plb *plndrLoadBalancerManager) EnsureLoadBalancerDeleted(ctx context.Context, clusterName string, service *v1.Service) error {
	return plb.deleteLoadBalancer(service)
}
// GetLoadBalancer reports whether this service has a cached load-balancer
// entry, consulting the kube-vip configMap in the service's namespace.
func (plb *plndrLoadBalancerManager) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) {
	// Retrieve the kube-vip configuration from it's namespace
	cm, err := plb.GetConfigMap(PlunderClientConfig, service.Namespace)
	if err != nil {
		// NOTE(review): a missing configMap is reported as exists=true with a
		// nil status — this looks inverted; confirm the intended semantics.
		return nil, true, nil
	}
	// Find the services configuration in the configMap
	svc, err := plb.GetServices(cm)
	if err != nil {
		return nil, false, err
	}
	// Match on the Kubernetes service UID.
	for x := range svc.Services {
		if svc.Services[x].UID == string(service.UID) {
			return &service.Status.LoadBalancer, true, nil
			// return &v1.LoadBalancerStatus{
			// 	Ingress: []v1.LoadBalancerIngress{
			// 		{
			// 			IP: svc.Services[x].Vip,
			// 		},
			// 	},
			// }, true, nil
		}
	}
	return nil, false, nil
}
// GetLoadBalancerName returns the name of the load balancer. Implementations must treat the
// *v1.Service parameter as read-only and not modify it.
func (plb *plndrLoadBalancerManager) GetLoadBalancerName(_ context.Context, clusterName string, service *v1.Service) string {
	return getDefaultLoadBalancerName(service)
}
// getDefaultLoadBalancerName defers to the cloud-provider library's default
// naming scheme.
func getDefaultLoadBalancerName(service *v1.Service) string {
	return cloudprovider.DefaultLoadBalancerName(service)
}
// deleteLoadBalancer removes the service's entry from the namespace's
// kube-vip configMap and releases its allocated address. Missing configMap
// or cache entries are treated as already-deleted (nil error).
//
// Fix: corrected the "doensn't"/"configuraiton" typos in the log messages
// and comments.
func (plb *plndrLoadBalancerManager) deleteLoadBalancer(service *v1.Service) error {
	klog.Infof("deleting service '%s' (%s)", service.Name, service.UID)
	// Get the kube-vip (client) configuration from its namespace
	cm, err := plb.GetConfigMap(PlunderClientConfig, service.Namespace)
	if err != nil {
		klog.Errorf("The configMap [%s] doesn't exist", PlunderClientConfig)
		return nil
	}
	// Find the services configuration in the configMap
	svc, err := plb.GetServices(cm)
	if err != nil {
		klog.Errorf("The service [%s] in configMap [%s] doesn't exist", service.Name, PlunderClientConfig)
		return nil
	}
	// Update the services configuration, by removing the service
	updatedSvc := svc.delServiceFromUID(string(service.UID))
	// Only release the address if one was actually assigned to the service.
	if len(service.Status.LoadBalancer.Ingress) != 0 {
		err = ipam.ReleaseAddress(service.Namespace, service.Spec.LoadBalancerIP)
		if err != nil {
			klog.Errorln(err)
		}
	}
	// Update the configMap
	_, err = plb.UpdateConfigMap(cm, updatedSvc)
	return err
}
// syncLoadBalancer reconciles one service: it ensures both the cloud-level
// ipam configMap and the namespace cache exist, returns early if the service
// is already cached, otherwise allocates an address (when none is set),
// updates the Service object and appends the entry to the namespace cache.
func (plb *plndrLoadBalancerManager) syncLoadBalancer(service *v1.Service) (*v1.LoadBalancerStatus, error) {
	// Get the cloud controller configuration map
	controllerCM, err := plb.GetConfigMap(PlunderCloudConfig, "kube-system")
	if err != nil {
		klog.Errorf("Unable to retrieve kube-vip ipam config from configMap [%s] in kube-system", PlunderClientConfig)
		// TODO - determine best course of action, create one if it doesn't exist
		controllerCM, err = plb.CreateConfigMap(PlunderCloudConfig, "kube-system")
		if err != nil {
			return nil, err
		}
	}
	// Retrieve the kube-vip configuration map
	namespaceCM, err := plb.GetConfigMap(PlunderClientConfig, service.Namespace)
	if err != nil {
		klog.Errorf("Unable to retrieve kube-vip service cache from configMap [%s] in [%s]", PlunderClientConfig, service.Namespace)
		// TODO - determine best course of action
		namespaceCM, err = plb.CreateConfigMap(PlunderClientConfig, service.Namespace)
		if err != nil {
			return nil, err
		}
	}
	// This function reconciles the load balancer state
	klog.Infof("syncing service '%s' (%s)", service.Name, service.UID)
	// Find the services configuration in the configMap
	svc, err := plb.GetServices(namespaceCM)
	if err != nil {
		klog.Errorf("Unable to retrieve services from configMap [%s], [%s]", PlunderClientConfig, err.Error())
		// TODO best course of action, currently we create a new services config
		svc = &plndrServices{}
	}
	// Check for existing configuration; an already-cached service is a no-op.
	existing := svc.findService(string(service.UID))
	if existing != nil {
		klog.Infof("found existing service '%s' (%s) with vip %s", service.Name, service.UID, existing.Vip)
		return &service.Status.LoadBalancer, nil
		// If this is 0.0.0.0 then it's a DHCP lease and we need to return that not the 0.0.0.0
		// if existing.Vip == "0.0.0.0" {
		// 	return &service.Status.LoadBalancer, nil
		// }
		// //
		// return &v1.LoadBalancerStatus{
		// 	Ingress: []v1.LoadBalancerIngress{
		// 		{
		// 			IP: existing.Vip,
		// 		},
		// 	},
		// }, nil
	}
	// Allocate an address from ipam only when the service did not request one.
	if service.Spec.LoadBalancerIP == "" {
		service.Spec.LoadBalancerIP, err = discoverAddress(controllerCM, service.Namespace, plb.cloudConfigMap)
		if err != nil {
			return nil, err
		}
	}
	// TODO - manage more than one set of ports
	newSvc := services{
		ServiceName: service.Name,
		UID: string(service.UID),
		Type: string(service.Spec.Ports[0].Protocol),
		Vip: service.Spec.LoadBalancerIP,
		Port: int(service.Spec.Ports[0].Port),
	}
	klog.Infof("Updating service [%s], with load balancer address [%s]", service.Name, service.Spec.LoadBalancerIP)
	_, err = plb.kubeClient.CoreV1().Services(service.Namespace).Update(service)
	if err != nil {
		// release the address internally as we failed to update service
		ipamerr := ipam.ReleaseAddress(service.Namespace, service.Spec.LoadBalancerIP)
		if ipamerr != nil {
			klog.Errorln(ipamerr)
		}
		return nil, fmt.Errorf("Error updating Service Spec [%s] : %v", service.Name, err)
	}
	// Persist the new entry into the namespace cache.
	svc.addService(newSvc)
	namespaceCM, err = plb.UpdateConfigMap(namespaceCM, svc)
	if err != nil {
		return nil, err
	}
	return &service.Status.LoadBalancer, nil
	// return &v1.LoadBalancerStatus{
	// 	Ingress: []v1.LoadBalancerIngress{
	// 		{
	// 			IP: vip,
	// 		},
	// 	},
	// }, nil
}
// discoverAddress finds a free load-balancer IP for the given namespace from
// the pools configured in cm. It prefers a namespace-specific CIDR
// (cidr-<namespace>), then the global CIDR (cidr-global), then a
// namespace-specific range (range-<namespace>), then the global range
// (range-global). An error is returned when no pool is configured or no
// address is available.
func discoverAddress(cm *v1.ConfigMap, namespace, configMapName string) (vip string, err error) {
	var cidr, ipRange string
	var ok bool
	// Find Cidr
	cidrKey := fmt.Sprintf("cidr-%s", namespace)
	// Lookup current namespace; log directly rather than constructing a
	// throwaway error value just to print it (the original klog.Info(fmt.Errorf(...))).
	if cidr, ok = cm.Data[cidrKey]; !ok {
		klog.Infof("No cidr config for namespace [%s] exists in key [%s] configmap [%s]", namespace, cidrKey, configMapName)
		// Lookup global cidr configmap data
		if cidr, ok = cm.Data["cidr-global"]; !ok {
			klog.Info("No global cidr config exists [cidr-global]")
		} else {
			klog.Infof("Taking address from [cidr-global] pool")
		}
	} else {
		klog.Infof("Taking address from [%s] pool", cidrKey)
	}
	if ok {
		vip, err = ipam.FindAvailableHostFromCidr(namespace, cidr)
		if err != nil {
			return "", err
		}
		return
	}
	// Find Range
	rangeKey := fmt.Sprintf("range-%s", namespace)
	// Lookup current namespace
	if ipRange, ok = cm.Data[rangeKey]; !ok {
		klog.Infof("No range config for namespace [%s] exists in key [%s] configmap [%s]", namespace, rangeKey, configMapName)
		// Lookup global range configmap data
		if ipRange, ok = cm.Data["range-global"]; !ok {
			klog.Info("No global range config exists [range-global]")
		} else {
			klog.Infof("Taking address from [range-global] pool")
		}
	} else {
		klog.Infof("Taking address from [%s] pool", rangeKey)
	}
	if ok {
		vip, err = ipam.FindAvailableHostFromRange(namespace, ipRange)
		if err != nil {
			return vip, err
		}
		return
	}
	// Error strings are lowercase per Go convention.
	return "", fmt.Errorf("no IP address ranges could be found either range-global or range-<namespace>")
}
|
package main
import (
"html/template"
"log"
"net/http"
)
// tmpl is the parsed index page template; template.Must panics at program
// start if templates/index.html is missing or fails to parse.
var tmpl = template.Must(template.ParseFiles("templates/index.html"))
// main registers the index handler and serves HTTP on :8080.
func main() {
	http.HandleFunc("/", indexHandler)
	// ListenAndServe always returns a non-nil error on exit; the original
	// silently discarded it, hiding bind failures (e.g. port already in use).
	log.Fatal(http.ListenAndServe(":8080", nil))
}
// indexHandler renders the index template with a fixed sample payload.
func indexHandler(w http.ResponseWriter, r *http.Request) {
	type Hoge struct {
		Name  string
		Score [3]int
	}
	foo := Hoge{
		Name:  "hoge hoge",
		Score: [3]int{10, 4, 43},
	}
	// A template failure must not kill the whole server (the original called
	// log.Fatal here); log it and answer with a 500 instead.
	if err := tmpl.ExecuteTemplate(w, "index.html", foo); err != nil {
		log.Printf("executing index.html: %v", err)
		http.Error(w, "internal server error", http.StatusInternalServerError)
	}
}
|
package main
import (
"flag"
"fmt"
_ "image/jpeg"
_ "image/png"
"log"
"github.com/sshsmz/grcode"
)
//go build -ldflags "-linkmode external -extldflags -static"
// main decodes QR codes from the image file named by the first CLI argument
// and prints each decoded payload on its own line.
func main() {
	flag.Parse()
	//log.SetFlags(0)
	if flag.NArg() < 1 {
		log.Fatal("Need specify the image file")
	}
	imagePath := flag.Arg(0)
	decoded, err := grcode.GetDataFromFile(imagePath)
	if err != nil {
		log.Fatal(err)
	}
	if len(decoded) == 0 {
		log.Printf("No qrcode detected from file: %s", imagePath)
	}
	for _, payload := range decoded {
		fmt.Printf("%s\n", payload)
	}
}
|
package main
import (
"bufio"
"fmt"
"os"
"strings"
)
// main runs the buffered-writer demo; the word-splitting demo is kept but
// disabled.
func main() {
	//bufioSplit()
	bufioWriter()
}
// bufioSplit demonstrates splitting a reader into words with bufio.Scanner.
func bufioSplit() {
	const input = "feng chen ni hao a"
	scanner := bufio.NewScanner(strings.NewReader(input))
	// The original wrapped bufio.ScanWords in a pass-through closure; the
	// SplitFunc can be passed directly.
	scanner.Split(bufio.ScanWords)
	for scanner.Scan() {
		fmt.Println(scanner.Text())
	}
}
// bufioWriter demonstrates buffering writes to stdout and flushing them once.
func bufioWriter() {
	out := bufio.NewWriter(os.Stdout)
	fmt.Fprintln(out, "hello", "feng")
	fmt.Fprintln(out, "zd")
	out.Flush()
}
|
package request
import (
"context"
"net/http"
"sync"
)
// do abstracts the HTTP round trip so it can be stubbed in tests.
type do func(r *http.Request) (*http.Response, error)

// client issues GET requests through do, bounding fan-out concurrency.
type client struct {
	do               do
	concurrencyLimit int // maximum number of in-flight requests in GetResultChannel
}
// NewClient constructs a client that performs requests via do and allows at
// most concurrencyLimit requests in flight at once.
func NewClient(
	do do,
	concurrencyLimit int,
) *client {
	c := client{do: do, concurrencyLimit: concurrencyLimit}
	return &c
}
// result pairs a request outcome with any transport error.
type result struct {
	Status *status
	Err    error
}

// status captures the HTTP status code and status line of a response.
type status struct {
	Code int
	Msg  string
}
// GetResult issues a GET request to url through the injected do function and
// maps the response to a status value. The response body is closed before
// returning; only the status code and status line are retained.
func (c *client) GetResult(
	ctx context.Context,
	url string,
) (*status, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	res, err := c.do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	// Return an explicit nil error on success rather than the (nil) err
	// variable, which the original relied on.
	return &status{
		Code: res.StatusCode,
		Msg:  res.Status,
	}, nil
}
// GetResultChannel fans out GET requests for all urls, allowing at most
// c.concurrencyLimit requests in flight at once, and streams the outcomes on
// the returned channel. The channel is closed after every worker finishes.
// Note: a worker that observes ctx cancellation before acquiring a slot
// produces no result on the channel at all.
func (c *client) GetResultChannel(
	ctx context.Context,
	urls []string,
) <-chan result {
	// semaphoreChan bounds the number of concurrent requests.
	semaphoreChan := make(chan struct{}, c.concurrencyLimit)
	resultsChan := make(chan result)
	var wg sync.WaitGroup
	wg.Add(len(urls))
	for _, url := range urls {
		go func(url string) { // url passed by value to avoid loop-variable capture
			select {
			case <-ctx.Done():
				// Cancelled before starting: skip the request entirely.
				wg.Done()
			default:
				semaphoreChan <- struct{}{} // acquire a concurrency slot
				resp, err := c.GetResult(ctx, url)
				resultsChan <- result{
					Status: resp,
					Err:    err,
				}
				<-semaphoreChan // release the slot
				wg.Done()
			}
		}(url)
	}
	// Close the channels only after all workers have completed.
	go func() {
		wg.Wait()
		close(resultsChan)
		close(semaphoreChan)
	}()
	return resultsChan
}
|
package keypair
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Ginkgo specs for FromAddress: it satisfies the shared KeyPair behaviour,
// cannot Sign or expose a libp2p private key, but can produce a libp2p
// public key. (address and message are package fixtures defined elsewhere.)
var _ = Describe("keypair.FromAddress", func() {
	var subject KeyPair
	JustBeforeEach(func() {
		// Rebuild the subject before each spec from the shared address fixture.
		subject = &FromAddress{address}
	})
	// Shared KeyPair behaviour specs.
	ItBehavesLikeAKP(&subject)
	Describe("Sign()", func() {
		It("fails", func() {
			_, err := subject.Sign(message)
			Expect(err).To(HaveOccurred())
		})
	})
	Describe("LibP2PPrivKey()", func() {
		It("fails", func() {
			_, err := subject.LibP2PPrivKey()
			Expect(err).To(HaveOccurred())
		})
	})
	Describe("LibP2PPubKey()", func() {
		It("succeeds", func() {
			_, err := subject.LibP2PPubKey()
			Expect(err).To(BeNil())
		})
	})
})
|
// Copyright 2023 SpotHero
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package geocollection provides a data structure for storing and quickly
// searching for items based on geographic coordinates on Earth.
package geocollection
import (
"sync"
"github.com/golang/geo/s1"
"github.com/golang/geo/s2"
)
// EarthRadiusMeters is an approximate representation of the earth's radius in meters.
const EarthRadiusMeters = 6371008.8

// maxCellLevel is the number of levels to specify a leaf cell in s2 -- this is copied
// from s2 because they do not export this value
const maxCellLevel = 30

// cellItems is a map of cell ids to the set of keys pertaining to items geographically contained in that cell
type cellItems map[uint64]map[interface{}]bool

// itemIndex keeps track of which cells a given item belongs to in order to enable fast deletions
type itemIndex struct {
	cellPosition uint64 // cell position (CellID.Pos()) at cellLevel
	cellLevel    int    // s2 cell level of this index entry
}

// collectionContents stores the contents of a key and the original latitude and longitude
// stored with the key.
type collectionContents struct {
	contents            interface{}
	latitude, longitude float64
}
// Collection implements the GeoLocationCollection interface and provides a location based
// cache
type Collection struct {
	// cells is a map of cell level to the items contained in each cell at that zoom level
	cells map[int]cellItems
	// keys maps each key stored to its associated cells to enable fast deletions
	keys map[interface{}][]itemIndex
	// items maps the item key to the item contents
	items map[interface{}]collectionContents
	// mutex guards cells, keys and items; a pointer so that the
	// value-receiver methods all share one lock.
	mutex *sync.RWMutex
}

// LocationCollection defines the interface for interacting with Geo-based collections
type LocationCollection interface {
	Set(key, contents interface{}, latitude, longitude float64)
	Delete(key interface{})
	ItemsWithinDistance(latitude, longitude, distanceMeters float64, params SearchCoveringParameters) ([]interface{}, SearchCoveringResult)
	ItemByKey(key interface{}) interface{}
}
// NewCollection constructs an empty, ready-to-use Collection.
func NewCollection() Collection {
	var c Collection
	c.cells = map[int]cellItems{}
	c.keys = map[interface{}][]itemIndex{}
	c.items = map[interface{}]collectionContents{}
	c.mutex = new(sync.RWMutex)
	return c
}
// Set adds an item with a given key to the geo collection at a particular latitude and longitude.
// If the key already exists at the same location, only its contents are swapped; otherwise any
// previous entry is removed and the item is re-indexed at every cell level.
func (c Collection) Set(key, contents interface{}, latitude, longitude float64) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	newContents := collectionContents{contents: contents, latitude: latitude, longitude: longitude}
	if existingContents, ok := c.items[key]; ok &&
		existingContents.latitude == latitude && existingContents.longitude == longitude {
		// contents changed but the location has not, swap contents and exit
		c.items[key] = newContents
		return
	}
	// Location changed (or key is new): drop any stale index entries first.
	c.delete(key)
	c.items[key] = newContents
	c.keys[key] = make([]itemIndex, 0, maxCellLevel)
	// Index the item in its containing cell at every level from leaf to root.
	leafCellID := s2.CellIDFromLatLng(s2.LatLngFromDegrees(latitude, longitude))
	for level := maxCellLevel; level >= 0; level-- {
		if _, ok := c.cells[level]; !ok {
			c.cells[level] = make(cellItems)
		}
		cellPos := leafCellID.Parent(level).Pos()
		if _, ok := c.cells[level][cellPos]; !ok {
			c.cells[level][cellPos] = make(map[interface{}]bool)
		}
		c.cells[level][cellPos][key] = true
		c.keys[key] = append(
			c.keys[key],
			itemIndex{
				cellPosition: cellPos,
				cellLevel:    level,
			},
		)
	}
}
// Delete removes an item by its key from the collection.
// It takes the write lock; the actual removal is done by delete.
func (c Collection) Delete(key interface{}) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.delete(key)
}
// delete removes an item and all of its per-level cell index entries.
// Callers must already hold the write lock.
func (c Collection) delete(key interface{}) {
	delete(c.items, key)
	indices, found := c.keys[key]
	if !found {
		return
	}
	for _, idx := range indices {
		delete(c.cells[idx.cellLevel][idx.cellPosition], key)
	}
	delete(c.keys, key)
}
// SearchCoveringResult are the boundaries of the cells used in the requested search,
// each cell expressed as a closed polygon of [longitude, latitude] vertices.
type SearchCoveringResult [][][]float64

// SearchCoveringParameters controls the algorithm and parameters used by S2 to determine the covering for the
// requested search area
type SearchCoveringParameters struct {
	LevelMod        int  `json:"level_mod"`
	MaxCells        int  `json:"max_cells"`
	MaxLevel        int  `json:"max_level"`
	MinLevel        int  `json:"min_level"`
	UseFastCovering bool `json:"use_fast_covering"` // use the faster, less precise FastCovering algorithm
}
// ItemsWithinDistance returns all contents stored in the collection within distanceMeters radius from the provided
// latitude and longitude. Note that this is an approximation and items further than distanceMeters may be returned, but
// it is guaranteed that all item ids returned are within distanceMeters. The caller of this function
// must specify all parameters used to generate cell covering as well as whether or not the coverer will use the
// standard covering algorithm or the fast covering algorithm which may be less precise.
func (c Collection) ItemsWithinDistance(
	latitude, longitude, distanceMeters float64, params SearchCoveringParameters,
) ([]interface{}, SearchCoveringResult) {
	// First, generate a spherical cap with an arc length of distanceMeters centered on the given latitude/longitude
	// This is the angle required (in radians) to trace an arc length of distanceMeters on the surface of the sphere
	capAngle := s1.Angle(distanceMeters / EarthRadiusMeters)
	capCenter := NewPointFromLatLng(latitude, longitude)
	searchCap := s2.CapFromCenterAngle(capCenter, capAngle)
	coverer := s2.RegionCoverer{
		MaxLevel: params.MaxLevel,
		MinLevel: params.MinLevel,
		LevelMod: params.LevelMod,
		MaxCells: params.MaxCells,
	}
	region := s2.Region(searchCap)
	var cellUnion s2.CellUnion
	if params.UseFastCovering {
		cellUnion = coverer.FastCovering(region)
	} else {
		cellUnion = coverer.Covering(region)
	}
	c.mutex.RLock()
	defer c.mutex.RUnlock()
	foundItems := make([]interface{}, 0)
	cellBounds := make(SearchCoveringResult, 0, len(cellUnion))
	for _, cell := range cellUnion {
		// get vertices in counter-clockwise order starting from the lower left
		vertices := make([][]float64, 5)
		for i := 0; i < 4; i++ {
			vertex := s2.CellFromCellID(cell).Vertex(i)
			ll := s2.LatLngFromPoint(vertex)
			vertices[i] = []float64{ll.Lng.Degrees(), ll.Lat.Degrees()}
		}
		// close the polygon loop
		vertices[4] = vertices[0]
		cellBounds = append(cellBounds, vertices)
		// Collect every item indexed in this covering cell.
		for key := range c.cells[cell.Level()][cell.Pos()] {
			foundItems = append(foundItems, c.items[key].contents)
		}
	}
	return foundItems, SearchCoveringResult(cellBounds)
}
// ItemByKey returns the contents stored under key, or nil when the key is
// absent; it bypasses any geolocation lookup.
func (c Collection) ItemByKey(key interface{}) interface{} {
	c.mutex.RLock()
	defer c.mutex.RUnlock()
	if entry, found := c.items[key]; found {
		return entry.contents
	}
	return nil
}
// NewPointFromLatLng constructs an s2 point from a lat/lon ordered pair.
func NewPointFromLatLng(latitude, longitude float64) s2.Point {
	return s2.PointFromLatLng(s2.LatLngFromDegrees(latitude, longitude))
}
// EarthDistanceMeters calculates the distance in meters between two points on
// the surface of the Earth (arc angle times the Earth's radius).
func EarthDistanceMeters(p1, p2 s2.Point) float64 {
	angle := p1.Distance(p2)
	return EarthRadiusMeters * float64(angle)
}
|
package word
import (
	"math/bits"
	"sort"
)
// Bit-twiddling constants for 64-bit words.
const (
	Size = 64 //bit size
	Log2 = 6  //w >> Log2 == w/64

	// SWAR popcount masks used by the hand-rolled Hamming-weight routine.
	m1  uint64 = 0x5555555555555555 //binary: 0101...
	m2  uint64 = 0x3333333333333333 //binary: 00110011..
	m4  uint64 = 0x0f0f0f0f0f0f0f0f //binary: 4 zeros, 4 ones ...
	h01 uint64 = 0x0101010101010101 //the sum of 256 to the power of 0,1,2,3...
)
// Weight counts the number of 1-bits (population count) in w.
func Weight(w uint64) int {
	// Replace the hand-rolled SWAR popcount with the standard library's
	// bits.OnesCount64, which compiles to a single POPCNT instruction on
	// amd64 and is the idiomatic spelling.
	return bits.OnesCount64(w)
}
// WeightSlice sums the 1-bit counts of all words in ws.
func WeightSlice(ws []uint64) int {
	total := 0
	for i := range ws {
		total += Weight(ws[i])
	}
	return total
}
// Rank1 counts 1-bits in w[0:i]; i at or beyond the word size counts the
// whole word.
func Rank1(w uint64, i int) int {
	if i >= Size {
		return Weight(w)
	}
	mask := uint64(1)<<uint(i) - 1
	return Weight(w & mask)
}
// Rank1Slice counts 1-bits in the first i bit positions of ws.
func Rank1Slice(ws []uint64, i int) (c int) {
	q, r := i/Size, i%Size
	// Whole words before the target word contribute their full weight.
	if q > 0 {
		c += WeightSlice(ws[:q])
	}
	// Partial word, if it exists.
	if q < len(ws) {
		c += Rank1(ws[q], r)
	}
	return
}
// Rank0 counts 0-bits in w[0:i] by counting 1-bits in the complement.
func Rank0(w uint64, i int) int {
	return Rank1(^w, i)
}
// Rank0Slice counts 0-bits in the first i bit positions of ws
// (every position that is not a 1-bit).
func Rank0Slice(ws []uint64, i int) int {
	return i - Rank1Slice(ws, i)
}
// Select1 returns the index of the (c+1)-th 1-bit in w.
// It panics when w has fewer than c+1 set bits.
func Select1(w uint64, c int) int {
	// A word with exactly c set bits cannot contain a (c+1)-th one either,
	// so the guard must be <=. The original used <, which let
	// Weight(w) == c slip through and made sort.Search return a bogus
	// index (Size-1) instead of panicking.
	if Weight(w) <= c {
		panic("[bit] Select1: Weight(w) <= c")
	}
	// Binary search for the smallest prefix containing c+1 set bits.
	return -1 + sort.Search(Size, func(i int) bool {
		return Rank1(w, i) > c
	})
}
// Select0 returns the index of the (count+1)-th 0-bit, delegating to Select1
// on the complemented word.
func Select0(w uint64, count int) int {
	return Select1(^w, count)
}
|
package metre
import (
"fmt"
zmq "github.com/pebbe/zmq4"
)
// queueFlag is the flag passed to blocking receive operations.
const queueFlag zmq.Flag = 0

// Queue is a point-to-point message queue backed by a ZeroMQ PUSH/PULL socket pair.
type Queue struct {
	URI        string
	PushSocket *zmq.Socket // Socket to push messages to
	PullSocket *zmq.Socket // Socket to pull messages from
}
// BindPush binds the push socket to the queue's URI so that pullers can connect.
func (q Queue) BindPush() error {
	return q.PushSocket.Bind(q.URI)
}
// ConnectPull connects the pull socket to the queue's URI to listen for messages.
func (q Queue) ConnectPull() error {
	return q.PullSocket.Connect(q.URI)
}
// Push pushes an element onto the queue using flag value 1 (presumably the
// non-blocking send flag — confirm against the zmq4 constants) and returns
// the number of bytes sent.
func (q Queue) Push(msg string) (int, error) {
	return q.PushSocket.Send(msg, 1)
}
// Pop pulls off the last element from the queue.
// NOTE(review): the receive error is silently discarded, so a failed Recv is
// indistinguishable from an empty message — consider surfacing the error.
func (q Queue) Pop() string {
	m, _ := q.PullSocket.Recv(queueFlag)
	return m
}
// NewQueue constructs a Queue with PUSH and PULL sockets created from a fresh
// ZeroMQ context targeting tcp://<uri>.
func NewQueue(uri string) (Queue, error) {
	u := "tcp://" + uri
	// The original discarded the context-creation error; fail fast instead.
	c, err := zmq.NewContext()
	if err != nil {
		return Queue{}, fmt.Errorf("context initialization failed: %v", err)
	}
	pullSoc, pullErr := c.NewSocket(zmq.PULL)
	if pullErr != nil {
		return Queue{}, fmt.Errorf("pull socket initialization failed: %v", pullErr)
	}
	pushSoc, pushErr := c.NewSocket(zmq.PUSH)
	if pushErr != nil {
		return Queue{}, fmt.Errorf("push socket initialization failed: %v", pushErr)
	}
	// Keyed literal avoids any dependence on the Queue field order.
	return Queue{URI: u, PushSocket: pushSoc, PullSocket: pullSoc}, nil
}
|
package leetcode
/*Given an array of string words. Return all strings in words which is substring of another word in any order.
String words[i] is substring of words[j], if can be obtained removing some characters to left and/or right side of words[j].*/
import "strings"
// stringMatching returns every word that is a substring of some other,
// strictly longer word in words. Each match is reported once, in the order
// the words appear.
func stringMatching(words []string) []string {
	matches := make([]string, 0)
	for _, candidate := range words {
		for _, other := range words {
			// Only a strictly longer word can properly contain candidate;
			// this also skips comparing a word against itself.
			if len(candidate) < len(other) && strings.Contains(other, candidate) {
				matches = append(matches, candidate)
				break
			}
		}
	}
	return matches
}
|
package goSolution
// findMin returns the minimum element of a rotated sorted array of distinct
// values using binary search.
func findMin(nums []int) int {
	lo, hi := 0, len(nums)-1
	for lo != hi {
		// Current window already sorted: its first element is the minimum.
		if nums[lo] <= nums[hi] {
			break
		}
		mid := (lo + hi) >> 1
		if nums[mid] >= nums[lo] {
			// Rotation point lies strictly to the right of mid.
			lo = mid + 1
		} else {
			// Rotation point is at mid or to its left.
			hi = mid
		}
	}
	return nums[lo]
}
package router
import (
"reflect"
"testing"
)
// TestRouterSubscribeUnsubscribe verifies that subscribing registers a
// connection, that a second subscription using the same channel is rejected
// with ErrChannelAlreadySubscribed, and that the returned unsubscribe func
// removes the connection and is safe to call twice.
func TestRouterSubscribeUnsubscribe(t *testing.T) {
	g := New(true)
	c := make(chan []byte)
	u, _, err := g.Subscribe(1, c)
	if err != nil {
		t.Errorf("First subscribe returned error %s", err.Error())
	}
	if len(g.allConnected) != 1 {
		t.Error("Subscribe should add connection to allConnected")
	}
	// Re-using the same channel for a different id must be rejected.
	if _, _, err := g.Subscribe(2, c); err != ErrChannelAlreadySubscribed {
		t.Error("Second subscribe didn't return ErrChannelAlreadySubscribed error")
	}
	if len(g.allConnected) != 1 {
		t.Error("allConnected should not change")
	}
	u()
	if len(g.allConnected) != 0 {
		t.Error("allConnected should be empty")
	}
	u() // should be a NOOP at this point
}
// TestRouterActions drives direct sends, broadcasts and follower fan-out
// through the router, asserting after each action exactly which subscribers
// received the message. The assertions are order-dependent: each
// receivedMsg call drains at most one message from its channel.
func TestRouterActions(t *testing.T) {
	g := New(true)
	c1 := make(chan []byte, 1)
	c2 := make(chan []byte, 1)
	c3 := make(chan []byte, 1)
	g.Subscribe(1, c1)
	g.Subscribe(2, c2)
	g.Subscribe(3, c3)
	msg := []byte("msg")
	// receivedMsg reports (non-blockingly) whether c holds msg, failing the
	// test when a different payload arrives.
	receivedMsg := func(c <-chan []byte) bool {
		select {
		case m := <-c:
			if !reflect.DeepEqual(m, msg) {
				t.Fatalf("Received incorrect message, %#v != %#v", m, msg)
			}
			return true
		default:
			return false
		}
	}
	g.SendMsg(2, msg)
	if a, b, c := receivedMsg(c1), receivedMsg(c2), receivedMsg(c3); a || !b || c {
		t.Errorf("Only client 2 should receive a message (%v, %v, %v)", a, b, c)
	}
	g.Broadcast(msg)
	if a, b, c := receivedMsg(c1), receivedMsg(c2), receivedMsg(c3); !a || !b || !c {
		t.Errorf("All clients should receive a message (%v, %v, %v)", a, b, c)
	}
	g.Follow(1, 3)
	g.Follow(2, 3)
	g.Follow(3, 2)
	g.SendMsgToFollowers(3, msg)
	if a, b, c := receivedMsg(c1), receivedMsg(c2), receivedMsg(c3); !a || !b || c {
		t.Errorf("Only clients 1 and 2 (followers of 3) should receive a message (%v, %v, %v)", a, b, c)
	}
	g.Unfollow(2, 3)
	g.SendMsgToFollowers(3, msg)
	if a, b, c := receivedMsg(c1), receivedMsg(c2), receivedMsg(c3); !a || b || c {
		t.Errorf("Only client 1 (follower of 3) should receive a message (%v, %v, %v)", a, b, c)
	}
	// NOOPS
	g.SendMsg(4, msg)
	g.Unfollow(2, 3)
	// first follow, then subscribe
	g.Follow(4, 1)
	c4 := make(chan []byte, 1)
	g.Subscribe(4, c4)
	g.SendMsgToFollowers(1, msg)
	if a, b, c, d := receivedMsg(c1), receivedMsg(c2), receivedMsg(c3), receivedMsg(c4); a || b || c || !d {
		t.Errorf("Only client 4 (follower of 1) should receive a message (%v, %v, %v, %v)", a, b, c, d)
	}
}
|
package annotations
import (
"github.com/haproxytech/kubernetes-ingress/controller/haproxy/api"
"github.com/haproxytech/kubernetes-ingress/controller/store"
)
// HandleGlobalAnnotations processes every global HAProxy annotation present in
// the given annotation map, reporting whether HAProxy must be reloaded and/or
// restarted. (The third parameter was renamed from the typo'd `forcePase`;
// Go call sites are positional, so this is backward compatible.)
func HandleGlobalAnnotations(k8sStore store.K8s, client api.HAProxyClient, forceParse bool, annotations store.MapStringW) (restart bool, reload bool) {
	annList := GetGlobalAnnotations(client)
	for _, a := range annList {
		annValue, _ := k8sStore.GetValueFromAnnotations(a.GetName(), annotations)
		if annValue == nil {
			continue
		}
		reload = HandleAnnotation(a, *annValue, forceParse) || reload
	}
	// The syslog-server annotation may additionally require a restart (stdout
	// logging). Find it by type instead of relying on the fragile hard-coded
	// index into annList, which silently breaks if the list order changes.
	for _, a := range annList {
		if syslog, ok := a.(*GlobalSyslogServers); ok {
			restart = syslog.Restart()
		}
	}
	return restart, reload
}
// GetGlobalAnnotations returns the list of global annotation handlers.
// NOTE(review): other code may rely on the position of entries in this slice
// (e.g. positional lookups of the syslog-server handler) — keep the ordering
// stable.
func GetGlobalAnnotations(client api.HAProxyClient) []Annotation {
	return []Annotation{
		// Config snippets.
		NewFrontendCfgSnippet("frontend-config-snippet", client, []string{"http", "https"}),
		NewFrontendCfgSnippet("stats-config-snippet", client, []string{"stats"}),
		NewGlobalCfgSnippet("global-config-snippet", client),
		// Global section parameters.
		NewGlobalSyslogServers("syslog-server", client),
		NewGlobalNbthread("nbthread", client),
		NewGlobalMaxconn("maxconn", client),
		NewGlobalHardStopAfter("hard-stop-after", client),
		// Default options.
		NewDefaultOption("http-server-close", client),
		NewDefaultOption("http-keep-alive", client),
		NewDefaultOption("dontlognull", client),
		NewDefaultOption("logasap", client),
		// Default timeouts.
		NewDefaultTimeout("timeout-http-request", client),
		NewDefaultTimeout("timeout-connect", client),
		NewDefaultTimeout("timeout-client", client),
		NewDefaultTimeout("timeout-client-fin", client),
		NewDefaultTimeout("timeout-queue", client),
		NewDefaultTimeout("timeout-server", client),
		NewDefaultTimeout("timeout-server-fin", client),
		NewDefaultTimeout("timeout-tunnel", client),
		NewDefaultTimeout("timeout-http-keep-alive", client),
		NewDefaultLogFormat("log-format", client),
	}
}
|
package _692_Top_K_Frequent_Words
import (
"testing"
"github.com/shadas/leetcode_notes/utils/array"
)
// TestTopKFrequent exercises topKFrequent with the two canonical LeetCode 692
// examples and compares against the expected ordered results.
func TestTopKFrequent(t *testing.T) {
	var (
		words, ret []string
		k          int
	)
	// Example 1: two most frequent words.
	words, k = []string{"i", "love", "leetcode", "i", "love", "coding"}, 2
	ret = topKFrequent(words, k)
	if !array.IsStrArrayEqual(ret, []string{"i", "love"}) {
		t.Errorf("wrong ret=%v", ret)
	}
	// Example 2: four most frequent words.
	words, k = []string{"the", "day", "is", "sunny", "the", "the", "the", "sunny", "is", "is"}, 4
	ret = topKFrequent(words, k)
	if !array.IsStrArrayEqual(ret, []string{"the", "is", "sunny", "day"}) {
		t.Errorf("wrong ret=%v", ret)
	}
}
// TestLess checks Item ordering with equal and with differing counts.
func TestLess(t *testing.T) {
	first, second := &Item{word: "i", count: 2}, &Item{word: "love", count: 2}
	if !Less(first, second) {
		t.Error("should be true")
	}
	first, second = &Item{word: "i", count: 2}, &Item{word: "love", count: 1}
	if Less(first, second) {
		t.Error("should be false")
	}
}
// TestSortUtilK runs SortUtilK over a small fixture; it currently only checks
// that the call completes without panicking.
func TestSortUtilK(t *testing.T) {
	var (
		s []*Item
		k int
	)
	// Keyed composite literals (matching TestLess) are robust against field
	// reordering and satisfy `go vet`'s composite-literal check.
	s, k = []*Item{
		{word: "the", count: 4},
		{word: "day", count: 1},
		{word: "is", count: 3},
		{word: "sunny", count: 2},
	}, 4
	SortUtilK(s, k)
	//printL(s)
}
|
package api
import (
"github.com/golang/glog"
)
// ApiService is the API service implementation; it must implement every
// method of the service's ApiService interface.
type ApiService struct {
	DeployAddress string   // address the service is deployed at
	middleWare    []func() // middleware functions run by RunMiddleWare in registration order
}
// init initializes the middleware list and registers a default panic-recovery
// middleware. (Receiver renamed from the non-idiomatic `this`.)
func (s *ApiService) init() {
	s.middleWare = make([]func(), 0)
	// NOTE(review): recover() only has an effect when the function it is
	// called from is itself executing as a deferred call; verify that this
	// middleware is invoked via defer, otherwise the recover is a no-op.
	s.SetMiddleWare(func() {
		if r := recover(); r != nil {
			glog.Errorln("panic", r)
		}
	})
}
// SetMiddleWare appends f to the middleware chain.
// (Receiver renamed from the non-idiomatic `this`.)
func (s *ApiService) SetMiddleWare(f func()) {
	s.middleWare = append(s.middleWare, f)
}
// RunMiddleWare invokes every registered middleware in registration order.
// (Receiver renamed from the non-idiomatic `this`.)
func (s *ApiService) RunMiddleWare() {
	for _, f := range s.middleWare {
		f()
	}
}
|
// DO NOT EDIT. This file was generated by "github.com/frk/gosql".
package testdata
import (
"time"
)
// _FilterBasicRecords_colmap maps FilterBasicRecords field names to their
// quoted SQL column names.
var _FilterBasicRecords_colmap = map[string]string{
	"Id":        `"id"`,
	"Email":     `"email"`,
	"FullName":  `"full_name"`,
	"CreatedAt": `"created_at"`,
}
// Init initializes the underlying Filter with the column map and an empty alias.
func (f *FilterBasicRecords) Init() {
	f.Filter.Init(_FilterBasicRecords_colmap, "")
}
// Id appends a condition on the "id" column using operator op and value val.
func (f *FilterBasicRecords) Id(op string, val int) *FilterBasicRecords {
	f.Filter.Col(`"id"`, op, val)
	return f
}
// Email appends a condition on the "email" column using operator op and value val.
func (f *FilterBasicRecords) Email(op string, val string) *FilterBasicRecords {
	f.Filter.Col(`"email"`, op, val)
	return f
}
// FullName appends a condition on the "full_name" column using operator op and value val.
func (f *FilterBasicRecords) FullName(op string, val string) *FilterBasicRecords {
	f.Filter.Col(`"full_name"`, op, val)
	return f
}
// CreatedAt appends a condition on the "created_at" column using operator op and value val.
func (f *FilterBasicRecords) CreatedAt(op string, val time.Time) *FilterBasicRecords {
	f.Filter.Col(`"created_at"`, op, val)
	return f
}
// And opens an AND-ed condition group populated by nest; a nil nest adds the
// bare AND conjunction.
func (f *FilterBasicRecords) And(nest func(*FilterBasicRecords)) *FilterBasicRecords {
	if nest == nil {
		f.Filter.And(nil)
		return f
	}
	f.Filter.And(func() {
		nest(f)
	})
	return f
}
// Or opens an OR-ed condition group populated by nest; a nil nest adds the
// bare OR conjunction.
func (f *FilterBasicRecords) Or(nest func(*FilterBasicRecords)) *FilterBasicRecords {
	if nest == nil {
		f.Filter.Or(nil)
		return f
	}
	f.Filter.Or(func() {
		nest(f)
	})
	return f
}
|
package message
import (
"bufio"
"crypto/sha256"
"encoding/hex"
"golang.org/x/crypto/ripemd160"
"io"
"os"
)
// CalculateChecksums calculates a number of hashes for the given reader in one go.
// Taken from http://marcio.io/2015/07/calculating-multiple-file-hashes-in-a-single-pass/
// NOTE(review): x/crypto/ripemd160 is deprecated upstream — confirm whether
// consumers of ChecksumList still require that digest.
func CalculateChecksums(r io.Reader) (ChecksumList, error) {
	sha256Hash := sha256.New()
	ripemd160Hash := ripemd160.New()
	// Buffer reads at the OS page size to reduce syscall count.
	pageSize := os.Getpagesize()
	reader := bufio.NewReaderSize(r, pageSize)
	// Feed both hashes in a single pass over the input.
	multiWriter := io.MultiWriter(ripemd160Hash, sha256Hash)
	if _, err := io.Copy(multiWriter, reader); err != nil {
		return ChecksumList{}, err
	}
	// Size hint matches the two entries actually stored (the original
	// over-allocated for 4).
	ret := make(ChecksumList, 2)
	ret["sha256"] = hex.EncodeToString(sha256Hash.Sum(nil))
	ret["ripemd160"] = hex.EncodeToString(ripemd160Hash.Sum(nil))
	return ret, nil
}
|
package main
import (
"app/templates"
"database/sql"
"log"
"sort"
"strconv"
"github.com/kataras/iris/context"
)
// jsonHandler writes the benchmark JSON greeting, with the Server header set
// before the body is written.
func jsonHandler(ctx context.Context) {
	ctx.Header("Server", "Iris")
	payload := context.Map{"message": "Hello, World!"}
	ctx.JSON(payload)
}
// plaintextHandler writes the plaintext greeting, with the Server header set
// before the body is written.
func plaintextHandler(ctx context.Context) {
	ctx.Header("Server", "Iris")
	const greeting = "Hello, World!"
	ctx.Text(greeting)
}
// dbHandler responds with a single random World row as JSON.
func dbHandler(ctx context.Context) {
	ctx.Header("Server", "Iris")
	world := getOneRandomWorld()
	ctx.JSON(world)
}
// queriesHandler responds with q random World rows, where q comes from the
// `queries` URL parameter clamped to [1, 500].
func queriesHandler(ctx context.Context) {
	count, err := strconv.Atoi(ctx.URLParam("queries"))
	switch {
	case err != nil || count < 1:
		count = 1
	case count > 500:
		count = 500
	}
	results := make([]World, count)
	for i := range results {
		results[i] = getOneRandomWorld()
	}
	ctx.Header("Server", "Iris")
	ctx.JSON(results)
}
// updateHandler returns a handler that updates q random World rows in db,
// where q comes from the `queries` URL parameter clamped to [1, 500].
func updateHandler(db *sql.DB) func(ctx context.Context) {
	return func(ctx context.Context) {
		count, err := strconv.Atoi(ctx.URLParam("queries"))
		switch {
		case err != nil || count < 1:
			count = 1
		case count > 500:
			count = 500
		}
		ctx.Header("Server", "Iris")
		ctx.JSON(updateRandomWorlds(db, count))
	}
}
// fortuneHandler loads all fortunes, appends the request-time fortune, sorts
// them by message and renders them through ctx.View.
func fortuneHandler(ctx context.Context) {
	rows, err := fortuneStmt.Query()
	if err != nil {
		log.Fatalf("Can't query fortunes: %s\n", err)
	}
	// Close the rows on every exit path rather than relying on a single
	// explicit Close at the end.
	defer rows.Close()
	fortunes := make([]Fortune, 0, 16)
	var fortune Fortune
	for rows.Next() {
		if err = rows.Scan(&fortune.ID, &fortune.Message); err != nil {
			log.Fatalf("Can't scan fortune: %s\n", err)
		}
		fortunes = append(fortunes, fortune)
	}
	// Surface iteration errors, which rows.Next terminates on silently.
	if err = rows.Err(); err != nil {
		log.Fatalf("Can't iterate fortunes: %s\n", err)
	}
	fortunes = append(fortunes, Fortune{Message: "Additional fortune added at request time."})
	sort.Slice(fortunes, func(i, j int) bool {
		return fortunes[i].Message < fortunes[j].Message
	})
	ctx.Header("Server", "Iris")
	ctx.View("fortunes.html", struct {
		Fortunes []Fortune
	}{fortunes})
}
// fortuneQuickHandler loads all fortunes, appends the request-time fortune,
// sorts them by message and renders them via templates.WriteFortunePage.
func fortuneQuickHandler(ctx context.Context) {
	rows, err := fortuneStmt.Query()
	if err != nil {
		log.Fatalf("Can't query fortunes: %s\n", err)
	}
	// Close the rows on every exit path rather than relying on a single
	// explicit Close at the end.
	defer rows.Close()
	fortunes := make([]templates.Fortune, 0, 16)
	var fortune templates.Fortune
	for rows.Next() {
		if err = rows.Scan(&fortune.ID, &fortune.Message); err != nil {
			log.Fatalf("Can't scan fortune: %s\n", err)
		}
		fortunes = append(fortunes, fortune)
	}
	// Surface iteration errors, which rows.Next terminates on silently.
	if err = rows.Err(); err != nil {
		log.Fatalf("Can't iterate fortunes: %s\n", err)
	}
	fortunes = append(fortunes, templates.Fortune{Message: "Additional fortune added at request time."})
	sort.Slice(fortunes, func(i, j int) bool {
		return fortunes[i].Message < fortunes[j].Message
	})
	ctx.Header("Server", "Iris")
	ctx.ContentType("text/html; charset=utf-8")
	templates.WriteFortunePage(ctx.ResponseWriter(), fortunes)
}
|
package envoyconfig
import (
"fmt"
"strconv"
envoy_config_core_v3 "github.com/envoyproxy/go-control-plane/envoy/config/core/v3"
envoy_config_listener_v3 "github.com/envoyproxy/go-control-plane/envoy/config/listener/v3"
envoy_config_route_v3 "github.com/envoyproxy/go-control-plane/envoy/config/route/v3"
envoy_http_connection_manager "github.com/envoyproxy/go-control-plane/envoy/extensions/filters/network/http_connection_manager/v3"
"google.golang.org/protobuf/types/known/durationpb"
"google.golang.org/protobuf/types/known/wrapperspb"
"github.com/pomerium/pomerium/config"
)
// buildOutboundListener builds the envoy listener for outbound (egress)
// traffic, bound to 127.0.0.1 on the configured outbound port, with a single
// filter chain containing the outbound HTTP connection manager.
func (b *Builder) buildOutboundListener(cfg *config.Config) (*envoy_config_listener_v3.Listener, error) {
	outboundPort, err := strconv.Atoi(cfg.OutboundPort)
	if err != nil {
		return nil, fmt.Errorf("invalid outbound port %v: %w", cfg.OutboundPort, err)
	}
	filter, err := b.buildOutboundHTTPConnectionManager()
	if err != nil {
		return nil, fmt.Errorf("error building outbound http connection manager filter: %w", err)
	}
	li := &envoy_config_listener_v3.Listener{
		Name: "outbound-ingress",
		Address: &envoy_config_core_v3.Address{
			Address: &envoy_config_core_v3.Address_SocketAddress{
				SocketAddress: &envoy_config_core_v3.SocketAddress{
					Address: "127.0.0.1",
					PortSpecifier: &envoy_config_core_v3.SocketAddress_PortValue{
						PortValue: uint32(outboundPort),
					},
				},
			},
		},
		FilterChains: []*envoy_config_listener_v3.FilterChain{{
			Name:    "outbound-ingress",
			Filters: []*envoy_config_listener_v3.Filter{filter},
		}},
	}
	return li, nil
}
// buildOutboundHTTPConnectionManager builds the http_connection_manager
// network filter used by the outbound listener, wired to the outbound route
// configuration.
func (b *Builder) buildOutboundHTTPConnectionManager() (*envoy_config_listener_v3.Filter, error) {
	rc, err := b.buildOutboundRouteConfiguration()
	if err != nil {
		return nil, err
	}
	tc := marshalAny(&envoy_http_connection_manager.HttpConnectionManager{
		CodecType:  envoy_http_connection_manager.HttpConnectionManager_AUTO,
		StatPrefix: "grpc_egress",
		// limit request first byte to last byte time
		RequestTimeout: &durationpb.Duration{
			Seconds: 15,
		},
		RouteSpecifier: &envoy_http_connection_manager.HttpConnectionManager_RouteConfig{
			RouteConfig: rc,
		},
		HttpFilters: []*envoy_http_connection_manager.HttpFilter{
			HTTPRouterFilter(),
		},
	})
	return &envoy_config_listener_v3.Filter{
		Name: "envoy.filters.network.http_connection_manager",
		ConfigType: &envoy_config_listener_v3.Filter_TypedConfig{
			TypedConfig: tc,
		},
	}, nil
}
// buildOutboundRouteConfiguration builds a route configuration with one
// virtual host matching all domains and carrying the outbound gRPC routes.
func (b *Builder) buildOutboundRouteConfiguration() (*envoy_config_route_v3.RouteConfiguration, error) {
	return b.buildRouteConfiguration("grpc", []*envoy_config_route_v3.VirtualHost{{
		Name:    "grpc",
		Domains: []string{"*"},
		Routes:  b.buildOutboundRoutes(),
	}})
}
// buildOutboundRoutes returns the gRPC routes for the authorize, databroker
// and control-plane clusters. Order matters: the control-plane "/" prefix is
// a catch-all and must come last.
func (b *Builder) buildOutboundRoutes() []*envoy_config_route_v3.Route {
	// Def pairs a target cluster with the path prefixes routed to it.
	type Def struct {
		Cluster  string
		Prefixes []string
	}
	defs := []Def{
		{
			Cluster: "pomerium-authorize",
			Prefixes: []string{
				"/envoy.service.auth.v3.Authorization/",
			},
		},
		{
			Cluster: "pomerium-databroker",
			Prefixes: []string{
				"/databroker.DataBrokerService/",
				"/registry.Registry/",
			},
		},
		{
			Cluster: "pomerium-control-plane-grpc",
			Prefixes: []string{
				"/",
			},
		},
	}
	var routes []*envoy_config_route_v3.Route
	for _, def := range defs {
		for _, prefix := range def.Prefixes {
			routes = append(routes, &envoy_config_route_v3.Route{
				Name: def.Cluster,
				Match: &envoy_config_route_v3.RouteMatch{
					PathSpecifier: &envoy_config_route_v3.RouteMatch_Prefix{Prefix: prefix},
					Grpc:          &envoy_config_route_v3.RouteMatch_GrpcRouteMatchOptions{},
				},
				Action: &envoy_config_route_v3.Route_Route{
					Route: &envoy_config_route_v3.RouteAction{
						ClusterSpecifier: &envoy_config_route_v3.RouteAction_Cluster{
							Cluster: def.Cluster,
						},
						// rewrite the host header
						HostRewriteSpecifier: &envoy_config_route_v3.RouteAction_AutoHostRewrite{
							AutoHostRewrite: wrapperspb.Bool(true),
						},
						// disable the timeout to support grpc streaming
						Timeout:     durationpb.New(0),
						IdleTimeout: durationpb.New(0),
					},
				},
			})
		}
	}
	return routes
}
|
package steps
import (
"context"
"fmt"
componenttest "github.com/ONSdigital/dp-component-test"
"github.com/chromedp/chromedp"
"github.com/hashicorp/go-uuid"
"github.com/stretchr/testify/assert"
)
// Collection drives the browser through collection-related UI flows in the
// component tests, recording assertion failures via the embedded
// componenttest.ErrorFeature.
type Collection struct {
	componenttest.ErrorFeature
	// api is the fake upstream API used to stub collection endpoints.
	api *FakeApi
	// chromeCtx is the chromedp context controlling the browser session.
	chromeCtx context.Context
}
// NewCollectionAction returns a Collection wired to the given fake API and
// chromedp browser context.
func NewCollectionAction(f *FakeApi, c context.Context) *Collection {
	collection := &Collection{api: f, chromeCtx: c}
	return collection
}
// create stubs the collection API endpoints for a new collection and then
// drives the browser to fill in and submit the "create collection" form.
//
// The fake API is primed with the POST /collection response (and the
// follow-up GET of the collection details that appears in the UI) before the
// form is submitted. Returns any browser-automation error.
func (c *Collection) create(collectionName string) error {
	// FIX: the error from GenerateUUID was previously discarded with "_".
	collectionId, err := uuid.GenerateUUID()
	if err != nil {
		return fmt.Errorf("generating collection id: %w", err)
	}
	// Here we set up the fake responses that would be returned from the
	// collection-creating API, including the fake collection details that
	// will appear in the browser UI as a result.
	collectionDetailsResponse := buildCollectionDetailsResponseForId(collectionName, collectionId)
	c.api.setJsonResponseForPost("/collection", collectionDetailsResponse, 200).AssertCustom(c.api.collectOutboundRequestBodies)
	c.api.setJsonResponseForGet("/collectionDetails/"+collectionId, collectionDetailsResponse)
	// Enter the collection name, select the "manual" type and submit the form.
	return chromedp.Run(c.chromeCtx,
		chromedp.SendKeys("#collection-name", collectionName),
		chromedp.Click(`input[value="manual"]`),
		chromedp.Click("button"),
	)
}
// assertHasTitle asserts that the collection drawer heading shows the
// expected title.
func (c *Collection) assertHasTitle(expectedTitle string) error {
	return c.assertHasTextInSelector(expectedTitle, ".drawer h2")
}
// assertHasPublishSchedule asserts that the paragraph directly under the
// drawer heading shows the expected publish schedule text.
func (c *Collection) assertHasPublishSchedule(expectedPublishSchedule string) error {
	return c.assertHasTextInSelector(expectedPublishSchedule, ".drawer h2 + p")
}
// assertHasTextInSelector waits for the first element matching the CSS
// selector to become visible, reads its text, and asserts it equals
// expectedText. Assertion failures are recorded on the embedded ErrorFeature
// and surfaced via StepError(); a chromedp failure (e.g. the element never
// appearing) is returned directly.
func (c *Collection) assertHasTextInSelector(expectedText string, selector string) error {
	var actualText string
	if err := chromedp.Run(
		c.chromeCtx,
		chromedp.Text(selector, &actualText, chromedp.NodeVisible, chromedp.ByQuery),
	); err != nil {
		return err
	}
	assert.Equal(c, expectedText, actualText, fmt.Sprintf("expected to see text: '%s' in selector: %s", expectedText, selector))
	return c.StepError()
}
func buildCollectionDetailsResponseForId(collectionName string, id string) string {
return fmt.Sprintf(`
{
"inProgress":[],
"complete":[],
"reviewed":[],
"timeseriesImportFiles":[],
"approvalStatus":"NOT_STARTED",
"pendingDeletes":[],
"datasets":[],
"datasetVersions":[],
"teamsDetails":[],
"id":"%s",
"name":"%s",
"type":"manual",
"teams":[]
}`, id, collectionName)
}
|
package chirp
import (
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"log"
"net"
"regexp"
"time"
"golang.org/x/net/ipv4"
"golang.org/x/net/ipv6"
)
// MaxPayloadBytes is the maximum allowed size of a payload when serialized into
// JSON
const MaxPayloadBytes = 32 * 1024

// maxMessageBytes bounds a whole wire message: the 32KB payload plus 1KB of
// headroom for the message envelope (type, sender_id, service_name, ttl).
const maxMessageBytes = 33 * 1024

// Multicast groups that all publishers and listeners join.
var ipv4Group = net.IPv4(224, 0, 0, 224)
var ipv6Group = net.ParseIP("FF06::224")

// chirpPort is the UDP port used for all chirp multicast traffic.
const chirpPort = 6464

// serviceNameRegExp matches the characters allowed in a service name.
// ValidateServiceName compares FindString(name) against the whole name, which
// effectively anchors this pattern to the full string.
var serviceNameRegExp = regexp.MustCompile(`[a-zA-Z0-9\.\-]+`)
// temporaryError marks a transient failure (e.g. a corrupt or oversized
// datagram) that readers should log and skip rather than abort on.
type temporaryError struct {
	cause string
}

// Error returns the human-readable cause of the failure.
func (e temporaryError) Error() string {
	return e.cause
}
// publisherPayload holds a service payload that has already been serialized
// to JSON, so it can be embedded verbatim when a message is marshaled.
type publisherPayload []byte

// MarshalJSON emits the pre-serialized bytes as-is; an empty payload is
// encoded as JSON null.
func (b publisherPayload) MarshalJSON() ([]byte, error) {
	if len(b) > 0 {
		return b, nil
	}
	return []byte("null"), nil
}
// messageType identifies the kind of wire message being exchanged.
type messageType string

// The wire message types. All three constants are explicitly typed as
// messageType; previously only the first was, leaving the other two as
// untyped string constants.
const (
	// messageTypeNewListener announces a new listener joining the group.
	messageTypeNewListener messageType = "new_listener"
	// messageTypePublishService announces (or refreshes) a published service.
	messageTypePublishService messageType = "publish"
	// messageTypeRemoveService withdraws a previously published service.
	messageTypeRemoveService messageType = "remove_service"
)
// message is the wire format exchanged over the multicast group. The
// unexported fields are populated locally and never serialized.
type message struct {
	// srcIP is the source address the datagram arrived from (set on receive).
	srcIP net.IP
	// payloadBytes is the pre-serialized payload used when sending; see
	// message.MarshalJSON.
	payloadBytes publisherPayload
	Type         messageType `json:"type"`
	// SenderID is a 32-char hex string (16 random bytes) identifying the peer.
	SenderID    string `json:"sender_id"`
	ServiceName string `json:"service_name"`
	// Payload is the decoded payload on received publish messages.
	Payload map[string]interface{} `json:"payload"`
	// TTL is the service's advertised lifetime in seconds (publish only).
	TTL uint `json:"ttl"`
}
// valid checks that a received message is well-formed enough to act on: a
// 16-byte hex sender id, a service name appropriate for the message type,
// and (for publish messages) a TTL of at least 10 seconds. It returns a
// descriptive error for the first rule violated, or nil.
func (m *message) valid() error {
	if m == nil {
		return errors.New("message is nil")
	}
	idBytes, err := hex.DecodeString(m.SenderID)
	if err != nil {
		return errors.New("unable to decode 'sender_id' from hex")
	}
	if len(idBytes) != 16 {
		return fmt.Errorf("'sender_id' must be 16 bytes long (found %d)", len(idBytes))
	}
	if m.ServiceName == "" {
		return fmt.Errorf("'service_name' is missing")
	}
	switch m.Type {
	case messageTypeNewListener:
		// wildcard is acceptable for listeners
		if m.ServiceName != "*" {
			if err := ValidateServiceName(m.ServiceName); err != nil {
				return err
			}
		}
	case messageTypePublishService:
		if err := ValidateServiceName(m.ServiceName); err != nil {
			return err
		}
		// enforce the protocol's minimum service lifetime
		if m.TTL < 10 {
			return errors.New("ttl must be at least 10 seconds")
		}
	case messageTypeRemoveService:
		if err := ValidateServiceName(m.ServiceName); err != nil {
			return err
		}
	default:
		// unknown message type
		return errors.New("unknown message type")
	}
	return nil
}
// MarshalJSON encodes only the fields relevant to the message type: every
// message carries type, sender_id and service_name, while publish messages
// additionally carry the ttl and the pre-serialized payload (if any).
func (m message) MarshalJSON() ([]byte, error) {
	out := map[string]interface{}{
		"type":         m.Type,
		"sender_id":    m.SenderID,
		"service_name": m.ServiceName,
	}
	if m.Type == messageTypePublishService {
		out["ttl"] = m.TTL
		if m.payloadBytes != nil {
			out["payload"] = m.payloadBytes
		}
	}
	return json.Marshal(out)
}
// connection is an abstraction layer to smooth over the API differences
// between the ipv4 and ipv6 packet connections; exactly one of v4/v6 is set.
type connection struct {
	// groupAddr is the multicast group (and port) datagrams are sent to.
	groupAddr *net.UDPAddr
	v4        *ipv4.PacketConn
	v6        *ipv6.PacketConn
	// readBuf is the scratch buffer reused by read(); maxMessageBytes long.
	readBuf []byte
}
// write marshals msg to JSON and multicasts it to the group. The datagram is
// sent twice, 20ms apart, to compensate for multicast's unreliability; the
// individual socket write errors are deliberately ignored (best effort).
// Panics if the connection has neither a v4 nor a v6 socket.
func (c *connection) write(msg interface{}) error {
	buf, err := json.Marshal(msg)
	if err != nil {
		return errors.New("unable to marshal message - " + err.Error())
	}
	var send func()
	switch {
	case c.v4 != nil:
		send = func() { c.v4.WriteTo(buf, nil, c.groupAddr) }
	case c.v6 != nil:
		send = func() { c.v6.WriteTo(buf, nil, c.groupAddr) }
	default:
		panic("no packet connection found")
	}
	send()
	time.Sleep(20 * time.Millisecond)
	send()
	return nil
}
// read blocks for the next datagram, validates it, and returns the decoded
// message annotated with the sender's source IP. Corrupt or invalid messages
// are reported as temporaryError so callers can skip them; socket errors are
// returned as ordinary errors.
func (c *connection) read() (*message, error) {
	var num int
	var srcIP net.IP
	var err error
	if c.v4 != nil {
		var cm *ipv4.ControlMessage
		num, cm, _, err = c.v4.ReadFrom(c.readBuf)
		if err == nil {
			// cm carries the source address because the corresponding
			// control-message flag was enabled when the socket was created
			srcIP = cm.Src
		}
	} else if c.v6 != nil {
		var cm *ipv6.ControlMessage
		num, cm, _, err = c.v6.ReadFrom(c.readBuf)
		if err == nil {
			srcIP = cm.Src
		}
	} else {
		panic("no packet connection found")
	}
	if err != nil {
		return nil, errors.New("error reading message - " + err.Error())
	}
	// NOTE(review): readBuf is exactly maxMessageBytes long, so num can never
	// exceed it and this branch looks unreachable; an oversized datagram
	// would be silently truncated instead — confirm the intent.
	if num > maxMessageBytes {
		return nil, temporaryError{cause: "message was too big"}
	}
	// log.Printf("from: %v, read: %s", srcIP, string(c.readBuf[:num]))
	msg := &message{srcIP: srcIP}
	err = json.Unmarshal(c.readBuf[:num], msg)
	if err != nil {
		return nil, temporaryError{cause: "received corrupt message - " + err.Error()}
	}
	if err := msg.valid(); err != nil {
		return nil, temporaryError{cause: "received an invalid message: " + err.Error()}
	}
	return msg, nil
}
// close shuts down whichever packet connection (v4 or v6) this connection
// wraps; panics if neither is set.
func (c *connection) close() error {
	switch {
	case c.v4 != nil:
		return c.v4.Close()
	case c.v6 != nil:
		return c.v6.Close()
	}
	panic("no packet connection found")
}
// newIP4Connection opens the IPv4 multicast socket, joins the chirp group on
// every multicast-capable interface, and enables per-packet source-address
// control messages (which read() relies on to attribute senders).
//
// It fails if there are no multicast interfaces or if the group can't be
// joined on any of them; joining only some interfaces is acceptable.
func newIP4Connection() (*connection, error) {
	conn, err := net.ListenUDP("udp4", &net.UDPAddr{IP: ipv4Group, Port: chirpPort})
	if err != nil {
		return nil, err
	}
	packetConn := ipv4.NewPacketConn(conn)
	ifaces, err := net.Interfaces()
	if err != nil {
		conn.Close()
		return nil, errors.New("unable to retrieve interfaces - " + err.Error())
	}
	// join the group on any available interfaces
	errCount := 0
	mcastInterfaces := 0
	for _, iface := range ifaces {
		if iface.Flags&net.FlagMulticast != net.FlagMulticast {
			continue
		}
		mcastInterfaces++
		if err := packetConn.JoinGroup(&iface, &net.UDPAddr{IP: ipv4Group}); err != nil {
			errCount++
		}
	}
	if mcastInterfaces == 0 {
		conn.Close()
		return nil, errors.New("no multicast network interfaces available")
	}
	if errCount == mcastInterfaces {
		conn.Close()
		return nil, errors.New("unable to join any multicast interfaces")
	}
	if err := packetConn.SetControlMessage(ipv4.FlagSrc, true); err != nil {
		// FIX: previously the socket was leaked on this error path.
		conn.Close()
		return nil, errors.New("unable to set control message ipv4.FlagSrc - " + err.Error())
	}
	return &connection{
		readBuf:   make([]byte, maxMessageBytes),
		v4:        packetConn,
		groupAddr: &net.UDPAddr{IP: ipv4Group, Port: chirpPort},
	}, nil
}
// newIP6Connection opens the IPv6 multicast socket, joins the chirp group on
// every multicast-capable interface, and enables the control messages needed
// to learn each packet's source address.
//
// It fails if there are no multicast interfaces or if the group can't be
// joined on any of them; joining only some interfaces is acceptable.
func newIP6Connection() (*connection, error) {
	conn, err := net.ListenUDP("udp6", &net.UDPAddr{IP: ipv6Group, Port: chirpPort})
	if err != nil {
		return nil, err
	}
	packetConn := ipv6.NewPacketConn(conn)
	ifaces, err := net.Interfaces()
	if err != nil {
		conn.Close()
		return nil, errors.New("unable to retrieve interfaces - " + err.Error())
	}
	// join the group on any available interfaces
	errCount := 0
	mcastInterfaces := 0
	for _, iface := range ifaces {
		if iface.Flags&net.FlagMulticast != net.FlagMulticast {
			continue
		}
		mcastInterfaces++
		if err := packetConn.JoinGroup(&iface, &net.UDPAddr{IP: ipv6Group}); err != nil {
			errCount++
		}
	}
	if mcastInterfaces == 0 {
		conn.Close()
		return nil, errors.New("no multicast network interfaces available")
	}
	if errCount == mcastInterfaces {
		conn.Close()
		return nil, errors.New("unable to join any multicast interfaces")
	}
	// Why do I have to specify ipv6.FlagDst to get the Src address on each packet?
	if err := packetConn.SetControlMessage(ipv6.FlagDst, true); err != nil {
		// FIX: previously the socket was leaked on this error path.
		conn.Close()
		return nil, errors.New("unable to set control message ipv6.FlagDst: " + err.Error())
	}
	return &connection{
		readBuf:   make([]byte, maxMessageBytes),
		v6:        packetConn,
		groupAddr: &net.UDPAddr{IP: ipv6Group, Port: chirpPort},
	}, nil
}
// Publisher announces a named service (with an optional JSON payload) to the
// multicast group until stopped. Configure it with SetTTL/SetPayload, then
// call Start; any error collected during configuration is reported by Start.
type Publisher struct {
	id      string
	service string
	payload publisherPayload
	// serviceTTL is the advertised lifetime in seconds (default 60, min 10).
	serviceTTL uint
	v4Conn     *connection
	v6Conn     *connection
	// initErr records the first builder-style configuration error.
	initErr error
	// stop is closed by Stop to shut down the serve goroutines.
	stop chan bool
}
// ValidateServiceName reports whether name is a legal chirp service name:
// 1-64 bytes drawn from a-z, A-Z, 0-9, '.' and '-'. It returns a descriptive
// error for the first rule violated, or nil if the name is valid.
func ValidateServiceName(name string) error {
	switch {
	case name == "":
		return errors.New("service names can not be empty")
	case len(name) > 64:
		return errors.New("service names may not be longer than 64 bytes")
	case serviceNameRegExp.FindString(name) != name:
		return errors.New("service names can only contain a-z, A-Z, 0-9, . (period) or - (hyphen)")
	}
	return nil
}
// NewPublisher prepares (but does not start) a publisher for the named
// service, with a fresh random sender id and the default 60-second TTL. An
// invalid service name is recorded and later reported by Start.
func NewPublisher(service string) *Publisher {
	pub := &Publisher{
		id:         randSenderID(),
		service:    service,
		serviceTTL: 60,
		stop:       make(chan bool),
	}
	pub.initErr = ValidateServiceName(service)
	return pub
}
// SetTTL sets the advertised service lifetime in seconds (minimum 10). It is
// a no-op if an earlier builder step already failed; a too-small ttl is
// recorded as an init error and reported by Start.
func (p *Publisher) SetTTL(ttl uint) *Publisher {
	if p.initErr != nil {
		return p
	}
	if ttl >= 10 {
		p.serviceTTL = ttl
		return p
	}
	p.initErr = errors.New("TTL must be at least 10 seconds")
	return p
}
// SetPayload serializes payload to JSON and stores it for inclusion in
// publish messages; a nil payload clears it. Serialization failures and
// payloads exceeding MaxPayloadBytes are recorded as init errors and
// reported by Start. No-op if an earlier builder step already failed.
func (p *Publisher) SetPayload(payload map[string]interface{}) *Publisher {
	if p.initErr != nil {
		return p
	}
	if payload == nil {
		p.payload = nil
		return p
	}
	serialized, err := json.Marshal(payload)
	if err != nil {
		p.initErr = errors.New("unable to convert payload into json - " + err.Error())
		return p
	}
	if len(serialized) > MaxPayloadBytes {
		p.initErr = fmt.Errorf("payload too large (%d bytes); must be smaller than 32KB after serialization", len(serialized))
		return p
	}
	p.payload = serialized
	return p
}
// Start opens the multicast connections and begins announcing the service.
// An IPv4 connection is mandatory; IPv6 is attempted but a failure there is
// only logged. Any error collected during configuration is returned first.
func (p *Publisher) Start() (*Publisher, error) {
	if p.initErr != nil {
		return p, p.initErr
	}
	v4, err := newIP4Connection()
	if err != nil {
		// a v4 connection is required, so bail if we can't start it
		p.Stop()
		return p, fmt.Errorf("unable to v4 multicast broadcast - %v", err)
	}
	p.v4Conn = v4
	go p.serve(p.v4Conn)

	if v6, v6Err := newIP6Connection(); v6Err != nil {
		// v6 is a nice to have, so don't consider it an error if it fails
		log.Printf("unable to v6 multicast broadcast - %v", v6Err)
	} else {
		p.v6Conn = v6
		go p.serve(p.v6Conn)
	}
	return p, nil
}
// serve runs the announcement loop for one connection: it announces the
// service immediately, re-announces 4 seconds before each TTL expiry,
// replies to interested new listeners, and sends a removal message when the
// publisher is stopped.
func (p *Publisher) serve(conn *connection) {
	defer conn.close()
	announceMsg := message{
		Type:         messageTypePublishService,
		SenderID:     p.id,
		ServiceName:  p.service,
		payloadBytes: p.payload,
		TTL:          p.serviceTTL,
	}
	if err := conn.write(announceMsg); err != nil {
		if _, ok := err.(temporaryError); ok {
			log.Printf("temporary write error - %v", err)
		} else {
			// FIX: previously this branch logged the zero-value temporaryError
			// (an empty string), discarding the actual error text.
			log.Fatal(err)
		}
	}
	received := make(chan *message)
	go read(conn, received)
	// re-announce shortly before listeners would expire us
	announce := make(chan bool)
	go func() {
		for {
			select {
			case <-time.After(time.Duration(p.serviceTTL-4) * time.Second):
				announce <- true
			case <-p.stop:
				return
			}
		}
	}()
serveloop:
	for {
		select {
		case <-announce:
			conn.write(announceMsg)
		case msg := <-received:
			if msg.SenderID == p.id {
				// ignore messages we have sent
				continue
			}
			switch msg.Type {
			case messageTypeNewListener:
				// a listener interested in us (or in everything) gets an
				// immediate announcement rather than waiting for the next tick
				if msg.ServiceName == "*" || msg.ServiceName == p.service {
					conn.write(announceMsg)
				}
			}
		case <-p.stop:
			goodbyeMsg := message{
				Type:        messageTypeRemoveService,
				SenderID:    p.id,
				ServiceName: p.service,
			}
			conn.write(goodbyeMsg)
			break serveloop
		}
	}
}
// Stop ends the announcement loops and lets them broadcast a service-removal
// message before returning.
// NOTE(review): Stop must be called at most once — a second call would panic
// closing the already-closed stop channel.
func (p *Publisher) Stop() {
	// closing this channel notifies our serving goroutines to clean up
	close(p.stop)
	// give our goroutines enough time to send out service removal messages
	time.Sleep(50 * time.Millisecond)
}
// read pumps messages from conn into msgs until a non-temporary error
// occurs, at which point it closes msgs and exits. Temporary errors (corrupt,
// invalid or oversized datagrams) are logged and skipped.
func read(conn *connection, msgs chan<- *message) {
	for {
		msg, err := conn.read()
		if err == nil {
			msgs <- msg
			continue
		}
		if _, temporary := err.(temporaryError); !temporary {
			// if this is not a transient error, then we need to get out of here
			close(msgs)
			return
		}
		log.Print("temp err: " + err.Error())
	}
}
// Service is the listener-side view of a published service: who published
// it, the addresses it was seen from, its decoded payload, and when each
// piece of state expires.
type Service struct {
	publisherID string
	v4IP        net.IP
	// v4IPExpiration is when the v4 address lapses (last refresh + TTL).
	v4IPExpiration time.Time
	v6IP           net.IP
	v6IPExpiration time.Time
	Name           string
	Payload        map[string]interface{}
	// expirationTime is when the whole service lapses absent a re-announce.
	expirationTime time.Time
}
// String renders the service (including its unexported bookkeeping fields)
// as a JSON object for debugging; the marshal error is ignored since every
// field is JSON-encodable.
func (s Service) String() string {
	buf, _ := json.Marshal(map[string]interface{}{
		"PublisherID": s.publisherID,
		"Name":        s.Name,
		"Payload":     s.Payload,
		"TTL":         s.expirationTime.Unix(),
		"V4":          s.v4IP,
		"V4TTL":       s.v4IPExpiration.Unix(),
		"V6":          s.v6IP,
		"V6TTL":       s.v6IPExpiration.Unix(),
	})
	return string(buf)
}
// IPv4 returns the IPv4 address of the service publisher, in 4-byte form.
// If there is no v4 IP associated with the publisher, it returns nil.
func (s Service) IPv4() net.IP {
	if ip := s.v4IP; ip != nil {
		return ip.To4()
	}
	return nil
}
// IPv6 returns the IPv6 address of the service publisher, in 16-byte form.
// If there is no v6 IP associated with the publisher, it returns nil.
func (s Service) IPv6() net.IP {
	if ip := s.v6IP; ip != nil {
		return ip.To16()
	}
	return nil
}
// Listener watches the multicast group for services matching serviceName
// ("*" matches every service) and reports changes on ServiceEvents.
type Listener struct {
	id          string
	v4Conn      *connection
	v6Conn      *connection
	serviceName string
	// knownServices maps publisher id => last known Service state; it is only
	// touched by the single message-handling goroutine started in NewListener.
	knownServices map[string]Service
	// ServiceEvents is the receive-only view of serviceEvents exposed to users.
	ServiceEvents <-chan ServiceEvent
	serviceEvents chan ServiceEvent
	stop          chan bool
}
// NewListener starts listening for announcements of the named service ("*"
// listens for every service). Callers must consume ServiceEvents promptly:
// event delivery blocks the internal message-handling goroutine.
//
// NOTE(review): unlike Publisher.Start, which treats IPv6 as optional, a v6
// connection failure here aborts the listener — confirm the asymmetry is
// intended.
func NewListener(serviceName string) (*Listener, error) {
	if serviceName != "*" {
		if err := ValidateServiceName(serviceName); err != nil {
			return nil, err
		}
	}
	l := &Listener{
		id:            randSenderID(),
		serviceName:   serviceName,
		stop:          make(chan bool),
		knownServices: make(map[string]Service),
		serviceEvents: make(chan ServiceEvent),
	}
	// initialize the read only version of the channel for the end user
	l.ServiceEvents = l.serviceEvents
	var err error
	l.v4Conn, err = newIP4Connection()
	if err != nil {
		return nil, fmt.Errorf("unable to v4 multicast listen - %v", err)
	}
	l.v6Conn, err = newIP6Connection()
	if err != nil {
		l.v4Conn.close()
		return nil, fmt.Errorf("unable to v6 multicast listen - %v", err)
	}
	// Start a goroutine to handle all the incoming messages from both
	// connection, and also remove services that have expired
	messageHandler := make(chan *message)
	expirationTicker := time.NewTicker(10 * time.Second)
	go func() {
		defer expirationTicker.Stop()
		for {
			select {
			case msg := <-messageHandler:
				// not interested in our own messages
				if msg.SenderID == l.id {
					continue
				}
				switch msg.Type {
				case messageTypePublishService:
					l.handlePublish(msg)
				case messageTypeRemoveService:
					l.handleRemoval(msg)
				}
			case <-expirationTicker.C:
				// every 10s, drop services whose TTL has lapsed and emit a
				// removal event for each
				for pubID, service := range l.knownServices {
					if service.expirationTime.Before(time.Now()) {
						delete(l.knownServices, pubID)
						se := ServiceEvent{Service: service, EventType: ServiceRemoved}
						l.serviceEvents <- se
					}
				}
			case <-l.stop:
				return
			}
		}
	}()
	go l.listen(l.v4Conn, messageHandler)
	go l.listen(l.v6Conn, messageHandler)
	return l, nil
}
// listen announces this listener to the group (prompting matching publishers
// to re-announce themselves) and then forwards every received message to
// messageHandler until the connection is closed.
func (l *Listener) listen(conn *connection, messageHandler chan<- *message) {
	conn.write(message{
		Type:        messageTypeNewListener,
		SenderID:    l.id,
		ServiceName: l.serviceName,
	})
	received := make(chan *message)
	go read(conn, received)
	// read closes the channel when the connection goes away, ending this loop
	for msg := range received {
		messageHandler <- msg
	}
}
// handleRemoval drops a known service in response to a removal message and
// emits a ServiceRemoved event. Messages for other services, or for services
// we never recorded, are ignored.
func (l *Listener) handleRemoval(msg *message) {
	if l.serviceName != "*" && msg.ServiceName != l.serviceName {
		return // not a service we're watching
	}
	service, known := l.knownServices[msg.SenderID]
	if !known {
		return
	}
	delete(l.knownServices, msg.SenderID)
	l.serviceEvents <- ServiceEvent{Service: service, EventType: ServiceRemoved}
}
// handlePublish records or refreshes a service announcement. First sightings
// emit ServicePublished; later announcements refresh the relevant TTLs and
// emit ServiceUpdated only when the sender's v4/v6 address changed.
func (l *Listener) handlePublish(msg *message) {
	// Is this a service that we're interested in?
	if l.serviceName != "*" && msg.ServiceName != l.serviceName {
		return
	}
	// check if we have a record for this service already
	// log.Printf("srcIP: %s", msg.srcIP)
	service, ok := l.knownServices[msg.SenderID]
	ttl := time.Now().Add(time.Duration(msg.TTL) * time.Second)
	service.expirationTime = ttl
	if !ok { // this is the first time we've seen this service
		// classify the source address as v4 or v6 and stamp its expiry
		if msg.srcIP.To4() != nil {
			// log.Printf("initing with v4")
			service.v4IP = msg.srcIP
			service.v4IPExpiration = ttl
		} else {
			// log.Printf("initing with v6")
			service.v6IP = msg.srcIP
			service.v6IPExpiration = ttl
		}
		service.Name = msg.ServiceName
		service.publisherID = msg.SenderID
		service.Payload = msg.Payload
		se := ServiceEvent{Service: service, EventType: ServicePublished}
		l.serviceEvents <- se
	} else { // we've seen this service before. check if we have a new ip address
		updatedIP := false
		if msg.srcIP.To4() != nil {
			service.v4IPExpiration = ttl
			// log.Printf("v4 ttl")
			if service.v4IP == nil {
				// log.Printf("setting v4")
				service.v4IP = msg.srcIP
				updatedIP = true
			} else {
				if !service.v4IP.Equal(msg.srcIP) {
					// log.Printf("updating v4")
					service.v4IP = msg.srcIP
					updatedIP = true
				}
			}
		} else {
			service.v6IPExpiration = ttl
			// log.Printf("v6 ttl")
			if service.v6IP == nil {
				// log.Printf("setting v6: %v", msg.srcIP)
				service.v6IP = msg.srcIP
				updatedIP = true
			} else {
				if !service.v6IP.Equal(msg.srcIP) {
					// log.Printf("updating v6")
					service.v6IP = msg.srcIP
					updatedIP = true
				}
			}
		}
		if updatedIP {
			se := ServiceEvent{Service: service, EventType: ServiceUpdated}
			l.serviceEvents <- se
		}
	}
	// log.Printf("service: %v", service)
	l.knownServices[msg.SenderID] = service
}
// Stop listening. The Listener can't be reused after this is called.
// NOTE(review): like Publisher.Stop, calling this twice would panic on the
// double close of the stop channel.
func (l *Listener) Stop() {
	close(l.stop)
}
// EventType classifies what happened to a service in a ServiceEvent.
type EventType string

// The events delivered on Listener.ServiceEvents. All three constants are
// explicitly typed as EventType; previously only the first was, leaving the
// other two as untyped string constants.
const (
	// ServicePublished fires the first time a service is seen.
	ServicePublished EventType = "service_published"
	// ServiceUpdated fires when a known service's IP address changes.
	ServiceUpdated EventType = "service_updated"
	// ServiceRemoved fires when a service says goodbye or its TTL lapses.
	ServiceRemoved EventType = "service_removed"
)
// ServiceEvent pairs a Service snapshot with what happened to it
// (published, updated or removed).
type ServiceEvent struct {
	Service
	EventType
}
|
package alicloud
import (
"os"
"path/filepath"
)
// client is the package-wide cloud-drive client.
// NOTE(review): GetAccessToken() runs during package initialization (a side
// effect), and LocalBaseDir is derived from $PWD, so behavior depends on the
// working directory at startup — confirm this is intended.
var client = &Client{
	AccessToken:   GetAccessToken(),
	BaseApiURL:    "http://api.yunpan.alibaba.com/api",
	LocalBaseDir:  filepath.Join(os.Getenv("PWD"), "local_backup"),
	RemoteBaseDir: "raspberry_pi",
}
|
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v2018109
import (
"encoding/json"
tchttp "github.com/tencentyun/tcecloud-sdk-go/tcecloud/common/http"
)
// OperDocCategoryParams holds the operation-specific arguments for the
// OperDocCategory API.
type OperDocCategoryParams struct {
	// action
	Action *string `json:"Action,omitempty" name:"Action"`
	// docTypeId
	DocTypeId *int64 `json:"DocTypeId,omitempty" name:"DocTypeId"`
	// categoryId
	CategoryId *int64 `json:"CategoryId,omitempty" name:"CategoryId"`
	// titleEn
	TitleEn *string `json:"TitleEn,omitempty" name:"TitleEn"`
	// titleCn
	TitleCn *string `json:"TitleCn,omitempty" name:"TitleCn"`
	// weight
	Weight *int64 `json:"Weight,omitempty" name:"Weight"`
	// imageUrl
	ImageUrl *string `json:"ImageUrl,omitempty" name:"ImageUrl"`
}

// OperDocCategoryRequest is the request message for the OperDocCategory API.
type OperDocCategoryRequest struct {
	*tchttp.BaseRequest
	// Method
	Method *string `json:"Method,omitempty" name:"Method"`
	// UserId
	UserId *string `json:"UserId,omitempty" name:"UserId"`
	// Params
	Params *OperDocCategoryParams `json:"Params,omitempty" name:"Params"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored,
// per SDK convention).
func (r *OperDocCategoryRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *OperDocCategoryRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// OperDocCategoryResponse is the response message for the OperDocCategory API.
type OperDocCategoryResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// RequestId is the unique request ID returned with every request;
		// provide it when reporting an issue with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *OperDocCategoryResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *OperDocCategoryResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
// OperDocMenuLockParams holds the operation-specific arguments for the
// OperDocMenuLock API (also reused by OperDocMenuStatus).
type OperDocMenuLockParams struct {
	// menuId
	MenuId *int64 `json:"MenuId,omitempty" name:"MenuId"`
	// reviewId
	ReviewId *int64 `json:"ReviewId,omitempty" name:"ReviewId"`
	// action
	Action *string `json:"Action,omitempty" name:"Action"`
}

// OperDocMenuLockRequest is the request message for the OperDocMenuLock API.
type OperDocMenuLockRequest struct {
	*tchttp.BaseRequest
	// Method
	Method *string `json:"Method,omitempty" name:"Method"`
	// UserId
	UserId *string `json:"UserId,omitempty" name:"UserId"`
	// Params
	Params *OperDocMenuLockParams `json:"Params,omitempty" name:"Params"`
	// ProductId
	ProductId *string `json:"ProductId,omitempty" name:"ProductId"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *OperDocMenuLockRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *OperDocMenuLockRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// OperDocMenuLockResponse is the response message for the OperDocMenuLock API.
type OperDocMenuLockResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// RequestId is the unique request ID returned with every request;
		// provide it when reporting an issue with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *OperDocMenuLockResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *OperDocMenuLockResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
// OperDocMenuParams holds the operation-specific arguments for the
// OperDocMenu API.
type OperDocMenuParams struct {
	// action
	Action *string `json:"Action,omitempty" name:"Action"`
	// productId
	ProductId *int64 `json:"ProductId,omitempty" name:"ProductId"`
	// pid
	Pid *int64 `json:"Pid,omitempty" name:"Pid"`
	// titleCn
	TitleCn *string `json:"TitleCn,omitempty" name:"TitleCn"`
	// weight
	Weight *int64 `json:"Weight,omitempty" name:"Weight"`
	// menuId
	MenuId *int64 `json:"MenuId,omitempty" name:"MenuId"`
	// reviewId
	ReviewId *int64 `json:"ReviewId,omitempty" name:"ReviewId"`
	// type
	Type *string `json:"Type,omitempty" name:"Type"`
	// contentCn
	ContentCn *string `json:"ContentCn,omitempty" name:"ContentCn"`
	// keywordCn
	KeywordCn []*string `json:"KeywordCn,omitempty" name:"KeywordCn" list`
}

// OperDocMenuRequest is the request message for the OperDocMenu API.
type OperDocMenuRequest struct {
	*tchttp.BaseRequest
	// Method
	Method *string `json:"Method,omitempty" name:"Method"`
	// UserId
	UserId *string `json:"UserId,omitempty" name:"UserId"`
	// Params
	Params *OperDocMenuParams `json:"Params,omitempty" name:"Params"`
	// ProductId
	ProductId *string `json:"ProductId,omitempty" name:"ProductId"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *OperDocMenuRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *OperDocMenuRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// OperDocMenuResponse is the response message for the OperDocMenu API.
type OperDocMenuResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// RequestId is the unique request ID returned with every request;
		// provide it when reporting an issue with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *OperDocMenuResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *OperDocMenuResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
// OperDocMenuStatusRequest is the request message for the OperDocMenuStatus
// API; it reuses OperDocMenuLockParams for its operation arguments.
type OperDocMenuStatusRequest struct {
	*tchttp.BaseRequest
	// Method
	Method *string `json:"Method,omitempty" name:"Method"`
	// UserId
	UserId *string `json:"UserId,omitempty" name:"UserId"`
	// Params
	Params *OperDocMenuLockParams `json:"Params,omitempty" name:"Params"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *OperDocMenuStatusRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *OperDocMenuStatusRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// OperDocMenuStatusResponse is the response message for the
// OperDocMenuStatus API.
type OperDocMenuStatusResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// RequestId is the unique request ID returned with every request;
		// provide it when reporting an issue with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *OperDocMenuStatusResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *OperDocMenuStatusResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
// OperDocProductDefaultPageParams holds the operation-specific arguments for
// the OperDocProductDefaultPage API.
type OperDocProductDefaultPageParams struct {
	// ProductId
	ProductId *int64 `json:"ProductId,omitempty" name:"ProductId"`
	// PageType
	PageType *string `json:"PageType,omitempty" name:"PageType"`
	// PageContent
	PageContent *string `json:"PageContent,omitempty" name:"PageContent"`
}

// OperDocProductDefaultPageRequest is the request message for the
// OperDocProductDefaultPage API.
type OperDocProductDefaultPageRequest struct {
	*tchttp.BaseRequest
	// Method
	Method *string `json:"Method,omitempty" name:"Method"`
	// UserId
	UserId *string `json:"UserId,omitempty" name:"UserId"`
	// Params
	Params *OperDocProductDefaultPageParams `json:"Params,omitempty" name:"Params"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *OperDocProductDefaultPageRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *OperDocProductDefaultPageRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// OperDocProductDefaultPageResponse is the response message for the
// OperDocProductDefaultPage API.
type OperDocProductDefaultPageResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// RequestId is the unique request ID returned with every request;
		// provide it when reporting an issue with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *OperDocProductDefaultPageResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *OperDocProductDefaultPageResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
// OperDocProductParams holds the operation-specific arguments for the
// OperDocProduct API.
type OperDocProductParams struct {
	// action
	Action *string `json:"Action,omitempty" name:"Action"`
	// productId
	ProductId *int64 `json:"ProductId,omitempty" name:"ProductId"`
	// categoryId
	CategoryId *int64 `json:"CategoryId,omitempty" name:"CategoryId"`
	// titleEn
	TitleEn *string `json:"TitleEn,omitempty" name:"TitleEn"`
	// TitleCn
	TitleCn *string `json:"TitleCn,omitempty" name:"TitleCn"`
	// weight
	Weight *int64 `json:"Weight,omitempty" name:"Weight"`
}

// OperDocProductRequest is the request message for the OperDocProduct API.
type OperDocProductRequest struct {
	*tchttp.BaseRequest
	// Method
	Method *string `json:"Method,omitempty" name:"Method"`
	// UserId
	UserId *string `json:"UserId,omitempty" name:"UserId"`
	// Params
	Params *OperDocProductParams `json:"Params,omitempty" name:"Params"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *OperDocProductRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *OperDocProductRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// OperDocProductResponse is the response message for the OperDocProduct API.
type OperDocProductResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// RequestId is the unique request ID returned with every request;
		// provide it when reporting an issue with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *OperDocProductResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *OperDocProductResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
// OperDocReviewParams holds the operation-specific arguments for the
// OperDocReview API.
type OperDocReviewParams struct {
	// reviewIdList
	ReviewIdList []*int64 `json:"ReviewIdList,omitempty" name:"ReviewIdList" list`
	// reviewPass
	ReviewPass *int64 `json:"ReviewPass,omitempty" name:"ReviewPass"`
	// reviewComment
	ReviewComment *string `json:"ReviewComment,omitempty" name:"ReviewComment"`
}

// OperDocReviewRequest is the request message for the OperDocReview API.
type OperDocReviewRequest struct {
	*tchttp.BaseRequest
	// Method
	Method *string `json:"Method,omitempty" name:"Method"`
	// UserId
	UserId *string `json:"UserId,omitempty" name:"UserId"`
	// Params
	Params *OperDocReviewParams `json:"Params,omitempty" name:"Params"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *OperDocReviewRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *OperDocReviewRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// OperDocReviewResponse is the response message for the OperDocReview API.
type OperDocReviewResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// RequestId is the unique request ID returned with every request;
		// provide it when reporting an issue with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *OperDocReviewResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *OperDocReviewResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
// OperDocTypeParams holds the operation-specific arguments for the
// OperDocType API.
type OperDocTypeParams struct {
	// action
	Action *string `json:"Action,omitempty" name:"Action"`
	// docTypeId
	DocTypeId *uint64 `json:"DocTypeId,omitempty" name:"DocTypeId"`
	// docType
	DocType *string `json:"DocType,omitempty" name:"DocType"`
}

// OperDocTypeRequest is the request message for the OperDocType API.
type OperDocTypeRequest struct {
	*tchttp.BaseRequest
	// Method
	Method *string `json:"Method,omitempty" name:"Method"`
	// UserId
	UserId *string `json:"UserId,omitempty" name:"UserId"`
	// Params
	Params *OperDocTypeParams `json:"Params,omitempty" name:"Params"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *OperDocTypeRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *OperDocTypeRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// OperDocTypeResponse is the response message for the OperDocType API.
type OperDocTypeResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// RequestId is the unique request ID returned with every request;
		// provide it when reporting an issue with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *OperDocTypeResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *OperDocTypeResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
// QueryDocCategoryListRequest is the request message for the
// QueryDocCategoryList API; it takes no operation-specific parameters.
type QueryDocCategoryListRequest struct {
	*tchttp.BaseRequest
	// method
	Method *string `json:"Method,omitempty" name:"Method"`
	// userId
	UserId *string `json:"UserId,omitempty" name:"UserId"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *QueryDocCategoryListRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *QueryDocCategoryListRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// QueryDocCategoryListResponse is the response message for the
// QueryDocCategoryList API.
type QueryDocCategoryListResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// RequestId is the unique request ID returned with every request;
		// provide it when reporting an issue with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *QueryDocCategoryListResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *QueryDocCategoryListResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
// QueryDocCategoryParams holds the filtering and paging arguments for the
// QueryDocCategory API.
type QueryDocCategoryParams struct {
	// filters
	Filters *QueryDocCategoryParamsFilters `json:"Filters,omitempty" name:"Filters"`
	// offset
	Offset *int64 `json:"Offset,omitempty" name:"Offset"`
	// limit
	Limit *int64 `json:"Limit,omitempty" name:"Limit"`
}

// QueryDocCategoryParamsFilters narrows a QueryDocCategory query.
type QueryDocCategoryParamsFilters struct {
	// docTypeId
	DocTypeId *int64 `json:"DocTypeId,omitempty" name:"DocTypeId"`
	// categoryId
	CategoryId *int64 `json:"CategoryId,omitempty" name:"CategoryId"`
	// keyword
	Keyword *string `json:"Keyword,omitempty" name:"Keyword"`
}

// QueryDocCategoryRequest is the request message for the QueryDocCategory API.
type QueryDocCategoryRequest struct {
	*tchttp.BaseRequest
	// Method
	Method *string `json:"Method,omitempty" name:"Method"`
	// UserId
	UserId *string `json:"UserId,omitempty" name:"UserId"`
	// Params
	Params *QueryDocCategoryParams `json:"Params,omitempty" name:"Params"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *QueryDocCategoryRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *QueryDocCategoryRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// QueryDocCategoryResponse is the response message for the QueryDocCategory
// API.
type QueryDocCategoryResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// RequestId is the unique request ID returned with every request;
		// provide it when reporting an issue with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *QueryDocCategoryResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *QueryDocCategoryResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
// QueryDocMemuListParams holds the arguments for the QueryDocMenuList API.
// NOTE(review): "Memu" is a typo for "Menu" preserved from the generated
// code; renaming would break callers.
type QueryDocMemuListParams struct {
	// productId
	ProductId *int64 `json:"ProductId,omitempty" name:"ProductId"`
	// isAll
	IsAll *int64 `json:"IsAll,omitempty" name:"IsAll"`
}

// QueryDocMenuListRequest is the request message for the QueryDocMenuList API.
type QueryDocMenuListRequest struct {
	*tchttp.BaseRequest
	// Method
	Method *string `json:"Method,omitempty" name:"Method"`
	// UserId
	UserId *string `json:"UserId,omitempty" name:"UserId"`
	// Params
	Params *QueryDocMemuListParams `json:"Params,omitempty" name:"Params"`
	// Product ID
	ProductId *string `json:"ProductId,omitempty" name:"ProductId"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *QueryDocMenuListRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *QueryDocMenuListRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// QueryDocMenuListResponse is the response message for the QueryDocMenuList
// API.
type QueryDocMenuListResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// RequestId is the unique request ID returned with every request;
		// provide it when reporting an issue with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString returns the JSON encoding of r (marshal errors are ignored).
func (r *QueryDocMenuListResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString decodes the JSON string s into r.
func (r *QueryDocMenuListResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
// QueryDocMenuParams holds the arguments of a single-menu query.
type QueryDocMenuParams struct {
	// Menu ID to fetch.
	MenuId *int64 `json:"MenuId,omitempty" name:"MenuId"`
	// Review ID to fetch the menu for.
	ReviewId *int64 `json:"ReviewId,omitempty" name:"ReviewId"`
}

// QueryDocMenuRequest is the request message for the menu query API.
type QueryDocMenuRequest struct {
	*tchttp.BaseRequest
	// Name of the invoked API method.
	Method *string `json:"Method,omitempty" name:"Method"`
	// ID of the calling user.
	UserId *string `json:"UserId,omitempty" name:"UserId"`
	// Query parameters.
	Params *QueryDocMenuParams `json:"Params,omitempty" name:"Params"`
	// Product ID (string form).
	ProductId *string `json:"ProductId,omitempty" name:"ProductId"`
}

// ToJsonString serializes the request to JSON (marshal error dropped).
func (r *QueryDocMenuRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString deserializes s (JSON) into the request.
func (r *QueryDocMenuRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// QueryDocMenuResponse is the response message for the menu query API.
type QueryDocMenuResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// Unique request ID returned with every request; supply it when
		// reporting a problem with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString serializes the response to JSON (marshal error dropped).
func (r *QueryDocMenuResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString deserializes s (JSON) into the response.
func (r *QueryDocMenuResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
// QueryDocProductDefaultPageParams holds the arguments of a product
// default-page query.
type QueryDocProductDefaultPageParams struct {
	// Product ID whose default page is queried.
	ProductId *int64 `json:"ProductId,omitempty" name:"ProductId"`
}

// QueryDocProductDefaultPageRequest is the request message for the
// product default-page API.
type QueryDocProductDefaultPageRequest struct {
	*tchttp.BaseRequest
	// Name of the invoked API method.
	Method *string `json:"Method,omitempty" name:"Method"`
	// ID of the calling user.
	UserId *string `json:"UserId,omitempty" name:"UserId"`
	// Query parameters.
	Params *QueryDocProductDefaultPageParams `json:"Params,omitempty" name:"Params"`
}

// ToJsonString serializes the request to JSON (marshal error dropped).
func (r *QueryDocProductDefaultPageRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString deserializes s (JSON) into the request.
func (r *QueryDocProductDefaultPageRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// QueryDocProductDefaultPageResponse is the response message for the
// product default-page API.
type QueryDocProductDefaultPageResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// Unique request ID returned with every request; supply it when
		// reporting a problem with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString serializes the response to JSON (marshal error dropped).
func (r *QueryDocProductDefaultPageResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString deserializes s (JSON) into the response.
func (r *QueryDocProductDefaultPageResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
// QueryDocProductParams holds the filter and paging arguments of a
// product query.
type QueryDocProductParams struct {
	// Filter conditions restricting which products are returned.
	Filters *QueryDocProductParamsFilters `json:"Filters,omitempty" name:"Filters"`
	// Offset of the first matching record to return.
	Offset *int64 `json:"Offset,omitempty" name:"Offset"`
	// Maximum number of records to return.
	Limit *int64 `json:"Limit,omitempty" name:"Limit"`
}

// QueryDocProductParamsFilters restricts a product query.
type QueryDocProductParamsFilters struct {
	// Category ID to filter by.
	CategoryId *int64 `json:"CategoryId,omitempty" name:"CategoryId"`
	// Product ID to filter by.
	ProductId *int64 `json:"ProductId,omitempty" name:"ProductId"`
	// Free-text keyword to filter by.
	Keyword *string `json:"Keyword,omitempty" name:"Keyword"`
}

// QueryDocProductRequest is the request message for the product query API.
type QueryDocProductRequest struct {
	*tchttp.BaseRequest
	// Name of the invoked API method.
	Method *string `json:"Method,omitempty" name:"Method"`
	// ID of the calling user.
	UserId *string `json:"UserId,omitempty" name:"UserId"`
	// Query parameters (filters and paging).
	Params *QueryDocProductParams `json:"Params,omitempty" name:"Params"`
}

// ToJsonString serializes the request to JSON (marshal error dropped).
func (r *QueryDocProductRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString deserializes s (JSON) into the request.
func (r *QueryDocProductRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// QueryDocProductResponse is the response message for the product query API.
type QueryDocProductResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// Unique request ID returned with every request; supply it when
		// reporting a problem with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString serializes the response to JSON (marshal error dropped).
func (r *QueryDocProductResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString deserializes s (JSON) into the response.
func (r *QueryDocProductResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
// QueryDocReviewListParams holds the filter, sorting and paging
// arguments of a review-list query.
type QueryDocReviewListParams struct {
	// Filter conditions restricting which reviews are returned.
	Filters *QueryDocReviewListParamsFilters `json:"Filters,omitempty" name:"Filters"`
	// Sort order identifier.
	SortType *string `json:"SortType,omitempty" name:"SortType"`
	// Offset of the first matching record to return.
	Offset *int64 `json:"Offset,omitempty" name:"Offset"`
	// Maximum number of records to return.
	Limit *int64 `json:"Limit,omitempty" name:"Limit"`
}

// QueryDocReviewListParamsFilters restricts a review-list query.
type QueryDocReviewListParamsFilters struct {
	// Time window in which the review was applied for.
	ApplyTime *QueryDocReviewListParamsFiltersApplyTime `json:"ApplyTime,omitempty" name:"ApplyTime"`
	// Chinese title to filter by.
	TitleCn *string `json:"TitleCn,omitempty" name:"TitleCn"`
	// Applicant to filter by.
	Applier *string `json:"Applier,omitempty" name:"Applier"`
	// Review status ID to filter by.
	ReviewStatusId *int64 `json:"ReviewStatusId,omitempty" name:"ReviewStatusId"`
}

// QueryDocReviewListParamsFiltersApplyTime is a [Begin, End] time range.
type QueryDocReviewListParamsFiltersApplyTime struct {
	// Range start.
	Begin *string `json:"Begin,omitempty" name:"Begin"`
	// Range end.
	End *string `json:"End,omitempty" name:"End"`
}

// QueryDocReviewListRequest is the request message for the review list API.
type QueryDocReviewListRequest struct {
	*tchttp.BaseRequest
	// Name of the invoked API method.
	Method *string `json:"Method,omitempty" name:"Method"`
	// ID of the calling user.
	UserId *string `json:"UserId,omitempty" name:"UserId"`
	// Query parameters (filters, sorting and paging).
	Params *QueryDocReviewListParams `json:"Params,omitempty" name:"Params"`
}

// ToJsonString serializes the request to JSON (marshal error dropped).
func (r *QueryDocReviewListRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString deserializes s (JSON) into the request.
func (r *QueryDocReviewListRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// QueryDocReviewListResponse is the response message for the review list API.
type QueryDocReviewListResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// Unique request ID returned with every request; supply it when
		// reporting a problem with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString serializes the response to JSON (marshal error dropped).
func (r *QueryDocReviewListResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString deserializes s (JSON) into the response.
func (r *QueryDocReviewListResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
// QueryDocReviewParams holds the arguments of a single-review query.
type QueryDocReviewParams struct {
	// Review ID to fetch.
	ReviewId *int64 `json:"ReviewId,omitempty" name:"ReviewId"`
}

// QueryDocReviewRequest is the request message for the review query API.
type QueryDocReviewRequest struct {
	*tchttp.BaseRequest
	// Name of the invoked API method.
	Method *string `json:"Method,omitempty" name:"Method"`
	// ID of the calling user.
	UserId *string `json:"UserId,omitempty" name:"UserId"`
	// Query parameters.
	Params *QueryDocReviewParams `json:"Params,omitempty" name:"Params"`
}

// ToJsonString serializes the request to JSON (marshal error dropped).
func (r *QueryDocReviewRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString deserializes s (JSON) into the request.
func (r *QueryDocReviewRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// QueryDocReviewResponse is the response message for the review query API.
type QueryDocReviewResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// Unique request ID returned with every request; supply it when
		// reporting a problem with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString serializes the response to JSON (marshal error dropped).
func (r *QueryDocReviewResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString deserializes s (JSON) into the response.
func (r *QueryDocReviewResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
// QueryDocTypeParams holds the filter and paging arguments of a
// document-type query. Unlike the other Params types, Filters is a list.
type QueryDocTypeParams struct {
	// Filter conditions; multiple entries may be supplied.
	Filters []*QueryDocTypeParamsFilters `json:"Filters,omitempty" name:"Filters" list`
	// Offset of the first matching record to return.
	Offset *int64 `json:"Offset,omitempty" name:"Offset"`
	// Maximum number of records to return.
	Limit *int64 `json:"Limit,omitempty" name:"Limit"`
}

// QueryDocTypeParamsFilters restricts a document-type query.
type QueryDocTypeParamsFilters struct {
	// Document type ID to filter by.
	DocTypeId *uint64 `json:"DocTypeId,omitempty" name:"DocTypeId"`
	// Free-text keyword to filter by.
	Keyword *string `json:"Keyword,omitempty" name:"Keyword"`
}

// QueryDocTypeRequest is the request message for the doc-type query API.
type QueryDocTypeRequest struct {
	*tchttp.BaseRequest
	// Name of the invoked API method.
	Method *string `json:"Method,omitempty" name:"Method"`
	// ID of the calling user.
	UserId *string `json:"UserId,omitempty" name:"UserId"`
	// Query parameters (filters and paging).
	Params *QueryDocTypeParams `json:"Params,omitempty" name:"Params"`
}

// ToJsonString serializes the request to JSON (marshal error dropped).
func (r *QueryDocTypeRequest) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString deserializes s (JSON) into the request.
func (r *QueryDocTypeRequest) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}

// QueryDocTypeResponse is the response message for the doc-type query API.
type QueryDocTypeResponse struct {
	*tchttp.BaseResponse
	Response *struct {
		// Unique request ID returned with every request; supply it when
		// reporting a problem with this request.
		RequestId *string `json:"RequestId,omitempty" name:"RequestId"`
	} `json:"Response"`
}

// ToJsonString serializes the response to JSON (marshal error dropped).
func (r *QueryDocTypeResponse) ToJsonString() string {
	b, _ := json.Marshal(r)
	return string(b)
}

// FromJsonString deserializes s (JSON) into the response.
func (r *QueryDocTypeResponse) FromJsonString(s string) error {
	return json.Unmarshal([]byte(s), &r)
}
|
// ViChart library for Go
// Author: Tad Vizbaras
// License: http://github.com/tadvi/vichart/blob/master/LICENSE
//
package vichart
import (
"fmt"
"github.com/ajstarks/svgo"
)
// Default styles and layout values for VBMultiChart. Each constant is
// used only when the corresponding struct field is left at its zero
// value (see the defaulting section of Draw).
const (
	VBMultiLineXYStyle = "stroke:lightgray;stroke-width:2px;" // axis and tick lines
	VBMultiLineStyle   = "fill:navy;stroke:navy;stroke-width:2px;" // line series
	VBMultiBarStyle1 = "fill:green;stroke:gray;"  // bottom bar segment
	VBMultiBarStyle2 = "fill:yellow;stroke:gray;" // middle bar segment
	VBMultiBarStyle3 = "fill:white;stroke:gray;"  // top bar segment
	VBMultiGstyle        = "font-family:Calibri; font-size:14" // group (text) style
	VBMultiGutterLeft    = 40 // pixels reserved on the left
	VBMultiGutterRight   = 40 // pixels reserved on the right
	VBMultiGutterTop     = 40 // pixels reserved at the top
	VBMultiBarSpacing    = 16 // horizontal distance between bars
	VBMultiBarWidth      = 15 // width of each bar
	VBMultiLegendXOffset = 10 // horizontal offset of the legend block
)
// VBMultiChart renders stacked three-segment bars with an optional
// overlaid line series onto an SVG canvas. Svg, Width, Height,
// BarValues and MaxBarValue are required; every other field falls back
// to a VBMulti* default when left at its zero value.
type VBMultiChart struct {
	Svg           *svg.SVG // destination canvas (required)
	Width, Height int
	BarValues     []VBMultiChartItem // chart bar values
	LineValues    []int              // chart line values
	MaxBarValue   int                // chart max value, used for scaling all the bar values
	MaxLineValue  int                // chart max value, used for scaling all the line values
	// optional fields below
	BarSpacing int
	BarWidth   int
	LabelsX    []string
	LabelsY1   []string // labels for the left Y axis
	LabelsY2   []string // labels for the right Y axis
	GutterLeft  int
	GutterRight int // right gutter for the chart, used to fit last bottom label
	GutterTop   int // top gutter for the chart, used top label
	// styles
	Gstyle      string
	LineXYStyle string
	LineStyle   string
	BarStyle1   string
	BarStyle2   string
	BarStyle3   string
	// legend
	BarLegend1 string
	BarLegend2 string
	BarLegend3 string
	LineLegend string
	// legend offset
	LegendXOffset int
}
// VBMultiChartItem is one stacked bar: the three segment values are
// drawn bottom-up in styles BarStyle1, BarStyle2 and BarStyle3.
type VBMultiChartItem struct {
	Bottom, Middle, Top int
}
// Draw produces the chart on the canvas; it is the main entry point.
// It validates the required fields, substitutes package defaults for
// any optional field left at its zero value, and then renders the
// bars, the optional line series, the axes, the labels and the legend.
// It returns an error (and draws nothing) when a required field is
// missing or inconsistent.
func (chart *VBMultiChart) Draw() error {
	canvas := chart.Svg
	if chart.Svg == nil {
		return fmt.Errorf("Missing pointer to svg.SVG in field Svg.")
	}
	if chart.Width < 10 || chart.Height < 10 {
		return fmt.Errorf("Incorrect Width or Height value.")
	}
	if len(chart.BarValues) == 0 {
		return fmt.Errorf("Missing BarValues for the chart.")
	}
	if chart.MaxBarValue == 0 {
		return fmt.Errorf("Missing chart MaxBarValue.")
	}
	if chart.MaxLineValue == 0 && len(chart.LineValues) > 0 {
		return fmt.Errorf("Missing chart MaxLineValue.")
	}
	// The line series is optional (see the MaxLineValue check above and
	// the guard inside the drawing loop), so the lengths only need to
	// match when line values were actually supplied. The previous code
	// rejected an empty LineValues slice here.
	if len(chart.LineValues) > 0 && len(chart.BarValues) != len(chart.LineValues) {
		return fmt.Errorf("Number of BarValues does not match number of LineValues.")
	}
	// default to sensible constants if value is not set
	if chart.LineXYStyle == "" {
		chart.LineXYStyle = VBMultiLineXYStyle
	}
	if chart.Gstyle == "" {
		chart.Gstyle = VBMultiGstyle
	}
	if chart.LineStyle == "" {
		chart.LineStyle = VBMultiLineStyle
	}
	if chart.BarStyle1 == "" {
		chart.BarStyle1 = VBMultiBarStyle1
	}
	if chart.BarStyle2 == "" {
		chart.BarStyle2 = VBMultiBarStyle2
	}
	if chart.BarStyle3 == "" {
		chart.BarStyle3 = VBMultiBarStyle3
	}
	if chart.GutterLeft == 0 {
		chart.GutterLeft = VBMultiGutterLeft
	}
	if chart.GutterRight == 0 {
		chart.GutterRight = VBMultiGutterRight
	}
	if chart.GutterTop == 0 {
		chart.GutterTop = VBMultiGutterTop
	}
	if chart.BarSpacing == 0 {
		chart.BarSpacing = VBMultiBarSpacing
	}
	if chart.BarWidth == 0 {
		chart.BarWidth = VBMultiBarWidth
	}
	if chart.LegendXOffset == 0 {
		chart.LegendXOffset = VBMultiLegendXOffset
	}
	// start SVG
	canvas.Start(chart.Width, chart.Height)
	canvas.Gstyle(chart.Gstyle)
	x, y := chart.GutterLeft, chart.Height-42
	bHeight := float64(y - chart.GutterTop)
	bWidth := float64(chart.Width - chart.GutterRight - x)
	xoffset := x
	for i := range chart.BarValues {
		yoffset := y + 3
		// scale values to chart pixels; the three segments are stacked
		// bottom -> middle -> top
		chartVal := chart.calcBarValue(bHeight, chart.BarValues[i].Bottom)
		chart.drawMeter(xoffset, yoffset, chart.BarWidth, chartVal, chart.BarStyle1)
		yoffset -= chartVal
		chartVal = chart.calcBarValue(bHeight, chart.BarValues[i].Middle)
		chart.drawMeter(xoffset, yoffset, chart.BarWidth, chartVal, chart.BarStyle2)
		yoffset -= chartVal
		chartVal = chart.calcBarValue(bHeight, chart.BarValues[i].Top)
		chart.drawMeter(xoffset, yoffset, chart.BarWidth, chartVal, chart.BarStyle3)
		yoffset -= chartVal
		// draw the line segment connecting the previous point to this one
		if i > 0 && len(chart.LineValues) > 0 {
			valLine1 := float64(chart.LineValues[i-1])
			chartValLine1 := int((valLine1 / float64(chart.MaxLineValue)) * bHeight)
			valLine2 := float64(chart.LineValues[i])
			chartValLine2 := int((valLine2 / float64(chart.MaxLineValue)) * bHeight)
			xpos := xoffset + chart.BarWidth/2
			canvas.Line(xpos-chart.BarSpacing, y-chartValLine1+3, xpos, y-chartValLine2+3, chart.LineStyle)
		}
		xoffset += chart.BarSpacing
	}
	// bottom line markers and labels
	canvas.Line(x, y+12, chart.Width-chart.GutterRight, y+12, chart.LineXYStyle)
	labels := len(chart.LabelsX)
	// display bottom line labels; the step is loop invariant, and a
	// single label is pinned to the left edge instead of dividing by
	// zero (labels-1 == 0)
	step := 0.0
	if labels > 1 {
		step = bWidth / float64(labels-1)
	}
	for i := 0; i < labels; i++ {
		xoffset := int(float64(i) * step)
		canvas.Text(x+xoffset, y+30, chart.LabelsX[i], "font-size:75%;text-anchor:middle;")
		canvas.Line(x+xoffset, y+6, x+xoffset, y+18, chart.LineXYStyle)
	}
	// left vertical Y line
	chart.drawYLine(x, y+2)
	chart.drawYLineText(x-16, y, chart.LabelsY1, true)
	// right vertical Y line
	chart.drawYLine(chart.Width-chart.GutterRight+12, y+2)
	chart.drawYLineText(chart.Width-chart.GutterRight+12, y, chart.LabelsY2, false)
	chart.drawLegend(x)
	canvas.Gend()
	canvas.End()
	return nil
}
// calcBarValue scales a raw bar value against MaxBarValue and converts
// it to a pixel height within the drawable height bHeight.
func (chart *VBMultiChart) calcBarValue(bHeight float64, value int) int {
	scaled := (float64(value) / float64(chart.MaxBarValue)) * bHeight
	return int(scaled)
}
// drawLegend produces the legend on the chart: three colored swatches
// with their captions at fixed horizontal offsets from x, plus an
// optional line sample when LineLegend is set.
func (chart *VBMultiChart) drawLegend(x int) {
	canvas := chart.Svg
	canvas.Rect(x+chart.LegendXOffset, 10, 40, 10, chart.BarStyle1)
	canvas.Text(x+chart.LegendXOffset+50, 20, chart.BarLegend1, "font-size:75%;")
	canvas.Rect(x+chart.LegendXOffset+120, 10, 40, 10, chart.BarStyle2)
	canvas.Text(x+chart.LegendXOffset+170, 20, chart.BarLegend2, "font-size:75%;")
	canvas.Rect(x+chart.LegendXOffset+230, 10, 40, 10, chart.BarStyle3)
	canvas.Text(x+chart.LegendXOffset+280, 20, chart.BarLegend3, "font-size:75%;")
	if chart.LineLegend != "" {
		xpos := x + chart.LegendXOffset + 340
		canvas.Line(xpos, 15, xpos+40, 15, chart.LineStyle)
		canvas.Text(x+chart.LegendXOffset+390, 20, chart.LineLegend, "font-size:75%;")
	}
}
// drawYLineText draws the labels next to a vertical Y axis. The labels
// slice is given bottom-to-top and is spread evenly between GutterTop
// and h; left selects right-anchored text (for the left axis) instead
// of left-anchored text.
func (chart *VBMultiChart) drawYLineText(x, h int, labels []string, left bool) {
	canvas := chart.Svg
	style := "font-size:75%;text-anchor:start;baseline-shift:-75%"
	if left {
		style = "font-size:75%;text-anchor:end;baseline-shift:-75%"
	}
	labelsCount := len(labels)
	// The step is loop invariant; with a single label the old code
	// divided by zero (labelsCount-1 == 0) and positioned it at
	// int(NaN). Pin a lone label to the top of the axis instead.
	step := 0.0
	if labelsCount > 1 {
		step = float64(h-chart.GutterTop) / float64(labelsCount-1)
	}
	for i := 0; i < labelsCount; i++ {
		yoffset := int(float64(i) * step)
		canvas.Text(x, yoffset+chart.GutterTop, labels[labelsCount-i-1], style)
	}
}
// drawYLine draws a vertical axis at x-8 running from GutterTop down to
// y, with eleven tick marks; ticks 0, 5 and 10 (the ends and the
// midpoint) are drawn longer than the others.
func (chart *VBMultiChart) drawYLine(x, y int) {
	canvas := chart.Svg
	canvas.Line(x-8, chart.GutterTop, x-8, y, chart.LineXYStyle)
	height := float64(y - chart.GutterTop)
	step := height / 10
	pos := 0
	for i := 0.0; i <= height; i += step {
		// tick y coordinate, measured down from the top of the chart
		marker := int(height-i) + 1 + chart.GutterTop
		if pos == 0 || pos == 5 || pos == 10 {
			// long tick at the ends and the midpoint
			canvas.Line(x-2, marker, x-14, marker, chart.LineXYStyle)
		} else {
			canvas.Line(x-5, marker, x-11, marker, chart.LineXYStyle)
		}
		pos += 1
	}
}
// drawMeter draws one bar segment of width w and height value, anchored
// so the segment extends upward from y (square corners).
func (chart *VBMultiChart) drawMeter(x, y, w, value int, barStyle string) {
	chart.Svg.Roundrect(x, y-value, w, value, 0, 0, barStyle)
}
|
package editor
import (
"github.com/gdamore/tcell"
"github.com/rivo/tview"
"strconv"
)
// Footer is the editor status bar, drawn as a borderless tview box.
// It shows the total line count, the cursor position and the language
// name (see Draw for the exact format).
type Footer struct {
	*tview.Box
	*Editor
	// totalLines is the line count rendered with an "L" suffix.
	totalLines int
	// language is the language name shown at the end of the bar.
	language string
	// cursorX, cursorY are the cursor coordinates shown as "y:x".
	cursorX, cursorY int
}
// NewFooter returns a new footer primitive bound to the editor.
// The underlying box is created without a border.
func (e *Editor) NewFooter() *Footer {
	return &Footer{
		Box:    tview.NewBox().SetBorder(false),
		Editor: e,
	}
}
// Draw draws this primitive onto the screen: first the box background
// (taken from the default style), then a status string of the form
// "<totalLines>L <cursorY>:<cursorX> <language>".
func (m *Footer) Draw(screen tcell.Screen) {
	_, bg, _ := defaultStyle.Decompose()
	m.Box.SetBackgroundColor(bg).Draw(screen)
	m.drawText(screen, strconv.Itoa(m.totalLines)+
		"L"+" "+strconv.Itoa(m.cursorY)+":"+strconv.Itoa(m.cursorX)+" "+m.language, 0)
}
// drawText writes text into the footer starting at screen column
// offsetX, one cell per rune. The previous version used the byte index
// of the string range as the column, which left gaps after any
// multi-byte (non-ASCII) rune. Wide (double-cell) runes still advance
// one column.
func (m *Footer) drawText(screen tcell.Screen, text string, offsetX int) {
	col := offsetX
	for _, r := range text {
		m.draw(screen, col, r)
		col++
	}
}
// draw puts a single rune at column x of the footer's inner rectangle,
// using the default style with a light-cyan foreground.
func (m *Footer) draw(screen tcell.Screen, x int, r rune) {
	xr, yr, _, _ := m.Box.GetInnerRect()
	screen.SetContent(xr+x, yr, r, nil, defaultStyle.Foreground(tcell.ColorLightCyan))
}
|
// Copyright 2018 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metrics
import "github.com/prometheus/client_golang/prometheus"
// Session metrics.
//
// These package-level collectors remain nil until InitSessionMetrics
// assigns them, so they must not be used before initialization.
var (
	AutoIDReqDuration                  prometheus.Histogram
	SessionExecuteParseDuration        *prometheus.HistogramVec
	SessionExecuteCompileDuration      *prometheus.HistogramVec
	SessionExecuteRunDuration          *prometheus.HistogramVec
	SchemaLeaseErrorCounter            *prometheus.CounterVec
	SessionRetry                       *prometheus.HistogramVec
	SessionRetryErrorCounter           *prometheus.CounterVec
	SessionRestrictedSQLCounter        prometheus.Counter
	StatementPerTransaction            *prometheus.HistogramVec
	TransactionDuration                *prometheus.HistogramVec
	StatementDeadlockDetectDuration    prometheus.Histogram
	StatementPessimisticRetryCount     prometheus.Histogram
	StatementLockKeysCount             prometheus.Histogram
	ValidateReadTSFromPDCount          prometheus.Counter
	NonTransactionalDMLCount           *prometheus.CounterVec
	TxnStatusEnteringCounter           *prometheus.CounterVec
	TxnDurationHistogram               *prometheus.HistogramVec
	LazyPessimisticUniqueCheckSetCount prometheus.Counter
	PessimisticDMLDurationByAttempt    *prometheus.HistogramVec
	ResourceGroupQueryTotalCounter     *prometheus.CounterVec
	FairLockingUsageCount              *prometheus.CounterVec
)
// InitSessionMetrics initializes session metrics. It assigns every
// package-level session metric variable declared above; none of them
// is usable before this function runs. All collectors share the "tidb"
// namespace, almost all in the "session" subsystem.
func InitSessionMetrics() {
	AutoIDReqDuration = NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "meta",
			Name:      "autoid_duration_seconds",
			Help:      "Bucketed histogram of processing time (s) in parse SQL.",
			Buckets:   prometheus.ExponentialBuckets(0.00004, 2, 28), // 40us ~ 1.5h
		})
	SessionExecuteParseDuration = NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "parse_duration_seconds",
			Help:      "Bucketed histogram of processing time (s) in parse SQL.",
			Buckets:   prometheus.ExponentialBuckets(0.00004, 2, 28), // 40us ~ 1.5h
		}, []string{LblSQLType})
	SessionExecuteCompileDuration = NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "compile_duration_seconds",
			Help:      "Bucketed histogram of processing time (s) in query optimize.",
			// Build plan may execute the statement, or allocate table ID, so it might take a long time.
			Buckets: prometheus.ExponentialBuckets(0.00004, 2, 28), // 40us ~ 1.5h
		}, []string{LblSQLType})
	SessionExecuteRunDuration = NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "execute_duration_seconds",
			Help:      "Bucketed histogram of processing time (s) in running executor.",
			Buckets:   prometheus.ExponentialBuckets(0.0001, 2, 30), // 100us ~ 15h
		}, []string{LblSQLType})
	SchemaLeaseErrorCounter = NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "schema_lease_error_total",
			Help:      "Counter of schema lease error",
		}, []string{LblType})
	SessionRetry = NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "retry_num",
			Help:      "Bucketed histogram of session retry count.",
			Buckets:   prometheus.LinearBuckets(0, 1, 21), // 0 ~ 20
		}, []string{LblScope})
	SessionRetryErrorCounter = NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "retry_error_total",
			Help:      "Counter of session retry error.",
		}, []string{LblSQLType, LblType})
	SessionRestrictedSQLCounter = NewCounter(
		prometheus.CounterOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "restricted_sql_total",
			Help:      "Counter of internal restricted sql.",
		})
	StatementPerTransaction = NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "transaction_statement_num",
			Help:      "Bucketed histogram of statements count in each transaction.",
			Buckets:   prometheus.ExponentialBuckets(1, 2, 16), // 1 ~ 32768
		}, []string{LblTxnMode, LblType, LblScope})
	TransactionDuration = NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "transaction_duration_seconds",
			Help:      "Bucketed histogram of a transaction execution duration, including retry.",
			Buckets:   prometheus.ExponentialBuckets(0.001, 2, 28), // 1ms ~ 1.5days
		}, []string{LblTxnMode, LblType, LblScope})
	StatementDeadlockDetectDuration = NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "statement_deadlock_detect_duration_seconds",
			Help:      "Bucketed histogram of a statement deadlock detect duration.",
			Buckets:   prometheus.ExponentialBuckets(0.001, 2, 28), // 1ms ~ 1.5days
		},
	)
	StatementPessimisticRetryCount = NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "statement_pessimistic_retry_count",
			Help:      "Bucketed histogram of statement pessimistic retry count",
			Buckets:   prometheus.ExponentialBuckets(1, 2, 16), // 1 ~ 32768
		})
	StatementLockKeysCount = NewHistogram(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "statement_lock_keys_count",
			Help:      "Keys locking for a single statement",
			Buckets:   prometheus.ExponentialBuckets(1, 2, 21), // 1 ~ 1048576
		})
	ValidateReadTSFromPDCount = NewCounter(
		prometheus.CounterOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "validate_read_ts_from_pd_count",
			Help:      "Counter of validating read ts by getting a timestamp from PD",
		})
	NonTransactionalDMLCount = NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "non_transactional_dml_count",
			Help:      "Counter of non-transactional delete",
		}, []string{LblType},
	)
	TxnStatusEnteringCounter = NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "txn_state_entering_count",
			Help:      "How many times transactions enter this state",
		}, []string{LblType},
	)
	TxnDurationHistogram = NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "txn_state_seconds",
			Help:      "Bucketed histogram of different states of a transaction.",
			Buckets:   prometheus.ExponentialBuckets(0.0005, 2, 29), // 0.5ms ~ 1.5days
		}, []string{LblType, LblHasLock})
	LazyPessimisticUniqueCheckSetCount = NewCounter(
		prometheus.CounterOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "lazy_pessimistic_unique_check_set_count",
			Help:      "Counter of setting tidb_constraint_check_in_place to false, note that it doesn't count the default value set by tidb config",
		},
	)
	PessimisticDMLDurationByAttempt = NewHistogramVec(
		prometheus.HistogramOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "transaction_pessimistic_dml_duration_by_attempt",
			Help:      "Bucketed histogram of duration of pessimistic DMLs, distinguished by first attempt and retries",
			Buckets:   prometheus.ExponentialBuckets(0.001, 2, 28), // 1ms ~ 1.5days
		}, []string{LblType, LblPhase})
	ResourceGroupQueryTotalCounter = NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "resource_group_query_total",
			Help:      "Counter of the total number of queries for the resource group",
		}, []string{LblName})
	FairLockingUsageCount = NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "tidb",
			Subsystem: "session",
			Name:      "transaction_fair_locking_usage",
			Help:      "The counter of statements and transactions in which fair locking is used or takes effect",
		}, []string{LblType})
}
// Label constants. These are the values used for the metric label
// dimensions declared in InitSessionMetrics (LblType, LblScope, ...).
const (
	// retry/transaction outcomes
	LblUnretryable = "unretryable"
	LblReachMax    = "reach_max"
	LblOK          = "ok"
	LblError       = "error"
	LblCommit      = "commit"
	LblAbort       = "abort"
	LblRollback    = "rollback"
	// label dimension names
	LblType     = "type"
	LblDb       = "db"
	LblResult   = "result"
	LblSQLType  = "sql_type"
	LblCoprType = "copr_type"
	// SQL origin
	LblGeneral  = "general"
	LblInternal = "internal"
	// transaction modes
	LblTxnMode     = "txn_mode"
	LblPessimistic = "pessimistic"
	LblOptimistic  = "optimistic"
	LblStore       = "store"
	LblAddress     = "address"
	LblBatchGet    = "batch_get"
	LblGet         = "get"
	LblLockKeys    = "lock_keys"
	LblInTxn       = "in_txn"
	LblVersion     = "version"
	LblHash        = "hash"
	LblCTEType     = "cte_type"
	LblAccountLock = "account_lock"
	// transaction states (see TxnStatusEnteringCounter / TxnDurationHistogram)
	LblIdle        = "idle"
	LblRunning     = "executing_sql"
	LblLockWaiting = "waiting_for_lock"
	LblCommitting  = "committing"
	LblRollingBack = "rolling_back"
	LblHasLock     = "has_lock"
	LblPhase       = "phase"
	LblModule      = "module"
	LblRCReadCheckTS  = "read_check"
	LblRCWriteCheckTS = "write_check"
	LblResourceGroup  = "resource_group"
	LblName           = "name"
	// fair-locking usage values (see FairLockingUsageCount)
	LblFairLockingTxnUsed       = "txn-used"
	LblFairLockingTxnEffective  = "txn-effective"
	LblFairLockingStmtUsed      = "stmt-used"
	LblFairLockingStmtEffective = "stmt-effective"
	LblScope = "scope"
)
|
package config
import (
"github.com/HNB-ECO/HNB-Blockchain/HNB/util"
"encoding/json"
"fmt"
"io/ioutil"
"time"
)
// ConsensusConfig groups the consensus-engine tuning parameters loaded
// from the JSON configuration file.
// All timeouts are in milliseconds.
// Delta: timeout increment.
type ConsensusConfig struct {
	TimeoutNewRound      int `json:"timeoutNewRound,omitempty"`
	TimeoutPropose       int `json:"timeoutPropose,omitempty"`
	TimeoutProposeWait   int `json:"timeoutProposeWait,omitempty"`
	TimeoutPrevote       int `json:"timeoutPrevote,omitempty"`
	TimeoutPrevoteWait   int `json:"timeoutPrevoteWait,omitempty"`
	TimeoutPrecommit     int `json:"timeoutPrecommit,omitempty"`
	TimeoutPrecommitWait int `json:"timeoutPrecommitWait,omitempty"`
	TimeoutCommit        int `json:"timeoutCommit,omitempty"`
	// Whether to skip TimeoutCommit.
	SkipTimeoutCommit         bool `json:"skipTimeoutCommit,omitempty"`
	CreateEmptyBlocks         bool `json:"createEmptyBlocks,omitempty"`
	CreateEmptyBlocksInterval int  `json:"createEmptyBlocksInterval,omitempty"`
	//Reactor sleep duration parameters are in milliseconds
	TimeoutWaitFortx int `json:"peerGossipSleepDuration,omitempty"`
	BlkTimeout       int `json:"blkTimeout,omitempty"`
	BgDemandTimeout  int `json:"bgDemandTimeout,omitempty"`
	BftNum           int `json:"bftNum,omitempty"`
	//GeneBftName []string `json:"geneBftName,omitempty"`
	// Initial BFT consensus group, made up of the nodes identified by
	// peerID+"_"+orgID.
	GeneBftGroup []*GenesisValidator `json:"geneBftGroup,omitempty"`
	// All validators at initialization.
	GeneValidators []*GenesisValidator `json:"geneValidators,omitempty"`
	EpochPeriod    int                 `json:"epochPeriod"`
}
// GenesisValidator describes one validator listed in the genesis
// configuration.
type GenesisValidator struct {
	// Public key as a string.
	PubKeyStr string `json:"PubKeyStr,omitempty"`
	// Weight used when selecting the proposer.
	Power int `json:"power,omitempty"`
	// Validator node name.
	//Name string `json:"name,omitempty"`
	// Public key algorithm type.
	//AlgType uint16 `json:"algType,omitempty"`
	// Public key information.
	//PeerID string `json:"peerID,omitempty"`
}
// AllConfig is the full process configuration: networking, TLS,
// database, logging and the embedded consensus settings. It is the
// shape of the JSON file read by LoadConfig.
type AllConfig struct {
	RunMode                   string    `json:"runMode"`
	Log                       LogConfig `json:"logConfig"`
	SeedList                  []string  `json:"seedList"`
	MaxConnOutBound           uint      `json:"maxConnOutBound"`
	MaxConnInBound            uint      `json:"maxConnInBound"`
	EnableConsensus           bool      `json:"enableConsensus"`
	MaxConnInBoundForSingleIP uint      `json:"singleIP"`
	SyncPort                  uint16    `json:"syncPort"`
	ConsPort                  uint16    `json:"consPort"`
	RestPort                  uint16    `json:"restPort"`
	GPRCPort                  uint16    `json:"grpcPort"`
	IsPeersTLS                bool      `json:"isTLS"`
	IsServerTLS               bool      `json:"isVisitTLS"`
	TlsKeyPath                string    `json:"tlsKeyPath"`
	TlsCertPath               string    `json:"tlsCertPath"`
	IsBlkSQL                  bool      `json:"isBlkSQL"`
	SQLIP                     string    `json:"sqlIP"`
	UserName                  string    `json:"username"`
	Password                  string    `json:"passwd"`
	DBPath                    string    `json:"dbPath"`
	//algorand config
	KetPairPath     string `json:"ketPairPath"`
	ConsensusConfig `json:"consConfig"`
	// Genesis block timestamp.
	GenesisTime time.Time `json:"genesisTime"`
	// Length of the channel that receives consensus messages.
	RecvMsgChan int          `json:"recvMsgChan,omitempty"`
	EpochServer *EpochServer `json:"epochServer"`
}
// EpochServer holds the epoch server endpoint path and its CA path.
type EpochServer struct {
	EpochServerPath string `json:"epochServerPath"`
	CaPath          string `json:"caPath"`
}
// LogConfig holds the log file path and the log level name.
type LogConfig struct {
	Path  string `json:"path"`
	Level string `json:"level"`
}
// Config is the process-wide configuration instance; LoadConfig fills it in.
var Config = NewConfig()

// NewConfig returns a zero-valued AllConfig ready to be populated.
func NewConfig() *AllConfig {
	return new(AllConfig)
}
// LoadConfig reads the JSON configuration file at path into the global
// Config. An empty or non-existent path leaves Config untouched (with
// a console note for the latter); a read or parse failure panics, as
// the process cannot run unconfigured.
func LoadConfig(path string) {
	if path == "" {
		return
	}
	if !util.PathExists(path) {
		fmt.Println("config path is missing")
		return
	}
	data, err := ioutil.ReadFile(path)
	if err != nil {
		panic("config load fail: " + err.Error())
	}
	// data is already a []byte; the previous code made a redundant
	// []byte(data) copy before unmarshalling.
	if err = json.Unmarshal(data, Config); err != nil {
		panic("config load fail: " + err.Error())
	}
}
|
package main
import (
"context"
"fmt"
"os"
"github.com/go-redis/redis/v8"
)
// ctx is the background context used for all redis commands below.
var ctx = context.Background()

// redis-benchmark -h $CACHE_HOST.redis.cache.windows.net -p 6380 -a $CACHE_KEY
//
// main round-trips a single key through an Azure Redis cache to verify
// connectivity. The access key is taken from the CACHE_KEY environment
// variable.
// NOTE(review): port 6380 is Azure's TLS endpoint; a TLSConfig in
// redis.Options may be required — confirm against the deployment.
func main() {
	rdb := redis.NewClient(&redis.Options{
		Addr:     "akscache.redis.cache.windows.net:6380",
		Password: os.Getenv("CACHE_KEY"),
	})
	// Release the client's connection pool on exit (was leaked before).
	defer rdb.Close()

	if err := rdb.Set(ctx, "foo", "go gold", 0).Err(); err != nil {
		panic(err)
	}
	val, err := rdb.Get(ctx, "foo").Result()
	if err != nil {
		panic(err)
	}
	fmt.Println("key", val)
}
|
package Problem0443
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
// tcs is testcase slice: chars is the input buffer (modified in place
// by compress), ans is the expected return value (length of the
// compressed prefix), and ansInPlace is the expected leading content of
// chars after the call.
var tcs = []struct {
	chars      []byte
	ans        int
	ansInPlace []byte
}{
	{
		[]byte("abaa2"),
		5,
		[]byte("aba22"),
	},
	{
		[]byte("a"),
		1,
		[]byte("a"),
	},
	{
		// 12 consecutive 'b's compress to "b12"
		[]byte("abbbbbbbbbbbb"),
		4,
		[]byte("ab12"),
	},
	{
		[]byte("aabbccc"),
		6,
		[]byte("a2b2c3"),
	},
	// more testcases may be added here
}
// Test_compress checks, for every testcase, both the length returned
// by compress and the in-place rewrite of the input buffer.
func Test_compress(t *testing.T) {
	ast := assert.New(t)
	for _, tc := range tcs {
		fmt.Printf("~~%v~~\n", tc)
		ast.Equal(tc.ans, compress(tc.chars))
		// verify the in-place modification as well
		ast.Equal(string(tc.ansInPlace), string(tc.chars)[:len(tc.ansInPlace)])
	}
}
// Benchmark_compress measures compress across the full testcase set.
func Benchmark_compress(b *testing.B) {
	for n := 0; n < b.N; n++ {
		for _, c := range tcs {
			compress(c.chars)
		}
	}
}
|
package data_test
import (
. "github.com/DennisDenuto/property-price-collector/data"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"time"
)
// Spec: ListNswPostcodes streams NSW postcode numbers over the returned
// channel; the test waits (up to a minute each) for 1001 and then 3707
// to come through.
// NOTE(review): gomega's Receive consumes a value on each poll, so this
// asserts the two values appear in order, not that they are adjacent.
var _ = Describe("Postcode", func() {
	It("should generate postcode numbers for nsw", func() {
		postCodes := ListNswPostcodes()
		Eventually(postCodes, 1*time.Minute, 1*time.Millisecond).Should(Receive(Equal(1001)))
		Eventually(postCodes, 1*time.Minute, 1*time.Millisecond).Should(Receive(Equal(3707)))
	})
})
|
// Copyright 2019 Authors of Cilium
// Copyright 2017 Lyft, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package eni
import (
"context"
"fmt"
"sort"
"time"
eniTypes "github.com/cilium/cilium/pkg/aws/eni/types"
"github.com/cilium/cilium/pkg/aws/types"
"github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/trigger"
"golang.org/x/sync/semaphore"
)
// k8sAPI is the subset of the Kubernetes client the node manager needs
// to read and update CiliumNode resources.
type k8sAPI interface {
	// Update replaces origResource with newResource.
	Update(origResource, newResource *v2.CiliumNode) (*v2.CiliumNode, error)
	// UpdateStatus updates the resource's status.
	UpdateStatus(origResource, newResource *v2.CiliumNode) (*v2.CiliumNode, error)
	// Get fetches the CiliumNode with the given name.
	Get(name string) (*v2.CiliumNode, error)
}
// nodeManagerAPI is the node manager's view of the cached instance/ENI
// state: ENI and subnet lookups, tag-based discovery of subnets and
// security groups, and resynchronization with the provider.
type nodeManagerAPI interface {
	GetENI(instanceID string, index int) *eniTypes.ENI
	GetENIs(instanceID string) []*eniTypes.ENI
	GetSubnet(subnetID string) *types.Subnet
	GetSubnets(ctx context.Context) types.SubnetMap
	FindSubnetByTags(vpcID, availabilityZone string, required types.Tags) *types.Subnet
	FindSecurityGroupByTags(vpcID string, required types.Tags) []*types.SecurityGroup
	// Resync refreshes the state and returns the time of the sync.
	Resync(ctx context.Context) time.Time
	UpdateENI(instanceID string, eni *eniTypes.ENI)
}
// ec2API is the subset of EC2 network-interface operations used for
// creating, attaching, tagging and releasing ENIs and their IP addresses.
type ec2API interface {
	CreateNetworkInterface(ctx context.Context, toAllocate int64, subnetID, desc string, groups []string) (string, *eniTypes.ENI, error)
	DeleteNetworkInterface(ctx context.Context, eniID string) error
	AttachNetworkInterface(ctx context.Context, index int64, instanceID, eniID string) (string, error)
	ModifyNetworkInterface(ctx context.Context, eniID, attachmentID string, deleteOnTermination bool) error
	AssignPrivateIpAddresses(ctx context.Context, eniID string, addresses int64) error
	UnassignPrivateIpAddresses(ctx context.Context, eniID string, addresses []string) error
	TagENI(ctx context.Context, eniID string, eniTags map[string]string) error
}
// metricsAPI publishes the allocation gauges/counters maintained by the
// manager and supplies the metric observers attached to its triggers.
type metricsAPI interface {
	IncENIAllocationAttempt(status, subnetID string)
	AddIPAllocation(subnetID string, allocated int64)
	AddIPRelease(subnetID string, released int64)
	SetAllocatedIPs(typ string, allocated int)
	SetAvailableENIs(available int)
	SetAvailableIPsPerSubnet(subnetID string, availabilityZone string, available int)
	SetNodes(category string, nodes int)
	IncResyncCount()
	// The trigger observers below are handed to trigger.NewTrigger.
	PoolMaintainerTrigger() trigger.MetricsObserver
	K8sSyncTrigger() trigger.MetricsObserver
	ResyncTrigger() trigger.MetricsObserver
}
// nodeMap is a mapping of node names to ENI nodes. Access is guarded by
// NodeManager.mutex.
type nodeMap map[string]*Node
// NodeManager manages all nodes with ENIs
type NodeManager struct {
	// mutex protects the nodes map.
	mutex lock.RWMutex
	// nodes indexes all managed nodes by name.
	nodes nodeMap
	// instancesAPI provides the cached EC2 instance/subnet view.
	instancesAPI nodeManagerAPI
	ec2API       ec2API
	k8sAPI       k8sAPI
	metricsAPI   metricsAPI
	// resyncTrigger serializes manager-wide resyncs.
	resyncTrigger *trigger.Trigger
	// parallelWorkers bounds the number of concurrent node resyncs.
	parallelWorkers int64
	// eniTags are tags applied to ENIs created by this manager.
	eniTags map[string]string
}
// NewNodeManager constructs a NodeManager operating over the given EC2,
// Kubernetes and metrics backends and wires up its resync trigger. An
// error is returned only if the trigger cannot be created.
func NewNodeManager(instancesAPI nodeManagerAPI, ec2API ec2API, k8sAPI k8sAPI, metrics metricsAPI, parallelWorkers int64, eniTags map[string]string) (*NodeManager, error) {
	// A non-positive worker count would stall Resync; clamp to one worker.
	if parallelWorkers < 1 {
		parallelWorkers = 1
	}
	manager := &NodeManager{
		nodes:           nodeMap{},
		instancesAPI:    instancesAPI,
		ec2API:          ec2API,
		k8sAPI:          k8sAPI,
		metricsAPI:      metrics,
		parallelWorkers: parallelWorkers,
		eniTags:         eniTags,
	}
	// The resync trigger first refreshes the EC2 view, then reconciles all
	// known nodes against it.
	resyncFunc := func(reasons []string) {
		syncTime := instancesAPI.Resync(context.TODO())
		manager.Resync(context.TODO(), syncTime)
	}
	t, err := trigger.NewTrigger(trigger.Parameters{
		Name:            "eni-node-manager-resync",
		MinInterval:     10 * time.Millisecond,
		MetricsObserver: metrics.ResyncTrigger(),
		TriggerFunc:     resyncFunc,
	})
	if err != nil {
		return nil, fmt.Errorf("unable to initialize resync trigger: %s", err)
	}
	manager.resyncTrigger = t
	return manager, nil
}
// GetNames returns the names of all currently managed nodes.
func (n *NodeManager) GetNames() []string {
	n.mutex.RLock()
	defer n.mutex.RUnlock()
	names := make([]string, 0, len(n.nodes))
	for nodeName := range n.nodes {
		names = append(names, nodeName)
	}
	return names
}
// Update is called whenever a CiliumNode resource has been updated in the
// Kubernetes apiserver. On first sight of a node it creates the per-node
// pool-maintainer and k8s-sync triggers; it then forwards the resource to
// the node. Returns false if the node's triggers could not be created.
// 'resource' is deep copied before it is stored.
func (n *NodeManager) Update(resource *v2.CiliumNode) bool {
	n.mutex.Lock()
	node, ok := n.nodes[resource.Name]
	if !ok {
		node = &Node{
			name:    resource.Name,
			manager: n,
		}
		poolMaintainer, err := trigger.NewTrigger(trigger.Parameters{
			Name:            fmt.Sprintf("eni-pool-maintainer-%s", resource.Name),
			MinInterval:     10 * time.Millisecond,
			MetricsObserver: n.metricsAPI.PoolMaintainerTrigger(),
			TriggerFunc: func(reasons []string) {
				if err := node.MaintainIpPool(context.TODO()); err != nil {
					node.logger().WithError(err).Warning("Unable to maintain ip pool of node")
				}
			},
		})
		if err != nil {
			// BUG FIX: the lock must be released on this error path;
			// returning while still holding n.mutex deadlocked every
			// subsequent NodeManager operation.
			n.mutex.Unlock()
			node.logger().WithError(err).Error("Unable to create pool-maintainer trigger")
			return false
		}
		k8sSync, err := trigger.NewTrigger(trigger.Parameters{
			Name:            fmt.Sprintf("eni-node-k8s-sync-%s", resource.Name),
			MinInterval:     10 * time.Millisecond,
			MetricsObserver: n.metricsAPI.K8sSyncTrigger(),
			TriggerFunc: func(reasons []string) {
				node.SyncToAPIServer()
			},
		})
		if err != nil {
			poolMaintainer.Shutdown()
			// BUG FIX: release the lock on this error path as well.
			n.mutex.Unlock()
			node.logger().WithError(err).Error("Unable to create k8s-sync trigger")
			return false
		}
		node.poolMaintainer = poolMaintainer
		node.k8sSync = k8sSync
		n.nodes[node.name] = node
		log.WithField(fieldName, resource.Name).Info("Discovered new CiliumNode custom resource")
	}
	// Unlock before forwarding the resource to the node itself.
	n.mutex.Unlock()
	return node.updatedResource(resource)
}
// Delete is called after a CiliumNode resource has been deleted via the
// Kubernetes apiserver. It shuts down the node's background triggers and
// removes the node from the manager.
func (n *NodeManager) Delete(nodeName string) {
	n.mutex.Lock()
	defer n.mutex.Unlock()
	node, found := n.nodes[nodeName]
	if found {
		// Stop the per-node background triggers before dropping the node.
		if pm := node.poolMaintainer; pm != nil {
			pm.Shutdown()
		}
		if ks := node.k8sSync; ks != nil {
			ks.Shutdown()
		}
	}
	delete(n.nodes, nodeName)
}
// Get returns the node with the given name, or nil if it is unknown.
func (n *NodeManager) Get(nodeName string) *Node {
	n.mutex.RLock()
	defer n.mutex.RUnlock()
	return n.nodes[nodeName]
}
// GetNodesByIPWatermark returns all nodes that require addresses to be
// allocated or released, sorted by the number of addresses needed to be
// operated on in descending order. The number of addresses to be released
// is negative, so nodes with an IP deficit are resolved first.
func (n *NodeManager) GetNodesByIPWatermark() []*Node {
	n.mutex.RLock()
	list := make([]*Node, 0, len(n.nodes))
	for _, node := range n.nodes {
		list = append(list, node)
	}
	n.mutex.RUnlock()
	sort.Slice(list, func(i, j int) bool {
		needI := list[i].getNeededAddresses()
		needJ := list[j].getNeededAddresses()
		// Negative values mean "release": among releasing nodes, the one
		// with the larger excess (more negative) comes first.
		if needI < 0 && needJ < 0 {
			return needI < needJ
		}
		return needI > needJ
	})
	return list
}
// resyncStats aggregates per-node counters across one Resync run. The
// mutex guards concurrent updates from the parallel resync workers.
type resyncStats struct {
	mutex               lock.Mutex
	totalUsed           int // IPs currently in use across all nodes
	totalAvailable      int // IPs allocated but not yet used
	totalNeeded         int // IPs still needed to satisfy watermarks
	remainingInterfaces int // ENI slots still attachable
	nodes               int // nodes visited in this resync
	nodesAtCapacity     int // nodes with no spare IPs and no spare ENI slots
	nodesInDeficit      int // nodes that still need IP allocations
}
// resyncNode reconciles a single node during a manager-wide resync: it
// recalculates the node's allocation state, kicks the pool maintainer when
// IPs must be allocated or released, folds the node's counters into the
// shared stats, and finally triggers a sync to the apiserver.
func (n *NodeManager) resyncNode(ctx context.Context, node *Node, stats *resyncStats, syncTime time.Time) {
	node.mutex.Lock()
	// A resync started after the node was flagged satisfies the flag.
	if syncTime.After(node.resyncNeeded) {
		node.loggerLocked().Debug("Resetting resyncNeeded")
		node.resyncNeeded = time.Time{}
	}
	node.recalculateLocked()
	allocationNeeded := node.allocationNeeded()
	releaseNeeded := node.releaseNeeded()
	if allocationNeeded || releaseNeeded {
		// Pool maintenance runs asynchronously via the trigger.
		node.waitingForPoolMaintenance = true
		node.poolMaintainer.Trigger()
	}
	stats.mutex.Lock()
	stats.totalUsed += node.stats.usedIPs
	availableOnNode := node.stats.availableIPs - node.stats.usedIPs
	stats.totalAvailable += availableOnNode
	stats.totalNeeded += node.stats.neededIPs
	stats.remainingInterfaces += node.stats.remainingInterfaces
	stats.nodes++
	if allocationNeeded {
		stats.nodesInDeficit++
	}
	// A node with no spare IPs and no room for further ENIs is at capacity.
	if node.stats.remainingInterfaces == 0 && availableOnNode == 0 {
		stats.nodesAtCapacity++
	}
	// Refresh the per-subnet availability gauges from the current EC2 view.
	for subnetID, subnet := range n.instancesAPI.GetSubnets(ctx) {
		n.metricsAPI.SetAvailableIPsPerSubnet(subnetID, subnet.AvailabilityZone, subnet.AvailableAddresses)
	}
	stats.mutex.Unlock()
	node.mutex.Unlock()
	node.k8sSync.Trigger()
}
// Resync will attend all nodes and resolves IP deficits. The order of
// attendance is defined by the number of IPs needed to reach the configured
// watermarks. Any updates to the node resource are synchronized to the
// Kubernetes apiserver.
func (n *NodeManager) Resync(ctx context.Context, syncTime time.Time) {
	stats := resyncStats{}
	sem := semaphore.NewWeighted(n.parallelWorkers)
	for _, node := range n.GetNodesByIPWatermark() {
		if err := sem.Acquire(ctx, 1); err != nil {
			// BUG FIX: Acquire only fails when ctx is done, so continuing
			// over the remaining nodes just spun without doing any work.
			break
		}
		go func(node *Node, stats *resyncStats) {
			n.resyncNode(ctx, node, stats, syncTime)
			sem.Release(1)
		}(node, &stats)
	}
	// Acquire the full semaphore, this requires all go routines to
	// complete and thus blocks until all nodes are synced.
	if err := sem.Acquire(ctx, n.parallelWorkers); err != nil {
		// BUG FIX: the error was previously ignored; on cancellation the
		// workers may still be running, so publishing metrics here would
		// read stats concurrently with their writes (data race) and report
		// incomplete numbers.
		return
	}
	n.metricsAPI.SetAllocatedIPs("used", stats.totalUsed)
	n.metricsAPI.SetAllocatedIPs("available", stats.totalAvailable)
	n.metricsAPI.SetAllocatedIPs("needed", stats.totalNeeded)
	n.metricsAPI.SetAvailableENIs(stats.remainingInterfaces)
	n.metricsAPI.SetNodes("total", stats.nodes)
	n.metricsAPI.SetNodes("in-deficit", stats.nodesInDeficit)
	n.metricsAPI.SetNodes("at-capacity", stats.nodesAtCapacity)
}
|
package testFunctions
import (
"bufio"
"fmt"
"net"
"strings"
"time"
)
// sayHello returns the package's fixed greeting string.
func sayHello() string {
	const greeting = "Hello from this another package"
	return greeting
}
// testSwitch prints whether the current local time is before or after noon.
func testSwitch() {
	now := time.Now()
	if now.Hour() < 12 {
		fmt.Println("It's before noon")
	} else {
		fmt.Println("It's after noon")
	}
}
// testarray demonstrates array declaration, slicing, and the fact that a
// slice shares its backing array with the array it was taken from.
func testarray() {
	var a [3]int // int array with length 3
	a[0] = 12    // array index starts at 0
	a[1] = 78
	a[2] = 50
	fmt.Println(a)
	b := [3]int{22, 7448, 4} // short hand declaration to create array
	fmt.Println(b)
	c := [...]int{12, 78, 50} // ... makes the compiler determine the length
	fmt.Println(c)
	// ::::::::::::::::::::::::::::::::::::::::::::::
	j := [5]int{44, 12, 33, 786, 80}
	var t []int = j[0:4] // creates a slice of j[0]..j[3] (end index 4 is exclusive)
	fmt.Println(t)
	// ::::::::::::::::::::::::::::::::::::::::::::::
	darr := [...]int{57, 89, 90, 82, 100, 78, 67, 69, 59}
	dslice := darr[2:5]
	fmt.Println("array before", darr)
	// dslice shares darr's backing array, so these increments are visible
	// in darr as well (compare the "before"/"after" prints).
	for i := range dslice {
		dslice[i]++
	}
	fmt.Println("array after", darr)
}
// testtime prints the current Unix timestamp (seconds since the epoch).
func testtime() {
	fmt.Println(time.Now().Unix())
}
// testTcpServer starts a simple line-oriented TCP server on port 4444. It
// accepts a single connection and, for every newline-terminated message
// received, replies with the upper-cased text. Runs until the connection
// is closed or an error occurs.
func testTcpServer() {
	fmt.Println("Launching server...")
	// listen on all interfaces
	ln, err := net.Listen("tcp", ":4444")
	if err != nil {
		// BUG FIX: the error was previously ignored, leading to a nil
		// dereference on Accept when the port was unavailable.
		fmt.Println("listen:", err)
		return
	}
	// accept a single connection on the port
	conn, err := ln.Accept()
	if err != nil {
		fmt.Println("accept:", err)
		return
	}
	// BUG FIX: create the buffered reader ONCE. Allocating a fresh
	// bufio.Reader on every iteration (as before) can silently drop bytes
	// the previous reader had already buffered beyond the first newline.
	reader := bufio.NewReader(conn)
	// run loop forever (or until the connection errors/closes)
	for {
		// will listen for message to process ending in newline (\n)
		message, err := reader.ReadString('\n')
		if err != nil {
			fmt.Println("read:", err)
			return
		}
		// output message received
		fmt.Print("Message Received:", message)
		// sample process for string received
		newmessage := strings.ToUpper(message)
		// send new string back to client
		conn.Write([]byte("\n rec from server : " + newmessage + "\n"))
	}
}
// say prints s 1000 times, sleeping 100ms before each print (so a full run
// takes roughly 100 seconds).
func say(s string) {
	const repeats = 1000
	for count := 0; count < repeats; count++ {
		time.Sleep(100 * time.Millisecond)
		fmt.Println(s)
	}
}
// sum totals the elements of s and sends the result on channel c.
func sum(s []int, c chan int) {
	total := 0
	for i := range s {
		total += s[i]
	}
	c <- total // send sum to c
}
// squares receives four ints from c and prints the square of each.
func squares(c chan int) {
	for count := 0; count < 4; count++ {
		v := <-c
		fmt.Println(v * v)
	}
}
// testThread demonstrates a goroutine: "world" is printed from a background
// goroutine while "hello" prints on the calling goroutine. It blocks for
// the duration of say("hello") (~100s); the background goroutine may be cut
// short when the caller returns and the program exits.
func testThread() {
	go say("world")
	say("hello")
}
// testThreadChannal splits a slice in half, sums each half in its own
// goroutine, and prints both partial sums and their total.
func testThreadChannal() {
	values := []int{7, 2, 8, -9, 4, 0}
	results := make(chan int)
	half := len(values) / 2
	go sum(values[:half], results)
	go sum(values[half:], results)
	first, second := <-results, <-results // receive both partial sums
	fmt.Println(first, second, first+second)
}
// testChannalBuffer demonstrates a buffered channel: three sends complete
// without blocking while squares consumes on a goroutine.
// NOTE(review): squares loops four times (i <= 3) but only three values are
// sent, so its goroutine blocks forever on the fourth receive; and because
// this function returns immediately, the prints may never appear before the
// program exits. Confirm whether this demo is intentionally incomplete.
func testChannalBuffer() {
	c := make(chan int, 3)
	go squares(c)
	c <- 1
	c <- 2
	c <- 3
}
// RunTest is the package's demo entry point. It currently runs only the
// two-goroutine channel-sum demo; the other demos are kept commented out
// for manual experimentation.
func RunTest() {
	// if testCSV() {
	// 	fmt.Printf("csv export success \n")
	// }
	//testTcpServer()
	//:::::::::::::::::::::::::: THREAD
	//testThread()
	testThreadChannal()
	// testChannalBuffer()
}
|
package psql
import (
"database/sql"
"fmt"
_ "github.com/lib/pq"
"log"
)
type (
	// PsqlContext is the handle the application uses to talk to
	// PostgreSQL: raw pool access, shutdown, and schema bootstrap.
	PsqlContext interface {
		// GetDb returns the underlying connection pool.
		GetDb() *sql.DB
		// Close closes the connection pool.
		Close()
		// SchemeInit executes the given schema SQL, returning any error.
		SchemeInit(scheme string) error
	}
	// psqlContext is the sole PsqlContext implementation; it wraps *sql.DB.
	psqlContext struct {
		db *sql.DB
	}
)
// GetDb exposes the underlying *sql.DB connection pool.
func (c *psqlContext) GetDb() *sql.DB { return c.db }
// Close releases the underlying database connection pool. The error from
// sql.DB.Close is discarded (best-effort shutdown).
func (c *psqlContext) Close() {
	c.db.Close()
}
// SchemeInit executes the given schema SQL against the database, logging
// and returning any execution error.
func (c *psqlContext) SchemeInit(scheme string) error {
	_, err := c.db.Exec(scheme)
	if err != nil {
		log.Println(err)
	}
	return err
}
// NewConnect opens a PostgreSQL connection with the given parameters,
// verifies it with a ping, and returns the wrapping context. It panics
// (via log.Panic) if the connection cannot be established.
func NewConnect(host string, port string, user string, password string, dbName string, sslMode string) PsqlContext {
	dsn := fmt.Sprintf("host=%s port=%s user=%s password=%s dbname=%s sslmode=%s",
		host, port, user, password, dbName, sslMode)
	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Panic(err)
	}
	if err = db.Ping(); err != nil {
		log.Panic(err)
	}
	return &psqlContext{db: db}
}
|
package main
import "lesson/myLesson/readAndWrite/read"
// main is the entry point: it runs the gob serialization demo from the
// read package. The other read demos are kept commented out for manual use.
func main() {
	//read.TestReadFromCmd()
	//read.TestReadFromFile()
	read.TestGob1()
}
|
package main
// StageList holds stage definitions loaded from a YAML file (see
// parseStagesFromYaml).
type StageList struct {
	// Stages is the list of stage names.
	Stages []string
	// CompoundStages maps a compound stage name to its member stages.
	CompoundStages map[string][]string
}
// parseStagesFromYaml reads the YAML file at filename into a StageList.
func parseStagesFromYaml(filename string) *StageList {
	var stages StageList
	parseFromYamlFile(filename, &stages)
	return &stages
}
|
package sw
import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"fmt"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp/secp256k1"
)
// ecdsa256K1KeyGenerator generates ECDSA key pairs on the secp256k1 curve.
type ecdsa256K1KeyGenerator struct {
	// curve is the configured elliptic curve.
	// NOTE(review): KeyGen always generates on secp256k1.S256(); this field
	// appears only in error reporting — confirm it is intentionally unused.
	curve elliptic.Curve
}
// KeyGen generates a new ECDSA private key on the secp256k1 curve and wraps
// it as a bccsp.Key. The opts argument is currently unused.
func (kg *ecdsa256K1KeyGenerator) KeyGen(opts bccsp.KeyGenOpts) (k bccsp.Key, err error) {
	key, err := ecdsa.GenerateKey(secp256k1.S256(), rand.Reader)
	if err != nil {
		// FIX: lowercase error string per Go convention; the previous
		// message reported kg.curve, which is not the curve actually used
		// (the key is always generated on secp256k1).
		return nil, fmt.Errorf("generating ECDSA secp256k1 key: %s", err)
	}
	return &Ecdsa256K1PrivateKey{key}, nil
}
|
package http
import (
"github.com/heroku/cytokine/logging"
gohttp "net/http"
"time"
)
// TimingTransport is an http.RoundTripper decorator that reports each
// request to the logging monitor (see RoundTrip).
type TimingTransport struct {
	// Transport is the wrapped RoundTripper performing the actual request.
	Transport gohttp.RoundTripper
}
// RoundTrip delegates to the wrapped Transport, reporting the request under
// the "cytokine.http.request" key. Note that defer evaluates time.Now()
// immediately, so logging.Monitor receives the request start time and is
// invoked after the request completes (presumably to record its duration —
// see logging.Monitor for the exact semantics).
func (t TimingTransport) RoundTrip(request *gohttp.Request) (*gohttp.Response, error) {
	defer logging.Monitor("cytokine.http.request", time.Now())
	return t.Transport.RoundTrip(request)
}
|
package cli
import (
"github.com/HNB-ECO/HNB-Blockchain/HNB/access/rest"
appComm "github.com/HNB-ECO/HNB-Blockchain/HNB/appMgr/common"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp/secp256k1"
"github.com/HNB-ECO/HNB-Blockchain/HNB/bccsp/sw"
"github.com/HNB-ECO/HNB-Blockchain/HNB/common"
"github.com/HNB-ECO/HNB-Blockchain/HNB/consensus/dbft"
"github.com/HNB-ECO/HNB-Blockchain/HNB/contract/hgs"
"github.com/HNB-ECO/HNB-Blockchain/HNB/contract/hnb"
"github.com/HNB-ECO/HNB-Blockchain/HNB/msp"
"github.com/HNB-ECO/HNB-Blockchain/HNB/rlp"
"github.com/HNB-ECO/HNB-Blockchain/HNB/util"
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"encoding/json"
"fmt"
"github.com/urfave/cli"
"io/ioutil"
"math/big"
"net/http"
)
// Flags shared by the transaction-related subcommands: chain routing,
// addresses, amounts, the key file location and the nonce.
var (
	CliTxSendMsg = cli.StringFlag{
		Name:  "sendmsg",
		Value: "Hello World",
		Usage: "Send the cli interface of the transaction",
	}
	CliChainID = cli.StringFlag{
		Name:  "chainID",
		Value: "hgs",
		Usage: "chainid",
	}
	CliFromAddr = cli.StringFlag{
		Name:  "from",
		Value: "",
		Usage: "from address",
	}
	CliFromChainID = cli.StringFlag{
		Name:  "fromCoinType",
		Value: "",
		Usage: "hnb/hgs",
	}
	CliFromAmount = cli.Int64Flag{
		Name:  "fromAmount",
		Value: 0,
		Usage: "amount",
	}
	CliToAddr = cli.StringFlag{
		Name:  "to",
		Value: "",
		Usage: "to address",
	}
	CliToChainID = cli.StringFlag{
		Name:  "toCoinType",
		Value: "",
		Usage: "hnb/hgs",
	}
	CliToAmount = cli.Int64Flag{
		Name:  "toAmount",
		Value: 0,
		Usage: "amount",
	}
	// CliKeyPath locates the node key file used for signing.
	CliKeyPath = cli.StringFlag{
		Name:  "keyPath",
		Value: "./node.key",
		Usage: "key path",
	}
	//CliQueryType = cli.StringFlag{
	//	Name:  "queryType",
	//	Value: "balance",
	//	Usage: "balance",
	//}
	CliBalanceAddr = cli.StringFlag{
		Name:  "addr",
		Value: "",
		Usage: "Query account balance",
	}
	CliNonce = cli.Int64Flag{
		Name:  "nonce",
		Value: 0,
		Usage: "nonce",
	}
)
// Flags for the vote subcommand: candidate, voting power and vote epoch.
var (
	CliSendVoteMsg = cli.StringFlag{
		Name:  "send vote msg",
		Value: "vote",
		Usage: "send vote tx to vote for one node",
	}
	CliCandidate = cli.StringFlag{
		Name:  "candidate",
		Value: "",
		Usage: "candidate node",
	}
	CliVotingPower = cli.Int64Flag{
		Name:  "votingPower",
		Value: 0,
		Usage: "node vote power",
	}
	// CliVoteEpochNo of 0 means "use the current consensus epoch"
	// (see SendVoteMsg).
	CliVoteEpochNo = cli.Uint64Flag{
		Name:  "voteEpochNo",
		Value: 0,
		Usage: "voteEpochNo",
	}
)
//var ReadTxPoolLen = cli.Command{
// Name: "txpoollen",
// Usage: "Get the number of transactions in the tx pool, including two parts queue and pending",
// Action: GetTxcount,
// Flags: []cli.Flag{
// CliChainID,
// },
//}
// ./querymsg --restport=xxx balance --chainid=xx --addr=xxx
// QueryBalanceCommand implements "querybalance": it resolves the target
// address (from --addr or the key file) and issues a getBalance call.
var QueryBalanceCommand = cli.Command{
	Name:   "querybalance",
	Usage:  "query balance by address",
	Action: QueryBalance,
	Flags: []cli.Flag{
		CliChainID,
		CliRest,
		CliBalanceAddr,
		CliKeyPath,
	},
}
// QueryMsgCommand groups the query subcommands; its default action is also
// QueryBalance.
var QueryMsgCommand = cli.Command{
	Name:   "querymsg",
	Usage:  "Query msg",
	Action: QueryBalance,
	//Flags: []cli.Flag{
	//	CliRest,
	//},
	Subcommands: []cli.Command{
		QueryBalanceCommand,
	},
}
// ./start sendmsg tx from=xxx coinType=hnb to=xxx coinType=hgs
// ./start sendmsg vote
// SendTx implements "sendmsg": build, sign and submit a transfer
// transaction (see SendMsg for the flag semantics).
var SendTx = cli.Command{
	Name:   "sendmsg",
	Usage:  "Send the cli interface of the transaction",
	Action: SendMsg,
	Flags: []cli.Flag{
		CliFromAddr,
		CliFromChainID,
		CliToAddr,
		CliToChainID,
		CliRest,
		CliFromAmount,
		CliToAmount,
		CliKeyPath,
		CliNonce,
	},
}
// SendVoteTx implements "sendvotemsg": build, sign and submit a POS vote
// transaction (see SendVoteMsg for the flag semantics).
var SendVoteTx = cli.Command{
	Name:   "sendvotemsg",
	Usage:  "send vote tx to vote for one node",
	Action: SendVoteMsg,
	Flags: []cli.Flag{
		//CliFromAddr,
		//CliFromChainID,
		CliRest,
		CliCandidate,
		CliVotingPower,
		CliVoteEpochNo,
		CliKeyPath,
		CliNonce,
	},
}
//func GetTxcount(ctx *cli.Context) error {
// //chainID := ctx.String(CliRest.Name)
// //txCount := txpool.TxsLen(chainID)
// //fmt.Printf(">> the %s txpool len %d\n", chainID, txCount)
// return nil
//}
// loadKey loads a secp256k1 key pair from the key file at path and returns
// its public key as a bccsp.Key. It reconstructs the private scalar (D) and
// the public point (X, Y) from the stored bytes.
func loadKey(path string) (bccsp.Key, error) {
	kp := &msp.KeyPair{}
	sKeyPair, err := msp.Load(path)
	if err != nil {
		return nil, err
	}
	key := new(ecdsa.PrivateKey)
	key.Curve = secp256k1.S256()
	key.D = new(big.Int)
	key.D.SetBytes(sKeyPair.PriKey)
	key.PublicKey.X, key.PublicKey.Y = elliptic.Unmarshal(secp256k1.S256(), sKeyPair.PubKey)
	if key.PublicKey.X == nil {
		// BUG FIX: elliptic.Unmarshal returns (nil, nil) on malformed
		// input; without this check a corrupt key file caused a
		// nil-pointer panic further down the call chain.
		return nil, fmt.Errorf("invalid public key data in %s", path)
	}
	kp.Scheme = sKeyPair.Scheme
	kp.PubKey = &sw.Ecdsa256K1PublicKey{&key.PublicKey}
	kp.PriKey = &sw.Ecdsa256K1PrivateKey{key}
	return kp.PubKey, nil
}
// SendMsg builds, signs and submits a transfer transaction. The source
// chain (--fromCoinType) selects HNB or HGS; a matching --toCoinType makes
// a same-chain (SAME) transfer, otherwise a cross-chain (DIFF) one. The
// signed transaction is RLP-encoded and posted to the local REST endpoint
// as a "sendRawTransaction" JSON-RPC call.
func SendMsg(ctx *cli.Context) {
	port := ctx.String(CliRest.Name)
	fmt.Println("port:" + port)
	chainID := ctx.String(CliFromChainID.Name)
	toChainID := ctx.String(CliToChainID.Name)
	fromAmount := ctx.Int64(CliFromAmount.Name)
	toAmount := ctx.Int64(CliToAmount.Name)
	toAddr := ctx.String(CliToAddr.Name)
	path := ctx.String(CliKeyPath.Name)
	nonce := ctx.Int64(CliNonce.Name)
	// A transfer within one chain uses the SAME tx type, across chains DIFF.
	isSame := chainID == toChainID
	var payload []byte
	switch chainID {
	case appComm.HNB:
		ht := &hnb.HnbTx{}
		if isSame {
			ht.TxType = hnb.SAME
			st := &hnb.SameTx{}
			st.OutputAddr = util.HexToByte(toAddr)
			st.Amount = fromAmount
			ht.PayLoad, _ = json.Marshal(st)
		} else {
			ht.TxType = hnb.DIFF
			df := &hnb.DiffTx{}
			df.Amount = toAmount
			df.OutputAddr = util.HexToByte(toAddr)
			df.InAmount = fromAmount
			ht.PayLoad, _ = json.Marshal(df)
		}
		payload, _ = json.Marshal(ht)
	case appComm.HGS:
		ht := &hgs.HgsTx{}
		if isSame {
			ht.TxType = hgs.SAME
			st := &hgs.SameTx{}
			st.OutputAddr = util.HexToByte(toAddr)
			st.Amount = fromAmount
			ht.PayLoad, _ = json.Marshal(st)
		} else {
			ht.TxType = hgs.DIFF
			df := &hgs.DiffTx{}
			df.Amount = toAmount
			df.OutputAddr = util.HexToByte(toAddr)
			df.InAmount = fromAmount
			ht.PayLoad, _ = json.Marshal(df)
		}
		payload, _ = json.Marshal(ht)
	default:
		fmt.Println("chainID invalid")
		return
	}
	key, err := loadKey(path)
	if err != nil {
		// BUG FIX: previously execution continued with a nil key and
		// panicked inside AccountPubkeyToAddress1.
		fmt.Println("load key ", err.Error())
		return
	}
	msgTx := common.Transaction{}
	address := msp.AccountPubkeyToAddress1(key)
	msgTx.Payload = payload
	msgTx.From = address
	msgTx.ContractName = chainID
	msgTx.NonceValue = uint64(nonce)
	signer := msp.GetSigner()
	msgTx.Txid = signer.Hash(&msgTx)
	// Consistency fix: initialize with the configured --keyPath (as
	// SendVoteMsg already does) instead of the hard-coded "./node.key".
	// Backward compatible: the flag's default value is "./node.key".
	err = msp.NewKeyPair().Init(path)
	if err != nil {
		fmt.Println(err)
		return
	}
	msgTxWithSign, err := msp.SignTx(&msgTx, signer)
	if err != nil {
		fmt.Println(err)
		return
	}
	mt, _ := rlp.EncodeToBytes(msgTxWithSign)
	url := "http://" + "127.0.0.1:" + port + "/"
	jm := &rest.JsonrpcMessage{Version: "1.0"}
	jm.Method = "sendRawTransaction"
	var params []interface{}
	params = append(params, util.ToHex(mt))
	jm.Params, _ = json.Marshal(params)
	jmm, _ := json.Marshal(jm)
	response, err := http.Post(url, "application/json", bytes.NewReader(jmm))
	if err != nil {
		// BUG FIX: previously response.Body was read even when err != nil,
		// dereferencing a nil response.
		fmt.Println(err)
		return
	}
	defer response.Body.Close()
	result, err := ioutil.ReadAll(response.Body)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(">>" + string(result))
}
// QueryBalance queries an account balance via a "getBalance" JSON-RPC call
// against the local REST endpoint. When --addr is empty the address is
// derived from the key file at --keyPath.
func QueryBalance(ctx *cli.Context) {
	port := ctx.String(CliRest.Name)
	fmt.Println("port:" + port)
	chainID := ctx.String(CliChainID.Name)
	fmt.Println("chainID:" + chainID)
	addr := ctx.String(CliBalanceAddr.Name)
	if addr == "" {
		// Fall back to the address of the locally stored key.
		path := ctx.String(CliKeyPath.Name)
		key, err := loadKey(path)
		if err != nil {
			fmt.Println("load key ", err.Error())
			return
		}
		address := msp.AccountPubkeyToAddress1(key)
		addr = util.ByteToHex(address.GetBytes())
	}
	fmt.Println("addr:", addr)
	url := "http://" + "127.0.0.1:" + port + "/"
	jm := &rest.JsonrpcMessage{Version: "1.0"}
	jm.Method = "getBalance"
	var params []interface{}
	params = append(params, chainID, addr)
	jm.Params, _ = json.Marshal(params)
	jmm, _ := json.Marshal(jm)
	response, err := http.Post(url, "application/json", bytes.NewReader(jmm))
	if err != nil {
		// BUG FIX: previously response.Body was read even when err != nil,
		// dereferencing a nil response.
		fmt.Println(err)
		return
	}
	defer response.Body.Close()
	result, err := ioutil.ReadAll(response.Body)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(">>" + string(result))
}
// SendVoteMsg builds, signs and submits a POS vote transaction for the
// given --candidate with --votingPower. When --voteEpochNo is 0 the current
// epoch is obtained from the consensus layer. The signed transaction is
// RLP-encoded and posted as a "sendRawTransaction" JSON-RPC call.
func SendVoteMsg(ctx *cli.Context) {
	port := ctx.String(CliRest.Name)
	fmt.Println("port:" + port)
	candidate := ctx.String(CliCandidate.Name)
	nonce := ctx.Int64(CliNonce.Name)
	path := ctx.String(CliKeyPath.Name)
	key, err := loadKey(path)
	if err != nil {
		// BUG FIX: previously execution continued with a nil key and
		// panicked inside AccountPubkeyToAddress1.
		fmt.Println("load key ", err.Error())
		return
	}
	address := msp.AccountPubkeyToAddress1(key)
	votePower := ctx.Int64(CliVotingPower.Name)
	epochNo := ctx.Uint64(CliVoteEpochNo.Name)
	// NOTE(review): an hgs.HgsTx envelope is filled with hnb vote types and
	// sent to the appComm.HNB contract — confirm this mixing is intended.
	ht := &hgs.HgsTx{}
	ht.TxType = hnb.POS_VOTE_TRANSCATION
	voteMsg := &hnb.VoteInfo{}
	voteMsg.FromAddr = address.GetBytes()
	voteMsg.Candidate = msp.StringToByteKey(candidate)
	voteMsg.VotingPower = votePower
	if epochNo == 0 {
		// Default to the consensus layer's current epoch.
		epochNo = dbft.DS.GetCurrentEpochNo()
	}
	voteMsg.VoteEpoch = epochNo
	ht.PayLoad, _ = json.Marshal(voteMsg)
	payload, _ := json.Marshal(ht)
	msgTx := common.Transaction{}
	msgTx.Payload = payload
	msgTx.From = address
	msgTx.ContractName = appComm.HNB
	msgTx.NonceValue = uint64(nonce)
	signer := msp.GetSigner()
	msgTx.Txid = signer.Hash(&msgTx)
	err = msp.NewKeyPair().Init(path)
	if err != nil {
		fmt.Println(err)
		return
	}
	msgTxWithSign, err := msp.SignTx(&msgTx, signer)
	if err != nil {
		fmt.Println(err)
		return
	}
	mt, _ := rlp.EncodeToBytes(msgTxWithSign)
	url := "http://" + "127.0.0.1:" + port + "/"
	jm := &rest.JsonrpcMessage{Version: "1.0"}
	jm.Method = "sendRawTransaction"
	var params []interface{}
	params = append(params, util.ToHex(mt))
	jm.Params, _ = json.Marshal(params)
	jmm, _ := json.Marshal(jm)
	response, err := http.Post(url, "application/json", bytes.NewReader(jmm))
	if err != nil {
		// BUG FIX: previously response.Body was read even when err != nil,
		// dereferencing a nil response.
		fmt.Println(err)
		return
	}
	defer response.Body.Close()
	result, err := ioutil.ReadAll(response.Body)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(">>" + string(result))
}
|
package main
import (
"github.com/ant0ine/go-json-rest"
"log"
"net/http"
"os"
"runtime"
// "runtime/pprof"
// _ "net/http/pprof"
// "flag"
)
// var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")
// main boots the AT-AT server: load configuration, set up logging,
// initialize the MySQL pool (plus Redis and the search indexer when caching
// is enabled), wire the REST routes and start the HTTP listener.
func main() {
	// flag.Parse()
	// (CPU-profiling and pprof scaffolding intentionally kept disabled;
	// see the commented imports above.)
	runtime.GOMAXPROCS(config.server.cpuCore)
	parseConfig("./conf/config.conf")
	logFile := getLogFile(config.global.logFile)
	startLogger := serverStartInfoLog(logFile)
	indexLogger := serverIndexInfoLog(logFile)
	startLogger.Println("AT-AT version", version)
	startLogger.Println(ascii_icon)
	startLogger.Println("Start atat server......")
	startLogger.Println("Server PID: ", os.Getpid())
	startLogger.Println("Starting Initialize Mysql Connection Pool")
	model.Init()
	startLogger.Println("Initialize Mysql Connection Pool Successful.")
	if config.global.cacheEable {
		startLogger.Println("Starting Initialize Redis Connection Pool.....")
		cache.Init()
		startLogger.Println("Initialize Redis Connection Pool Successful.")
		indexLogger.Println("Starting Initialize Indexer.....")
		engine.Init()
		go engine.IndexAll()
		// NOTE(review): engine.checkStatus is unexported; a cross-package
		// call cannot compile. Confirm the intended exported name
		// (e.g. engine.CheckStatus).
		go engine.checkStatus()
	}
	handler := rest.ResourceHandler{}
	handler.EnableGzip = config.server.gzip
	// NOTE(review): EnableStatusService is driven by the gzip flag — this
	// looks like a copy-paste slip; confirm the intended config field.
	handler.EnableStatusService = config.server.gzip
	handler.EnableResponseStackTrace = config.global.debug
	handler.DisableJsonIndent = config.server.jsonIndent
	handler.Logger = log.New(logFile, "[Request] ", log.LstdFlags)
	handler.SetRoutes(
		rest.Route{"GET", "/app/release", controller.GetAppRelease},
		rest.Route{"GET", "/book/search", controller.GetBookListFromKeyword},
		rest.Route{"HEAD", "/book/search", controller.GetBookListFromKeyword},
		rest.Route{"GET", "/book/:id", controller.GetBookFromBookId},
		rest.Route{"HEAD", "/book/:id", controller.GetBookFromBookId},
		rest.Route{"GET", "/book/isbn/:isbn", controller.GetBookFromBookISBN},
		rest.Route{"HEAD", "/book/isbn/:isbn", controller.GetBookFromBookISBN},
	)
	// Resolve the listen address, falling back to the package default.
	addr := defaultListenAddrPort
	if config.server.listenAddr != "" && config.server.port != "" {
		addr = config.server.listenAddr + ":" + config.server.port
	}
	startLogger.Println("Server listen on: ", addr)
	// BUG FIX: the ListenAndServe error was silently discarded; a failed
	// bind (port in use, bad address) previously exited with no diagnostics.
	if err := http.ListenAndServe(addr, &handler); err != nil {
		log.Fatalln("server error:", err)
	}
}
|
package config
import (
"flag"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"github.com/spf13/viper"
)
// DBConfig database config. Fields are populated from the "db" section of
// the YAML config file via the mapstructure tags.
type DBConfig struct {
	Database string `mapstructure:"name"`
	Host     string `mapstructure:"host"`
	// MaxConn and MaxIdleConn are kept as strings exactly as configured.
	MaxConn     string `mapstructure:"max_connections"`
	MaxIdleConn string `mapstructure:"max_idle_connections"`
	Password    string `mapstructure:"password"`
	Port        int    `mapstructure:"port"`
	User        string `mapstructure:"user"`
}
// RabbitMQConfig config for rabbit mq. Fields are populated from the
// "rabbitmq" section of the YAML config file.
type RabbitMQConfig struct {
	Host     string `mapstructure:"host"`
	Queue    string `mapstructure:"queue"`
	Password string `mapstructure:"password"`
	Port     int    `mapstructure:"port"`
	// Retries is the configured retry count for queue operations.
	Retries int    `mapstructure:"retries"`
	User    string `mapstructure:"user"`
}
// Config application config: the root structure unmarshalled from the YAML
// config file (see ParseConfig for defaults).
type Config struct {
	DB         DBConfig `mapstructure:"db"`
	GRPCListen string   `mapstructure:"grpc_listen"`
	HTTPListen string   `mapstructure:"http_listen"`
	LogFile    string   `mapstructure:"log_file"`
	// LogLevel defaults to "info" when absent from the file.
	LogLevel string `mapstructure:"log_level"`
	// LogStdout defaults to true when absent from the file.
	LogStdout   bool           `mapstructure:"log_console"`
	RabbitMQ    RabbitMQConfig `mapstructure:"rabbitmq"`
	StorageType string         `mapstructure:"storage_type"`
}
// init registers the --configfile flag and binds all pflag flags into
// viper. NOTE: this runs at package import time and calls pflag.Parse(),
// a side effect importers cannot opt out of.
func init() {
	flag.String("configfile", "config.yaml", "config file path")
	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
	pflag.Parse()
	if err := viper.BindPFlags(pflag.CommandLine); err != nil {
		panic(err)
	}
}
// ParseConfig reads the YAML file named by the "configfile" flag and
// unmarshals it into a Config, applying defaults for the log settings.
func ParseConfig() (*Config, error) {
	viper.SetConfigFile(viper.GetString("configfile"))
	viper.SetConfigType("yaml")
	if err := viper.ReadInConfig(); err != nil {
		return nil, errors.Wrap(err, "could not read config")
	}
	// Defaults are set before unmarshalling so the file can override them.
	cfg := &Config{
		LogLevel:  "info",
		LogStdout: true,
	}
	if err := viper.Unmarshal(cfg); err != nil {
		return nil, errors.Wrap(err, "could not parse config")
	}
	return cfg, nil
}
|
package cmd
import (
"encoding/json"
"fmt"
"log"
"net/http"
"time"
"github.com/r3labs/sse"
"github.com/spf13/cobra"
)
// LogResponse is one SSE payload from the instance logs endpoint: a page of
// log entries plus the namespace/instance they belong to.
type LogResponse struct {
	PageInfo struct {
		TotalCount int `json:"total"`
		Limit      int `json:"limit"`
		Offset     int `json:"offset"`
	} `json:"pageInfo"`
	// Results are the individual log lines in this page.
	Results []struct {
		T     time.Time         `json:"t"`
		Msg   string            `json:"msg"`
		Level string            `json:"level"`
		Tags  map[string]string `json:"tags"`
	} `json:"results"`
	Namespace string `json:"namespace"`
	Instance  string `json:"instance"`
}
// InstanceResponse is one SSE payload from the instance endpoint: the
// instance's current status plus its invoking workflow metadata.
type InstanceResponse struct {
	Namespace string `json:"namespace"`
	Instance  struct {
		CreatedAt    time.Time `json:"createdAt"`
		UpdatedAt    time.Time `json:"updatedAt"`
		ID           string    `json:"id"`
		As           string    `json:"as"`
		Status       string    `json:"status"`
		ErrorCode    string    `json:"errorCode"`
		ErrorMessage string    `json:"errorMessage"`
	} `json:"instance"`
	InvokedBy string   `json:"invokedBy"`
	Flow      []string `json:"flow"`
	Workflow  struct {
		Path     string `json:"path"`
		Name     string `json:"name"`
		Parent   string `json:"parent"`
		Revision string `json:"revision"`
	} `json:"workflow"`
}
// FilterQueryInstance describes one instance-list filter: the filter type,
// the field to filter on, and the value parts (joined with "::").
type FilterQueryInstance struct {
	Typ     string
	Filter  string
	Payload []string
}

// Query renders the filter as a URL query string of the form
// "?filter.field=<Filter>&filter.type=<Typ>&filter.val=<p1::p2::...>".
// Note: the parts are not URL-escaped, matching the original behavior.
func (fq FilterQueryInstance) Query() string {
	var joined string
	for i, part := range fq.Payload {
		if i > 0 {
			joined += "::"
		}
		joined += part
	}
	return fmt.Sprintf("?filter.field=%s&filter.type=%s&filter.val=%s", fq.Filter, fq.Typ, joined)
}
// GetLogs streams the logs of a workflow instance over SSE while watching
// the instance itself, and returns the URL from which the instance output
// can be fetched once execution has finished. Two SSE subscriptions are
// used: ".../logs" is printed as entries arrive (background goroutine) and
// the instance endpoint is watched on the calling goroutine until the
// status changes away from "pending".
func GetLogs(cmd *cobra.Command, instance string, query string) (urlOutput string) {
	instanceStatus := "pending"
	urlLogs := fmt.Sprintf("%s/instances/%s/logs%s", UrlPrefix, instance, query)
	clientLogs := sse.NewClient(urlLogs)
	clientLogs.Connection.Transport = &http.Transport{
		TLSClientConfig: GetTLSConfig(),
	}
	cmd.Println("-------INSTANCE LOGS-------")
	cmd.Println(urlLogs)
	cmd.Println("---------------------------")
	AddSSEAuthHeaders(clientLogs)
	logsChannel := make(chan *sse.Event)
	err := clientLogs.SubscribeChan("messages", logsChannel)
	if err != nil {
		log.Fatalf("Failed to subscribe to messages channel: %v\n", err)
	}
	// Get Logs: print each entry as it arrives until the channel yields nil
	// (stream closed / unsubscribed).
	go func() {
		for {
			msg := <-logsChannel
			if msg == nil {
				break
			}
			// Skip heartbeat
			if len(msg.Data) == 0 {
				continue
			}
			var logResp LogResponse
			err = json.Unmarshal(msg.Data, &logResp)
			if err != nil {
				log.Fatalln(err)
			}
			if len(logResp.Results) > 0 {
				for _, edge := range logResp.Results {
					// Column-aligned prefix (workflow/state, color code).
					prefix := ""
					if len(edge.Tags) > 0 {
						prefix = buildPrefix(edge.Tags)
					}
					prefix = printFormated(edge.Level) + prefix
					//nolint:gosmopolitan
					cmd.Printf("%v: %s %s\n", edge.T.In(time.Local).Format("02 Jan 06 15:04 MST"), prefix, edge.Msg)
				}
			}
		}
	}()
	urlInstance := fmt.Sprintf("%s/instances/%s", UrlPrefix, instance)
	clientInstance := sse.NewClient(urlInstance)
	clientInstance.Connection.Transport = &http.Transport{
		TLSClientConfig: GetTLSConfig(),
	}
	AddSSEAuthHeaders(clientInstance)
	channelInstance := make(chan *sse.Event)
	err = clientInstance.SubscribeChan("messages", channelInstance)
	if err != nil {
		Fail(cmd, "Failed to subscribe to messages channel: %v", err)
	}
	// Watch the instance until its status leaves "pending"; then give the
	// log stream a short grace period and unsubscribe both channels.
	for {
		msg := <-channelInstance
		if msg == nil {
			break
		}
		// Skip heartbeat
		if len(msg.Data) == 0 {
			continue
		}
		var instanceResp InstanceResponse
		err = json.Unmarshal(msg.Data, &instanceResp)
		if err != nil {
			log.Fatalf("Failed to read instance response: %v\n", err)
		}
		if instanceResp.Instance.Status != instanceStatus {
			time.Sleep(500 * time.Millisecond)
			instanceStatus = instanceResp.Instance.Status
			clientLogs.Unsubscribe(logsChannel)
			clientInstance.Unsubscribe(channelInstance)
			break
		}
	}
	cmd.Printf("instance completed with status: %s\n", instanceStatus)
	return fmt.Sprintf("%s/instances/%s/output", UrlPrefix, instance)
}
// buildPrefix renders the fixed-width log-line prefix "workflow/state-id"
// (plus "/i-<n>" when a loop-index tag is present), padded with spaces and
// tabs so that log columns line up.
// Note: it mutates tags by defaulting an empty "state-id" to a single
// space, matching the original behavior.
func buildPrefix(tags map[string]string) string {
	if tags["state-id"] == "" {
		tags["state-id"] = " "
	}
	prefix := fmt.Sprintf("%s/%s", tags["workflow"], tags["state-id"])
	if idx, ok := tags["loop-index"]; ok {
		prefix += "/i-" + idx
	}
	width := len(prefix)
	// Pad with spaces up to column 25, then append tabs for the two width
	// thresholds (based on the unpadded width).
	for col := width; col < 25; col++ {
		prefix += " "
	}
	if width < 24 {
		prefix += "\t"
	}
	if width < 32 {
		prefix += "\t"
	}
	return prefix
}
// printFormated maps a log level to the ANSI color escape used for its log
// line: red for "error", magenta for "fatal", and no coloring otherwise.
func printFormated(level string) string {
	switch level {
	case "error":
		return "\033[0;31m"
	case "fatal":
		return "\033[1;95m"
	default:
		// "debug", "info" and unknown levels are printed uncolored.
		return ""
	}
}
|
package gallery
// Image exposes read access to an image's identifier and content.
type Image interface {
	GetId() string
	GetContent() string
}

// imageDto is the JSON-serializable Image implementation.
type imageDto struct {
	Id      string `json:"id"`
	Content string `json:"content"`
}

// CreateImage builds an imageDto from the given id and content.
func CreateImage(id, content string) *imageDto {
	return &imageDto{Id: id, Content: content}
}

// GetId returns the image id.
func (g *imageDto) GetId() string { return g.Id }

// GetContent returns the image content.
func (g *imageDto) GetContent() string { return g.Content }
|
package example
// main demonstrates the comma-ok form of a type assertion: asserting a
// string-holding interface value to float64 fails without panicking.
func main() {
	var i interface{} = "hello"
	// BUG FIX: the original used plain assignment ("f, ok = ..."), which
	// does not compile because neither f nor ok was declared; ":=" declares
	// both.
	f, ok := i.(float64)
	if !ok {
		// don't use f
		return
	}
	_ = f
}
|
package main
import "fmt"
// main runs canJump on a sample input. Expected output: false — index 3
// holds 0 and the farthest reach from the preceding indices is exactly 3,
// so index 4 is unreachable.
func main() {
	a := []int{3, 2, 1, 0, 4}
	fmt.Println(canJump(a))
}
// canJump reports whether the last index is reachable from index 0, where
// nums[i] is the maximum jump length from position i. Greedy: track the
// farthest reachable index; fail as soon as the current index lies beyond
// it. An empty slice is trivially reachable.
func canJump(nums []int) bool {
	reach := 0
	for i, step := range nums {
		if i > reach {
			return false
		}
		if next := i + step; next > reach {
			reach = next
		}
	}
	return true
}
|
package routers
import (
"WhereIsMyDriver/controllers"
"github.com/kataras/iris"
)
// IrisApp builds the iris application and registers the driver routes:
// GET /drivers (list) and PUT /drivers/:id/location (position update).
// NOTE(review): OnErrorCode is called for 404 without any handler —
// confirm whether a custom not-found handler was intended here.
func IrisApp() *iris.Application {
	app := iris.New()
	app.Get("/drivers", controllers.GetDrivers)
	app.Put("/drivers/:id/location", controllers.UpdateLocation)
	app.OnErrorCode(iris.StatusNotFound)
	return app
}
|
// Copyright 2019 The Grafeas Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package config
import (
"errors"
"io/ioutil"
"log"
fernet "github.com/fernet/fernet-go"
"gopkg.in/yaml.v2"
)
// file is the top-level shape of the Grafeas YAML configuration file:
// a single "grafeas" key holding the whole configuration.
type file struct {
	Grafeas *GrafeasConfig `yaml:"grafeas"`
}

// ServerConfig is the Grafeas server configuration.
type ServerConfig struct {
	Address            string   `yaml:"address"`              // Endpoint address, e.g. localhost:8080 or unix:///var/run/grafeas.sock
	CertFile           string   `yaml:"certfile"`             // A PEM encoded certificate file
	KeyFile            string   `yaml:"keyfile"`              // A PEM encoded private key file
	CAFile             string   `yaml:"cafile"`               // A PEM encoded CA's certificate file
	CORSAllowedOrigins []string `yaml:"cors_allowed_origins"` // Permitted CORS origins.
}

// EmbeddedStoreConfig is the configuration for the embedded store.
type EmbeddedStoreConfig struct {
	Path string `yaml:"path"` // Path is the folder path to storage files
}

// PgSQLConfig is the configuration for the PostgreSQL store.
type PgSQLConfig struct {
	Host     string `yaml:"host"`
	DbName   string `yaml:"dbname"`
	User     string `yaml:"user"`
	Password string `yaml:"password"`
	// Valid sslmodes: disable, allow, prefer, require, verify-ca, verify-full.
	// See https://www.postgresql.org/docs/current/static/libpq-connect.html for details
	SSLMode       string `yaml:"sslmode"`
	PaginationKey string `yaml:"paginationkey"` // Fernet key used to sign pagination tokens
}

// GrafeasConfig is the global configuration for an instance of Grafeas.
// Exactly one of the storage configs below is consulted, selected by
// StorageType.
type GrafeasConfig struct {
	API            *ServerConfig        `yaml:"api"`
	StorageType    string               `yaml:"storage_type"` // Supported storage types are "memstore", "postgres" and "embedded"
	PgSQLConfig    *PgSQLConfig         `yaml:"postgres"`
	EmbeddedConfig *EmbeddedStoreConfig `yaml:"embedded"` // EmbeddedConfig is the embedded store config
}
// defaultConfig returns the fallback configuration used when no config
// file is supplied: an in-memory store served on all interfaces at :8080,
// with no TLS material configured.
func defaultConfig() *GrafeasConfig {
	api := &ServerConfig{Address: "0.0.0.0:8080"}
	return &GrafeasConfig{
		API:         api,
		StorageType: "memstore",
		PgSQLConfig: &PgSQLConfig{},
	}
}
// LoadConfig creates a config from a YAML file. If fileName is an empty
// string a default config is returned. For postgres storage, a missing
// pagination key is generated on the fly and a present one is validated
// as a fernet key.
func LoadConfig(fileName string) (*GrafeasConfig, error) {
	if fileName == "" {
		return defaultConfig(), nil
	}
	data, err := ioutil.ReadFile(fileName)
	if err != nil {
		return nil, err
	}
	var configFile file
	if err := yaml.Unmarshal(data, &configFile); err != nil {
		return nil, err
	}
	config := configFile.Grafeas
	// Fix: a YAML file without a top-level "grafeas" key left config nil
	// and the StorageType access below panicked.
	if config == nil {
		return nil, errors.New("config file does not contain a grafeas section")
	}
	if config.StorageType == "postgres" {
		// Fix: guard against a missing "postgres" section for the same reason.
		if config.PgSQLConfig == nil {
			return nil, errors.New("postgres storage selected but no postgres config given")
		}
		// Generate a pagination key if none is provided.
		if config.PgSQLConfig.PaginationKey == "" {
			log.Println("pagination key is empty, generating...")
			var key fernet.Key
			if err := key.Generate(); err != nil {
				return nil, err
			}
			config.PgSQLConfig.PaginationKey = key.Encode()
		} else if _, err := fernet.DecodeKey(config.PgSQLConfig.PaginationKey); err != nil {
			// Error strings are lowercase without punctuation per Go convention.
			return nil, errors.New("invalid pagination key; must be 32-bit URL-safe base64")
		}
	}
	return config, nil
}
|
package main
import "fmt"
// Graph is a 2-D grid of single-character cells, indexed [row][col].
type Graph [][]string

// NewGraph allocates a square grid of "." cells sized to cover the
// sensors' coordinate range: rows and columns both run from
// sensors.Min() to sensors.Max()+sensors.Trim(), inclusive.
// NOTE(review): the two Println calls below look like leftover debug
// output — confirm before relying on this function's stdout.
func NewGraph(sensors Sensors) *Graph {
	graph := Graph{}
	trim := sensors.Trim()
	max := sensors.Max()
	min := sensors.Min()
	fmt.Println(max + trim)
	fmt.Println(min)
	for y := min; y <= max+trim; y++ {
		row := []string{}
		for x := min; x <= max+trim; x++ {
			row = append(row, ".")
		}
		graph = append(graph, row)
	}
	return &graph
}
// Set writes val into the cell at row y, column x.
func (g *Graph) Set(y int, x int, val string) {
	grid := *g
	grid[y][x] = val
}
// Get reads the cell at row y, column x.
func (g *Graph) Get(y int, x int) string {
	grid := *g
	return grid[y][x]
}
// Draw prints rows min..max of the grid, prefixing each row with its
// absolute y coordinate and its trim-adjusted value.
func (g *Graph) Draw(min int, max int, trim int) {
	for y := min; y <= max; y++ {
		fmt.Println()
		fmt.Printf("%d (%d) ", y, y-trim)
		for x := min; x <= max; x++ {
			fmt.Print(g.Get(y, x))
		}
	}
}
// PlotSensors marks every sensor position with "S" and its paired
// beacon position with "B".
func (g *Graph) PlotSensors(s Sensors) {
	for _, sn := range s {
		g.Set(sn.Location.Y, sn.Location.X, "S")
		g.Set(sn.Beacon.Y, sn.Beacon.X, "B")
	}
}
|
package admin
import (
"fmt"
"github.com/billyninja/pgtools/scanner"
"html/template"
"log"
"time"
)
// col2html renders a column definition (e.g. header or label) to safe HTML.
type col2html func(cl *scanner.Column) template.HTML

// val2html renders a column definition plus a concrete cell value to safe HTML.
type val2html func(cl *scanner.Column, value interface{}) template.HTML
// InputHTML renders an HTML form control for the given column, pre-filled
// with value. Text columns become a <textarea>; everything else becomes an
// <input> whose type attribute comes from field_type_translation. NOT NULL
// columns are marked required and character columns carry their max length.
func InputHTML(cl *scanner.Column, value interface{}) template.HTML {
	archtype, fieldtype := field_type_translation(cl.Type)
	required := ""
	if cl.Nullable == "NO" {
		required = `required="required"`
	}
	maxLength := ""
	if cl.CharMaxLength != nil {
		maxLength = fmt.Sprintf(`max_length="%d"`, *cl.CharMaxLength)
	}
	valueAttr := ""
	if value != nil {
		// Go switch cases do not fall through; the original's explicit
		// `break` statements were redundant and are removed.
		switch v := value.(type) {
		case bool:
			if v {
				valueAttr = `checked="checked"`
			} else {
				valueAttr = `checked=""`
			}
		case string:
			// Textareas take the value as element content, not an attribute.
			if archtype == "textarea" {
				valueAttr = v
			} else {
				valueAttr = `value="` + v + `"`
			}
		case float64, float32:
			valueAttr = fmt.Sprintf(`value="%.2f"`, v)
		case int, uint8, int8, uint16, int16, uint32, int32, int64:
			valueAttr = fmt.Sprintf(`value="%d"`, v)
		case time.Time:
			valueAttr = fmt.Sprintf(`value="%s"`, v.Format("2006-01-02T15:04:05"))
		default:
			valueAttr = fmt.Sprintf(`value="%s"`, v)
		}
	}
	if archtype == "textarea" {
		return template.HTML(fmt.Sprintf(`<textarea name="%s" %s>%s</textarea>`, cl.Name, required, valueAttr))
	}
	return template.HTML(fmt.Sprintf(`<input type="%s" name="%s" %s %s %s/>`, fieldtype, cl.Name, maxLength, required, valueAttr))
}
// ThHTML renders the column name as a table-header cell.
func ThHTML(cl *scanner.Column) template.HTML {
	return template.HTML(fmt.Sprintf("<th>%s</th>", cl.Name))
}
// LabelHTML renders the column name as a form <label>.
func LabelHTML(cl *scanner.Column) template.HTML {
	return template.HTML(fmt.Sprintf("<label>%s</label>", cl.Name))
}
// LabelAndInputHTML renders a form group containing the column's label
// followed by its input control.
func LabelAndInputHTML(cl *scanner.Column, value interface{}) template.HTML {
	// TODO-improvement: configurable label-class, input class
	return `<div class="form-group"><div class="">` +
		LabelHTML(cl) +
		`</div><div class="">` +
		InputHTML(cl, value) +
		`</div></div>`
}
// TdHTML renders value as a table cell, formatted via format_value.
func TdHTML(cl *scanner.Column, value interface{}) template.HTML {
	cell := "<td>" + format_value(value) + "</td>"
	return template.HTML(cell)
}
// field_type_translation maps a PostgreSQL column type to the pair
// (archtype, field type): archtype selects <input> vs <textarea>, and the
// second value becomes the input's type attribute. Unknown column types
// are logged and fall back to a plain text input.
func field_type_translation(column_type string) (string, string) {
	switch column_type {
	case "character varying":
		return "input", "text"
	// Both timestamp flavors render the same date input.
	case "timestamp without time zone", "timestamp with time zone":
		return "input", "date"
	case "numeric":
		return "input", "number"
	case "text":
		return "textarea", "textarea"
	case "boolean":
		return "input", "checkbox"
	default:
		log.Printf("\n\nUnmapped PSQL field type %s\n\n", column_type)
		return "input", "text"
	}
	// Fix: the original had an unreachable `return "err", "err"` after the
	// fully-terminating switch (flagged by go vet); removed.
}
func format_value(value interface{}) string {
switch v := value.(type) {
case bool:
if v == true {
return "true"
} else {
return "false"
}
case string:
return v
case float64, float32:
return fmt.Sprintf(`%.2f`, v)
case int, uint8, int8, uint16, int16, uint32, int32, int64:
return fmt.Sprintf(`%d`, v)
case time.Time:
return fmt.Sprintf(`%s`, v.Format("2006-01-02T15:04:05"))
default:
return fmt.Sprintf(`%s`, v)
}
return "[---]"
}
|
package main
import (
"log"
"room"
)
// main wires up the chat-room RPC service:
//   - runs an RPC worker against a fresh listener,
//   - starts one debug room and forwards its first message to the listener,
//   - then, for each CreateNewRoom signal, starts a new chat room and
//     forwards its first message as well.
//
// NOTE(review): each forwarding goroutine relays exactly one message
// (rpcListen.Msg <- <-room.Msg) and then exits — confirm a forwarding
// loop wasn't intended.
func main() {
	log.SetFlags(log.Llongfile)
	rpcListen := room.NewRpcListener()
	go func() {
		room.RpcWorker(rpcListen)
	}()
	go func() {
		debugRoom := room.NewDebugChatRoom()
		debugRoom.Start()
		// Relay the debug room's first message to the RPC listener.
		rpcListen.Msg <- <-debugRoom.Msg
	}()
	for {
		// Block until a client asks for a new room.
		<-rpcListen.CreateNewRoom
		go func() {
			newRoom := room.NewChatRoom()
			newRoom.Start()
			rpcListen.Msg <- <-newRoom.Msg
		}()
	}
}
|
package migrations
import (
"github.com/go-pg/migrations"
log "github.com/sirupsen/logrus"
)
// init registers the 20171010114357_create-locales migration with go-pg.
// Up: creates the locales table — one translated message per (code, lang)
// pair, unique on that pair, with secondary indexes on code and on lang.
// Down: drops the table.
func init() {
	migrations.Register(func(db migrations.DB) error {
		log.Info("migrate 20171010114357_create-locales")
		_, err := db.Exec(`
	CREATE TABLE locales (
		id BIGSERIAL PRIMARY KEY,
		code VARCHAR(255) NOT NULL,
		lang VARCHAR(8) NOT NULL DEFAULT 'en-US',
		message TEXT NOT NULL,
		created_at TIMESTAMP WITHOUT TIME ZONE NOT NULL DEFAULT now(),
		updated_at TIMESTAMP WITHOUT TIME ZONE NOT NULL
	);
	CREATE UNIQUE INDEX idx_locales_code_lang ON locales (code, lang);
	CREATE INDEX idx_locales_code ON locales (code);
	CREATE INDEX idx_locales_lang ON locales (lang);
	`)
		return err
	}, func(db migrations.DB) error {
		log.Info("rollback 20171010114357_create-locales")
		_, err := db.Exec(`DROP TABLE locales;`)
		return err
	})
}
|
package facade
import (
"github.com/stretchr/testify/assert"
"testing"
)
// Test exercises the bidding facade end-to-end: a bidder assembled from a
// BidOptimizer, Predictor and Detargeter must price an empty Request at 50.
// NOTE(review): the expected value 50 is determined by the collaborators'
// implementations, which live elsewhere in this package — confirm there
// before changing this assertion.
func Test(t *testing.T) {
	bidder := BidderImpl{bidOptimizer: &BidOptimizer{}, predictor: &Predictor{}, detargeter: &Detargeter{}}
	bidPrice := bidder.bid(&Request{})
	assert.Equal(t, float32(50), bidPrice)
}
|
package handshaketime
import (
"net"
"time"
"sync"
)
// SynPacket is one observed TCP SYN: source address, arrival time, and
// the TCP sequence number used to pair it with the completing ACK.
type SynPacket struct {
	ip             net.IP
	timeReceived   time.Time
	sequenceNumber uint32
}

// HandshakeTime is a finished measurement: how long the handshake with
// the given peer took.
type HandshakeTime struct {
	ip   net.IP
	time time.Duration
}

// DatabaseProxy is the storage interface used by the capture pipeline:
// pending SYNs keyed by sequence number, plus the list of completed
// handshake durations.
type DatabaseProxy interface {
	saveSynPacket(SynPacket)
	getSynPacket(uint32) (SynPacket, error)
	deleteSynPacket(SynPacket)
	saveHandshakeTime(HandshakeTime)
	getHandshakeTimes() []HandshakeTime
}

// SynPacketLite is SynPacket without the sequence number, which serves as
// the map key and would be redundant inside the stored value.
type SynPacketLite struct {
	ip           net.IP
	timeReceived time.Time
}

// MemoryDB is an in-memory DatabaseProxy guarded by the embedded RWMutex.
type MemoryDB struct {
	sync.RWMutex
	synPacketMap       map[uint32]SynPacketLite
	handshakeTimeSlice []HandshakeTime
}

// SynPacketNotFound is the error returned when a sequence number has no
// pending SYN entry.
type SynPacketNotFound struct{}

// Error implements the error interface.
func (e SynPacketNotFound) Error() string {
	return "Syn Packet not found"
}

// newSynPacketLite strips the sequence number from a SynPacket for storage.
func newSynPacketLite(synPacket SynPacket) SynPacketLite {
	return SynPacketLite{ip: synPacket.ip, timeReceived: synPacket.timeReceived}
}

// synPacketFromLite reattaches the sequence number to a stored entry.
func synPacketFromLite(sequenceNumber uint32, syn SynPacketLite) SynPacket {
	return SynPacket{ip: syn.ip, timeReceived: syn.timeReceived, sequenceNumber: sequenceNumber}
}
// saveSynPacket records the SYN, keyed by its TCP sequence number.
func (db *MemoryDB) saveSynPacket(synPacket SynPacket) {
	db.Lock()
	defer db.Unlock()
	db.synPacketMap[synPacket.sequenceNumber] = newSynPacketLite(synPacket)
}
// getSynPacket looks up a previously stored SYN by sequence number,
// returning SynPacketNotFound when no entry exists.
func (db *MemoryDB) getSynPacket(sequenceNumber uint32) (SynPacket, error) {
	db.RLock()
	defer db.RUnlock()
	if syn, ok := db.synPacketMap[sequenceNumber]; ok {
		return synPacketFromLite(sequenceNumber, syn), nil
	}
	return SynPacket{}, SynPacketNotFound{}
}
// deleteSynPacket removes the pending entry for the packet's sequence number.
func (db *MemoryDB) deleteSynPacket(synPacket SynPacket) {
	db.Lock()
	defer db.Unlock()
	delete(db.synPacketMap, synPacket.sequenceNumber)
}
// cleanSynPacket evicts pending SYN entries older than one second; SYNs
// that never see a matching ACK would otherwise accumulate forever.
// (Deleting map entries while ranging is safe in Go.)
func (db *MemoryDB) cleanSynPacket() {
	db.Lock()
	defer db.Unlock()
	for seqNum, packet := range db.synPacketMap {
		// time.Since is the idiomatic spelling of time.Now().Sub(t).
		if time.Since(packet.timeReceived) > time.Second {
			delete(db.synPacketMap, seqNum)
		}
	}
}
// saveHandshakeTime appends a completed handshake measurement.
// Fix: the original appended without taking the embedded mutex, which is a
// data race against getHandshakeTimes and against concurrent savers —
// every other MemoryDB method locks.
func (db *MemoryDB) saveHandshakeTime(handshakeTime HandshakeTime) {
	db.Lock()
	defer db.Unlock()
	db.handshakeTimeSlice = append(db.handshakeTimeSlice, handshakeTime)
}
// getHandshakeTimes returns a snapshot of all recorded handshake times.
// Fix: the original read the slice without the lock and returned the live
// backing array; reading under RLock and copying prevents callers from
// racing with later appends.
func (db *MemoryDB) getHandshakeTimes() []HandshakeTime {
	db.RLock()
	defer db.RUnlock()
	out := make([]HandshakeTime, len(db.handshakeTimeSlice))
	copy(out, db.handshakeTimeSlice)
	return out
}
func createMemoryDB() *MemoryDB {
db := new(MemoryDB)
db.synPacketMap = make(map[uint32]SynPacketLite)
return db
} |
// Package bitvec is bit-vector with atomic access
package bitvec
import "sync/atomic"
// ABitVec is a fixed-size bit vector backed by 64-bit words, offering
// both plain and atomic access to individual bits.
type ABitVec []uint64

// NewABitVec returns a bitvector able to hold size bits, rounded up to a
// whole number of 64-bit words.
func NewABitVec(size int) ABitVec {
	return make(ABitVec, uint(size+63)/64)
}

// Get reports whether the given bit is set (non-atomic).
func (b ABitVec) Get(bit uint32) bool {
	word, mask := bit/64, uint64(1)<<(bit%64)
	return b[word]&mask != 0
}

// Set sets the given bit (non-atomic).
func (b ABitVec) Set(bit uint32) {
	word, mask := bit/64, uint64(1)<<(bit%64)
	b[word] |= mask
}

// AGet atomically reports whether the given bit is set.
func (b ABitVec) AGet(bit uint32) bool {
	word, mask := bit/64, uint64(1)<<(bit%64)
	return atomic.LoadUint64(&b[word])&mask != 0
}

// ASet atomically sets the given bit, retrying CAS until either the bit
// is observed already set or the update lands.
func (b ABitVec) ASet(bit uint32) {
	addr, mask := &b[bit/64], uint64(1)<<(bit%64)
	for {
		old := atomic.LoadUint64(addr)
		if old&mask != 0 || atomic.CompareAndSwapUint64(addr, old, old|mask) {
			return
		}
	}
}
|
package handlers
import (
"net/http"
"github.com/saurabmish/Coffee-Shop/data"
)
// Modify handles the PUT endpoint: it replaces the stored product with the
// one the validation middleware placed in the request context under
// KeyProduct. Responds 404 when the id is unknown, 500 on any other update
// failure, and 204 on success.
func (p Products) Modify(w http.ResponseWriter, r *http.Request) {
	p.l.Println("[INFO] Endpoint for PUT request")
	w.Header().Add("Content-Type", "application/json")
	id := getProductID(r)
	p.l.Println("[DEBUG] Retrieved product ID from URL: ", id)
	product := r.Context().Value(KeyProduct{}).(data.Product)
	p.l.Println("[DEBUG] Retrieved product from data store")
	err := data.UpdateProduct(id, &product)
	if err == data.ErrProductNotFound {
		w.WriteHeader(http.StatusNotFound)
		data.ToJSON(&GenericError{Message: "Couldn't update product; ID not found ..."}, w)
		p.l.Println("[ERROR] Fetching product with given ID ...", err)
		return
	}
	// Fix: the original fell through to 204 No Content for *any*
	// non-NotFound error, silently reporting success on failure.
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		data.ToJSON(&GenericError{Message: "Couldn't update product ..."}, w)
		p.l.Println("[ERROR] Updating product ...", err)
		return
	}
	w.WriteHeader(http.StatusNoContent)
}
|
package request
import (
"errors"
"io/ioutil"
"net/http"
"net/url"
)
// Request is a minimal HTTP GET client that tolerates scheme-less URLs by
// defaulting them to https.
type Request struct{}

// ErrParseURL is returned when the given string cannot be turned into a
// usable URL. Fix: the message was ungrammatical and punctuated
// ("error to parse url string!"); Go error strings are lowercase without
// trailing punctuation. Callers compare by identity, so this is safe.
var ErrParseURL = errors.New("unable to parse url")

// Get fetches rawUrl (normalized via validateUrl) and returns the full
// response body.
func (r *Request) Get(rawUrl string) ([]byte, error) {
	// Fix: the local was named `url`, shadowing the net/url package.
	target, err := validateUrl(rawUrl)
	if err != nil {
		return nil, ErrParseURL
	}
	resp, err := http.Get(target)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return ioutil.ReadAll(resp.Body)
}

// validateUrl parses rawUrl, fills in a default https scheme when none is
// present, and rejects strings with neither host nor path.
func validateUrl(rawUrl string) (string, error) {
	u, err := url.Parse(rawUrl)
	if err != nil {
		return "", err
	}
	const defaultScheme = "https"
	if u.Scheme == "" {
		u.Scheme = defaultScheme
	}
	if u.Host == "" && u.Path == "" {
		return "", ErrParseURL
	}
	return u.String(), nil
}

// New returns a ready-to-use Request client.
func New() *Request {
	return &Request{}
}
|
package icarus
import (
"sync"
"github.com/luuphu25/data-sidecar/util"
)
// IcarusStore holds sets of metrics and retires them as necessary.
// It is a ring of Keep metric maps: Index points at the slot currently
// receiving inserts, and Roll advances it, discarding the oldest slot.
type IcarusStore struct {
	*sync.Mutex // guards Index and Metrics
	Keep        int // number of generations retained (the lookback window)
	Index       int // slot currently receiving inserts
	Metrics     []map[string]util.Metric // one map per slot, keyed by serialized label set
}
// NewRollingStore returns a rolling store retaining lookback generations
// of metrics, each starting as an empty map.
func NewRollingStore(lookback int) *IcarusStore {
	store := IcarusStore{
		Mutex:   &sync.Mutex{},
		Keep:    lookback,
		Index:   0,
		Metrics: make([]map[string]util.Metric, lookback),
	}
	for i := range store.Metrics {
		store.Metrics[i] = make(map[string]util.Metric)
	}
	return &store
}
// Roll advances the ring to the next slot and empties it, discarding that
// slot's previous (oldest) generation of metrics.
func (r *IcarusStore) Roll() {
	r.Lock()
	defer r.Unlock()
	next := (r.Index + 1) % r.Keep
	r.Metrics[next] = make(map[string]util.Metric)
	r.Index = next
}
// Insert stores met in the current slot, keyed by its serialized label set.
func (r *IcarusStore) Insert(met util.Metric) {
	r.Lock()
	defer r.Unlock()
	key := util.MapSSToS(met.Desc)
	r.Metrics[r.Index][key] = met
}
// Dump flattens every retained slot into a single []Metric, visiting the
// oldest slot first so a label present in several generations keeps only
// its most recent value. Result order is unspecified (map iteration).
func (r *IcarusStore) Dump() []util.Metric {
	r.Lock()
	defer r.Unlock()
	merged := make(map[string]util.Metric)
	for step := 1; step <= r.Keep; step++ {
		for key, val := range r.Metrics[(r.Index+step)%r.Keep] {
			merged[key] = val
		}
	}
	out := make([]util.Metric, 0, len(merged))
	for _, val := range merged {
		out = append(out, val)
	}
	return out
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.