text stringlengths 11 4.05M |
|---|
package git
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"os"
"strings"
"sync"
"testing"
"testing/fstest"
git "github.com/go-git/go-git/v5"
"github.com/google/go-cmp/cmp"
)
// mockGitSvc is a test double for the git service used by BasicClient.
// It records the options passed to each call and returns injected errors.
type mockGitSvc struct {
	cloneOpts   *git.CloneOptions // last options passed to PlainClone
	fetchOpts   *git.FetchOptions // last options passed to Fetch
	plainOpened bool              // set once PlainOpen has been called
	pcErr       error             // error to return from PlainClone
	poErr       error             // error to return from PlainOpen
	fetchErr    error             // error to return from Fetch
	wtErr       error             // error to return from Worktree
	coErr       error             // error to return from Checkout
}
// PlainClone records o and returns the injected pcErr (nil on success).
func (g *mockGitSvc) PlainClone(path string, isBare bool, o *git.CloneOptions) (*git.Repository, error) {
	g.cloneOpts = o
	return nil, g.pcErr
}

// PlainOpen marks the mock as opened. Paths ending in "myrepo3" yield a
// non-nil repository so Fetch can exercise its already-up-to-date branch.
func (g *mockGitSvc) PlainOpen(path string) (*git.Repository, error) {
	fmt.Println(path) // debug output kept to preserve behavior (and the fmt import)
	g.plainOpened = true
	switch {
	case g.poErr != nil:
		return nil, g.poErr
	case strings.HasSuffix(path, "myrepo3"):
		return &git.Repository{}, nil
	default:
		return nil, nil
	}
}

// Fetch records o; a non-nil repository reports NoErrAlreadyUpToDate.
func (g *mockGitSvc) Fetch(r *git.Repository, o *git.FetchOptions) error {
	g.fetchOpts = o
	switch {
	case g.fetchErr != nil:
		return g.fetchErr
	case r != nil:
		return git.NoErrAlreadyUpToDate
	default:
		return nil
	}
}

// Worktree returns the injected wtErr, if any.
func (g *mockGitSvc) Worktree(r *git.Repository) (*git.Worktree, error) {
	if g.wtErr != nil {
		return nil, g.wtErr
	}
	return nil, nil
}

// Checkout returns the injected coErr, if any.
func (g *mockGitSvc) Checkout(w *git.Worktree, opts *git.CheckoutOptions) error {
	return g.coErr
}
// newGitClient builds a BasicClient over an in-memory filesystem that holds
// three repo manifests plus one directory entry, wired to a fresh mockGitSvc.
func newGitClient() (BasicClient, *mockGitSvc) {
	mapFs := fstest.MapFS{
		"aDir/aPath": &fstest.MapFile{Mode: os.ModeDir},
	}
	for _, p := range []string{
		"myrepo/path/to/manifest.yaml",
		"myrepo2/path/to/manifest.yaml",
		"myrepo3/path/to/manifest.yaml",
	} {
		mapFs[p] = &fstest.MapFile{Data: []byte("my bytes")}
	}
	svc := &mockGitSvc{}
	cl := BasicClient{
		auth: nil,
		mu:   &sync.Mutex{},
		git:  svc,
		fs:   mapFs,
	}
	return cl, svc
}
// progressWriter is a no-op sink for git progress output in tests.
type progressWriter struct{}

// Write discards p and reports all of it as consumed. Returning 0 with a nil
// error (as the original did) violates the io.Writer contract and can make
// callers loop forever or fail with io.ErrShortWrite.
func (pw *progressWriter) Write(p []byte) (n int, err error) {
	return len(p), nil
}
// TestGetManifestErrors injects an error into each git-service stage in turn
// and asserts GetManifestFile bubbles it up; the last case checks that a
// directory path is rejected outright.
func TestGetManifestErrors(t *testing.T) {
	tests := []struct {
		name   string
		repo   string
		path   string
		pc     error  // injected PlainClone error
		po     error  // injected PlainOpen error
		fetch  error  // injected Fetch error
		wt     error  // injected Worktree error
		co     error  // injected Checkout error
		errStr string // substring expected in the returned error
	}{
		{
			name: "bubbles PlainClone error",
			repo: "plainclone",
			pc:   errors.New("PlainClone err"),
		},
		{
			name: "bubbles PlainOpen error",
			po:   errors.New("PlainOpen err"),
		},
		{
			name:  "bubbles Fetch error",
			fetch: errors.New("Fetch err"),
		},
		{
			name: "bubbles WorkTree error",
			wt:   errors.New("WorkTree err"),
		},
		{
			name: "bubbles Checkout error",
			co:   errors.New("Checkout err"),
		},
		{
			name:   "rejects when path is a dir",
			repo:   "aDir",
			path:   "aPath",
			errStr: "path provided is not a file",
		},
	}
	// One shared client; each subtest re-seeds the mock's error fields.
	cl, svc := newGitClient()
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			svc.pcErr = tt.pc
			svc.poErr = tt.po
			svc.fetchErr = tt.fetch
			svc.wtErr = tt.wt
			svc.coErr = tt.co
			// Unspecified cases fall back to the seeded fixture repo/path.
			repo := defaultString(tt.repo, "myrepo3")
			path := defaultString(tt.path, "path/to/manifest.yaml")
			_, err := cl.GetManifestFile(repo, "123", path)
			// Whichever stage error was injected must be wrapped in err.
			for _, want := range []error{tt.pc, tt.po, tt.fetch, tt.wt, tt.co} {
				if want != nil && !errors.Is(err, want) {
					t.Errorf("wanted: %+v got: %+v", want, err)
				}
			}
			// NOTE(review): err is dereferenced unconditionally here; if
			// errStr is set while err is nil this panics — confirm the dir
			// case always errors.
			if tt.errStr != "" && !strings.Contains(err.Error(), tt.errStr) {
				t.Errorf("wanted: %+v got: %+v\n", tt.errStr, err)
			}
		})
	}
}
// TestGetManifestFile covers the happy paths: manifest already on the local
// filesystem, a fresh clone, and a fetch that is already up to date. It also
// verifies the progress writer option is threaded through clone/fetch.
func TestGetManifestFile(t *testing.T) {
	tests := []struct {
		name       string
		repository string
		commitHash string
		path       string
		errResult  bool   // whether an error is expected
		res        string // expected file contents
	}{
		{
			name:       "get manifest exists on fs success",
			repository: "myrepo",
			commitHash: "123",
			path:       "path/to/manifest.yaml",
			errResult:  false,
			res:        "my bytes",
		},
		{
			name:       "get manifest new clone success",
			repository: "myrepo2",
			commitHash: "123",
			path:       "path/to/manifest.yaml",
			errResult:  false,
			res:        "my bytes",
		},
		{
			name:       "get manifest fetch already updated",
			repository: "myrepo3",
			commitHash: "123",
			path:       "path/to/manifest.yaml",
			errResult:  false,
			res:        "my bytes",
		},
	}
	pw := &progressWriter{}
	gitClient, gitSvc := newGitClient()
	// Apply the functional option directly to the already-built client.
	WithProgressWriter(pw)(&gitClient)
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			res, err := gitClient.GetManifestFile(tt.repository, tt.commitHash, tt.path)
			if err != nil {
				if !tt.errResult {
					t.Errorf("\ndid not expect error, got: %v", err)
				}
			} else {
				if tt.errResult {
					t.Errorf("\nexpected error")
				}
				if !cmp.Equal(string(res), tt.res) {
					t.Errorf("\nwant: %v\n got: %v", tt.res, string(res))
				}
			}
			// NOTE(review): cloneOpts is dereferenced when PlainOpen was not
			// used; this relies on GetManifestFile always cloning or opening.
			if !gitSvc.plainOpened && gitSvc.cloneOpts.Progress != pw {
				t.Errorf("\ncloneOpts Progress not passed through: want: %v\n got: %v\n", pw, gitSvc.cloneOpts.Progress)
			}
			if gitSvc.fetchOpts.Progress != pw {
				t.Errorf("\nfetchOpts Progress not passed through: want: %v\n got: %v\n", pw, gitSvc.fetchOpts.Progress)
			}
		})
	}
}
// TestNewClient covers the SSH and HTTPS client constructors and the
// functional-options plumbing.
func TestNewClient(t *testing.T) {
	t.Run("NewSSHBasicClient creates client with ssh auth with valid PEM", func(t *testing.T) {
		tmp, err := os.CreateTemp("", "tmpssh*.pem")
		assertNoErr(t, err)
		defer tmp.Close()
		// Fix: the temp key file was never deleted and accumulated across runs.
		defer os.Remove(tmp.Name())
		// Generate a throwaway RSA key and PEM-encode it into the temp file.
		pk, _ := rsa.GenerateKey(rand.Reader, 2048)
		asn := x509.MarshalPKCS1PrivateKey(pk)
		err = pem.Encode(tmp, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: asn})
		assertNoErr(t, err)
		cl, err := NewSSHBasicClient(tmp.Name())
		assertNoErr(t, err)
		want := "ssh-public-keys"
		if cl.auth.Name() != want {
			t.Errorf("client auth, want: %s got: %s\n", want, cl.auth.Name())
		}
	})
	t.Run("NewSSHBasicClient rejects with invalid PEM", func(t *testing.T) {
		// Nonexistent file: constructor must fail.
		_, err := NewSSHBasicClient("fakefile.pem")
		if err == nil {
			t.Error("expected error, received nil")
		}
	})
	t.Run("NewHTTPSBasicClient creates client with http auth", func(t *testing.T) {
		cl, err := NewHTTPSBasicClient("user", "pass")
		assertNoErr(t, err)
		want := "http-basic-auth"
		if cl.auth.Name() != want {
			t.Errorf("client auth, want: %s got: %s\n", want, cl.auth.Name())
		}
	})
	t.Run("NewHTTPSBasicClient passes opts", func(t *testing.T) {
		pw := &progressWriter{}
		cl, err := NewHTTPSBasicClient("user", "pass", WithProgressWriter(pw))
		assertNoErr(t, err)
		if cl.pw != pw {
			t.Errorf("want: %+v got: %+v\n", pw, cl.pw)
		}
	})
}
// assertNoErr fails the test (non-fatally) when err is non-nil.
func assertNoErr(t *testing.T, err error) {
	t.Helper() // report the caller's line, not this helper's
	if err != nil {
		t.Errorf("unexpected err: %+v\n", err)
	}
}
// defaultString returns in unless it is empty, in which case def is returned.
func defaultString(in, def string) string {
	if in != "" {
		return in
	}
	return def
}
|
package models
import(
"encoding/json"
)
// HostTypeEnum enumerates the supported host operating systems.
type HostTypeEnum int

// HostTypeEnum values (wire strings in the comments).
const (
	HostType_KLINUX   HostTypeEnum = 1 + iota // "kLinux"
	HostType_KWINDOWS                         // "kWindows"
	HostType_KAIX                             // "kAix"
	HostType_KSOLARIS                         // "kSolaris"
)

// MarshalJSON encodes the enum as its string representation.
func (r HostTypeEnum) MarshalJSON() ([]byte, error) {
	return json.Marshal(HostTypeEnumToValue(r))
}

// UnmarshalJSON decodes a JSON string into the enum. Unknown strings map to
// HostType_KLINUX (the converter's default).
func (r *HostTypeEnum) UnmarshalJSON(data []byte) error {
	var s string
	// Fix: the original ignored this error, silently setting *r to the
	// default on malformed input.
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	*r = HostTypeEnumFromValue(s)
	return nil
}

// HostTypeEnumToValue converts the enum to its wire string; unknown values
// fall back to "kLinux".
func HostTypeEnumToValue(hostTypeEnum HostTypeEnum) string {
	switch hostTypeEnum {
	case HostType_KLINUX:
		return "kLinux"
	case HostType_KWINDOWS:
		return "kWindows"
	case HostType_KAIX:
		return "kAix"
	case HostType_KSOLARIS:
		return "kSolaris"
	default:
		return "kLinux"
	}
}

// HostTypeEnumArrayToValue converts a slice of enums to their wire strings.
func HostTypeEnumArrayToValue(hostTypeEnum []HostTypeEnum) []string {
	convArray := make([]string, len(hostTypeEnum))
	for i, v := range hostTypeEnum {
		convArray[i] = HostTypeEnumToValue(v)
	}
	return convArray
}

// HostTypeEnumFromValue parses a wire string; unknown strings fall back to
// HostType_KLINUX.
func HostTypeEnumFromValue(value string) HostTypeEnum {
	switch value {
	case "kLinux":
		return HostType_KLINUX
	case "kWindows":
		return HostType_KWINDOWS
	case "kAix":
		return HostType_KAIX
	case "kSolaris":
		return HostType_KSOLARIS
	default:
		return HostType_KLINUX
	}
}
|
package examples
import (
"bytes"
"io"
"os"
"github.com/go-echarts/go-echarts/v2/charts"
"github.com/go-echarts/go-echarts/v2/opts"
"github.com/go-echarts/go-echarts/v2/render"
tpls "github.com/go-echarts/go-echarts/v2/templates"
)
// copy from go-echarts/templates/header.go
// Now I want to customize my own Header (or tpls.BaseTpl / tpls.ChartTpl) template
// HeaderTpl overrides go-echarts' built-in "header" template with a custom
// <head> section; it is combined with tpls.BaseTpl/tpls.ChartTpl in Render.
var HeaderTpl = `
{{ define "header" }}
<head>
<meta charset="utf-8">
<title>{{ .PageTitle }} --> This is my own style template 🐶</title>
{{- range .JSAssets.Values }}
<script src="{{ . }}"></script>
{{- end }}
{{- range .CSSAssets.Values }}
<link href="{{ . }}" rel="stylesheet">
{{- end }}
</head>
{{ end }}
`
// myOwnRender renders a chart with the customized HeaderTpl, running the
// given hooks (e.g. chart validation) before each render.
type myOwnRender struct {
	c      interface{} // the chart to render
	before []func()    // hooks invoked at the start of Render
}

// NewMyOwnRender wraps c in a renderer that uses the custom header template.
func NewMyOwnRender(c interface{}, before ...func()) render.Renderer {
	return &myOwnRender{c: c, before: before}
}
// Render executes the before-hooks, renders the chart with the customized
// template set, and writes the result to w.
func (r *myOwnRender) Render(w io.Writer) error {
	for _, hook := range r.before {
		hook()
	}
	tpl := render.MustTemplate("chart", []string{HeaderTpl, tpls.BaseTpl, tpls.ChartTpl})
	buf := new(bytes.Buffer)
	if err := tpl.ExecuteTemplate(buf, "chart", r.c); err != nil {
		return err
	}
	_, err := w.Write(buf.Bytes())
	return err
}
// barCustomize builds a bar chart that renders through the custom template
// renderer, validating the chart before each render.
// NOTE(review): weeks and generateBarItems are assumed to be defined
// elsewhere in this package — confirm.
func barCustomize() *charts.Bar {
	bar := charts.NewBar()
	// Swap in the custom renderer; bar.Validate runs as a pre-render hook.
	bar.Renderer = NewMyOwnRender(bar, bar.Validate)
	bar.SetGlobalOptions(
		charts.WithTitleOpts(opts.Title{
			Title:    "Bar-customize-template-example",
			Subtitle: "This is the subtitle.",
		}),
	)
	bar.SetXAxis(weeks).
		AddSeries("Category A", generateBarItems()).
		AddSeries("Category B", generateBarItems())
	return bar
}
// CustomizeExamples renders the customize-template example to an HTML file.
type CustomizeExamples struct{}

// Examples writes the customized bar chart to examples/html/customize.html.
func (CustomizeExamples) Examples() {
	bar := barCustomize()
	f, err := os.Create("examples/html/customize.html")
	if err != nil {
		panic(err)
	}
	// Fix: the original leaked the file handle and ignored Render's error;
	// the io.MultiWriter wrapper around a single writer was also pointless.
	defer f.Close()
	if err := bar.Render(f); err != nil {
		panic(err)
	}
}
|
package go_test_parallel
import (
"testing"
"time"
)
// Test_slow1/2/3 simulate slow work. t.Parallel() lets the three tests
// overlap (total wall time ≈ 3s instead of 6s), which matches this package's
// name. TODO confirm the package was not intentionally demonstrating the
// sequential baseline.
func Test_slow1(t *testing.T) {
	t.Parallel()
	time.Sleep(3 * time.Second)
}

func Test_slow2(t *testing.T) {
	t.Parallel()
	time.Sleep(1 * time.Second)
}

func Test_slow3(t *testing.T) {
	t.Parallel()
	time.Sleep(2 * time.Second)
}
|
package handler
import (
"excho-job/helper"
"excho-job/resume"
"fmt"
"strconv"
"github.com/gin-gonic/gin"
)
// resumeHandler exposes resume CRUD operations over gin HTTP endpoints.
type resumeHandler struct {
	service resume.Service // business logic for resume storage/lookup
}

// NewResumeHandler wires a resume.Service into a handler set.
func NewResumeHandler(service resume.Service) *resumeHandler {
	return &resumeHandler{service}
}
// GetResemuByJobSeekerIDHandler returns the resume belonging to the
// authenticated job seeker ("currentUser" from auth middleware).
func (h *resumeHandler) GetResemuByJobSeekerIDHandler(c *gin.Context) {
	userData := int(c.MustGet("currentUser").(int))
	userID := strconv.Itoa(userData)
	userProfile, err := h.service.GetResemuByJobSeekerID(userID)
	// Fix: check the service error before inspecting the profile. The
	// original tested userProfile.ID first, so a failed lookup (zero-value
	// profile) was reported as "not uploaded yet" and real errors were masked.
	if err != nil {
		responseError := helper.APIResponse("status unauthorize", 401, "error", gin.H{"error": err.Error()})
		c.JSON(401, responseError)
		return
	}
	if userProfile.ID == 0 {
		response := "resume can't found / resume not uploaded yet"
		responseErr := helper.APIResponse("status unauthorize", 401, "error", gin.H{"error": response})
		c.JSON(401, responseErr)
		return
	}
	response := helper.APIResponse("success get resume by joob seeker ID", 200, "success", userProfile)
	c.JSON(200, response)
}
// SaveNewResumeeHandler stores an uploaded resume file on disk and records
// its public URL against the authenticated job seeker.
func (h *resumeHandler) SaveNewResumeeHandler(c *gin.Context) {
	userData := int(c.MustGet("currentUser").(int))
	file, err := c.FormFile("resume") // multipart field name used by clients
	if err != nil {
		responseError := helper.APIResponse("status bad request", 400, "error", gin.H{"error": err.Error()})
		c.JSON(400, responseError)
		return
	}
	path := fmt.Sprintf("images/job-seeker-detail/resume/resume-%d-%s", userData, file.Filename)
	if err = c.SaveUploadedFile(file, path); err != nil {
		responseError := helper.APIResponse("status bad request", 400, "error", gin.H{"error": err.Error()})
		c.JSON(400, responseError)
		return
	}
	// Public URL under which the stored file is served.
	pathResumeSave := "https://excho-job.herokuapp.com/" + path
	userProfile, err := h.service.SaveNewResume(pathResumeSave, userData)
	if err != nil {
		responseError := helper.APIResponse("Internal server error", 500, "error", gin.H{"error": err.Error()})
		c.JSON(500, responseError)
		return
	}
	response := helper.APIResponse("success create user profile", 201, "success", userProfile)
	c.JSON(201, response)
}
// UpdateResumeByIDHandler replaces the stored resume for the authenticated
// job seeker with a freshly uploaded file.
func (h *resumeHandler) UpdateResumeByIDHandler(c *gin.Context) {
	userData := int(c.MustGet("currentUser").(int))
	ID := strconv.Itoa(userData)
	file, err := c.FormFile("resume") // multipart field name used by clients
	if err != nil {
		responseError := helper.APIResponse("status bad request", 400, "error", gin.H{"error": err.Error()})
		c.JSON(400, responseError)
		return
	}
	path := fmt.Sprintf("images/job-seeker-detail/resume/resume-%d-%s", userData, file.Filename)
	if err = c.SaveUploadedFile(file, path); err != nil {
		responseError := helper.APIResponse("status bad request", 400, "error", gin.H{"error": err.Error()})
		c.JSON(400, responseError)
		return
	}
	// Public URL under which the stored file is served.
	pathProfileSave := "https://excho-job.herokuapp.com/" + path
	userProfile, err := h.service.UpdateResumeByID(pathProfileSave, ID)
	if err != nil {
		responseError := helper.APIResponse("Internal server error", 500, "error", gin.H{"error": err.Error()})
		c.JSON(500, responseError)
		return
	}
	response := helper.APIResponse("success update resume job seeker", 200, "success", userProfile)
	c.JSON(200, response)
}
|
package set1
// HammingDistance returns the number of differing bits between A and B.
func HammingDistance(A []byte, B []byte) int {
	total := 0
	for _, b := range FixedXOR(A, B) {
		// Kernighan's trick: each step clears the lowest set bit.
		for ; b > 0; b &= b - 1 {
			total++
		}
	}
	return total
}
// KeySizeScore returns the mean Hamming distance between consecutive
// keysize-sized chunks of data. Callers must ensure len(data) >= 2*keysize,
// otherwise the result is NaN (0/0).
func KeySizeScore(data []byte, keysize int) float64 {
	sum, count := 0, 0
	for i := 0; (i+2)*keysize <= len(data); i++ {
		a := data[i*keysize : (i+1)*keysize]
		b := data[(i+1)*keysize : (i+2)*keysize]
		sum += HammingDistance(a, b)
		count++
	}
	return float64(sum) / float64(count)
}
// FindKeySize returns the repeating-XOR key size (up to 50, at most
// len(data)/2) with the lowest length-normalized score, plus that score.
//
// Fix over the original: the loop started at i=1, redundantly recomputing the
// score for keysize 1 that was already used to seed key/score (the seed is
// normalized by 1 implicitly). Starting at 2 is behavior-identical and skips
// the duplicate work.
func FindKeySize(data []byte) (int, float64) {
	key, score := 1, KeySizeScore(data, 1)
	for i := 2; i <= 50 && i <= len(data)/2; i++ {
		// Normalize by key size so longer keys are not unfairly penalized.
		s := KeySizeScore(data, i) / float64(i)
		if s < score {
			key, score = i, s
		}
	}
	return key, score
}
// BytesToBlocks transposes data into size blocks: block k receives every
// byte whose index is congruent to k modulo size.
func BytesToBlocks(data []byte, size int) [][]byte {
	blocks := make([][]byte, size)
	for i, b := range data {
		blocks[i%size] = append(blocks[i%size], b)
	}
	return blocks
}
// BreakRepeatingXOR recovers the key of a repeating-key XOR ciphertext and
// returns (plaintext, key): it estimates the key size, transposes the data
// into per-key-byte columns, and solves each column as single-byte XOR.
func BreakRepeatingXOR(data []byte) ([]byte, []byte) {
	keySize, _ := FindKeySize(data)
	key := make([]byte, keySize)
	for i, block := range BytesToBlocks(data, keySize) {
		_, keyChar, _ := BreakSingleByteXOR(block)
		key[i] = keyChar
	}
	return RepeatingKeyXOR(data, key), key
}
|
package main
import (
"fmt"
"log"
"github.com/PuerkitoBio/goquery"
)
func HtmlScrape(rule *Rule, url string) {
// doc, err := goquery.NewDocument("http://www.biquge.com.tw/17_17275/")
doc, err := goquery.NewDocument(url)
if err != nil {
log.Fatal(err)
}
if len(rule.Class)
t := doc.Find("div.volume").Find("ul.cf").Find("a")
fmt.Println("length:", t.Length())
for i := 0; i < t.Length(); i++ {
fmt.Println(t.Eq(i).Text())
d, exi := t.Eq(i).Attr("href")
fmt.Println(d, exi)
}
}
|
package helper
import "github.com/MOZGIII/evans/config"
// TestConfig returns the configuration for use in tests.
// NOTE(review): this hands back the live config.Get() result, not a copy —
// mutations made by one test are visible to others; confirm that is intended.
func TestConfig() *config.Config {
	return config.Get()
}
|
package generator
// serverTemplate is the text/template used by the generator to emit a gRPC
// server main.go. {{.Rpath}}, {{.Package.Name}} and {{.Service.Name}} are
// filled from the generator's model at render time.
var serverTemplate = `
package main
import (
"fmt"
"github.com/watchman1989/rninet/server"
"{{.Rpath}}/router"
"{{.Rpath}}/proto/{{.Package.Name}}"
)
var (
routerServer = &router.RouterServer{}
)
func main() {
fmt.Printf("START_SERVER\n")
if err := server.Init(); err != nil {
fmt.Printf("SERVER_INIT_ERROR: %v\n", err)
return
}
{{.Package.Name}}.Register{{.Service.Name}}Server(server.GRPCServer(), routerServer)
if err := server.Serve(); err != nil {
fmt.Printf("SERVER_ERROR: %v\n", err)
return
}
}
`
package supervisor_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
// sql drivers
_ "github.com/mattn/go-sqlite3"
. "github.com/starkandwayne/shield/supervisor"
)
// Ginkgo spec for the supervisor's /v1/status endpoint. GET and
// NotImplemented are test helpers dot-imported from the supervisor package.
var _ = Describe("HTTP Rest API", func() {
	Describe("/v1/status API", func() {
		It("handles GET requests", func() {
			r := GET(StatusAPI{}, "/v1/status")
			Ω(r.Code).Should(Equal(200))
		})
		It("ignores other HTTP methods", func() {
			// Everything except GET must be rejected as not implemented.
			for _, method := range []string{
				"PUT", "POST", "DELETE", "PATCH", "OPTIONS", "TRACE",
			} {
				NotImplemented(StatusAPI{}, method, "/v1/status", nil)
			}
		})
		It("ignores requests not to /v1/status (sub-URIs)", func() {
			NotImplemented(StatusAPI{}, "GET", "/v1/status/stuff", nil)
			NotImplemented(StatusAPI{}, "OPTIONS", "/v1/status/OPTIONAL/STUFF", nil)
		})
	})
})
|
package reverse
import "testing"
// TestReverse checks a plain reversal and the int32-overflow case:
// 2147483648 == 2^31, whose reversal cannot fit, so Reverse must report 0.
func TestReverse(t *testing.T) {
	if got := Reverse(123); got != 321 {
		t.Error("reverse error, expected result is 321")
	}
	if got := Reverse(2147483648); got != 0 {
		t.Error("reverse error, expected result is 0")
	}
}
|
package main
import "fmt"
// count accumulates solutions found by putQueen.
var count int

// totalNQueens returns the number of distinct solutions to the n-queens
// puzzle. Fix: the package-level counter is reset on entry; previously a
// second call returned the accumulated total of all prior calls.
func totalNQueens(n int) int {
	count = 0
	putQueen(n, 0, 0, 0, 0)
	return count
}

// putQueen places queens row by row using bitmasks: col marks occupied
// columns, pia and na mark squares attacked along the two diagonal
// directions (shifted one step per row in opposite directions).
func putQueen(n, row, col, pia, na int) {
	if row >= n {
		count++
		return
	}
	// 查看是否有空位 — free squares available in this row.
	bits := (^(col | pia | na)) & ((1 << n) - 1)
	for bits > 0 {
		// 取出最近的一个空位 — isolate the lowest free square.
		p := bits & -bits
		putQueen(n, row+1, (col | p), ((pia | p) << 1), ((na | p) >> 1))
		bits &= bits - 1
	}
}
func main() {
res := totalNQueens(4)
fmt.Println(res)
} |
package main
import (
"fmt"
"sync"
"time"
)
// main demonstrates WaitGroup synchronization: the final print only runs
// after the goroutine's deferred Done releases Wait (about 10s later).
func main() {
	wg := &sync.WaitGroup{}
	wg.Add(1)
	go func() {
		// Done in a defer so the WaitGroup is released even if the body
		// panics (see the commented-out panic below).
		defer func() {
			wg.Done()
		}()
		//panic(-1)
		Println()
		time.Sleep(time.Second * 10)
		//wg.Done()
	}()
	fmt.Println("+++++++++++")
	wg.Wait()
	fmt.Println("***********")
}

// Println prints a separator line; exists to show a call from the goroutine.
func Println() {
	fmt.Println("-------------")
}
|
package Add_Two_Numbers
// ListNode is a singly linked list node; numbers are stored one digit per
// node, least significant digit first.
type ListNode struct {
	Val  int
	Next *ListNode
}

// addTwoNumbers sums two non-negative integers in reversed-digit list form
// and returns the sum in the same form. Returns nil when both inputs are nil.
//
// Fix over the original: the carry node was allocated inside the loop on
// every iteration with a pending carry and then overwritten by the next
// digit node, wasting an allocation per carry. Folding the carry into the
// loop condition builds each output node exactly once.
func addTwoNumbers(l1 *ListNode, l2 *ListNode) *ListNode {
	var head, tail *ListNode
	carry := 0
	for l1 != nil || l2 != nil || carry > 0 {
		sum := carry
		if l1 != nil {
			sum += l1.Val
			l1 = l1.Next
		}
		if l2 != nil {
			sum += l2.Val
			l2 = l2.Next
		}
		node := &ListNode{Val: sum % 10}
		carry = sum / 10
		if tail == nil {
			head = node
		} else {
			tail.Next = node
		}
		tail = node
	}
	return head
}
|
package main
import (
"fmt"
"github.com/aliyun/aliyun-datahub-sdk-go/datahub"
"time"
)
// example_error demonstrates datahub's typed-error handling around
// CreateProject, retrying only on LimitExceededError.
// NOTE(review): dh, accessId, accessKey, endpoint and projectName are assumed
// to be package-level declarations elsewhere in this package — confirm.
func example_error() {
	maxRetry := 3
	dh = datahub.New(accessId, accessKey, endpoint)
	if _, err := dh.CreateProject(projectName, "project comment"); err != nil {
		// Classify by concrete error type to decide whether to retry.
		if _, ok := err.(*datahub.InvalidParameterError); ok {
			fmt.Println("invalid parameter,please check your input parameter")
		} else if _, ok := err.(*datahub.ResourceExistError); ok {
			fmt.Println("project already exists")
		} else if _, ok := err.(*datahub.AuthorizationFailedError); ok {
			fmt.Println("accessId or accessKey err,please check your accessId and accessKey")
		} else if _, ok := err.(*datahub.LimitExceededError); ok {
			fmt.Println("limit exceed, so retry")
			for i := 0; i < maxRetry; i++ {
				// wait 5 seconds
				time.Sleep(5 * time.Second)
				// NOTE(review): retry errors are not re-classified here, so a
				// non-retryable failure still consumes all remaining retries.
				if _, err := dh.CreateProject(projectName, "project comment"); err != nil {
					fmt.Println("create project failed")
					fmt.Println(err)
				} else {
					fmt.Println("create project successful")
					break
				}
			}
		} else {
			fmt.Println("unknown error")
			fmt.Println(err)
		}
	} else {
		fmt.Println("create project successful")
	}
}
|
package server
import (
"errors"
"github.com/evcc-io/evcc/util/config"
"github.com/evcc-io/evcc/util/templates"
)
// templateForConfig resolves the template referenced by conf's template-type
// key for the given class.
func templateForConfig(class templates.Class, conf map[string]any) (templates.Template, error) {
	name, ok := conf[typeTemplate].(string)
	if !ok {
		return templates.Template{}, errors.New("config template not found")
	}
	return templates.ByName(class, name)
}
// sanitizeMasked returns a copy of conf in which the value of every masked
// template parameter is replaced by the masked placeholder.
func sanitizeMasked(class templates.Class, conf map[string]any) (map[string]any, error) {
	tmpl, err := templateForConfig(class, conf)
	if err != nil {
		return nil, err
	}
	out := make(map[string]any, len(conf))
	for key, val := range conf {
		idx, param := tmpl.ParamByName(key)
		if idx >= 0 && param.IsMasked() {
			val = masked
		}
		out[key] = val
	}
	return out, nil
}
// mergeMasked returns a copy of conf in which every masked parameter whose
// incoming value is still the masked placeholder is restored from old.
func mergeMasked(class templates.Class, conf, old map[string]any) (map[string]any, error) {
	tmpl, err := templateForConfig(class, conf)
	if err != nil {
		return nil, err
	}
	out := make(map[string]any, len(conf))
	for key, val := range conf {
		idx, param := tmpl.ParamByName(key)
		if idx >= 0 && param.IsMasked() && val == masked {
			val = old[key]
		}
		out[key] = val
	}
	return out, nil
}
// deviceInstanceFromMergedConfig looks up the device with the given id,
// merges masked values in conf from the device's stored configuration, and
// builds a fresh instance from the merged config. On error the returned T is
// the zero value and must not be used.
func deviceInstanceFromMergedConfig[T any](id int, class templates.Class, conf map[string]any, newFromConf func(string, map[string]any) (T, error), h config.Handler[T]) (config.Device[T], T, error) {
	var zero T
	dev, err := h.ByName(config.NameForID(id))
	if err != nil {
		return nil, zero, err
	}
	// Restore masked secrets from the previously stored config.
	merged, err := mergeMasked(class, conf, dev.Config().Other)
	if err != nil {
		return nil, zero, err
	}
	instance, err := newFromConf(typeTemplate, merged)
	return dev, instance, err
}
|
package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"strconv"
"github.com/gorilla/mux"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/mysql"
)
// Shared database handle and connection error, assigned in main.
var db *gorm.DB
var err error

// Artical is the persisted article model.
// NOTE(review): the ALL-CAPS field names and the "Id" json tag break Go and
// JSON-casing conventions; renaming would touch every handler in this file,
// so it is only flagged here.
type Artical struct {
	ID          int64  `json:"Id"`
	TITLE       string `json:"title"`
	DESCRIPTION string `json:"description"`
	CONTENT     string `json:"content"`
}
// main opens the MySQL connection, migrates the Artical schema, and starts
// the HTTP server.
func main() {
	db, err = gorm.Open("mysql", "manish:manish@tcp(localhost:3306)/student?charset=utf8&parseTime=True")
	if err != nil {
		// Fix: the original logged the failure and carried on with a dead
		// *gorm.DB, which fails on first use. Abort instead.
		log.Fatalln("Connetion Failed")
	}
	log.Println("Connetion established")
	defer db.Close()
	db.AutoMigrate(&Artical{})
	handleRequest()
}
// handleRequest registers the article routes and starts the HTTP server.
//
// Fix: the original registered returnAllArticals, returnSingleArtical,
// updateArtical and deleteArtical — none of which exist in this file, so the
// package did not compile. Routes now point at the functions actually
// defined below.
// NOTE(review): all routes use GET, including mutating ones — confirm that
// is intended before exposing this service.
func handleRequest() {
	log.Println("Server Started:")
	log.Println("press ctrl and c to quit the server")
	myrouter := mux.NewRouter().StrictSlash(true)
	myrouter.HandleFunc("/new-artical", createNewArtical).Methods("GET")
	myrouter.HandleFunc("/all-articals", returnTotalArticals).Methods("GET")
	myrouter.HandleFunc("/artical/{id}", returnSingleBooking).Methods("GET")
	myrouter.HandleFunc("/update-artical/{id}/{title}", updateBooking).Methods("GET")
	myrouter.HandleFunc("/delete-artical/{id}", deleteBooking).Methods("GET")
	log.Fatal(http.ListenAndServe(":8085", myrouter))
}
// createNewArtical decodes an Artical from the request body and persists it.
func createNewArtical(w http.ResponseWriter, r *http.Request) {
	// Fix: header name was misspelled "Context-Type".
	w.Header().Set("Content-Type", "application/json")
	reqBody, err := ioutil.ReadAll(r.Body)
	if err != nil {
		// Fix: read failures were silently swallowed; report them.
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	var artical Artical
	if er := json.Unmarshal(reqBody, &artical); er != nil {
		fmt.Println(er)
		// Fix: parse failures previously produced an empty 200 response.
		http.Error(w, er.Error(), http.StatusBadRequest)
		return
	}
	db.Create(&artical)
	fmt.Println("Endpoint Hit: Creating New Booking")
	json.NewEncoder(w).Encode(artical)
}
// returnTotalArticals writes all stored articles as JSON.
func returnTotalArticals(w http.ResponseWriter, r *http.Request) {
	// Fix: header name was misspelled "Context-Type".
	w.Header().Set("Content-Type", "application/json")
	articals := []Artical{}
	// Fix: table name was misspelled "aricals"; gorm pluralizes Artical to
	// "articals" (the table AutoMigrate creates in main).
	db.Raw("Select * from articals").Scan(&articals)
	json.NewEncoder(w).Encode(articals)
}
// returnSingleBooking writes the article matching the {id} route variable.
func returnSingleBooking(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	key := vars["id"]
	artical := []Artical{}
	s, err := strconv.Atoi(key)
	if err != nil {
		// Fix: a non-numeric id previously produced an empty 200 response.
		http.Error(w, "invalid id", http.StatusBadRequest)
		return
	}
	db.Raw("Select * from articals where id= ?", s).Scan(&artical)
	json.NewEncoder(w).Encode(artical)
}
// updateBooking sets the title of article {id} to the {title} route variable.
func updateBooking(w http.ResponseWriter, r *http.Request) {
	vars := mux.Vars(r)
	key1 := vars["id"]
	// Fix: the route declares {title}, but the original read vars["user"],
	// which is always empty — every update silently blanked the title.
	key2 := vars["title"]
	articals := []Artical{}
	s, err := strconv.Atoi(key1)
	if err == nil {
		db.Model(&articals).Where("id = ?", s).Update("title", key2)
		fmt.Println("Updated")
	} else {
		fmt.Print(err)
	}
}
// deleteBooking removes the article matching the {id} route variable.
func deleteBooking(w http.ResponseWriter, r *http.Request) {
	id := mux.Vars(r)["id"]
	n, err := strconv.Atoi(id)
	if err != nil {
		fmt.Print(err)
		return
	}
	records := []Artical{}
	db.Where("id = ?", n).Delete(&records)
	fmt.Println("Deleted")
}
|
package zhenai
import (
"bufio"
"fmt"
"github.com/xiaozefeng/go-web-crawler/fetcher"
"github.com/xiaozefeng/go-web-crawler/model/zhenai"
"io/ioutil"
"os"
"testing"
)
// TestSaveProfile downloads a profile page and caches it on disk for
// TestParseProfile. (More a fixture generator than a test — it needs network
// access.)
func TestSaveProfile(t *testing.T) {
	content, err := fetcher.Fetch("https://album.zhenai.com/u/108208979")
	if err != nil {
		// Fix: use t.Fatal instead of panic so the failure is reported
		// through the testing framework.
		t.Fatal(err)
	}
	file, err := os.Create("./profile_data.html")
	if err != nil {
		t.Fatal(err)
	}
	// Fix: the file handle was never closed.
	defer file.Close()
	writer := bufio.NewWriter(file)
	// Fix: the write error was discarded and Flush ran via defer with its
	// error ignored; surface both.
	if _, err := writer.Write(content); err != nil {
		t.Fatal(err)
	}
	if err := writer.Flush(); err != nil {
		t.Fatal(err)
	}
}
// TestParseProfile parses the cached fixture written by TestSaveProfile and
// prints the first parsed item.
func TestParseProfile(t *testing.T) {
	content, err := ioutil.ReadFile("./profile_data.html")
	if err != nil {
		// Fix: report through the testing framework instead of panicking.
		t.Fatal(err)
	}
	parseResult := ParseProfile(content, zhenai.UserInfo{})
	fmt.Printf("%#v", parseResult.Items[0])
}
|
// Copyright 2016 Lars Wiegman. All rights reserved. Use of this source code is
// governed by a BSD-style license that can be found in the LICENSE file.
package multipass
import (
"testing"
"time"
"github.com/mholt/caddy"
)
// TestParse feeds Caddyfile snippets to parse and checks that accepted
// inputs produce the expected Rules while malformed inputs error out.
func TestParse(t *testing.T) {
	tests := []struct {
		input     string
		shouldErr bool
		expected  []Rule
	}{
		// Minimal valid config: one handle plus a sender address.
		{`multipass {
handles leeloo@dallas
mail_from no-reply@dallas
}`, false, []Rule{
			{
				Handles:  []string{"leeloo@dallas"},
				MailFrom: "no-reply@dallas",
			},
		}},
		// Fully-populated config exercising every directive.
		{`multipass {
resources /fhloston /paradise
basepath /multipass
expires 24h
handles leeloo@dallas korben@dallas
smtp_addr localhost:2525
smtp_user admin
smtp_pass secret
mail_from "Multipass <no-reply@dallas>"
mail_tmpl email_template.eml
}`, false, []Rule{
			{
				// NOTE(review): the input says "/paradise" but this expects
				// "paradise"; only len(Resources) is compared below, so the
				// discrepancy is never caught — confirm and align.
				Resources: []string{"/fhloston", "paradise"},
				Basepath:  "/multipass",
				Expires:   time.Hour * 24,
				Handles:   []string{"leeloo@dallas", "korben@dallas"},
				SMTPAddr:  "localhost:2525",
				SMTPUser:  "admin",
				SMTPPass:  "secret",
				MailFrom:  "Multipass <no-reply@dallas>",
				MailTmpl:  "email_template.eml",
			},
		}},
		// The remaining cases are malformed configs that must error:
		// bad duration, empty block, bare directive, duplicate block, and
		// directives given too many arguments.
		{`multipass {
expires a
}`, true, []Rule{},
		},
		{`multipass {
}`, true, []Rule{},
		},
		{`multipass {
handles
}`, true, []Rule{},
		},
		{`multipass {
handles leeloo@dallas korben@dallas
mail_from "Multipass <no-reply@dallas>"
}
multipass {
handles leeloo@dallas korben@dallas
mail_from "Multipass <no-reply@dallas>"
}`, true, []Rule{},
		},
		{`multipass {
basepath a b
}`, true, []Rule{},
		},
		{`multipass {
expires 12h 12h
}`, true, []Rule{},
		},
		{`multipass {
smtp_addr a b
}`, true, []Rule{},
		},
		{`multipass {
smtp_user a b
}`, true, []Rule{},
		},
		{`multipass {
smtp_pass a b
}`, true, []Rule{},
		},
		{`multipass {
mail_from a b
}`, true, []Rule{},
		},
		{`multipass {
mail_tmpl a b
}`, true, []Rule{},
		},
		{`multipass a`, true, []Rule{}},
	}
	for i, test := range tests {
		actual, err := parse(caddy.NewTestController("http", test.input))
		// Error expectation must match in both directions.
		if err == nil && test.shouldErr {
			t.Errorf("test #%d should return an error, but did not", i)
		} else if err != nil && !test.shouldErr {
			t.Errorf("test #%d should not return an error, but did with %s", i, err)
		}
		if !test.shouldErr && len(actual) != len(test.expected) {
			t.Errorf("test #%d: expected %d rules, actual %d rules", i, len(test.expected), len(actual))
		}
		// Field-by-field comparison of each expected rule; slice fields are
		// only compared by length.
		for j, expectedRule := range test.expected {
			actualRule := actual[j]
			if len(actualRule.Resources) != len(expectedRule.Resources) {
				t.Errorf("test #%d: expected %d Resources, actual %d Resources", i, len(expectedRule.Resources), len(actualRule.Resources))
			}
			if actualRule.Basepath != expectedRule.Basepath {
				t.Errorf("test #%d, rule #%d: expected '%s', actual '%s'", i, j, expectedRule.Basepath, actualRule.Basepath)
			}
			if actualRule.Expires != expectedRule.Expires {
				t.Errorf("test #%d, rule #%d: expected '%s', actual '%s'", i, j, expectedRule.Expires, actualRule.Expires)
			}
			if actualRule.SMTPAddr != expectedRule.SMTPAddr {
				t.Errorf("test #%d, rule #%d: expected '%s', actual '%s'", i, j, expectedRule.SMTPAddr, actualRule.SMTPAddr)
			}
			if actualRule.SMTPUser != expectedRule.SMTPUser {
				t.Errorf("test #%d, rule #%d: expected '%s', actual '%s'", i, j, expectedRule.SMTPUser, actualRule.SMTPUser)
			}
			if actualRule.SMTPPass != expectedRule.SMTPPass {
				t.Errorf("test #%d, rule #%d: expected '%s', actual '%s'", i, j, expectedRule.SMTPPass, actualRule.SMTPPass)
			}
			if actualRule.MailFrom != expectedRule.MailFrom {
				t.Errorf("test #%d, rule #%d: expected '%s', actual '%s'", i, j, expectedRule.MailFrom, actualRule.MailFrom)
			}
			if actualRule.MailTmpl != expectedRule.MailTmpl {
				t.Errorf("test #%d, rule #%d: expected '%s', actual '%s'", i, j, expectedRule.MailTmpl, actualRule.MailTmpl)
			}
			if len(actualRule.Handles) != len(expectedRule.Handles) {
				t.Errorf("test #%d: expected %d handles, actual %d handles", i, len(expectedRule.Handles), len(actualRule.Handles))
			}
		}
	}
}
|
/*
Copyright 2020 Docker Compose CLI authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package formatter
import (
"fmt"
"strconv"
"strings"
"time"
"github.com/docker/cli/cli/command/formatter"
"github.com/docker/compose/v2/pkg/api"
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/stringid"
"github.com/docker/go-units"
)
// Template and column-header constants for the compose `ps` table view.
const (
	defaultContainerTableFormat = "table {{.Name}}\t{{.Image}}\t{{.Command}}\t{{.Service}}\t{{.RunningFor}}\t{{.Status}}\t{{.Ports}}"
	nameHeader                  = "NAME"
	serviceHeader               = "SERVICE"
	commandHeader               = "COMMAND"
	runningForHeader            = "CREATED"
	mountsHeader                = "MOUNTS"
	localVolumes                = "LOCAL VOLUMES"
	networksHeader              = "NETWORKS"
)
// NewContainerFormat returns a Format for rendering using a Context.
// source selects table, raw, or a caller-supplied custom template; quiet
// collapses output to IDs and size optionally appends a size column.
func NewContainerFormat(source string, quiet bool, size bool) formatter.Format {
	switch source {
	case formatter.TableFormatKey, "": // table formatting is the default if none is set.
		if quiet {
			return formatter.DefaultQuietFormat
		}
		format := defaultContainerTableFormat
		if size {
			// NOTE(review): `\t` (and `\n` below) inside these raw strings
			// are literal backslash sequences, relying on the docker
			// formatter pre-processing them into real tabs/newlines before
			// template parsing — confirm against docker/cli behavior.
			format += `\t{{.Size}}`
		}
		return formatter.Format(format)
	case formatter.RawFormatKey:
		if quiet {
			return `container_id: {{.ID}}`
		}
		format := `container_id: {{.ID}}
image: {{.Image}}
command: {{.Command}}
created_at: {{.CreatedAt}}
state: {{- pad .State 1 0}}
status: {{- pad .Status 1 0}}
names: {{.Names}}
labels: {{- pad .Labels 1 0}}
ports: {{- pad .Ports 1 0}}
`
		if size {
			format += `size: {{.Size}}\n`
		}
		return formatter.Format(format)
	default: // custom format
		if quiet {
			return formatter.DefaultQuietFormat
		}
		return formatter.Format(source)
	}
}
// ContainerWrite renders the given container summaries using ctx's format.
func ContainerWrite(ctx formatter.Context, containers []api.ContainerSummary) error {
	render := func(format func(subContext formatter.SubContext) error) error {
		for _, summary := range containers {
			sub := &ContainerContext{trunc: ctx.Trunc, c: summary}
			if err := format(sub); err != nil {
				return err
			}
		}
		return nil
	}
	return ctx.Write(NewContainerContext(), render)
}
// ContainerContext is a struct used for rendering a list of containers in a Go template.
type ContainerContext struct {
	formatter.HeaderContext
	trunc bool                 // truncate IDs/commands/mount names when set
	c     api.ContainerSummary // the container being rendered
	// FieldsUsed is used in the pre-processing step to detect which fields are
	// used in the template. It's currently only used to detect use of the .Size
	// field which (if used) automatically sets the '--size' option when making
	// the API call.
	FieldsUsed map[string]interface{}
}

// NewContainerContext creates a new context for rendering containers.
// The Header map links template field names to their table column headers.
func NewContainerContext() *ContainerContext {
	containerCtx := ContainerContext{}
	containerCtx.Header = formatter.SubHeaderContext{
		"ID":         formatter.ContainerIDHeader,
		"Name":       nameHeader,
		"Service":    serviceHeader,
		"Image":      formatter.ImageHeader,
		"Command":    commandHeader,
		"CreatedAt":  formatter.CreatedAtHeader,
		"RunningFor": runningForHeader,
		"Ports":      formatter.PortsHeader,
		"State":      formatter.StateHeader,
		"Status":     formatter.StatusHeader,
		"Size":       formatter.SizeHeader,
		"Labels":     formatter.LabelsHeader,
	}
	return &containerCtx
}

// MarshalJSON makes ContainerContext implement json.Marshaler
func (c *ContainerContext) MarshalJSON() ([]byte, error) {
	return formatter.MarshalJSON(c)
}
// ID returns the container's ID as a string. Depending on the `--no-trunc`
// option being set, the full or truncated ID is returned.
func (c *ContainerContext) ID() string {
	if c.trunc {
		return stringid.TruncateID(c.c.ID)
	}
	return c.c.ID
}

// Name returns the container's compose-assigned name.
func (c *ContainerContext) Name() string {
	return c.c.Name
}

// Names returns a comma-separated string of the container's names, with their
// slash (/) prefix stripped. Additional names for the container (related to the
// legacy `--link` feature) are omitted.
func (c *ContainerContext) Names() string {
	names := formatter.StripNamePrefix(c.c.Names)
	if c.trunc {
		for _, name := range names {
			// A name without a slash is the container's own (non-link) name.
			if len(strings.Split(name, "/")) == 1 {
				names = []string{name}
				break
			}
		}
	}
	return strings.Join(names, ",")
}

// Service returns the compose service this container belongs to.
func (c *ContainerContext) Service() string {
	return c.c.Service
}

// Image returns the container's image reference.
func (c *ContainerContext) Image() string {
	return c.c.Image
}

// Command returns the container's command, quoted and (when truncation is
// enabled) ellipsized to 20 characters.
func (c *ContainerContext) Command() string {
	command := c.c.Command
	if c.trunc {
		command = formatter.Ellipsis(command, 20)
	}
	return strconv.Quote(command)
}

// CreatedAt returns the creation time rendered from the Unix timestamp.
func (c *ContainerContext) CreatedAt() string {
	return time.Unix(c.c.Created, 0).String()
}

// RunningFor returns a human-readable "<duration> ago" since creation.
func (c *ContainerContext) RunningFor() string {
	createdAt := time.Unix(c.c.Created, 0)
	return units.HumanDuration(time.Now().UTC().Sub(createdAt)) + " ago"
}

// ExitCode returns the container's exit code.
func (c *ContainerContext) ExitCode() int {
	return c.c.ExitCode
}

// State returns the container's state (e.g. running, exited).
func (c *ContainerContext) State() string {
	return c.c.State
}

// Status returns the container's human-readable status string.
func (c *ContainerContext) Status() string {
	return c.c.Status
}

// Health returns the container's health-check status.
func (c *ContainerContext) Health() string {
	return c.c.Health
}

// Publishers returns the container's published-port descriptions.
func (c *ContainerContext) Publishers() api.PortPublishers {
	return c.c.Publishers
}
// Ports converts the container's port publishers into the engine's Port
// representation and renders them with the shared formatter helper.
func (c *ContainerContext) Ports() string {
	ports := make([]types.Port, 0, len(c.c.Publishers))
	for _, pub := range c.c.Publishers {
		ports = append(ports, types.Port{
			IP:          pub.URL,
			PrivatePort: uint16(pub.TargetPort),
			PublicPort:  uint16(pub.PublishedPort),
			Type:        pub.Protocol,
		})
	}
	return formatter.DisplayablePorts(ports)
}
// Labels returns all labels present on the container as a comma-separated
// list of key=value pairs. Returns "" when the container has no label map.
func (c *ContainerContext) Labels() string {
	if c.c.Labels == nil {
		return ""
	}
	pairs := make([]string, 0, len(c.c.Labels))
	for key, value := range c.c.Labels {
		pairs = append(pairs, fmt.Sprintf("%s=%s", key, value))
	}
	return strings.Join(pairs, ",")
}

// Label returns the value of the label with the given name or an empty string
// if the given label does not exist.
func (c *ContainerContext) Label(name string) string {
	if c.c.Labels == nil {
		return ""
	}
	return c.c.Labels[name]
}
// Mounts returns a comma-separated string of mount names present on the
// container; each name is ellipsized to 15 characters when truncation is on.
func (c *ContainerContext) Mounts() string {
	names := make([]string, 0, len(c.c.Mounts))
	for _, mount := range c.c.Mounts {
		if c.trunc {
			mount = formatter.Ellipsis(mount, 15)
		}
		names = append(names, mount)
	}
	return strings.Join(names, ",")
}

// LocalVolumes returns the number of volumes using the "local" volume driver,
// rendered as a decimal string.
func (c *ContainerContext) LocalVolumes() string {
	return fmt.Sprintf("%d", c.c.LocalVolumes)
}

// Networks returns a comma-separated string of the networks the container is
// attached to.
func (c *ContainerContext) Networks() string {
	return strings.Join(c.c.Networks, ",")
}
// Size returns the container's writable-layer size, plus the virtual (rootfs)
// size when known, e.g. "2B (virtual 21.5MB)". It also records "Size" in
// FieldsUsed so callers know the field was rendered.
func (c *ContainerContext) Size() string {
	if c.FieldsUsed == nil {
		c.FieldsUsed = map[string]interface{}{}
	}
	c.FieldsUsed["Size"] = struct{}{}
	rw := units.HumanSizeWithPrecision(float64(c.c.SizeRw), 3)
	if c.c.SizeRootFs > 0 {
		virtual := units.HumanSizeWithPrecision(float64(c.c.SizeRootFs), 3)
		return fmt.Sprintf("%s (virtual %s)", rw, virtual)
	}
	return rw
}
|
package main
import (
"fmt"
)
/*
原码:正数是其二进制本身;负数是符号位为1,数值部分取X绝对值的二进制。
反码:正数的反码和原码相同;负数是符号位为1,其它位是原码取反。
补码:正数的补码和原码,反码相同;负数是符号位为1,其它位是原码取反,未位加1。
*/
func main() {
	// sign-magnitude: 0000 0011
	// ones' complement: 0000 0011
	// two's complement: 0000 0011
	var a int8 = 3
	// sign-magnitude: 1000 0010
	// ones' complement: 1111 1101
	// two's complement: 1111 1110
	var b int8 = -2
	//// Left/right shifts: shifting left by n multiplies by 2^n.
	//fmt.Println(a << 2) // 0000 1100
	//fmt.Println(b << 2) // 1000 1000
	//// Right shift: floors towards 0 for positives, towards -1 for negatives.
	//fmt.Println(a >> 2) // 0000 0000
	//fmt.Println(b >> 2) // 0000 0000
	// Bitwise operators act on the two's-complement representation; the
	// chains below read: two's complement -> ones' complement -> sign-magnitude.
	fmt.Println(a | b) // 1111 1111 -> 1111 1110 -> 1000 0001
	fmt.Println(a & b) // 0000 0010 -> 0000 0010 -> 0000 0010
	fmt.Println(-1 ^ (-1 << 5))
}
|
// +build !linux,!darwin,!windows
package udwSys
func SetCurrentMaxFileNum(limit uint64) (err error) {
return GetErrPlatformNotSupport()
}
|
package main
import (
"bufio"
"flag"
"io"
"log"
"os"
"time"
)
//TODO: Should mark header tags to avoid fetching those files
//TODO: Could also make Tag -> Array of attributes

// crawlTags maps each HTML tag the crawler follows to the attribute that
// holds its URL.
var crawlTags = map[string]string{
	"link":   "href",
	"script": "src",
	"a":      "href",
}

// Defaults for the command-line flags parsed in main.
const defaultDuration = 60 * time.Second
const defaultOutputFile = "stdout"
const defaultBufferSize = 10
const defaultNumWorkers = 16
const defaultStackSize = 1024
const defaultWorkerThinkTime = 100 * time.Millisecond
// main parses the CLI flags, runs the N-batches crawler over the given
// domains, and writes the resulting sitemap to stdout or to the file named
// by -f.
func main() {
	durationPointer := flag.Duration("d", defaultDuration, "Crawling duration in seconds.")
	workerThinkTimePointer := flag.Duration("t", defaultWorkerThinkTime, "Default think time between requests.")
	outputFilePointer := flag.String("f", defaultOutputFile, "output file | stdout.")
	workerBuffSizePointer := flag.Int("b", defaultBufferSize, "Worker input buffer size.")
	numWorkersPointer := flag.Int("w", defaultNumWorkers, "Number of workers.")
	flag.Parse()
	duration := *durationPointer
	outputFile := *outputFilePointer
	bufferSize := *workerBuffSizePointer
	numWorkers := *numWorkersPointer
	workerThinkTime := *workerThinkTimePointer
	domainNames := flag.Args()
	log.Printf("duration: %v, output: %v", duration, outputFile)
	log.Printf("Args %v", flag.Args())
	// Alternative crawler implementations exist (basic, producer/consumer);
	// the N-batches variant is the one wired up here.
	c := newNBatchesCrawler()
	p := newCheckSubDomainPolicy()
	initCheckSubDomainPolicy(p, domainNames)
	fe := defaultFetcher(p)
	fr := newQueueFrontier(defaultStackSize)
	s := newInMemoryURLStore()
	sm := newOrderedTreeSitemap()
	initOrderedTreeSitemap(sm)
	initNBatchesCrawler(c, domainNames, fe, p, fr, duration, s, numWorkers, bufferSize, workerThinkTime, sm)
	result, _ := c.Crawl()

	// Pick the output sink: stdout by default, otherwise a freshly created file.
	// Fixes two bugs in the original: (1) `file, err := os.Create(...)` shadowed
	// the outer `file` variable, so the created file was never the one closed
	// (a zero-value os.File was closed instead); (2) the bufio.Writer was never
	// flushed, so buffered sitemap output could be silently lost.
	var f io.Writer = os.Stdout
	var file *os.File
	if outputFile != "stdout" {
		var err error
		file, err = os.Create(outputFile)
		if err != nil {
			log.Fatal("Error opening file.")
		}
		f = file
	}
	out := bufio.NewWriter(f)
	result.printSitemap(out)
	if err := out.Flush(); err != nil {
		log.Printf("Error flushing output: %v", err)
	}
	if file != nil {
		file.Close()
	}
}
|
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package spanset
import (
"context"
"fmt"
"runtime/debug"
"strings"
"github.com/cockroachdb/cockroach/pkg/keys"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/errors"
)
// SpanAccess records the intended mode of access in a SpanSet.
type SpanAccess int

// Constants for SpanAccess. Higher-valued accesses imply lower-level ones.
const (
	SpanReadOnly SpanAccess = iota
	SpanReadWrite
	// NumSpanAccess is a sentinel used to size arrays and bound loops over
	// access modes; it is not a valid access itself.
	NumSpanAccess
)

// String returns a string representation of the SpanAccess.
func (a SpanAccess) String() string {
	switch a {
	case SpanReadOnly:
		return "read"
	case SpanReadWrite:
		return "write"
	default:
		// NumSpanAccess (or any other value) is not a legal access mode.
		panic("unreachable")
	}
}

// SpanScope divides access types into local and global keys.
type SpanScope int

// Constants for span scopes.
const (
	SpanGlobal SpanScope = iota
	SpanLocal
	// NumSpanScope is a sentinel used to size arrays and bound loops over
	// scopes; it is not a valid scope itself.
	NumSpanScope
)

// String returns a string representation of the SpanScope.
func (a SpanScope) String() string {
	switch a {
	case SpanGlobal:
		return "global"
	case SpanLocal:
		return "local"
	default:
		panic("unreachable")
	}
}

// Span is used to represent a keyspan accessed by a request at a given
// timestamp. A zero timestamp indicates it's a non-MVCC access.
type Span struct {
	roachpb.Span
	Timestamp hlc.Timestamp
}

// SpanSet tracks the set of key spans touched by a command, broken into MVCC
// and non-MVCC accesses. The set is divided into subsets for access type
// (read-only or read/write) and key scope (local or global; used to facilitate
// use by the separate local and global latches).
// The Span slice for a particular access and scope contains non-overlapping
// spans in increasing key order after calls to SortAndDedup.
type SpanSet struct {
	// spans is indexed [access][scope]; the sentinels NumSpanAccess and
	// NumSpanScope size the arrays.
	spans [NumSpanAccess][NumSpanScope][]Span
}
// String prints a string representation of the SpanSet, one line per span.
func (s *SpanSet) String() string {
	var sb strings.Builder
	for access := SpanAccess(0); access < NumSpanAccess; access++ {
		for scope := SpanScope(0); scope < NumSpanScope; scope++ {
			for _, sp := range s.GetSpans(access, scope) {
				fmt.Fprintf(&sb, "%s %s: %s at %s\n",
					access, scope, sp.Span.String(), sp.Timestamp.String())
			}
		}
	}
	return sb.String()
}

// Len returns the total number of spans tracked across all accesses and scopes.
func (s *SpanSet) Len() int {
	n := 0
	for access := SpanAccess(0); access < NumSpanAccess; access++ {
		for scope := SpanScope(0); scope < NumSpanScope; scope++ {
			n += len(s.GetSpans(access, scope))
		}
	}
	return n
}

// Empty returns whether the set contains any spans across all accesses and scopes.
func (s *SpanSet) Empty() bool {
	return s.Len() == 0
}
// Reserve space for N additional spans.
func (s *SpanSet) Reserve(access SpanAccess, scope SpanScope, n int) {
	existing := s.spans[access][scope]
	// Re-allocate with n extra capacity on top of the current capacity and
	// copy the existing spans over.
	s.spans[access][scope] = make([]Span, len(existing), n+cap(existing))
	copy(s.spans[access][scope], existing)
}

// AddNonMVCC adds a non-MVCC span to the span set. This should typically be
// used for local keys.
func (s *SpanSet) AddNonMVCC(access SpanAccess, span roachpb.Span) {
	// A zero timestamp marks the span as non-MVCC.
	s.AddMVCC(access, span, hlc.Timestamp{})
}

// AddMVCC adds an MVCC span to the span set to be accessed at the given
// timestamp. This should typically be used for MVCC keys, e.g. user keys.
func (s *SpanSet) AddMVCC(access SpanAccess, span roachpb.Span, timestamp hlc.Timestamp) {
	scope := SpanGlobal
	if keys.IsLocal(span.Key) {
		// Local keys are always treated as non-MVCC: force the scope to
		// local and drop the timestamp.
		scope = SpanLocal
		timestamp = hlc.Timestamp{}
	}
	s.spans[access][scope] = append(s.spans[access][scope], Span{Span: span, Timestamp: timestamp})
}
// Merge merges all spans in s2 into s. s2 is not modified.
func (s *SpanSet) Merge(s2 *SpanSet) {
	for sa := SpanAccess(0); sa < NumSpanAccess; sa++ {
		for ss := SpanScope(0); ss < NumSpanScope; ss++ {
			s.spans[sa][ss] = append(s.spans[sa][ss], s2.spans[sa][ss]...)
		}
	}
	// Restore the sorted, non-overlapping invariant after the bulk append.
	s.SortAndDedup()
}

// SortAndDedup sorts the spans in the SpanSet and removes any duplicates.
func (s *SpanSet) SortAndDedup() {
	for sa := SpanAccess(0); sa < NumSpanAccess; sa++ {
		for ss := SpanScope(0); ss < NumSpanScope; ss++ {
			s.spans[sa][ss], _ /* distinct */ = mergeSpans(s.spans[sa][ss])
		}
	}
}

// GetSpans returns a slice of spans with the given parameters.
func (s *SpanSet) GetSpans(access SpanAccess, scope SpanScope) []Span {
	return s.spans[access][scope]
}
// BoundarySpan returns a single span covering all the spans in the set for
// the given scope, across every access type.
func (s *SpanSet) BoundarySpan(scope SpanScope) roachpb.Span {
	var bound roachpb.Span
	for access := SpanAccess(0); access < NumSpanAccess; access++ {
		for _, sp := range s.GetSpans(access, scope) {
			if bound.Valid() {
				bound = bound.Combine(sp.Span)
			} else {
				// First valid span seeds the boundary.
				bound = sp.Span
			}
		}
	}
	return bound
}
// Intersects returns true iff the span set denoted by `other` has any
// overlapping spans with `s`, and that those spans overlap in access type.
// Note that timestamps associated with the spans in the spanset are not
// considered, only the span boundaries are checked.
func (s *SpanSet) Intersects(other *SpanSet) bool {
	for access := SpanAccess(0); access < NumSpanAccess; access++ {
		for scope := SpanScope(0); scope < NumSpanScope; scope++ {
			for _, sp := range other.GetSpans(access, scope) {
				// CheckAllowed succeeding implies an overlap in a
				// compatible access mode.
				if s.CheckAllowed(access, sp.Span) == nil {
					return true
				}
			}
		}
	}
	return false
}
// AssertAllowed calls CheckAllowed and fatals if the access is not allowed.
// Timestamps associated with the spans in the spanset are not considered,
// only the span boundaries are checked.
func (s *SpanSet) AssertAllowed(access SpanAccess, span roachpb.Span) {
	if err := s.CheckAllowed(access, span); err != nil {
		log.Fatalf(context.TODO(), "%v", err)
	}
}

// CheckAllowed returns an error if the access is not allowed over the given
// keyspan based on the collection of spans in the spanset. Timestamps
// associated with the spans in the spanset are not considered, only the span
// boundaries are checked.
//
// If the provided span contains only an (exclusive) EndKey and has a nil
// (inclusive) Key then Key is considered to be the key previous to EndKey,
// i.e. [,b) will be considered [b.Prev(),b).
//
// TODO(irfansharif): This does not currently work for spans that straddle
// across multiple added spans. Specifically a spanset with spans [a-c) and
// [b-d) added under read only and read write access modes respectively would
// fail at checking if read only access over the span [a-d) was requested. This
// is also a problem if the added spans were read only and the spanset wasn't
// already SortAndDedup-ed.
func (s *SpanSet) CheckAllowed(access SpanAccess, span roachpb.Span) error {
	// The check callback accepts every declared span unconditionally; only
	// boundary containment (in checkAllowed) decides the outcome.
	return s.checkAllowed(access, span, func(_ SpanAccess, _ Span) bool {
		return true
	})
}
// CheckAllowedAt is like CheckAllowed, except it returns an error if the access
// is not allowed over the given keyspan at the given timestamp.
func (s *SpanSet) CheckAllowedAt(
	access SpanAccess, span roachpb.Span, timestamp hlc.Timestamp,
) error {
	// A non-empty request timestamp marks an MVCC access.
	mvcc := !timestamp.IsEmpty()
	return s.checkAllowed(access, span, func(declAccess SpanAccess, declSpan Span) bool {
		declTimestamp := declSpan.Timestamp
		if declTimestamp.IsEmpty() {
			// When the span is declared as non-MVCC (i.e. with an empty
			// timestamp), it's equivalent to a read/write mutex where we
			// don't consider access timestamps.
			return true
		}
		switch declAccess {
		case SpanReadOnly:
			switch access {
			case SpanReadOnly:
				// Read spans acquired at a specific timestamp only allow reads
				// at that timestamp and below. Non-MVCC access is not allowed.
				return mvcc && timestamp.LessEq(declTimestamp)
			case SpanReadWrite:
				// NB: should not get here, see checkAllowed.
				panic("unexpected SpanReadWrite access")
			default:
				panic("unexpected span access")
			}
		case SpanReadWrite:
			switch access {
			case SpanReadOnly:
				// Write spans acquired at a specific timestamp allow reads at
				// any timestamp. Non-MVCC access is not allowed.
				return mvcc
			case SpanReadWrite:
				// Write spans acquired at a specific timestamp allow writes at
				// that timestamp or above. Non-MVCC access is not allowed.
				return mvcc && declTimestamp.LessEq(timestamp)
			default:
				panic("unexpected span access")
			}
		default:
			panic("unexpected span access")
		}
	})
}
// checkAllowed is the shared implementation behind CheckAllowed and
// CheckAllowedAt: it scans the declared spans for one that contains the
// requested span under an access mode at least as permissive as `access`,
// subject to the caller-provided check predicate.
func (s *SpanSet) checkAllowed(
	access SpanAccess, span roachpb.Span, check func(SpanAccess, Span) bool,
) error {
	scope := SpanGlobal
	if (span.Key != nil && keys.IsLocal(span.Key)) ||
		(span.EndKey != nil && keys.IsLocal(span.EndKey)) {
		scope = SpanLocal
	}
	// Higher-valued accesses imply lower-valued ones, so a declaration under
	// any access >= the requested one satisfies the request.
	for ac := access; ac < NumSpanAccess; ac++ {
		for _, cur := range s.spans[ac][scope] {
			if contains(cur.Span, span) && check(ac, cur) {
				return nil
			}
		}
	}
	return errors.Errorf("cannot %s undeclared span %s\ndeclared:\n%s\nstack:\n%s", access, span, s, debug.Stack())
}
// contains returns whether s1 contains s2. Unlike Span.Contains, this function
// supports spans with a nil start key and a non-nil end key (e.g. "[nil, c)").
// In this form, s2.Key (inclusive) is considered to be the previous key to
// s2.EndKey (exclusive).
func contains(s1, s2 roachpb.Span) bool {
	if s2.Key != nil {
		// The common case.
		return s1.Contains(s2)
	}
	// From here on s2 is "[nil, end)". The checks below are equivalent to
	// s1.Contains(roachpb.Span{Key: s2.EndKey.Prev()}).
	if s1.EndKey == nil {
		// s1 is a point span; it contains s2 only if it is exactly the key
		// preceding s2.EndKey.
		return s1.Key.IsPrev(s2.EndKey)
	}
	return s2.EndKey.Compare(s1.Key) > 0 && s2.EndKey.Compare(s1.EndKey) <= 0
}
// Validate returns an error if any span that has been added to the set is
// inverted, i.e. has an end key at or before its start key.
func (s *SpanSet) Validate() error {
	for access := SpanAccess(0); access < NumSpanAccess; access++ {
		for scope := SpanScope(0); scope < NumSpanScope; scope++ {
			for _, sp := range s.GetSpans(access, scope) {
				if len(sp.EndKey) > 0 && sp.Key.Compare(sp.EndKey) >= 0 {
					return errors.Errorf("inverted span %s %s", sp.Key, sp.EndKey)
				}
			}
		}
	}
	return nil
}
|
package presigner
import (
"context"
"time"
"github.com/Cloud-Foundations/golib/pkg/awsutil/presignauth"
"github.com/Cloud-Foundations/golib/pkg/log/nulllogger"
"github.com/aws/aws-sdk-go-v2/aws/arn"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/sts"
)
const (
presignedUrlLifetime = 15*time.Minute - 7*time.Second
)
// newPresigner builds a presignerT from params, filling in defaults: a null
// logger, an AWS config loaded from the environment/IMDS, an STS client and
// an STS presign client. It verifies credentials up front with a
// GetCallerIdentity call and records the caller's normalised ARN. When
// params.RefreshPolicy is RefreshAutomatically, a background goroutine keeps
// the presigned URL fresh.
func newPresigner(params Params) (*presignerT, error) {
	ctx := context.Background()
	if params.Logger == nil {
		params.Logger = nulllogger.New()
	}
	if params.StsClient == nil {
		if params.AwsConfig == nil {
			// Fall back to the default config chain, taking the region from
			// EC2 IMDS when not otherwise configured.
			awsConfig, err := config.LoadDefaultConfig(ctx,
				config.WithEC2IMDSRegion())
			if err != nil {
				return nil, err
			}
			params.AwsConfig = &awsConfig
		}
		params.StsClient = sts.NewFromConfig(*params.AwsConfig)
	}
	if params.StsPresignClient == nil {
		params.StsPresignClient = sts.NewPresignClient(params.StsClient)
	}
	// Probe the credentials and learn who we are.
	idOutput, err := params.StsClient.GetCallerIdentity(ctx,
		&sts.GetCallerIdentityInput{})
	if err != nil {
		return nil, err
	}
	parsedArn, err := arn.Parse(*idOutput.Arn)
	if err != nil {
		return nil, err
	}
	normalisedArn, err := presignauth.NormaliseARN(parsedArn)
	if err != nil {
		return nil, err
	}
	callerArn := normalisedArn.String()
	params.Logger.Debugf(0,
		"Account: %s, RawARN: %s, NormalisedARN: %s, UserId: %s\n",
		*idOutput.Account, *idOutput.Arn, callerArn, *idOutput.UserId)
	presigner := &presignerT{
		params:    params,
		callerArn: normalisedArn,
	}
	if params.RefreshPolicy == RefreshAutomatically {
		// NOTE(review): this goroutine runs for the lifetime of the process;
		// there is no shutdown hook.
		go presigner.refreshLoop(ctx)
	}
	return presigner, nil
}
// presignGetCallerIdentity returns a presigned GetCallerIdentity request,
// reusing a cached one while it is still within presignedUrlLifetime and
// generating a fresh one otherwise. Safe for concurrent use.
func (p *presignerT) presignGetCallerIdentity(ctx context.Context) (
	*v4.PresignedHTTPRequest, error) {
	if ctx == nil {
		ctx = context.TODO()
	}
	p.mutex.Lock()
	defer p.mutex.Unlock()
	if p.presignedRequest != nil {
		if time.Until(p.presignedExpiration) > 0 {
			// Cached request is still valid.
			return p.presignedRequest, nil
		}
		p.presignedRequest = nil
	}
	presignedReq, err := p.params.StsPresignClient.PresignGetCallerIdentity(ctx,
		&sts.GetCallerIdentityInput{})
	if err != nil {
		return nil, err
	}
	// Record our own expiry; presignedUrlLifetime is slightly under the 15
	// minute signature validity so we refresh before AWS rejects it.
	p.presignedExpiration = time.Now().Add(presignedUrlLifetime)
	p.presignedRequest = presignedReq
	p.params.Logger.Debugf(2, "presigned URL: %v\n", presignedReq.URL)
	return presignedReq, nil
}
// refreshLoop re-presigns the GetCallerIdentity request once per lifetime
// window, forever, logging (but not aborting on) errors. It never returns;
// ctx is passed through to the presign call but the loop itself does not
// observe cancellation.
func (p *presignerT) refreshLoop(ctx context.Context) {
	for ; ; time.Sleep(presignedUrlLifetime) {
		if _, err := p.presignGetCallerIdentity(ctx); err != nil {
			p.params.Logger.Println(err)
		}
	}
}
|
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package useragent_test
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"storj.io/common/useragent"
)
// TestEncodeEntries exercises useragent.EncodeEntries: first the rejection of
// illegal characters in product and version tokens, then a table of
// round-trip encodings covering versions, parenthesized comments (including
// nested parentheses), multiple products, and version strings with '+' build
// metadata.
func TestEncodeEntries(t *testing.T) {
	// invalid product: ')' is not allowed in a product token
	_, err := useragent.EncodeEntries([]useragent.Entry{
		{"Prod)uct", "", ""},
	})
	require.Error(t, err)
	// invalid version: '(' is not allowed in a version token
	_, err = useragent.EncodeEntries([]useragent.Entry{
		{"Product", "Vers(ion", ""},
	})
	require.Error(t, err)
	// Each test entry is {Product, Version, Comment}; an entry with only a
	// comment attaches that comment to the preceding product.
	type test struct {
		in  []useragent.Entry
		exp string
	}
	var tests = []test{{
		in: []useragent.Entry{
			{"Mozilla", "", ""},
		},
		exp: `Mozilla`,
	}, {
		in: []useragent.Entry{
			{"Mozilla", "5.0", ""},
		},
		exp: `Mozilla/5.0`,
	}, {
		in: []useragent.Entry{
			{"Mozilla", "5.0", "Linux; U; Android 4.4.3;"},
		},
		exp: `Mozilla/5.0 (Linux; U; Android 4.4.3;)`,
	}, {
		in: []useragent.Entry{
			{"Mozilla", "", "Linux; U; Android 4.4.3;"},
		},
		exp: `Mozilla (Linux; U; Android 4.4.3;)`,
	}, {
		in: []useragent.Entry{
			{"Mozilla", "5.0", ""},
			{"", "", "Linux; U; Android 4.4.3;"},
		},
		exp: `Mozilla/5.0 (Linux; U; Android 4.4.3;)`,
	}, {
		in: []useragent.Entry{
			{"Mozilla", "5.0", ""},
			{"", "", "Linux; (U); Android 4.4.3;"},
		},
		exp: `Mozilla/5.0 (Linux; (U); Android 4.4.3;)`,
	}, {
		in: []useragent.Entry{
			{"Mozilla", "5.0", ""},
			{"", "", "Linux; U; Android 4.4.3;"},
			{"Mobile", "", ""},
		},
		exp: `Mozilla/5.0 (Linux; U; Android 4.4.3;) Mobile`,
	}, {
		in: []useragent.Entry{
			{"Mozilla", "5.0", ""},
			{"", "", "Linux; U; Android 4.4.3;"},
			{"Mobile", "", ""},
			{"Safari", "534.30", ""},
		},
		exp: `Mozilla/5.0 (Linux; U; Android 4.4.3;) Mobile Safari/534.30`,
	}, {
		in: []useragent.Entry{
			{"storj.io-uplink", "v0.0.1", ""},
		},
		exp: `storj.io-uplink/v0.0.1`,
	}, {
		in: []useragent.Entry{
			{"storj.io-uplink", "v0.0.1", ""},
			{"storj.io-drpc", "v5.0.0+123+123", ""},
		},
		exp: `storj.io-uplink/v0.0.1 storj.io-drpc/v5.0.0+123+123`,
	}, {
		in: []useragent.Entry{
			{"Mozilla", "5.0", ""},
			{"", "", "Linux; U; Android 4.4.3;"},
			{"AppleWebkit", "534.30", ""},
			{"", "", "KHTML, like Gecko"},
			{"Version", "4.0", ""},
			{"Mobile", "", ""},
			{"Safari", "534.30", ""},
			{"Opera", "", ""},
			{"News", "1.0", ""},
		},
		exp: `Mozilla/5.0 (Linux; U; Android 4.4.3;) AppleWebkit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30 Opera News/1.0`,
	}, {
		in: []useragent.Entry{
			{"Blocknify", "", ""},
			{"Uplink", "1.4.6-0.20210201122710-48b82ce14a37", ""},
		},
		exp: `Blocknify Uplink/1.4.6-0.20210201122710-48b82ce14a37`,
	}}
	for _, test := range tests {
		encoded, err := useragent.EncodeEntries(test.in)
		require.NoError(t, err)
		assert.Equal(t, test.exp, string(encoded))
	}
}
|
package installer
import (
"fmt"
"io/fs"
"io/ioutil"
"net/http"
"os"
"path"
"github.com/bernardolm/iot/supervisor-go/config"
"github.com/gosimple/slug"
log "github.com/sirupsen/logrus"
)
const (
	// permission is the file mode for the bin directory and installed
	// binaries; 0744 keeps the executable bit for the owner.
	permission fs.FileMode = 0744
)

// Install downloads the binary for program p into ./bin (creating the
// directory if needed) and returns the path it was installed to. The file is
// named after a slug of the program command.
func Install(p config.Program) (string, error) {
	pPath := "./bin"
	if _, err := os.Stat(pPath); os.IsNotExist(err) {
		if err := os.Mkdir(pPath, permission); err != nil {
			fmt.Printf("%#v", err)
			return "", err
		}
	}
	pPath = path.Join(pPath, slug.Make(p.Command))
	log.Debugf("installing in %s\n", pPath)
	// Download before touching the destination, so a failed fetch does not
	// leave behind an empty or truncated binary. (The original created and
	// truncated the file first, and also held it open while ioutil.WriteFile
	// opened the same path a second time.)
	resp, err := http.Get(p.URL)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return "", fmt.Errorf("downloading %s: unexpected status %s", p.URL, resp.Status)
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	if err := ioutil.WriteFile(pPath, body, permission); err != nil {
		return "", err
	}
	// WriteFile only applies the mode when it creates the file. The original
	// pre-created the file with os.Create (mode 0666), so the intended 0744
	// was never applied and the installed binary was not executable. Chmod
	// explicitly to cover both fresh installs and reinstalls.
	if err := os.Chmod(pPath, permission); err != nil {
		return "", err
	}
	return pPath, nil
}
|
package openrtb
// SeatBid type encapsulates a set of bids submitted on behalf of a buyer, or a bidder seat, via
// the containing bid response object.
// See OpenRTB 2.3.1 Sec 4.2.2.
//go:generate easyjson $GOFILE
//easyjson:json
type SeatBid struct {
	// Bids holds the individual bid objects for this seat.
	Bids []*Bid `json:"bid,omitempty"`
	// Seat identifies the bidder seat on whose behalf the bids are made.
	Seat string `json:"seat,omitempty"`
	// Group signals whether the bids must be won or lost as a group
	// (non-zero) or can win individually.
	Group int `json:"group,omitempty"`
}
// Validate validates every bid in the seat, returning the first validation
// error encountered, or nil when all bids are valid.
func (b SeatBid) Validate() error {
	for i := range b.Bids {
		if err := b.Bids[i].Validate(); err != nil {
			return err
		}
	}
	return nil
}
// Copy returns a pointer to a deep copy of the seatbid object; each bid is
// copied individually and the Bids slice is always non-nil.
func (b *SeatBid) Copy() *SeatBid {
	dup := *b
	dup.Bids = make([]*Bid, 0, len(b.Bids))
	for _, bid := range b.Bids {
		dup.Bids = append(dup.Bids, bid.Copy())
	}
	return &dup
}
|
package cmd
import (
"fmt"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/fanaticscripter/EggContractor/db"
"github.com/fanaticscripter/EggContractor/util"
)
// _peekedThreshold bounds how far back "recently peeked" reaches.
const _peekedThreshold = 7 * 24 * time.Hour

var _peekedCommand = &cobra.Command{
	Use:   "peeked [<contract-id>]",
	Short: "Print list of recently peeked coops",
	Long: `Print list of recently peeked coops

Recently currently means "within one week". If a contract-id is specified,
only coops for that contract is listed.

Note that the /peeked/ page of the web client is much richer in information.`,
	Args:    cobra.MaximumNArgs(1),
	PreRunE: subcommandPreRunE,
	RunE: func(cmd *cobra.Command, args []string) error {
		// Optional positional arg narrows output to one contract.
		selectedContractId := ""
		if len(args) > 0 {
			selectedContractId = strings.ToLower(args[0])
		}
		contractIds, groups, err := db.GetPeekedGroupedByContract(time.Now().Add(-_peekedThreshold))
		if err != nil {
			return err
		}
		if selectedContractId != "" {
			_, exists := groups[selectedContractId]
			if !exists {
				log.Warnf("You haven't peeked any coop for contract %s recently.", selectedContractId)
				return nil
			}
			contractIds = []string{selectedContractId}
		}
		// Two header rows: labels and dashed separators.
		table := [][]string{
			{"Contract ID", "Code", "Spots", "Laid", "Rate/Required", "Time left"},
			{"-----------", "----", "-----", "----", "-------------", "---------"},
		}
		for groupIdx, contractId := range contractIds {
			for i, p := range groups[contractId] {
				contractIdField := p.ContractId
				if i != 0 {
					// Since rows are grouped by contract, we can merge the Contract ID cells.
					contractIdField = ""
				}
				spotsField := fmt.Sprintf("%d", p.Openings)
				laidField := util.Numfmt(p.EggsLaid)
				// Show "rate / required" when a required rate is known,
				// otherwise just the rate.
				rateVsRequiredField := util.Numfmt(p.EggsPerHour)
				if p.RequiredEggsPerHour != 0 {
					rateVsRequiredField = fmt.Sprintf("%s / %s",
						util.Numfmt(p.EggsPerHour), util.Numfmt(p.RequiredEggsPerHour))
				}
				timeLeftField := util.FormatDurationNonNegative(p.TimeLeft)
				table = append(table, []string{
					contractIdField,
					p.Code,
					spotsField,
					laidField,
					rateVsRequiredField,
					timeLeftField,
				})
			}
			// Separator row between contract groups (not after the last one).
			if groupIdx != len(contractIds)-1 {
				table = append(table, []string{
					"-----------", "----", "-----", "----", "-------------", "---------",
				})
			}
		}
		util.PrintTable(table)
		fmt.Println()
		log.Warn("the /peeked/ page of the web client is much richer in information")
		return nil
	},
}
// init registers the peeked subcommand on the root command.
func init() {
	_rootCmd.AddCommand(_peekedCommand)
}
|
// Copyright 2014 mqant Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gate
import (
"github.com/liangdas/mqant/log"
"github.com/liangdas/mqant/network"
"bufio"
"runtime"
"fmt"
"strings"
"encoding/json"
"github.com/liangdas/mqant/conf"
"github.com/liangdas/mqant/utils/uuid"
"github.com/liangdas/mqant/gate/mqtt"
"time"
)
// resultInfo is the JSON envelope sent back to the client for a routed call.
type resultInfo struct {
	Error  string      // error message; empty means the request succeeded
	Result interface{} // result payload
}

// agent represents one connected MQTT client on the gate.
type agent struct {
	Agent
	session *Session
	conn    network.Conn
	r       *bufio.Reader
	w       *bufio.Writer
	gate    *Gate
	client  *mqtt.Client
	isclose bool
	last_storage_heartbeat_data_time int64 // unix time (seconds) of the last storage heartbeat sent
}

// IsClosed reports whether the connection has been closed.
func (a *agent) IsClosed()(bool){
	return a.isclose
}

// GetSession returns the session bound to this connection.
func (a *agent) GetSession()(*Session){
	return a.session
}
// Run performs the MQTT handshake on the freshly accepted connection, builds
// the session, announces the connect event, acks the client, and then blocks
// in the client's listen loop until the connection ends. Any panic is caught
// and logged, and the connection is always closed on exit.
func (a *agent) Run() (err error){
	defer func() {
		if err := recover(); err != nil {
			buff := make([]byte, 4096)
			runtime.Stack(buff, false)
			log.Error("conn.serve() panic(%v)\n info:%s", err, string(buff))
		}
		a.Close()
	}()
	// Handshake: the first packet must be an MQTT CONNECT.
	var pack *mqtt.Pack
	pack, err = mqtt.ReadPack(a.r)
	if err != nil {
		log.Error("Read login pack error",err)
		return
	}
	if pack.GetType() != mqtt.CONNECT {
		log.Error("Recive login pack's type error:%v \n", pack.GetType())
		return
	}
	info, ok := (pack.GetVariable()).(*mqtt.Connect)
	if !ok {
		log.Error("It's not a mqtt connection package.")
		return
	}
	//id := info.GetUserName()
	//psw := info.GetPassword()
	//log.Debug("Read login pack %s %s %s %s",*id,*psw,info.GetProtocol(),info.GetVersion())
	c := mqtt.NewClient(conf.Conf.Mqtt,a,a.r, a.w, a.conn,info.GetKeepAlive())
	a.client=c
	// Fresh session: random id, this gate's server id, empty settings.
	a.session=NewSession(a.gate.App,map[string]interface{}{
		"Sessionid" :Get_uuid(),
		"Serverid"  :a.gate.GetServerId(),
		"Settings"  :make(map[string]interface{}),
	})
	a.gate.agentLearner.Connect(a) // publish the connect event
	// Reply to the client with CONNACK.
	err = mqtt.WritePack(mqtt.GetConnAckPack(0), a.w)
	if err != nil {
		return
	}
	c.Listen_loop() // listen until the connection is interrupted
	return nil
}
// OnClose marks the agent as closed and broadcasts the disconnect event to
// the gate's agent learner.
func (a *agent) OnClose() error {
	a.isclose = true
	a.gate.agentLearner.DisConnect(a) // publish the disconnect event
	return nil
}
// OnRecover handles one inbound MQTT packet. PUBLISH packets are routed to a
// backend service based on the topic "[serverType]/[handler]" (optionally
// "/[msgid]" for request/response), with JSON bodies; PINGREQ packets only
// refresh the storage heartbeat. Panics are caught and logged.
func (a *agent) OnRecover(pack *mqtt.Pack){
	defer func() {
		if r := recover(); r != nil {
			log.Error("Gate OnRecover error [%s]",r)
		}
	}()
	// toResult sends a JSON {Error, Result} envelope back on the topic.
	toResult:= func(a *agent,Topic string,Result interface{},Error string) (err error){
		r:=&resultInfo{
			Error:Error,
			Result:Result,
		}
		b,err:=json.Marshal(r)
		if err==nil{
			a.WriteMsg(Topic,b)
		}
		return
	}
	// Route by packet type.
	switch pack.GetType() {
	case mqtt.PUBLISH:
		pub := pack.GetVariable().(*mqtt.Publish)
		topics:=strings.Split(*pub.GetTopic(),"/")
		var msgid string
		if len(topics)<2{
			log.Error("Topic must be [serverType]/[handler]|[serverType]/[handler]/[msgid]")
			return
		}else if len(topics)==3{
			msgid=topics[2]
		}
		var obj interface{} // var obj map[string]interface{}
		err:=json.Unmarshal(pub.GetMsg(), &obj)
		if err!=nil{
			if msgid!=""{
				toResult(a,*pub.GetTopic(),nil,"body must be JSON format")
			}
			return
		}
		serverSession,err:=a.gate.GetRouteServersByType(topics[0])
		if err!=nil{
			if msgid!=""{
				toResult(a,*pub.GetTopic(),nil,fmt.Sprintf("Service(type:%s) not found",topics[0]))
			}
			return
		}
		// Only handlers prefixed with "HD_" are callable from clients.
		startsWith := strings.HasPrefix(topics[1], "HD_")
		if !startsWith{
			if msgid!=""{
				toResult(a,*pub.GetTopic(),nil,fmt.Sprintf("Method(%s) must begin with 'HD_'",topics[1]))
			}
			return
		}
		// NOTE(review): unlike the error paths above, the success reply is
		// sent even when no msgid was supplied — confirm this is intended.
		result,e:=serverSession.Call(topics[1],a.GetSession().ExportMap(),obj)
		toResult(a,*pub.GetTopic(),result,e)
		if a.GetSession().Userid!=""{
			// This connection is bound to a Userid.
			interval:=time.Now().UnixNano()/1000000/1000-a.last_storage_heartbeat_data_time // in seconds
			if interval>a.gate.MinStorageHeartbeat{
				// Enough time has passed since the last storage heartbeat.
				if a.gate.storage!=nil{
					a.gate.storage.Heartbeat(a.GetSession().Userid)
					a.last_storage_heartbeat_data_time=time.Now().UnixNano()/1000000/1000
				}
			}
		}
	case mqtt.PINGREQ:
		// Heartbeat packet from the client.
		if a.GetSession().Userid!=""{
			// This connection is bound to a Userid.
			interval:=time.Now().UnixNano()/1000000/1000-a.last_storage_heartbeat_data_time // in seconds
			if interval>a.gate.MinStorageHeartbeat{
				// Enough time has passed since the last storage heartbeat.
				if a.gate.storage!=nil{
					a.gate.storage.Heartbeat(a.GetSession().Userid)
					a.last_storage_heartbeat_data_time=time.Now().UnixNano()/1000000/1000
				}
			}
		}
	}
}
// WriteMsg publishes body to the connected client on the given topic.
func (a *agent) WriteMsg(topic string, body []byte) error {
	return a.client.WriteMsg(topic, body)
}

// Close shuts down the underlying network connection.
func (a *agent) Close() {
	a.conn.Close()
}

// Destroy tears down the underlying network connection immediately.
func (a *agent) Destroy() {
	a.conn.Destroy()
}

// Get_uuid returns a freshly generated random identifier in hex form.
func Get_uuid() string {
	return uuid.Rand().Hex()
}
|
// Routing based on the gorilla/mux router
package gorilla
import (
"crypto/sha1"
"fmt"
"html/template"
"io"
"log"
"net/http"
"os"
"path/filepath"
//"strconv"
"strings"
//"sort"
"time"
"github.com/gorilla/mux"
)
import (
"github.com/upper/db/v4"
"github.com/upper/db/v4/adapter/cockroachdb"
)
// Serve is the package's HTTP handler, wired up in init.
var Serve http.Handler

//var partnoinput string

// NOTE(review): package-level mutable state shared by handlers — findProduct
// writes partno while concurrent requests read it, with no synchronization;
// this is a data race under load. products is populated once in init and
// only read afterwards.
var partno Product
var category string
var products []Product
// init opens the CockroachDB session, (re)creates and seeds the products
// table, caches all products in memory, and builds the router assigned to
// Serve. NOTE(review): this is heavy, side-effecting work for an init
// function (it log.Fatals on DB errors); the session is closed when init
// returns, which is fine only because all DB access happens here.
func init() {
	// /* cockroachdb stuff using upper/db database access layer */ //
	fmt.Printf("Initializing cockroachDB connection\n")
	sess, err := cockroachdb.Open(settings) //establish the session
	if err != nil {
		log.Fatal("cockroachdb.Open: ", err)
	}
	defer sess.Close()
	//test actions on database
	createTables(sess)
	deleteAll(sess)
	createTestProd(sess)
	// Find().All() maps all the records from the products collection.
	productsCol := Products(sess)
	products = []Product{}
	err = productsCol.Find().All(&products)
	if err != nil {
		log.Fatal("productsCol.Find: ", err)
	}
	//router := NewRouter()
	r := mux.NewRouter()
	r.PathPrefix("/gallery/").Handler(http.StripPrefix("/gallery/", http.FileServer(http.Dir("./gallery"))))
	r.HandleFunc("/", timeFunc).Methods("GET")
	r.HandleFunc("/time", timeFunc).Methods("GET")
	// r.HandleFunc("/gallery", galleryFunc).Methods("GET")
	r.HandleFunc("/products", findProducts).Methods("GET")
	r.HandleFunc("/product/{slug}", findProduct).Methods("GET")
	//r.HandleFunc("/gallery/{slug}", ).Methods("GET")
	Serve = r
}
// /* timepage */ //
func monthDayYear(t time.Time) string {
return t.Format("January 2, 2006 15:04:05")
}
// timeFunc renders the current time using the time.gohtml template.
func timeFunc(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html")
	var fm = template.FuncMap{
		"fdateMDY": monthDayYear,
	}
	tp1 := template.Must(template.New("").Funcs(fm).ParseFiles("time.gohtml"))
	if err := tp1.ExecuteTemplate(w, "time.gohtml", time.Now()); err != nil {
		// Fix: the original called log.Fatalln here, which terminates the
		// entire server process on any render/write error — e.g. a client
		// disconnecting mid-response. Log and carry on instead.
		log.Println(err)
	}
}
// /* products page */ //

// findProducts renders the product list page from the in-memory products
// slice loaded at init time.
func findProducts(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html")
	tpl0 := template.Must(template.New("").ParseFiles("products.gohtml"))
	// Fix: the original discarded ExecuteTemplate's error; log it so render
	// failures are visible.
	if err := tpl0.ExecuteTemplate(w, "products.gohtml", products); err != nil {
		log.Println(err)
	}
	//fmt.Fprint(w, "products\n")
}
// /* individual product page */ //

// findProduct renders the page for the product whose PartNo matches the
// {slug} path variable, or a "not found" message.
func findProduct(w http.ResponseWriter, r *http.Request) {
	slug := mux.Vars(r)["slug"]
	// Fix: the original stored the match in the package-level `partno`
	// variable. That global kept the previous request's value, so a miss on
	// a later request still rendered the stale product, and concurrent
	// requests raced on it. Use a request-local variable instead.
	var match Product
	for i := range products {
		if products[i].PartNo == slug {
			match = products[i]
			break
			// Found!
		}
	}
	if match.Name == "" {
		fmt.Fprint(w, "No product found for partno:\n", slug)
		return
	}
	w.Header().Set("Content-Type", "text/html")
	tpl1 := template.Must(template.New("").ParseFiles("product.gohtml"))
	// NOTE(review): this write before the template looks like a debug
	// leftover (it prepends plain text to the HTML page); kept for output
	// compatibility — confirm and remove.
	fmt.Fprint(w, "products\n", match.Name)
	if err := tpl1.ExecuteTemplate(w, "product.gohtml", match); err != nil {
		log.Println(err)
	}
}
// /* Images */ //
/*
var tpl *template.Template
func init() {
tpl = template.Must(template.ParseGlob("*.gohtml"))
}
*/
/*
func galleryFunc(w http.ResponseWriter, req *http.Request, filetoserve ) {
}
*/
// index handles the gallery page: on POST it stores the uploaded "nf" file
// under a SHA1-derived name in ./gallery, then (always) renders the list of
// gallery entries.
func index(w http.ResponseWriter, req *http.Request) {
	if req.Method == http.MethodPost {
		mf, fh, err := req.FormFile("nf")
		if err != nil {
			fmt.Println(err)
		} else {
			// Only defer Close once we know mf is non-nil; the original
			// deferred on a nil file after a FormFile error (panic).
			defer mf.Close()
			// filepath.Ext is safer than splitting on ".": it handles names
			// with no dot or with several dots.
			ext := strings.TrimPrefix(filepath.Ext(fh.Filename), ".")
			h := sha1.New()
			if _, err := io.Copy(h, mf); err != nil {
				fmt.Println(err)
			}
			fname := fmt.Sprintf("%x", h.Sum(nil)) + "." + ext
			wd, err := os.Getwd()
			if err != nil {
				fmt.Println(err)
			}
			path := filepath.Join(wd, "gallery", fname)
			nf, err := os.Create(path)
			if err != nil {
				fmt.Println(err)
			} else {
				defer nf.Close()
				// Rewind: the hashing pass above consumed the stream.
				if _, err := mf.Seek(0, 0); err != nil {
					fmt.Println(err)
				}
				if _, err := io.Copy(nf, mf); err != nil {
					fmt.Println(err)
				}
			}
		}
	}
	file, err := os.Open("gallery")
	if err != nil {
		log.Fatalf("failed opening directory: %s", err)
	}
	defer file.Close()
	tpl := template.Must(template.ParseGlob("index.gohtml"))
	list, err := file.Readdirnames(0) // 0 to read all files and folders
	if err != nil {
		fmt.Println(err)
	}
	if err := tpl.ExecuteTemplate(w, "index.gohtml", list); err != nil {
		log.Println("index: execute template:", err)
	}
}
/*
*/
/*
func main() {
router := NewRouter()
if err := http.ListenAndServe(":8080", router); err != nil {
log.Fatal("ListenAndServe Error: ", err)
}
}
*/
// NewRouter builds the application router and mounts a /static/ file
// server (backed by ./static relative to the working directory) on it.
func NewRouter() *mux.Router {
	r := mux.NewRouter().StrictSlash(true)
	const staticDir = "/static/"
	fileServer := http.FileServer(http.Dir("." + staticDir))
	r.PathPrefix(staticDir).Handler(http.StripPrefix(staticDir, fileServer))
	return r
}
//func handleGallery(w http.ResponseWriter, r *http.Request) {
// http.Handle("/gallery/", http.StripPrefix("/gallery", http.FileServer(http.Dir("./gallery"))))
//}
// /* database stuff */ //

// err is a shared package-level error holder.
// NOTE(review): package-level error state is racy and easy to clobber;
// prefer locals — kept because other code in this file assigns to it.
var err error

//var sitedata basedata.

// settings holds the CockroachDB connection parameters for the "product"
// database (secure mode: client TLS certs under ./certs).
var settings = cockroachdb.ConnectionURL{
	Host:     "localhost",
	Database: "product",
	User:     "madmin",
	Options: map[string]string{
		// Secure node.
		"sslrootcert": "certs/ca.crt",
		"sslkey":      "certs/client.madmin.key",
		"sslcert":     "certs/client.madmin.crt",
	},
}
// Products is a handy way to represent a collection: it returns the
// db.Store bound to the "products" table on the given session.
func Products(sess db.Session) db.Store {
	const collection = "products"
	return sess.Collection(collection)
}
// Product is used to represent a single record in the "products" table.
type Product struct {
ID uint64 `db:"ID,omitempty" json:"ID,omitempty"`
Image1 *string `db:"image1,omitempty" json:"image1,omitempty"`
Image2 *string `db:"image2,omitempty" json:"image2,omitempty"`
Image3 *string `db:"image3,omitempty" json:"image3,omitempty"`
Thumb *string `db:"thumb,omitempty" json:"thumb,omitempty"`
Name string `db:"name" json:"name"`
PartNo string `db:"partno" json:"partno"`
MfgPartNo *string `db:"mfgpartno,omitempty" json:"mfgpartno,omitempty"`
MfgName *string `db:"mfgname,omitempty" json:"mfgname,omitempty"`
Qty int64 `db:"quantity" json:"quantity"`
UnlimitQty *bool `db:"unlimitqty,omitempty" json:"unlimitqty,omitempty"`
Enable *bool `db:"enable,omitempty" json:"enable,omitempty"`
Price float64 `db:"price" json:"price"`
Msrp *float64 `db:"msrp,omitempty" json:"msrp,omitempty"`
Cost *float64 `db:"cost,omitempty" json:"cost,omitempty"`
//Sold int64 `db:"soldqty,omitempty" json:"soldqty,omitempty"`
MinOrder *int64 `db:"minorder" json:"minorder"`
MaxOrder *int64 `db:"maxorder,omitempty" json:"maxorder,omitempty"`
Location *string `db:"location" json:"location"`
Category *string `db:"category" json:"category"`
Type *string `db:"type,omitempty" json:"type,omitempty"`
PackageType *string `db:"packagetype,omitempty" json:"packagetype,omitempty"`
Technology *string `db:"technology,omitempty" json:"technology,omitempty"`
Value *float64 `db:"value,omitempty" json:"value,omitempty"`
ValUnit *string `db:"valunit,omitempty" json:"valunit,omitempty"`
VoltsRating *float64 `db:"voltsrating,omitempty" json:"voltsrating,omitempty"`
AmpsRating *float64 `db:"ampsrating,omitempty" json:"ampsrating,omitempty"`
WattsRating *float64 `db:"wattsrating,omitempty" json:"wattsrating,omitempty"`
Description1 *string `db:"description1,omitempty" json:"description1,omitempty"`
Description2 *string `db:"description2,omitempty" json:"description2,omitempty"`
Color1 *string `db:"color1,omitempty" json:"color1,omitempty"`
Color2 *string `db:"color2,omitempty" json:"color2,omitempty"`
Sourceinfo *string `db:"sourceinfo,omitempty" json:"sourceinfo,omitempty"`
Datasheet *string `db:"datasheet,omitempty" json:"datasheet,omitempty"`
Docs *string `db:"docs,omitempty" json:"docs,omitempty"`
Reference *string `db:"reference,omitempty" json:"reference,omitempty"`
Attributes *string `db:"attributes,omitempty" json:"attributes,omitempty"`
Condition *string `db:"condition,omitempty" json:"condition,omitempty"`
Note *string `db:"note,omitempty" json:"note,omitempty"`
Warning *string `db:"warning,omitempty" json:"warning,omitempty"`
Length *float64 `db:"length,omitempty,omitempty" json:"length,omitempty"`
Width *float64 `db:"width,omitempty" json:"width,omitempty"`
Height *float64 `db:"height,omitempty" json:"height,omitempty"`
WeightLb *float64 `db:"weightlb,omitempty" json:"weightlb,omitempty"`
WeightOz *float64 `db:"weightoz,omitempty" json:"weightoz,omitempty"`
MetaTitle *string `db:"metatitle,omitempty" json:"metatitle,omitempty"`
MetaDesc *string `db:"metadesc,omitempty" json:"metadesc,omitempty"`
MetaKeywords *string `db:"metakeywords,omitempty" json:"metakeywords,omitempty"`
//todo: add extra control fields
}
// Productss is a slice of Product records as loaded from the database.
// NOTE(review): name looks like a typo for "Products", but that identifier
// is taken by the collection helper; kept for compatibility.
type Productss []Product
//var Collection1 string

// Collection is required in order to create a relation between the Product
// struct and the "products" table.

// Store binds a Product record to its backing collection so the DB layer
// can map the struct to the "products" table.
func (a *Product) Store(sess db.Session) db.Store {
	return Products(sess)
}
// createTables creates all the tables that are necessary to run this example.
// The DDL is idempotent (CREATE TABLE IF NOT EXISTS), so re-running it on an
// already-initialised database is harmless.
func createTables(sess db.Session) error {
	fmt.Printf("Creating 'products' table\n")
	// The raw-string literal below is sent verbatim to the server; its exact
	// bytes (including line breaks) are part of program behaviour.
	_, err := sess.SQL().Exec(`
CREATE TABLE IF NOT EXISTS products (
ID SERIAL PRIMARY KEY,
image1 STRING,
image2 STRING,
image3 STRING,
thumb STRING,
name STRING,
partno STRING,
mfgpartno STRING,
mfgname STRING,
quantity INT,
unlimitqty BOOL,
enable BOOL,
price FLOAT,
msrp FLOAT,
cost FLOAT,
minorder INT,
maxorder INT,
location STRING,
category STRING,
type STRING,
packagetype STRING,
technology STRING,
value FLOAT,
valunit STRING,
voltsrating FLOAT,
ampsrating FLOAT,
wattsrating FLOAT,
description1 STRING,
description2 STRING,
color1 STRING,
color2 STRING,
sourceinfo STRING,
datasheet STRING,
docs STRING,
reference STRING,
attributes STRING,
condition STRING,
note STRING,
warning STRING,
length FLOAT,
width FLOAT,
height FLOAT,
weightlb FLOAT,
weightoz FLOAT,
metatitle STRING,
metadesc STRING,
metakeywords STRING
)
`)
	if err != nil {
		return err
	}
	return nil
}
///* database test stuff *///

// deleteAll truncates the products collection (test helper); it aborts the
// process if the truncate fails.
func deleteAll(sess db.Session) {
	fmt.Printf("Clearing tables\n")
	//clear tables ; testing
	if err := Products(sess).Truncate(); err != nil {
		log.Fatal("Truncate: ", err)
	}
}
// Scratch package-level holders used by createTestProd. desc/descp are
// assigned there; img/imgp were shadowed by ":=" locals in the original
// createTestProd and so were never populated at package level — TODO
// confirm nothing else reads them.
var desc string
var descp *string
var img string
var imgp *string
// createTestProd inserts two fixture products ("dummy" and "dummy2") into
// the products collection; it aborts the process on insert failure.
func createTestProd(sess db.Session) {
	fmt.Printf("Creating test product 'dummy'\n")
	desc = "test entry to database"
	descp = &desc
	// Plain assignment (not ":=") so the package-level img/imgp are set,
	// matching how desc/descp are handled above; the original ":=" shadowed
	// the globals with locals and left them empty.
	img = "gallery/test.jpg"
	imgp = &img
	product1 := Product{Name: "dummy", PartNo: "test", Description1: descp, Price: 1.00, Image1: imgp, Qty: 10}
	err := Products(sess).InsertReturning(&product1)
	if err != nil {
		log.Fatal("sess.Save: ", err)
	}
	fmt.Printf("Creating second test product 'dummy2'\n")
	product1 = Product{Name: "dummy2", PartNo: "test1", Description1: descp, Price: 1.00}
	err = Products(sess).InsertReturning(&product1)
	if err != nil {
		log.Fatal("sess.Save: ", err)
	}
}
|
package server
import (
"github.com/yacen/gong/context"
"net/http"
)
// Server wraps an http.Server together with the middleware chain that will
// handle its requests.
type Server struct {
	server      *http.Server
	middlewares []Middleware
}

// MFun is a middleware-style function type (context plus continuation).
// NOTE(review): appears unused in this file — confirm before removing.
type MFun func(ctx *context.Context, next MFun)
// Use appends a function middleware to the chain and returns the server
// for fluent chaining.
func (s *Server) Use(f MiddlewareFunc) *Server {
	mw := &FunctionMiddleware{Fn: f}
	s.middlewares = append(s.middlewares, mw)
	return s
}
// Listen installs the middleware chain as the HTTP handler and starts the
// underlying server; like http.Server.ListenAndServe it blocks until the
// server stops.
func (s *Server) Listen() error {
	s.server.Handler = &serverHandler{middlewares: s.middlewares}
	return s.server.ListenAndServe()
}
|
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"kto/rpcclient"
"kto/rpcclient/message"
"kto/transaction"
"kto/types"
"kto/until"
"log"
"os"
"runtime"
"sync"
"time"
"github.com/BurntSushi/toml"
"google.golang.org/grpc"
)
// Hard-coded coinbase credentials used by the stress-test scenarios below.
const (
	coinbaseAddr = "Kto9sFhbjDdjEHvcdH6n9dtQws1m4ptsAWAy7DhqGdrUFai"
	coinbasePriv = "5BWVgtMPPUPFuCHssYhXxFx2xVfQRTkB1EjKHKu1B1KxdVxD5cswDEdqiko3PjUPFPGfePoKxdfzHvv4YXRCYNp2"
)

// Client bundles the gRPC stub, a shared context, and the working set of
// test accounts (loaded from JSON or generated on demand).
type Client struct {
	client      message.GreeterClient
	ctx         context.Context
	addressList []userInfo
}
// main parses the CLI flags of the RPC stress-test client, connects to the
// node, and runs the currently enabled scenario (sending one offline signed
// transaction). The other scenarios are kept below, commented out, to be
// re-enabled by hand.
func main() {
	addr := flag.String("a", "106.12.9.134:8544", "host and post")
	num := flag.Int("n", 100, "每秒的次数")
	sleeptime := flag.Uint("t", 2, "sleep time")
	symbol := flag.String("symbol", "otk", "代币名称")
	addrFileName := flag.String("addrfile", "", "address json file")
	flag.Parse()
	// Echo the effective configuration.
	fmt.Printf("\taddress\t%s\n", *addr)
	fmt.Printf("\tnum\t%d\n", *num)
	fmt.Printf("\tsymbol\t%s\n", *symbol)
	fmt.Printf("\taddrfile\t%s\n", *addrFileName)
	fmt.Printf("\tsleeptime\t%d\n", *sleeptime)
	runtime.GOMAXPROCS(runtime.NumCPU())
	//loadCfg()
	client, err := newClient(*addr, *addrFileName)
	if err != nil {
		rpcclient.Error("fail to new client:", err)
		return
	}
	/* offline signed transaction */
	err = client.sendSignedTx()
	fmt.Println(err)
	//client.createPriv()
	/* token scenarios */
	// balance := client.getTokenBalance("Kto9sFhbjDdjEHvcdH6n9dtQws1m4ptsAWAy7DhqGdrUFai", *symbol)
	// fmt.Printf("balance:%d\n", balance)
	// tokenTotal := client.allTokenBalance(*symbol)
	// fmt.Printf("%s的总量为:%d\n", *symbol, tokenTotal)
	// _, _, err = client.mintToken(coinbaseAddr, coinbasePriv, *symbol, 100000, 1000000000000000, 500001)
	// if err != nil {
	// rpcclient.Error("client.mintToken failed", err)
	// return
	// }
	// time.Sleep(2 * time.Second)
	// err = client.tokenCycleTransaction(*symbol, 500001, *num)
	// if err != nil {
	// rpcclient.Error("client.tokenCycleTransaction failed", err)
	// return
	// }
	// bulk qtj transfer test
	// var total int
	// if *num <= 100 {
	// total = *num * 5
	// } else {
	// total = *num * 2
	// }
	// fmt.Printf("地址:%s,账户个数:%d,次数:%d,睡眠时间:%d\n", *addr, total, *num, *sleeptime)
	// befor := time.Now()
	// client.fastTx(total)
	// fmt.Println(time.Now().Sub(befor))
	// client.sendto100(*num, *sleeptime)
	// total balance across all addresses
	//client.getbalance()
	// if err := QTJTEST(ctx, client); err != nil {
	// rpcclient.Error(err)
	// }
	//KtoFT6eP7iLRTBiW7vjzUfvrj8VdxvniVnUnPW4nvzcXicM",
	//"priv":"2W3SQHGqGrc5ikBXSKxHm4JbHEWFNFWy6TRpmXPEYpH4CMw41ktjPRfWERChMmJ75hgjpN6Q57iM922Tq2Xwez4m"
	// ===============================================================
	// reqData := &message.ReqTransaction{
	// From: "Kto9sFhbjDdjEHvcdH6n9dtQws1m4ptsAWAy7DhqGdrUFai",
	// To: "KtoFT6eP7iLRTBiW7vjzUfvrj8VdxvniVnUnPW4nvzcXicM",
	// Amount: 100000000000000000,
	// Nonce: nonce.Nonce,
	// Priv: "5BWVgtMPPUPFuCHssYhXxFx2xVfQRTkB1EjKHKu1B1KxdVxD5cswDEdqiko3PjUPFPGfePoKxdfzHvv4YXRCYNp2",
	// }
	// respData, err := client.SendTransaction(ctx, reqData)
	// if err != nil {
	// fmt.Println("err")
	// return
	// }
	// sendEachOther(ctx, client)
	// keyPairTest()
	//getBalance(ctx, client)
	//largeSendTx(ctx, client)
	//createKerPair(ctx, client)
}
// config mirrors ./output/rpctest.toml: a map of numbered entries, each
// holding an address and its private key.
var config struct {
	AddrMap map[string]struct {
		Addr    string
		Private string
	}
}
// loadCfg reads ./output/rpctest.toml into the package-level config and
// exits the process when it cannot be decoded.
func loadCfg() {
	if _, err := toml.DecodeFile("./output/rpctest.toml", &config); err != nil {
		rpcclient.Error(err)
		os.Exit(-1)
	}
	rpcclient.Debug(config)
	rpcclient.Debug(config.AddrMap["0"].Addr)
}
// Info is an in-memory address/private-key pair (used by dataOverflow).
type Info struct {
	address string
	priv    string
}

// userInfo is the JSON shape of a test account, plus its tracked nonce.
type userInfo struct {
	Addr  string `json:"addr"`
	Priv  string `json:"priv"`
	Nonce uint64
}
// createPriv is an address-creation stress test: it repeatedly asks the
// node for a new address and prints it, looping until an RPC error occurs
// (sleeping 100ms between requests).
func (c *Client) createPriv() {
	fmt.Println("--------------createPriv--------------")
	for count := 1; ; count++ {
		resp, err := c.client.CreateAddr(context.Background(), &message.ReqCreateAddr{})
		if err != nil {
			fmt.Printf("c.client.CreateAddr failed i:%d,error:%v\n", count, err)
			return
		}
		fmt.Printf("\ri:%d,address:%s", count, resp.Address)
		time.Sleep(100 * time.Millisecond)
	}
}
// sendSignedTx builds, signs and submits one offline transaction from the
// coinbase address to a freshly generated wallet, printing the tx hash.
func (c *Client) sendSignedTx() error {
	wallet := transaction.NewWallet()
	fmt.Printf("to:%s,len:%d\n", wallet.Address, len(wallet.Address))
	// The nonce must come from the node for the signed tx to be accepted.
	nonceResp, err := c.client.GetAddressNonceAt(context.Background(), &message.ReqNonce{Address: coinbaseAddr})
	if err != nil {
		return err
	}
	tx := transaction.Transaction{
		From:   types.BytesToAddress([]byte(coinbaseAddr)),
		To:     types.BytesToAddress([]byte(wallet.Address)),
		Amount: uint64(10000000000),
		Nonce:  nonceResp.Nonce,
		Time:   time.Now().Unix(),
	}
	// Hash first, then sign the hash with the coinbase private key.
	// (Sgin is the library's spelling of "sign".)
	tx.HashTransaction()
	privateKey := until.Decode(coinbasePriv)
	tx.Sgin(privateKey)
	req := &message.ReqSignedTransaction{
		From:      coinbaseAddr,
		To:        wallet.Address,
		Amount:    tx.Amount,
		Nonce:     tx.Nonce,
		Time:      tx.Time,
		Hash:      tx.Hash,
		Signature: tx.Signature,
	}
	signedTxResp, err := c.client.SendSignedTransaction(context.Background(), req)
	if err != nil {
		return err
	}
	fmt.Println(signedTxResp.Hash)
	return nil
}
// allTokenBalance sums the balance of `symbol` over every address in
// c.addressList (e.g. the list loaded from TokenAddress.json); it logs and
// returns 0 when the list is empty.
func (c *Client) allTokenBalance(symbol string) uint64 {
	fmt.Println("-------------allTokenBalance--------------")
	if len(c.addressList) == 0 {
		rpcclient.Error("address list error")
		return 0
	}
	var total uint64
	for _, info := range c.addressList {
		b := c.getTokenBalance(info.Addr, symbol)
		if b == 0 {
			fmt.Printf("address:%s,balance:%d", info.Addr, b)
		}
		total += b
	}
	return total
}
// getTokenBalance fetches the token balance of addr for symbol; it returns
// 0 on RPC error (after printing the error).
func (c *Client) getTokenBalance(addr, symbol string) uint64 {
	fmt.Println("----------------getTokenBalance-------------")
	req := &message.ReqTokenBalance{Address: addr, Symbol: symbol}
	resp, err := c.client.GetBalanceToken(context.Background(), req)
	if err != nil {
		fmt.Println(err)
		return 0
	}
	return resp.Balnce
}
// mintToken creates a token contract for `symbol` and then mints `total`
// units, crediting `amount` to a freshly generated address; it returns that
// address and its private key.
func (c *Client) mintToken(from, priv, symbol string, total, amount, fee uint64) (addr, topriv string, err error) {
	addressList := createAddress(1)
	var req = &message.ReqTokenCreate{
		From:   from,
		Priv:   priv,
		To:     addressList[0].Addr,
		Amount: amount,
		Symbol: symbol,
		Total:  total,
		Fee:    fee,
	}
	nonceResp, err := c.client.GetAddressNonceAt(context.Background(), &message.ReqNonce{Address: req.From})
	if err != nil {
		return "", "", err
	}
	req.Nonce = nonceResp.Nonce
	hashResp, err := c.client.CreateContract(context.Background(), req)
	if err != nil {
		rpcclient.Error("c.client.CreateContract failed", err)
		return "", "", err
	}
	fmt.Printf("\r%s", hashResp.Hash)
	// Give the create-contract tx time to land before minting — TODO
	// confirm 1s is always enough.
	time.Sleep(1 * time.Second)
	// The mint must use the next nonce after the create.
	req.Nonce++
	hashResp, err = c.client.MintToken(context.Background(), req)
	if err != nil {
		rpcclient.Error("c.client.MintToken failed", err)
		return "", "", err
	}
	fmt.Printf("\r%s", hashResp.Hash)
	return addressList[0].Addr, addressList[0].Priv, nil
}
// readAddress loads a JSON array of userInfo from filename; it returns nil
// when the file cannot be read or parsed (callers treat nil as "no preset
// address list").
func readAddress(filename string) []userInfo {
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil
	}
	var users []userInfo
	if err := json.Unmarshal(data, &users); err != nil {
		return nil
	}
	return users
}
// tokenCycleTransaction seeds every test address with tokens from the
// coinbase account, snapshots the address list (with nonces) to
// TokenUsers.json, then endlessly passes one token around the ring of
// addresses. It returns only when an RPC call fails.
func (c *Client) tokenCycleTransaction(symbol string, fee uint64, n int) (err error) {
	if c.addressList == nil {
		c.addressList = createAddress(n)
	}
	// First, send tokens from the minter to every other address.
	req := &message.ReqTokenTransaction{
		From:        coinbaseAddr,
		Priv:        coinbasePriv,
		Amount:      10000000000000, // 100 units
		TokenAmount: 100,
		Symbol:      symbol,
		Fee:         fee,
	}
	nonceResp, err := c.client.GetAddressNonceAt(context.Background(), &message.ReqNonce{Address: req.From})
	if err != nil {
		rpcclient.Error("c.client.GetAddressNonceAt failed", err)
		return err
	}
	req.Nonce = nonceResp.Nonce
	for i, address := range c.addressList {
		req.To = address.Addr
		hashResp, err := c.client.SendToken(context.Background(), req)
		if err != nil {
			rpcclient.Errorf("c.client.SendToken failed i:%d error:%v", i, err)
			return err
		}
		fmt.Printf("\r%s", hashResp.Hash)
		req.Nonce++
		time.Sleep(500 * time.Millisecond)
	}
	c.addressList = append(c.addressList, userInfo{coinbaseAddr, coinbasePriv, req.Nonce})
	// Persist the address list for later runs.
	jsbyte, _ := json.Marshal(&c.addressList)
	err = ioutil.WriteFile("TokenUsers.json", jsbyte, 0644)
	if err != nil {
		rpcclient.Error("ioutil.WriteFile failed", err)
		// Naked return: the named result err is non-nil here.
		return
	}
	for {
		for i, address := range c.addressList {
			req = &message.ReqTokenTransaction{
				From:        address.Addr,
				Priv:        address.Priv,
				To:          c.addressList[uint64((i+1)%len(c.addressList))].Addr,
				Nonce:       address.Nonce,
				Amount:      500001,
				TokenAmount: 1,
				Symbol:      symbol,
				Fee:         fee,
			}
			hashResp, err := c.client.SendToken(context.Background(), req)
			if err != nil {
				rpcclient.Errorf("c.client.SendToken failed i:%d nonce:%d error:%v", i, req.Nonce, err)
				return err
			}
			fmt.Printf("\r%s", hashResp.Hash)
			// Advance the stored nonce (the range copy `address` is stale
			// after this point; the next outer pass re-reads the list).
			c.addressList[i].Nonce++
			time.Sleep(500 * time.Millisecond)
		}
	}
}
// sendto100 drives `num` concurrent senders over the accounts in
// userinfo.json: each worker fetches its account's nonce once, then sends
// ten 1-coin transactions to later accounts, throttled to one send per
// 100ms. The goto at the bottom makes this run forever; the trailing
// "return nil" is unreachable.
func (c *Client) sendto100(num int, sleeptime uint) error {
	var userList []userInfo
	data, err := ioutil.ReadFile("userinfo.json")
	if err != nil {
		log.Fatal("read file failed:", err)
	}
	err = json.Unmarshal(data, &userList)
	if err != nil {
		log.Fatal("parse json failed:", err)
	}
	// The account list must be a non-empty exact multiple of the worker
	// count so the batches below tile it evenly.
	if len(userList) == 0 || len(userList)%num != 0 {
		panic("不是整数倍")
	}
START:
	var wg sync.WaitGroup
	var t int64
	for i := 0; i < len(userList); i += num {
		t++
		wg.Add(num)
		for j := 0; j < num; j++ {
			go func(i, j int) {
				var reqData = &message.ReqTransaction{}
				defer func() {
					wg.Done()
				}()
				reqData.From = userList[i+j].Addr
				reqData.Priv = userList[i+j].Priv
				nonceData, err := c.client.GetAddressNonceAt(c.ctx, &message.ReqNonce{Address: reqData.From})
				if err != nil {
					rpcclient.Debug("addr:", reqData.From)
					rpcclient.Error("get nonce failed:", err)
					return
				}
				reqData.Nonce = nonceData.Nonce
				reqData.Amount = 100000000000 // 1 coin
				// balanceData, err := c.client.GetBalance(c.ctx, &message.ReqBalance{Address: reqData.From})
				// if err != nil {
				// rpcclient.Debug("addr:", reqData.From)
				// rpcclient.Error("get banlance failed:", err)
				// return
				// }
				// if balanceData.Balnce < reqData.Amount {
				// return
				// }
				for x := 0; x < 10; x++ {
					reqData.To = userList[(i+j+x+10)%len(userList)].Addr
					// attach an order (disabled)
					// reqData.Order = &message.Order{}
					// reqData.Order.Id = fmt.Sprintf("%032d", reqData.Nonce)
					// reqData.Order.Address = "KtoBA91mi8mFEKnmQyZ698tXa5i89mesyXb2KvwoqyyRx21"
					// reqData.Order.Price = 600000
					// reqData.Order.Ciphertext = hex.EncodeToString([]byte("Kto2LRDy2u84ty9N5p9d455YhDAqj1dxM1bJxZyRcT6aNmm"))
					// signord, err := c.client.SignOrd(c.ctx, &message.ReqSignOrd{Priv: "3aPiZgy3dCcPXG2ZYZiTzT5QD3HBFcYRK88NxSqunkZj522i8swhAh7fZnqRBPeTF1j4MnGXxrdkgamhQebV7ogy", Order: reqData.Order})
					// if err != nil {
					// rpcclient.Error("sign ord failed:", err)
					// return
					// } else {
					// reqData.Order.Hash = signord.Hash
					// reqData.Order.Signature = signord.Signature
					// }
					beforTime := time.Now()
					_, err = c.client.SendTransaction(c.ctx, reqData)
					if err != nil {
						rpcclient.Debugf("i+j:%d,i+j+x+1:%d", i+x, (i+j+x+10)%100)
						rpcclient.Debug("from addr:", reqData.From)
						rpcclient.Debug("to addr:", reqData.To)
						rpcclient.Errorf("nonce:%d,send tx error:%v\n", reqData.Nonce, err)
						return
					}
					// Pace each worker to at most one tx per 100ms.
					interval := time.Now().Sub(beforTime)
					if interval < (100 * time.Millisecond) {
						time.Sleep(100*time.Millisecond - interval)
					}
					reqData.Nonce++
				}
			}(i, j)
		}
		wg.Wait()
		// Back off periodically when running many workers.
		if num > 50 && t%4 == 0 {
			fmt.Println("sleep..............")
			time.Sleep(time.Duration(sleeptime) * time.Second)
		}
	}
	goto START
	return nil // unreachable: the goto above loops forever
}
// fristTx sends a single hard-coded 100-unit transaction from the coinbase
// address and prints the resulting hash.
// NOTE(review): name looks like a typo for "firstTx"; kept so callers keep
// compiling.
func (c *Client) fristTx() error {
	reqData := &message.ReqTransaction{
		From:   "Kto9sFhbjDdjEHvcdH6n9dtQws1m4ptsAWAy7DhqGdrUFai",
		Priv:   "5BWVgtMPPUPFuCHssYhXxFx2xVfQRTkB1EjKHKu1B1KxdVxD5cswDEdqiko3PjUPFPGfePoKxdfzHvv4YXRCYNp2",
		Amount: 1000000000000000, // 100 units
		To:     "KtoGoyujGLnWttEfAwmofdTzeXG534nrSZy6UR74uZ5mcoy",
	}
	nonce, err := c.client.GetAddressNonceAt(c.ctx, &message.ReqNonce{Address: reqData.From})
	if err != nil {
		rpcclient.Error("fail to get nonce:", err)
		return err
	}
	reqData.Nonce = nonce.Nonce
	hash, err := c.client.SendTransaction(c.ctx, reqData)
	if err != nil {
		// Return the error instead of panicking: this method already has an
		// error result, and a failed send should not crash the tool.
		return err
	}
	fmt.Println("hash:", hash.Hash)
	return nil
}
// dataOverflow tries to provoke balance overflows by sending roughly 1/10
// of (balance + 1000 coins) ten times between three fixed accounts.
// NOTE(review): `count` is always 0 because the START loop is commented
// out, so the modulo rotation over the three accounts never advances.
func (c *Client) dataOverflow() error {
	var count int64
	var list [3]Info
	list[0].address = "KtoGoyujGLnWttEfAwmofdTzeXG534nrSZy6UR74uZ5mcoy"
	list[0].priv = "4UqKNPRjdXQsja2Geafu6BtLzqxLcScfTSBfSVxtav6gEq4LA2raLpP5hVNgTqdHPZ1rR9N7LbkstbzYeQhvVAXq"
	list[1].address = "KtoC5gP1TLyUWbHRkp1gfpMrbdBawnqxQi3NdYtB31dgtJE"
	list[1].priv = "5JFcGkBXdhD1E6Kgdrk8XWHRzvAuwbnezzmxkLLa4bfHPyWfG3LUBJLhVGZz7Y9ZcTocXaeqBMzbZfdo9yWyxZhc"
	list[2].address = "Kto3tPtVvkoBAgxxAgBbd6HpR1jgcfNWsz7554S4ud1PkBD"
	list[2].priv = "3zT8gZ3qstBbHCpB6Gm7T3DL2qz6SW5g5HaoK8EMx5BgxNou6Sn4n5Hpc5gVLFGefEfEsiRwUGdcBRXyLmxzRR6T"
	//START:
	reqData := &message.ReqTransaction{
		From: list[count%3].address,
		Priv: list[count%3].priv,
	}
	nonce, err := c.client.GetAddressNonceAt(c.ctx, &message.ReqNonce{Address: reqData.From})
	if err != nil {
		rpcclient.Error("fail to get nonce:", err)
		return err
	}
	rpcclient.Debug("nonce:", nonce.Nonce)
	balance, err := c.client.GetBalance(c.ctx, &message.ReqBalance{Address: reqData.From})
	if err != nil {
		return err
	}
	rpcclient.Debug("balance:", balance.Balnce)
	var num uint64 = 10
	if balance.Balnce > 100 {
		// Deliberately over-commit: the total sent exceeds the balance.
		reqData.Amount = (balance.Balnce + 1000000000000) / num
		reqData.Nonce = nonce.Nonce
		rpcclient.Debug("amount:", reqData.Amount)
		rpcclient.Debug("count:", count)
		for i := 0; i < int(num); i++ {
			// Alternate the receiver between the other two accounts.
			if i%2 == 0 {
				reqData.To = list[(count+1)%3].address
			} else {
				reqData.To = list[(count+2)%3].address
			}
			respData, err := c.client.SendTransaction(c.ctx, reqData)
			if err != nil {
				rpcclient.Debug("i:", i)
				rpcclient.Errorf("dataoverflow:%d,send tx error:%v\n", reqData.Nonce, err)
				break
			}
			rpcclient.Debug(respData.Hash)
			reqData.Nonce++
		}
	} else {
		rpcclient.Debug("balance:", balance.Balnce)
	}
	count++
	time.Sleep(5 * time.Second)
	//goto START
	return nil
}
// createAddress generates num fresh key pairs with nonce 1. Addresses that
// do not come out at the expected 47-character length are discarded and
// regenerated; key-generation failure aborts the process.
func createAddress(num int) []userInfo {
	var users []userInfo
	for len(users) < num {
		pub, priv, err := until.Generprivkey()
		if err != nil {
			log.Fatal(err)
		}
		u := userInfo{
			Addr:  until.PubtoAddr(pub),
			Priv:  until.Encode(priv),
			Nonce: 1,
		}
		if len(u.Addr) != 47 {
			continue
		}
		users = append(users, u)
	}
	// File dump of the generated list kept disabled, as in the original:
	// jsbyte, _ := json.Marshal(&users)
	// err := ioutil.WriteFile("userinfo.json", jsbyte, 0644)
	// if err != nil {
	// 	log.Fatal(err)
	// }
	return users
}
// fastTx: body removed due to a merge conflict; kept as a no-op stub so
// callers still compile. (Original note: "因冲突删掉函数体".)
func (c *Client) fastTx(num int) {}
// newClient dials the gRPC endpoint (blocking, insecure) and wraps it in a
// Client; the address list is preloaded from addrFileName when given (an
// unreadable/empty file simply yields a nil list).
func newClient(host, addrFileName string) (*Client, error) {
	conn, err := grpc.Dial(host, grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		rpcclient.Error("fail to dial:", err)
		return nil, err
	}
	c := &Client{
		client:      message.NewGreeterClient(conn),
		ctx:         context.Background(),
		addressList: readAddress(addrFileName),
	}
	return c, nil
}
|
package bjkl8
import (
"snatch_ssc/ioc"
"snatch_ssc/models/snatch/base"
"snatch_ssc/models/snatch/inter"
"strings"
"snatch_ssc/sys"
"github.com/astaxie/beego"
"github.com/duansky/goquery"
)
// BwlcSnatch scrapes the Beijing welfare lottery site (bwlc) for bjkl8
// draw results.
type BwlcSnatch struct {
	base.DataProcesserAbs
}

// init registers this scraper with the IoC container under its site key.
func init() {
	ioc.RegisterObj("snatch.ssc.bjkl8.bwlc", &BwlcSnatch{base.DataProcesserAbs{Type: "bjkl8", Site: "bwlc"}})
}
// Snatch downloads the previous-draw bulletin page and returns its HTML.
func (c *BwlcSnatch) Snatch() (string, error) {
	doc, err := goquery.NewDocument("http://www.bwlc.net/bulletin/prevkeno.html")
	if err != nil {
		beego.Error(err)
		return "", err
	}
	return doc.Html()
}
// Resolve parses the bulletin HTML and extracts one SscData (issue number
// and winning numbers) per result row; the table header row is skipped.
func (c *BwlcSnatch) Resolve(content string) (datas []*inter.SscData) {
	datas = make([]*inter.SscData, 0, 10)
	if !sys.HasValue(content) {
		return datas
	}
	doc, err := goquery.NewDocumentFromReader(strings.NewReader(content))
	if err != nil {
		beego.Error(err)
		// Bail out: doc is nil on error, so continuing would panic on
		// doc.Find (the original fell through here).
		return datas
	}
	table := doc.Find("table:contains('开奖号码')")
	table.Find("tr").Each(func(i int, tr *goquery.Selection) {
		if i > 0 {
			data := new(inter.SscData)
			data.No.SetValue(tr.Children().Eq(0).Text())
			data.Results.SetValue(tr.Children().Eq(1).Text())
			datas = append(datas, data)
		}
	})
	return datas
}
|
// call from project root with
// go run scripts/push_new_patch/main.go
// goreleaser expects a $GITHUB_TOKEN env variable to be defined
// in order to push the release got github
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"strconv"
"strings"
)
// main bumps the patch component of the VERSION file, commits, tags and
// pushes the new version, then runs goreleaser and cleans up dist.
func main() {
	version, err := ioutil.ReadFile("VERSION")
	if err != nil {
		log.Panicln(err.Error())
	}
	// Trim whitespace/newlines: a trailing "\n" in VERSION would otherwise
	// end up inside the parsed patch number and the rewritten file.
	stringVersion := strings.TrimSpace(string(version))
	fmt.Println("VERSION was " + stringVersion)
	runCommand("git", "pull")
	splitVersion := strings.Split(stringVersion, ".")
	patch := splitVersion[len(splitVersion)-1]
	newPatch, err := strconv.Atoi(patch)
	if err != nil {
		// A malformed VERSION must stop the release; the original ignored
		// this error and silently bumped 0 -> 1.
		log.Panicln("invalid patch component " + patch + ": " + err.Error())
	}
	splitVersion[len(splitVersion)-1] = strconv.Itoa(newPatch + 1)
	newVersion := strings.Join(splitVersion, ".")
	err = ioutil.WriteFile("VERSION", []byte(newVersion), 0644)
	if err != nil {
		log.Panicln(err.Error())
	}
	runCommand("git", "add", "VERSION")
	runCommand("git", "commit", "-m", "bump version to "+newVersion, "--", "VERSION")
	runCommand("git", "push")
	runCommand("git", "tag", newVersion)
	runCommand("git", "push", "origin", newVersion)
	runCommand("goreleaser", "--rm-dist")
	runCommand("rm", "-rf", "dist")
}
// runCommand echoes and executes args[0] with the remaining arguments,
// wiring stdout/stderr through; it panics if the command cannot start or
// exits non-zero.
func runCommand(args ...string) {
	fmt.Println(strings.Join(args, " "))
	cmd := exec.Command(args[0], args[1:]...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	// Run combines the original Start+Wait pair.
	if err := cmd.Run(); err != nil {
		panic(err.Error())
	}
}
|
package label
//selector in
// equality-based
// set-based

// Labels is a read-only view of a label set.
type Labels interface {
	Has(label string) (exists bool)
	Get(label string) (value string)
}

// Selector matches label sets and can be composed from Requirements.
type Selector interface {
	Matches(Labels) bool
	Empty() bool
	String() string
	Add(r ...Requirement) Selector
	Requirements() (requirements Requirements, selectable bool)
	DeepCopySelector() Selector
}

// Requirement is one (key, operator, values) clause of a selector.
type Requirement struct {
	key       string
	operator  selection.Operator
	strValues []string
}
// NewRequirement validates and constructs a label Requirement. The value
// count must fit the operator: In/NotIn need at least one value,
// (Not/Double)Equals and GreaterThan/LessThan exactly one (numeric for the
// ordering operators), Exists/DoesNotExist none. Values are sorted so
// equivalent requirements compare equal.
func NewRequirement(key string, op selection.Operator, vals []string) (*Requirement, error) {
	if err := validateLabelKey(key); err != nil {
		return nil, err
	}
	// Fixed: the original referenced an undefined `vars` instead of `vals`,
	// and compared strconv.ParseInt's two results against nil.
	switch op {
	case selection.In, selection.NotIn:
		assert(len(vals) > 0)
	case selection.Equals, selection.DoubleEquals, selection.NotEquals:
		assert(len(vals) == 1)
	case selection.Exists, selection.DoesNotExist:
		assert(len(vals) == 0)
	case selection.GreaterThan, selection.LessThan:
		assert(len(vals) == 1)
		for i := range vals {
			// Ordering operators compare numerically, so each value must
			// parse as a base-10 int64.
			_, err := strconv.ParseInt(vals[i], 10, 64)
			assert(err == nil)
		}
	default:
	}
	for i := range vals {
		if err := validateLabelValue(vals[i]); err != nil {
			return nil, err
		}
	}
	sort.Strings(vals)
	return &Requirement{key: key, operator: op, strValues: vals}, nil
}
|
package login
import (
"database/sql"
"fmt"
"o2clock/api-proto/onboarding/login"
"o2clock/constants/errormsg"
db "o2clock/db/postgres"
"o2clock/table/accesstoken"
"o2clock/table/allusers"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
const (
SQL_STATEMENT_FIND_USER_USING_USERNAME = `
SELECT * FROM all_users WHERE user_name=$1 && password=$2;`
SQL_STATEMENT_FIND_USER_USING_EMAIL = `
SELECT * FROM all_users WHERE email=$1 && password=$2;`
)
// LoginUser validates the credentials in req and, on success, issues and
// returns a fresh access token for the matched user.
func LoginUser(req *loginpb.LoginRequest) (string, error) {
	user, err := validateUser(req)
	if err != nil {
		return "", err
	}
	return accesstoken.UpdateAccessToken(user.Id, user.UserName)
}
// validateUser looks the credentials up first by user name, then (when no
// row matches) by email, returning the matched user.
// NOTE(review): the statements SELECT * but only two columns are scanned —
// row.Scan errors if all_users has more columns; confirm the schema.
// NOTE(review): a credential mismatch is surfaced as codes.Internal;
// codes.Unauthenticated would describe it better — confirm API contract.
func validateUser(req *loginpb.LoginRequest) (allusers.Users, error) {
	var user allusers.Users
	sqlStatement := SQL_STATEMENT_FIND_USER_USING_USERNAME
	row := db.GetClient().QueryRow(sqlStatement, req.GetUsernameEmail(), req.GetPassword())
	err := row.Scan(&user.Id, &user.UserName)
	switch err {
	case sql.ErrNoRows:
		// Username lookup missed — retry the same credentials as an email.
		sqlStatement := SQL_STATEMENT_FIND_USER_USING_EMAIL
		row := db.GetClient().QueryRow(sqlStatement, req.GetUsernameEmail(), req.GetPassword())
		if err := row.Scan(&user.Id, &user.UserName); err != nil {
			return user, status.Errorf(
				codes.Internal,
				fmt.Sprintln(errormsg.ERR_MSG_INVALID_CREDS, err))
		} else {
			return user, nil
		}
	case nil:
		return user, nil
	default:
		return user, status.Errorf(
			codes.Internal,
			fmt.Sprintln(errormsg.ERR_MSG_INTERNAL_SERVER, err))
	}
}
|
package discovergy
// API is the base URL of the public Discovergy REST API (v1).
const API = "https://api.discovergy.com/public/v1"

// Meter identifies a Discovergy meter as returned by the API.
type Meter struct {
	MeterID          string `json:"meterId"`
	SerialNumber     string `json:"serialNumber"`
	FullSerialNumber string `json:"fullSerialNumber"`
}

// Reading is one timestamped measurement from a meter. Time is presumably
// a Unix timestamp (milliseconds) — TODO confirm against the API docs.
type Reading struct {
	Time   int64
	Values struct {
		EnergyOut                    int64
		Energy1, Energy2             int64
		Voltage1, Voltage2, Voltage3 int64
		EnergyOut1, EnergyOut2       int64
		Power1, Power2, Power3       int64
		Power                        int64
		Energy                       int64
	}
}
|
// Copyright 2014 Dirk Jablonowski. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Type for a device identity result. Every device should have a subscriber getidentity.
package identity
import (
"fmt"
"github.com/dirkjabl/bricker"
"github.com/dirkjabl/bricker/device"
"github.com/dirkjabl/bricker/device/name"
"github.com/dirkjabl/bricker/net/base58"
"github.com/dirkjabl/bricker/net/packet"
)
const (
	// Function id of the common "get identity" call (shared by all devices).
	function_get_identity = uint8(255)
)
// GetIdentity creates the subscriber to get the identity of a device.
// The result handed to handler is an *Identity decoded from the reply
// packet (WithPacket requests that the raw packet be delivered).
func GetIdentity(id string, uid uint32, handler func(device.Resulter, error)) *device.Device {
	return device.Generator{
		Id:         device.FallbackId(id, "GetIdentity"),
		Fid:        function_get_identity,
		Uid:        uid,
		Result:     &Identity{},
		Handler:    handler,
		WithPacket: true}.CreateDevice()
}
// GetIdentityFuture is a future-pattern version for a synchronized call of
// the subscriber. If an error occurs, the result is nil.
// NOTE(review): the channel is closed on return; if the bricker can still
// invoke the handler after a failed Subscribe, that send would panic on a
// closed channel — confirm the subscription contract. Also, if the handler
// never fires, the receive below blocks forever (no timeout).
func GetIdentityFuture(brick bricker.Bricker, connectorname string, uid uint32) *Identity {
	future := make(chan *Identity)
	defer close(future)
	sub := GetIdentity("getidentityfuture", uid,
		func(r device.Resulter, err error) {
			var v *Identity = nil
			if err == nil {
				if value, ok := r.(*Identity); ok {
					v = value
				}
			}
			future <- v
		})
	err := brick.Subscribe(sub, connectorname)
	if err != nil {
		return nil
	}
	v := <-future
	return v
}
// Identity is the result type for a getidentity subscriber.
// Uid and ConnectedUid hold text that String/IntUid run through
// base58.Decode, so they are presumably base58-encoded UIDs — confirm
// against the bricker protocol docs.
type Identity struct {
	Uid             [8]byte
	ConnectedUid    [8]byte
	Position        byte     // position character, printed with %c
	HardwareVersion [3]uint8 // printed as major.minor.patch
	FirmwareVersion [3]uint8 // printed as major.minor.patch
	DeviceIdentifer uint16   // numeric device type id (sic: "Identifer")
}
// FromPacket fills up the values of the identity object from a net packet,
// fulfilling the Resulter interface: the packet is validated first, then
// the payload is decoded directly into the struct.
func (i *Identity) FromPacket(p *packet.Packet) error {
	if err := device.CheckForFromPacket(i, p); err != nil {
		return err
	}
	return p.Payload.Decode(i)
}
// String fulfills the Stringer interface: a one-line human-readable dump
// of all identity fields, with UIDs shown both raw and base58-decoded.
func (i *Identity) String() string {
	uid := base58.Convert32(base58.Decode(i.Uid))
	cuid := base58.Convert32(base58.Decode(i.ConnectedUid))
	return fmt.Sprintf(
		"Identity [UID: %s (%d), Connected UID: %s (%d), Position: %c, Hardware Version: %d.%d.%d, Firmware Version: %d.%d.%d, Name: %s]",
		i.Uid, uid,
		i.ConnectedUid, cuid,
		i.Position,
		i.HardwareVersion[0], i.HardwareVersion[1], i.HardwareVersion[2],
		i.FirmwareVersion[0], i.FirmwareVersion[1], i.FirmwareVersion[2],
		name.Name(i.DeviceIdentifer))
}
// Copy creates a copy of the content; nil in, nil out. All fields are
// value types (arrays and scalars), so a plain struct copy suffices.
func (i *Identity) Copy() device.Resulter {
	if i == nil {
		return nil
	}
	dup := *i
	return &dup
}
// IntUid returns the Uid from the identity values as uint32 (base58
// decoded). If the identity is not filled (nil receiver), the return is 0.
func (i *Identity) IntUid() uint32 {
	if i == nil {
		return 0
	}
	return base58.Convert32(base58.Decode(i.Uid))
}
// Is reports whether the given device identifier equals this identity's
// identifier; a nil identity never matches.
func (i *Identity) Is(deviceidentifer uint16) bool {
	return i != nil && i.DeviceIdentifer == deviceidentifer
}
|
package main
import (
"fmt"
"sync"
)
// MessageChannel fans a message out to every registered member's private
// channel; all operations are guarded by the embedded mutex.
type MessageChannel struct {
	sync.Mutex
	members map[string]chan string
}

// MakeMessageChannel returns a ready-to-use MessageChannel.
func MakeMessageChannel() (m MessageChannel) {
	m.members = make(map[string]chan string)
	return
}

// AddMember registers privateChan under MemberID; it fails when the ID is
// already taken.
func (m *MessageChannel) AddMember(MemberID string, privateChan chan string) error {
	m.Lock()
	defer m.Unlock()
	_, exists := m.members[MemberID]
	if exists {
		return fmt.Errorf("an entry already exists for '%s'", MemberID)
	}
	m.members[MemberID] = privateChan
	fmt.Printf("[MessageChannel] MEMBER COUNT: %v \n", len(m.members))
	return nil
}

// RemoveMember unregisters MemberID (a no-op when absent).
func (m *MessageChannel) RemoveMember(MemberID string) {
	m.Lock()
	defer m.Unlock()
	delete(m.members, MemberID)
	fmt.Printf("[MessageChannel] MEMBER COUNT: %v \n", len(m.members))
}

// SendMessage delivers msg to every member. NOTE(review): each send blocks
// (holding the lock) if a member's channel is unbuffered or full — confirm
// all members drain promptly.
func (m *MessageChannel) SendMessage(msg string) {
	m.Lock()
	defer m.Unlock()
	for _, ch := range m.members {
		ch <- msg
	}
}
|
package ossfile
import (
"github.com/jinzhu/gorm"
"log"
_ "github.com/jinzhu/gorm/dialects/mysql"
)
// Migration installs the database schema; Dsn is a MySQL DSN understood by
// gorm's mysql dialect.
type Migration struct {
	Dsn string
}
// InstallDb opens the MySQL database at m.Dsn and creates the File
// table using the InnoDB engine. Any failure — opening the connection
// or creating the table — aborts the process via log.Fatal.
func (m Migration) InstallDb() {
	db, err := gorm.Open("mysql", m.Dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	// Bug fix: the CreateTable result was previously discarded, so a
	// failed migration was silently reported as success. gorm v1
	// surfaces failures on the chained *gorm.DB's Error field.
	if err := db.Set("gorm:table_options", "ENGINE=InnoDB").CreateTable(&File{}).Error; err != nil {
		log.Fatal(err)
	}
}
|
package server
import (
"net/http"
"strings"
"github.com/Sirupsen/logrus"
"github.com/paddycarey/ims/pkg/images"
"github.com/paddycarey/ims/pkg/storage"
)
// Server is an HTTP handler that serves processed images: requests are
// answered from Cache when possible, otherwise the source image is
// loaded from Storage, processed, cached, and served.
type Server struct {
	Cache *InMemoryCache
	Storage storage.FileSystem
	// disable optimizations (passed through to images.Process)
	NoOpts bool
}
// ServeHTTP satisfies http.Handler: serve from cache when possible,
// otherwise load the original from storage, process it, cache the
// result, and serve it.
func (s *Server) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	key := s.Cache.GenerateKey(r)

	// Fast path: a previously processed image for this exact request.
	if img, ok, _ := s.Cache.Get(key); ok {
		s.serveImage(rw, r, img)
		return
	}

	// Slow path: fetch the source image from the storage backend.
	src, err := s.Storage.Open(r.URL.Path)
	if err != nil {
		s.serveError(rw, r, err)
		return
	}
	defer src.Close()

	img, err := images.Process(src, r.URL, s.NoOpts)
	if err != nil {
		s.serveError(rw, r, err)
		return
	}

	// Cache, then serve, the freshly processed image.
	s.Cache.Set(key, img)
	s.serveImage(rw, r, img)
}
// serveError writes an error response: 404 when the error text looks
// like a missing file (substring match on the message), 500 otherwise,
// and logs the failure with request context.
func (s *Server) serveError(rw http.ResponseWriter, r *http.Request, err error) {
	msg := err.Error()
	status := "500 Internal Server Error"
	code := http.StatusInternalServerError
	if strings.Contains(msg, "no such file or directory") || strings.Contains(msg, "404") {
		status = "404 Not Found"
		code = http.StatusNotFound
	}
	rw.WriteHeader(code)
	rw.Write([]byte(status))
	logrus.WithFields(logrus.Fields{
		"err":    err,
		"status": status,
		"url":    r.URL.String(),
	}).Error("Error serving request")
}
// serveImage writes f with its MIME type, delegating range/conditional
// request handling to http.ServeContent.
func (s *Server) serveImage(rw http.ResponseWriter, r *http.Request, f storage.File) {
	mime := f.MimeType()
	rw.Header().Set("Content-Type", mime)
	http.ServeContent(rw, r, r.URL.Path, f.ModTime(), f)
}
|
package oauth2bearer
import (
"context"
"fmt"
"log"
"net/http"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2/clientcredentials"
)
// controlMessage is the unit of communication on a TokenSource's
// control channels: action selects the operation (see the constants
// below), channel carries a channel to register, and token carries a
// token payload where the action needs one.
type controlMessage struct {
	action int
	channel chan controlMessage
	token *oauth2.Token
}
// Actions carried by controlMessage.action.
// NOTE(review): only getToken is explicitly typed int; the others are
// untyped constants — harmless here since action is an int field, but
// inconsistent.
const (
	getToken int = 0
	refresh = 1
	registerChannel = 2
	sendToken = 3
)
// TokenSource describes a place to get tokens.
type TokenSource struct {
	config clientcredentials.Config // client-credentials grant used to mint tokens
	client *http.Client
	ctx context.Context // NOTE(review): storing a ctx in a struct is discouraged; confirm its lifetime matches the source's
	controlChannels []chan controlMessage // index 0 is the admin channel (see NewTokenSourceWithChannel)
	refreshChannel chan *oauth2.Token
	params TokenSourceParams
	currentToken *oauth2.Token
}
// RawBearerToken is the blob of data returned, in JSON, when
// you request a new bearer token.
type RawBearerToken struct {
	AccessToken string `json:"access_token"`
	ExpiresIn int `json:"expires_in"` // lifetime in seconds from issuance
	TokenType string `json:"token_type"`
	// expiryTime is computed locally; unexported, so it never appears
	// in the JSON payload.
	expiryTime time.Time
}
// TokenSourceWithChannel attaches a channel to a token source.
// Access through one of these to get correctly-serialized responses.
type TokenSourceWithChannel struct {
	source *TokenSource
	channel chan controlMessage // private line to the controller loop
}
// retrieveRawToken fetches a fresh bearer token via the client
// credentials config, retrying up to params.NumRetries times and
// sleeping params.RetrySleep milliseconds between failed attempts.
func (source *TokenSource) retrieveRawToken() (*oauth2.Token, error) {
	for i := 0; i < source.params.NumRetries; i++ {
		tok, err := source.config.Token(source.ctx)
		if err == nil {
			return tok, nil
		}
		// Bug fix: the final-attempt check previously compared i against
		// the package constant refreshRetries rather than
		// params.NumRetries (the loop bound); whenever the two differed
		// the function either slept needlessly after the last failure or
		// fell through to the panic below instead of returning the error.
		if i == source.params.NumRetries-1 {
			return nil, err
		}
		time.Sleep(time.Duration(source.params.RetrySleep) * time.Millisecond)
	}
	// Reachable only when NumRetries <= 0 (no attempt was ever made).
	log.Panic("retrieveRawToken: NumRetries <= 0, no token request attempted")
	return nil, nil
}
// NewTokenSource sets up a new token source and starts its controller
// and refresh goroutines before returning it.
func NewTokenSource(ctx context.Context, config clientcredentials.Config, params TokenSourceParams) *TokenSource {
	ts := TokenSource{ctx: ctx, config: config}
	ts.client = config.Client(ctx)
	// ts := TokenSource{Credentials: creds, ScopesList: scopes, URL: url}
	ts.refreshChannel = make(chan *oauth2.Token)
	ts.controlChannels = make([]chan controlMessage, 0)
	ts.params = params
	// Channel 0 is reserved as the admin channel used for registrations
	// by NewTokenSourceWithChannel.
	ts.controlChannels = append(ts.controlChannels, make(chan controlMessage))
	// NOTE(review): ts is passed BY VALUE, so each goroutine receives its
	// own copy of the struct (channels and the slice backing are still
	// shared). Later mutations through the returned *TokenSource are not
	// visible to these goroutines — confirm this is intentional.
	go tokenControllerLoop(ts)
	go mainRefreshLoop(ts)
	return &ts
}
// NewTokenSourceWithChannel sets up a new channel and wrapper around an
// existing TokenSource, registering the channel with the controller
// loop via the admin channel (index 0) before returning the wrapper.
func (source *TokenSource) NewTokenSourceWithChannel() *TokenSourceWithChannel {
	ch := make(chan controlMessage)
	source.controlChannels[0] <- controlMessage{
		action:  registerChannel,
		channel: ch,
	}
	return &TokenSourceWithChannel{source: source, channel: ch}
}
// Refresh asks the underlying token source's controller to refresh the
// token.
func (source *TokenSourceWithChannel) Refresh() {
	msg := controlMessage{action: refresh}
	source.channel <- msg
}
// Token returns an access token obtained through this wrapper's
// serialized channel; a nil result from the controller is logged.
func (source *TokenSourceWithChannel) Token() *oauth2.Token {
	tok := getAccessToken(source.channel)
	if tok == nil {
		fmt.Println("got back nil!")
	}
	return tok
}
// eof
|
package handlers
import (
"net/http"
"github.com/jalexanderII/literate-octo-pancake/backend/data"
)
// swagger:route GET /products products listProducts
// Return a list of products from the database
// responses:
//	200: productsResponse

// ListAll handles GET requests and returns all current products
func (p *Products) ListAll(w http.ResponseWriter, r *http.Request) {
	p.l.Debug("Get all records")
	w.Header().Add("Content-Type", "application/json")

	currency := r.URL.Query().Get("currency")
	prods, err := p.pdb.GetProducts(currency)
	if err != nil {
		w.WriteHeader(http.StatusInternalServerError)
		data.ToJSON(&GenericError{Message: err.Error()}, w)
		return
	}

	if err := data.ToJSON(prods, w); err != nil {
		// serialization failed after an implicit 200 was sent;
		// nothing left to do but log it
		p.l.Error("Unable to serialize product", "error", err)
	}
}
// swagger:route GET /products/{id} products listSingleProduct
// Return a list of products from the database
// responses:
//	200: productResponse
//	404: errorResponse

// ListSingle handles GET requests
func (p *Products) ListSingle(w http.ResponseWriter, r *http.Request) {
	w.Header().Add("Content-Type", "application/json")

	id := getProductID(r)
	cur := r.URL.Query().Get("currency")
	p.l.Debug("Get record id", "id", id, "currency", cur)

	prod, err := p.pdb.GetProductByID(id, cur)
	if err == data.ErrProductNotFound {
		p.l.Error("Unable to fetch product", "error", err)
		w.WriteHeader(http.StatusNotFound)
		data.ToJSON(&GenericError{Message: err.Error()}, w)
		return
	}
	if err != nil {
		p.l.Error("Unable to fetch product", "error", err)
		w.WriteHeader(http.StatusInternalServerError)
		data.ToJSON(&GenericError{Message: err.Error()}, w)
		return
	}

	if err := data.ToJSON(prod, w); err != nil {
		// we should never be here but log the error just in case
		p.l.Error("Unable to serialize product", "error", err)
	}
}
// swagger:route POST /products products createProduct
// Create a new product
//
// responses:
//	200: productResponse
//	422: errorValidation
//	501: errorResponse

// Create handles POST requests to add new products.
//
// NOTE(review): the product is read from the request context, so this
// assumes validation middleware stored a *data.Product under
// KeyProduct — the type assertion panics if that middleware didn't run;
// confirm the route is always wrapped. Unlike ListAll/ListSingle this
// writes through the package-level data.AddProduct rather than p.pdb —
// verify that is intentional.
func (p *Products) Create(_ http.ResponseWriter, r *http.Request) {
	// fetch the product from the context
	prod := r.Context().Value(KeyProduct{}).(*data.Product)
	p.l.Debug("Inserting product %#v\n", prod)
	data.AddProduct(prod)
}
// swagger:route PUT /products products updateProduct
// Update a products details
//
// responses:
//	201: noContentResponse
//	404: errorResponse
//	422: errorValidation

// Update handles PUT requests to update products.
func (p *Products) Update(w http.ResponseWriter, r *http.Request) {
	w.Header().Add("Content-Type", "application/json")

	// fetch the product from the context (stored by validation middleware)
	prod := r.Context().Value(KeyProduct{}).(data.Product)
	p.l.Debug("Updating record id", "id", prod.ID)

	err := p.pdb.UpdateProduct(prod, "")
	if err == data.ErrProductNotFound {
		p.l.Error("Product not found", "error", err)
		w.WriteHeader(http.StatusNotFound)
		data.ToJSON(&GenericError{Message: "Product not found in database"}, w)
		return
	}
	// Bug fix: any other non-nil error previously fell through and the
	// handler reported 204 No Content for a failed update. Mirror the
	// Delete handler and answer 500 with the error body instead.
	if err != nil {
		p.l.Error("Unable to update product", "error", err)
		w.WriteHeader(http.StatusInternalServerError)
		data.ToJSON(&GenericError{Message: err.Error()}, w)
		return
	}

	// write the no content success header
	w.WriteHeader(http.StatusNoContent)
}
// swagger:route DELETE /products/{id} products deleteProduct
// Update a products details
//
// responses:
//	201: noContentResponse
//	404: errorResponse
//	501: errorResponse

// Delete handles DELETE requests and removes items from the database
func (p *Products) Delete(w http.ResponseWriter, r *http.Request) {
	w.Header().Add("Content-Type", "application/json")

	id := getProductID(r)
	p.l.Debug("Deleting record id", "id", id)

	switch err := data.DeleteProduct(id); err {
	case nil:
		w.WriteHeader(http.StatusNoContent)
	case data.ErrProductNotFound:
		p.l.Error("Unable to delete record, id does not exist", "error", err)
		w.WriteHeader(http.StatusNotFound)
		data.ToJSON(&GenericError{Message: err.Error()}, w)
	default:
		p.l.Error("Unable to delete record", "error", err)
		w.WriteHeader(http.StatusInternalServerError)
		data.ToJSON(&GenericError{Message: err.Error()}, w)
	}
}
|
package discovery
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestClientNodes starts an in-process discovery server, then verifies
// a client discovers exactly that one node with the advertised gRPC
// port and version.
// NOTE(review): this performs real discovery with a 1s timeout, so it
// can be flaky on loaded or network-restricted CI hosts.
func TestClientNodes(t *testing.T) {
	server := NewServer("testing", 1234, "v1.0")
	go server.Serve()
	defer server.Stop()
	nodes, err := Nodes(1 * time.Second)
	assert.NoError(t, err)
	assert.Len(t, nodes, 1)
	assert.Equal(t, int64(1234), nodes[0].GrpcPort)
	assert.Equal(t, "v1.0", nodes[0].Version)
}
|
package command
import (
"fmt"
"math/rand"
"regexp"
)
// swearings is the pool of phrases SwearCommand.Run picks from at random.
var swearings = []string{
	"a maronn",
	"san giuseppe",
	"san pietro",
	"o patatern 'n croc",
	"tutti i santi",
	"gesu",
	"gesu bambin 'n croc",
	"gesu crist",
}

// SwearCommand replies to "swear ..." messages with a random phrase.
type SwearCommand struct {
	pattern *regexp.Regexp
}

// Swear builds a SwearCommand triggered by "swear" (case-insensitive),
// optionally followed by text.
func Swear() SwearCommand {
	return SwearCommand{pattern: regexp.MustCompile(`(?i)swear\s?(.*)`)}
}

// Pattern exposes the trigger regexp for the command dispatcher.
func (c SwearCommand) Pattern() *regexp.Regexp {
	return c.pattern
}

// Run ignores the query and returns a single random "annagg ..." line.
func (c SwearCommand) Run(query string) []string {
	pick := swearings[rand.Intn(len(swearings))]
	return []string{fmt.Sprintf("annagg %s", pick)}
}
|
package headerutil
import (
"net"
"net/http"
"strings"
)
// GetNextXForwardedFor returns the X-Forwarded-For header value and append
// the remote address (client IP or proxy IP)
func GetNextXForwardedFor(r *http.Request) string {
var IPs string
priorIPs, hasXff := r.Header["X-Forwarded-For"]
if hasXff {
IPs = strings.Join(priorIPs, ", ")
}
if clientIP, _, err := net.SplitHostPort(r.RemoteAddr); err == nil {
if hasXff {
IPs = IPs + ", " + clientIP
} else {
IPs = clientIP
}
}
return IPs
}
// GetXForwardedHost returns the X-Forwarded-Host header value. If not set,
// the host mentioned in the request will be returned
func GetXForwardedHost(r *http.Request) string {
if host := r.Header.Get("X-Forwarded-Host"); host != "" {
return host
}
return r.Host
}
// GetXForwardedProto returns the X-Forwarded-Proto header value. If not set,
// the protocol will be infered from the request:
// - https: If the request was received on a TLS connection
// - http: If nothing else matches
func GetXForwardedProto(r *http.Request) string {
if proto := r.Header.Get("X-Forwarded-Proto"); proto != "" {
return proto
}
if r.TLS != nil {
return "https"
}
return "http"
}
|
package main
import (
"context"
"fmt"
"os"
"os/signal"
"sync"
"syscall"
"testing"
"time"
)
// w waits for the ticker goroutine started in TestSignal.
var w sync.WaitGroup

// signalChan receives OS shutdown signals.
// Bug fix: signal.Notify performs a non-blocking send, so the os/signal
// docs require the channel to be buffered (capacity >= 1); with the
// previous unbuffered channel a signal arriving while nobody was
// receiving would be dropped.
var signalChan = make(chan os.Signal, 1)

// ctx/cancel propagate shutdown to the worker loops.
var ctx, cancel = context.WithCancel(context.Background())
// processSignal blocks until an interrupt/terminate signal arrives,
// then cancels ctx for a graceful stop, waits briefly for workers to
// wind down, and exits the process.
func processSignal() {
	// Bug fix: SIGKILL was removed from the registration — it can never
	// be caught, blocked, or ignored, so listing it was a no-op at best.
	signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
	// A one-case select is just a receive; wait on the channel directly.
	<-signalChan
	cancel()
	print("\ndone\n")
	time.Sleep(time.Second * 2)
	os.Exit(0)
}
// TestSignal runs the signal handler alongside a ticker goroutine that
// stops on context cancellation. It is interactive by design: it reads
// stdin and then waits for the ticker goroutine to finish.
func TestSignal(t *testing.T) {
	w.Add(1)
	go processSignal()
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop() // release ticker resources when the test ends
	go func() {
		defer w.Done()
		for {
			select {
			case <-ctx.Done():
				fmt.Printf("%s", ctx.Err())
				// break would only leave the select, not the for loop;
				// a return (or labeled break) is required here
				return
			case <-ticker.C:
				fmt.Printf("p...\n")
			}
		}
	}()
	// Bug fix: fmt.Scanf("%s") had no destination argument, which go vet
	// rejects ("format %s reads arg #1, but call has 0 args").
	var input string
	fmt.Scanf("%s", &input)
	w.Wait()
}
|
package factory
import (
"bufio"
"encoding/json"
"fmt"
"github.com/mitchellh/cli"
"github.com/thoas/go-funk"
"os"
"seeder/constants"
"seeder/models"
"seeder/tools"
"seeder/utils"
"strings"
)
// Plan is the cli.CommandFactory for the "plan" action; it never fails.
func Plan() (cli.Command, error) {
	return &planCommandCLI{}, nil
}
// planCommandCLI implements cli.Command for "seeder plan". Args keeps
// the raw CLI arguments so Run can check flags like -auto-approve.
type planCommandCLI struct {
	Args []string
}
// Run executes "seeder plan": it snapshots the remote deployment state
// to disk, computes a plan of changes against it, optionally persists
// the plan (automatically with -auto-approve, otherwise after an
// interactive prompt when it differs from the saved one), prints a
// summary, and surfaces policy warnings.
// Returns constants.SUCCESS or constants.FAILURE as the exit code.
func (c *planCommandCLI) Run(args []string) int {
	c.Args = args

	// 1) Fetch remote state and persist it locally as JSON.
	remoteStateFetcher := tools.NewRemoteStateFetcher()
	remoteStateDeployments := remoteStateFetcher.GetDeployments()
	jsonRemoteStateDeployments, err := json.Marshal(remoteStateDeployments)
	if err != nil {
		fmt.Println(err.Error())
		return constants.FAILURE
	}
	utils.WriteFile(constants.DEPLOYMENT_STATE, jsonRemoteStateDeployments)
	fmt.Println(fmt.Sprintf("The remote state was saved at location: %s.", constants.DEPLOYMENT_STATE))

	// 2) Re-read the snapshot back into typed deployments.
	// NOTE(review): round-tripping through the file just written looks
	// redundant — presumably done to normalize via JSON; confirm.
	remoteDeployments := make([]*models.ServerDeployment, 0)
	err = json.Unmarshal(utils.ReadFile(constants.DEPLOYMENT_STATE), &remoteDeployments)
	if err != nil {
		fmt.Println(err.Error())
		return constants.FAILURE
	}

	// 3) Build the plan: which deployments change and which do not.
	deploymentPlanCreator := tools.NewDeploymentPlanCreator(remoteDeployments)
	plannedChanges := deploymentPlanCreator.GetPlannedChanges()
	noChanges := deploymentPlanCreator.GetNoChanges()
	plan := deploymentPlanCreator.GetPlan()
	deploymentPlanPrinter := tools.NewDeploymentPlanPrinter()
	deploymentPlanPolicy := tools.NewDeploymentPlanPolicy()
	jsonPlan, err := json.Marshal(plan)
	if err != nil {
		fmt.Println(err.Error())
		return constants.FAILURE
	}

	// 4) With -auto-approve the plan is written without prompting.
	if funk.Contains(c.Args, "-auto-approve") {
		utils.WriteFile(constants.DEPLOYMENT_PLAN, jsonPlan)
		fmt.Println(fmt.Sprintf("The current plan was saved at location: %s.", constants.DEPLOYMENT_PLAN))
	}

	// 5) Load the previously saved plan for comparison.
	// NOTE(review): if no plan file exists yet this ReadFile/Unmarshal
	// path is the likely failure point — verify first-run behavior.
	alreadySavedPlan := make([]*models.ServerDeployment, 0)
	err = json.Unmarshal(utils.ReadFile(constants.DEPLOYMENT_PLAN), &alreadySavedPlan)
	if err != nil {
		fmt.Println(err.Error())
		return constants.FAILURE
	}

	fmt.Println("Deployments already created: ")
	deploymentPlanPrinter.PrintFromArray(noChanges)
	fmt.Println("Deployments to be (re)/created: ")
	deploymentPlanPrinter.PrintFromArray(plannedChanges)

	// 6) If the fresh plan differs from the saved one, ask to overwrite.
	planComparator := tools.NewPlanComparator(alreadySavedPlan, plan)
	if len(planComparator.GetChanges()) != 0 {
		fmt.Println(fmt.Sprintf("Current plan is different from the one found at '%s'. "+
			"Do you want to save the current plan ? [yes/no] : ", constants.DEPLOYMENT_PLAN))
		reader := bufio.NewReader(os.Stdin)
		answer, err := reader.ReadString('\n')
		if err != nil {
			fmt.Println(err.Error())
			return constants.FAILURE
		}
		answer = strings.Replace(answer, "\n", "", -1)
		if strings.Compare(answer, "yes") == 0 {
			utils.WriteFile(constants.DEPLOYMENT_PLAN, jsonPlan)
			fmt.Println(fmt.Sprintf("The current plan was saved at location: %s.", constants.DEPLOYMENT_PLAN))
		} else {
			fmt.Println("The current plan was discarded.")
		}
	}

	// 7) Emit policy warnings about the (possibly just-saved) plan file.
	err = deploymentPlanPolicy.PrintWarnings(constants.DEPLOYMENT_PLAN)
	if err != nil {
		fmt.Println(err.Error())
		return constants.FAILURE
	}
	return constants.SUCCESS
}
// Synopsis returns the one-line usage string shown in command listings.
func (c *planCommandCLI) Synopsis() string { return "Usage: seeder plan" }
// Help returns the long-form usage text for "seeder plan". The raw
// string below is user-facing output and is left untouched.
func (c *planCommandCLI) Help() string {
	return `
Usage: seeder plan [Options]
Creates the deployments plan. By default, creating a plan consists of:
- saves the plan locally as 'deployment_plan.json' in the 'workspace' folder
- reading remote state and comparing to the current local plan
- proposing a set of actions in order to sync remote state with the current plan
Options:
-auto-approve Skip interactive approval of plan before applying.
Call it after 'init'. Always.
It can be used also to detect leeway between local plan and remote state. Can be called with 'show' action as well.
`
}
|
package sub
// version is the semantic version string this package reports.
var version = "v0.1"

// Version returns the package version string.
func Version() string {
	return version
}
|
/*
@Time : 2019/9/16 16:06
@Author : zxr
@File : recommend
@Software: GoLand
*/
package poetry
import "poetryAdmin/worker/core/grasp/poetry"
// Recommend scrapes the poetry home-page recommendation section.
type Recommend struct {
}

// NewRecommend returns a ready-to-run Recommend scraper.
func NewRecommend() *Recommend {
	return new(Recommend)
}
// Run kicks off the recommendation scrape, delegating all the work to
// the grasp-side poetry.NewRecommend().StartGrasp().
func (r *Recommend) Run() {
	poetry.NewRecommend().StartGrasp()
}
|
package main
import "fmt"
// main prints a formatted name/age line demonstrating Printf verbs.
func main() {
	name := "liuruichao"
	age := 20
	fmt.Printf("name: %s, age: %d.\n", name, age)
}
|
package ingress
import (
"fmt"
"net"
"net/url"
"regexp"
"strconv"
"strings"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/urfave/cli/v2"
"golang.org/x/net/idna"
"github.com/cloudflare/cloudflared/config"
"github.com/cloudflare/cloudflared/ingress/middleware"
"github.com/cloudflare/cloudflared/ipaccess"
)
// Sentinel errors surfaced during ingress parsing and validation. The
// messages are user-facing and deliberately start with capital letters.
var (
	ErrNoIngressRules = errors.New("The config file doesn't contain any ingress rules")
	ErrNoIngressRulesCLI = errors.New("No ingress rules were defined in provided config (if any) nor from the cli, cloudflared will return 503 for all incoming HTTP requests")
	errLastRuleNotCatchAll = errors.New("The last ingress rule must match all URLs (i.e. it should not have a hostname or path filter)")
	errBadWildcard = errors.New("Hostname patterns can have at most one wildcard character (\"*\") and it can only be used for subdomains, e.g. \"*.example.com\"")
	errHostnameContainsPort = errors.New("Hostname cannot contain a port")
	ErrURLIncompatibleWithIngress = errors.New("You can't set the --url flag (or $TUNNEL_URL) when using multiple-origin ingress rules")
)
// Names of the special non-URL origin services cloudflared can serve.
const (
	ServiceBastion = "bastion"
	ServiceSocksProxy = "socks-proxy"
	ServiceWarpRouting = "warp-routing"
)
// FindMatchingRule returns the index of the Ingress Rule which matches the given
// hostname and path. This function assumes the last rule matches everything,
// which is the case if the rules were instantiated via the ingress#Validate method.
//
// Negative index rule signifies local cloudflared rules (not-user defined).
func (ing Ingress) FindMatchingRule(hostname, path string) (*Rule, int) {
	// Strip any port before comparing against rule hostnames.
	if host, _, err := net.SplitHostPort(hostname); err == nil {
		hostname = host
	}
	// Internal (cloudflared-defined) rules report indices -1, -2, ...
	// A copy of the matched rule is returned, as in the original loops.
	for i := range ing.InternalRules {
		rule := ing.InternalRules[i]
		if rule.Matches(hostname, path) {
			return &rule, -1 - i
		}
	}
	for i := range ing.Rules {
		rule := ing.Rules[i]
		if rule.Matches(hostname, path) {
			return &rule, i
		}
	}
	// Nothing matched explicitly: fall back to the catch-all last rule.
	last := len(ing.Rules) - 1
	return &ing.Rules[last], last
}
// matchHost reports whether reqHost satisfies ruleHost: either an exact
// match, or — when ruleHost starts with "*." — a suffix match on the
// remainder, so "*.example.com" matches any subdomain at any depth but
// not the apex "example.com" itself.
func matchHost(ruleHost, reqHost string) bool {
	switch {
	case ruleHost == reqHost:
		return true
	case strings.HasPrefix(ruleHost, "*."):
		suffix := ruleHost[1:] // e.g. ".example.com"
		return strings.HasSuffix(reqHost, suffix)
	default:
		return false
	}
}
// Ingress maps eyeball requests to origins.
type Ingress struct {
	// Set of ingress rules that are not added to remote config, e.g. management
	InternalRules []Rule
	// Rules that are provided by the user from remote or local configuration
	Rules []Rule `json:"ingress"`
	// Defaults holds the fallback origin-request settings merged into each rule.
	Defaults OriginRequestConfig `json:"originRequest"`
}
// ParseIngress parses ingress rules, but does not send HTTP requests to
// the origins. It fails with ErrNoIngressRules when conf defines none.
func ParseIngress(conf *config.Configuration) (Ingress, error) {
	if len(conf.Ingress) == 0 {
		return Ingress{}, ErrNoIngressRules
	}
	defaults := originRequestFromConfig(conf.OriginRequest)
	return validateIngress(conf.Ingress, defaults)
}
// ParseIngressFromConfigAndCLI will parse the configuration rules from config files for ingress
// rules and then attempt to parse CLI for ingress rules.
// Will always return at least one valid ingress rule. If none are provided by the user, the default
// will be to return 503 status code for all incoming requests.
func ParseIngressFromConfigAndCLI(conf *config.Configuration, c *cli.Context, log *zerolog.Logger) (Ingress, error) {
	// Attempt to parse ingress rules from configuration
	ingressRules, err := ParseIngress(conf)
	if err == nil && !ingressRules.IsEmpty() {
		return ingressRules, nil
	}
	if err != ErrNoIngressRules {
		return Ingress{}, err
	}
	// Attempt to parse ingress rules from CLI:
	//   --url or --unix-socket flag for a tunnel HTTP ingress
	//   --hello-world for a basic HTTP ingress self-served
	//   --bastion for ssh bastion service
	ingressRules, err = parseCLIIngress(c, false)
	if errors.Is(err, ErrNoIngressRulesCLI) {
		// If no token is provided, the probability of NOT being a remotely managed tunnel is higher.
		// So, we should warn the user that no ingress rules were found, because remote configuration will most likely not exist.
		if !c.IsSet("token") {
			// Bug fix: previously Msgf was called with a non-constant
			// string as the printf-style format; a literal '%' in the
			// message would garble the output and `go vet` flags it.
			// Msg logs the string verbatim.
			log.Warn().Msg(ErrNoIngressRulesCLI.Error())
		}
		return newDefaultOrigin(c, log), nil
	}
	if err != nil {
		return Ingress{}, err
	}
	return ingressRules, nil
}
// parseCLIIngress constructs an Ingress set with only one rule constructed from
// CLI parameters: --url, --hello-world, --bastion, or --unix-socket.
func parseCLIIngress(c *cli.Context, allowURLFromArgs bool) (Ingress, error) {
	service, err := parseSingleOriginService(c, allowURLFromArgs)
	if err != nil {
		return Ingress{}, err
	}
	// Wrap the single origin service in a one-rule Ingress.
	defaults := originRequestFromSingleRule(c)
	rule := Rule{
		Service: service,
		Config:  setConfig(defaults, config.OriginRequestConfig{}),
	}
	return Ingress{Rules: []Rule{rule}, Defaults: defaults}, nil
}
// newDefaultOrigin always returns a 503 response code to help indicate that there are no ingress
// rules setup, but the tunnel is reachable.
func newDefaultOrigin(c *cli.Context, log *zerolog.Logger) Ingress {
	return Ingress{
		Rules:    GetDefaultIngressRules(log),
		Defaults: originRequestFromSingleRule(c),
	}
}
// parseSingleOriginService gets a single origin service from the CLI/config.
// Flag precedence (the order of the checks below matters):
// --hello-world, then --bastion, then --url, then --unix-socket; with
// none of them set it returns ErrNoIngressRulesCLI.
func parseSingleOriginService(c *cli.Context, allowURLFromArgs bool) (OriginService, error) {
	if c.IsSet(HelloWorldFlag) {
		return new(helloWorld), nil
	}
	if c.IsSet(config.BastionFlag) {
		return newBastionService(), nil
	}
	if c.IsSet("url") {
		originURL, err := config.ValidateUrl(c, allowURLFromArgs)
		if err != nil {
			return nil, errors.Wrap(err, "Error validating origin URL")
		}
		// HTTP-like schemes proxy as HTTP; any other scheme tunnels raw
		// TCP over WebSocket.
		if isHTTPService(originURL) {
			return &httpService{
				url: originURL,
			}, nil
		}
		return newTCPOverWSService(originURL), nil
	}
	if c.IsSet("unix-socket") {
		path, err := config.ValidateUnixSocket(c)
		if err != nil {
			return nil, errors.Wrap(err, "Error validating --unix-socket")
		}
		return &unixSocketPath{path: path, scheme: "http"}, nil
	}
	return nil, ErrNoIngressRulesCLI
}
// IsEmpty checks if there are any ingress rules.
// Only user-provided Rules are counted; InternalRules are ignored.
func (ing Ingress) IsEmpty() bool {
	return len(ing.Rules) == 0
}
// IsSingleRule checks if the user only specified a single ingress rule.
func (ing Ingress) IsSingleRule() bool {
	return len(ing.Rules) == 1
}
// StartOrigins will start any origin services managed by cloudflared, e.g. proxy servers or Hello World.
// The first service that fails to start aborts the loop and its error
// is returned wrapped with the service name.
func (ing Ingress) StartOrigins(
	log *zerolog.Logger,
	shutdownC <-chan struct{},
) error {
	for i := range ing.Rules {
		rule := ing.Rules[i]
		err := rule.Service.start(log, shutdownC, rule.Config)
		if err != nil {
			return errors.Wrapf(err, "Error starting local service %s", rule.Service)
		}
	}
	return nil
}
// CatchAll returns the catch-all rule (i.e. the last rule).
// Callers must ensure Rules is non-empty; an empty set panics here.
func (ing Ingress) CatchAll() *Rule {
	return &ing.Rules[len(ing.Rules)-1]
}
// GetDefaultIngressRules returns the single default rule used when no
// ingress is configured; it answers every request with a 503 status.
func GetDefaultIngressRules(log *zerolog.Logger) []Rule {
	svc := newDefaultStatusCode(log)
	rule := Rule{Service: &svc}
	return []Rule{rule}
}
// validateAccessConfiguration sanity-checks an Access config. A config
// that is not Required is always accepted (this allows an initial setup
// where the user forces Access without the remaining keys), but
// specifying audTags while leaving teamName blank is rejected.
func validateAccessConfiguration(cfg *config.AccessConfig) error {
	if !cfg.Required {
		return nil
	}
	if len(cfg.AudTag) > 0 && cfg.TeamName == "" {
		return errors.New("access.TeamName cannot be blank when access.audTags are present")
	}
	return nil
}
// validateIngress compiles unvalidated config rules into runtime Rules.
// For each rule it resolves the origin service from the service string
// (unix sockets, fixed HTTP status, hello-world, socks proxy, bastion,
// or a URL), merges the rule's origin config over the defaults, attaches
// Access JWT-validation middleware when required, validates the
// hostname, and precompiles any path regex.
func validateIngress(ingress []config.UnvalidatedIngressRule, defaults OriginRequestConfig) (Ingress, error) {
	rules := make([]Rule, len(ingress))
	for i, r := range ingress {
		cfg := setConfig(defaults, r.OriginRequest)
		var service OriginService
		if prefix := "unix:"; strings.HasPrefix(r.Service, prefix) {
			// No validation necessary for unix socket filepath services
			path := strings.TrimPrefix(r.Service, prefix)
			service = &unixSocketPath{path: path, scheme: "http"}
		} else if prefix := "unix+tls:"; strings.HasPrefix(r.Service, prefix) {
			path := strings.TrimPrefix(r.Service, prefix)
			service = &unixSocketPath{path: path, scheme: "https"}
		} else if prefix := "http_status:"; strings.HasPrefix(r.Service, prefix) {
			// Fixed-response service, e.g. "http_status:404".
			statusCode, err := strconv.Atoi(strings.TrimPrefix(r.Service, prefix))
			if err != nil {
				return Ingress{}, errors.Wrap(err, "invalid HTTP status code")
			}
			if statusCode < 100 || statusCode > 999 {
				return Ingress{}, fmt.Errorf("invalid HTTP status code: %d", statusCode)
			}
			srv := newStatusCode(statusCode)
			service = &srv
		} else if r.Service == HelloWorldFlag || r.Service == HelloWorldService {
			service = new(helloWorld)
		} else if r.Service == ServiceSocksProxy {
			// NOTE: this inner `rules` (IP-access rules) shadows the
			// outer `rules` ([]Rule) for the scope of this branch.
			rules := make([]ipaccess.Rule, len(r.OriginRequest.IPRules))
			for i, ipRule := range r.OriginRequest.IPRules {
				rule, err := ipaccess.NewRuleByCIDR(ipRule.Prefix, ipRule.Ports, ipRule.Allow)
				if err != nil {
					return Ingress{}, fmt.Errorf("unable to create ip rule for %s: %s", r.Service, err)
				}
				rules[i] = rule
			}
			accessPolicy, err := ipaccess.NewPolicy(false, rules)
			if err != nil {
				return Ingress{}, fmt.Errorf("unable to create ip access policy for %s: %s", r.Service, err)
			}
			service = newSocksProxyOverWSService(accessPolicy)
		} else if r.Service == ServiceBastion || cfg.BastionMode {
			// Bastion mode will always start a Websocket proxy server, which will
			// overwrite the localService.URL field when `start` is called. So,
			// leave the URL field empty for now.
			cfg.BastionMode = true
			service = newBastionService()
		} else {
			// Validate URL services
			u, err := url.Parse(r.Service)
			if err != nil {
				return Ingress{}, err
			}
			if u.Scheme == "" || u.Hostname() == "" {
				return Ingress{}, fmt.Errorf("%s is an invalid address, please make sure it has a scheme and a hostname", r.Service)
			}
			if u.Path != "" {
				return Ingress{}, fmt.Errorf("%s is an invalid address, ingress rules don't support proxying to a different path on the origin service. The path will be the same as the eyeball request's path", r.Service)
			}
			if isHTTPService(u) {
				service = &httpService{url: u}
			} else {
				service = newTCPOverWSService(u)
			}
		}
		// Attach JWT-validation middleware when Access enforcement is on.
		var handlers []middleware.Handler
		if access := r.OriginRequest.Access; access != nil {
			if err := validateAccessConfiguration(access); err != nil {
				return Ingress{}, err
			}
			if access.Required {
				verifier := middleware.NewJWTValidator(access.TeamName, "", access.AudTag)
				handlers = append(handlers, verifier)
			}
		}
		if err := validateHostname(r, i, len(ingress)); err != nil {
			return Ingress{}, err
		}
		isCatchAllRule := (r.Hostname == "" || r.Hostname == "*") && r.Path == ""
		punycodeHostname := ""
		if !isCatchAllRule {
			punycode, err := idna.Lookup.ToASCII(r.Hostname)
			// Don't provide the punycode hostname if it is the same as the original hostname
			if err == nil && punycode != r.Hostname {
				punycodeHostname = punycode
			}
		}
		// Precompile the optional path filter.
		var pathRegexp *Regexp
		if r.Path != "" {
			var err error
			regex, err := regexp.Compile(r.Path)
			if err != nil {
				return Ingress{}, errors.Wrapf(err, "Rule #%d has an invalid regex", i+1)
			}
			pathRegexp = &Regexp{Regexp: regex}
		}
		rules[i] = Rule{
			Hostname: r.Hostname,
			punycodeHostname: punycodeHostname,
			Service: service,
			Path: pathRegexp,
			Handlers: handlers,
			Config: cfg,
		}
	}
	return Ingress{Rules: rules, Defaults: defaults}, nil
}
// validateHostname enforces the per-rule hostname contract: no port, at
// most one wildcard and only as the first character, the final rule
// must be a catch-all, and no earlier rule may be a catch-all.
func validateHostname(r config.UnvalidatedIngressRule, ruleIndex, totalRules int) error {
	// SplitHostPort succeeding means a port was present.
	if _, _, err := net.SplitHostPort(r.Hostname); err == nil {
		return errHostnameContainsPort
	}
	// A "*" anywhere but position 0 is an illegal wildcard.
	if strings.LastIndex(r.Hostname, "*") > 0 {
		return errBadWildcard
	}
	catchAll := (r.Hostname == "" || r.Hostname == "*") && r.Path == ""
	last := ruleIndex == totalRules-1
	switch {
	case last && !catchAll:
		// The last rule must catch all hostnames.
		return errLastRuleNotCatchAll
	case !last && catchAll:
		// ONLY the last rule may catch all hostnames.
		return errRuleShouldNotBeCatchAll{index: ruleIndex, hostname: r.Hostname}
	}
	return nil
}
// errRuleShouldNotBeCatchAll reports a catch-all rule that appears
// before the final position, which would shadow every later rule.
type errRuleShouldNotBeCatchAll struct {
	index    int
	hostname string
}

// Error renders the 1-based rule number and the offending hostname.
func (e errRuleShouldNotBeCatchAll) Error() string {
	const msg = "Rule #%d is matching the hostname '%s', but this will match every hostname, meaning the rules which follow it will never be triggered."
	return fmt.Sprintf(msg, e.index+1, e.hostname)
}
func isHTTPService(url *url.URL) bool {
return url.Scheme == "http" || url.Scheme == "https" || url.Scheme == "ws" || url.Scheme == "wss"
}
|
package iot
import (
"log"
"gobot.io/x/gobot"
"gobot.io/x/gobot/drivers/gpio"
"gobot.io/x/gobot/platforms/firmata"
"gobot.io/x/gobot/platforms/mqtt"
)
// Shared hardware handles, wired up by Setup and used by work().
var (
	led1 *gpio.LedDriver // LED on pin 10
	led2 *gpio.LedDriver // LED on pin 13
	mqttAdaptor *mqtt.Adaptor // broker connection the "leds" topic arrives on
)
// work registers the MQTT handler: a message on topic "leds" switches
// both LEDs on when its payload is "liga" (presumably "on"), and off
// for any other payload.
// NOTE(review): log.Fatal inside the callback terminates the entire
// process on a GPIO error — confirm that is the intended failure mode.
func work() {
	mqttAdaptor.On("leds", func(msg mqtt.Message) {
		if string(msg.Payload()) == "liga" {
			err := led1.On()
			if err != nil {
				log.Fatal(err)
			}
			err = led2.On()
			if err != nil {
				log.Fatal(err)
			}
		} else {
			err := led1.Off()
			if err != nil {
				log.Fatal(err)
			}
			err = led2.Off()
			if err != nil {
				log.Fatal(err)
			}
		}
	})
}
// Setup wires up the Arduino: connects the MQTT broker adaptor and the
// Firmata serial adaptor, binds the LED drivers to pins 10 and 13, and
// starts the robot in a background goroutine so Setup returns at once.
// NOTE(review): the broker URL and serial device path are hard-coded —
// consider making them configurable outside the original demo machine.
func Setup() {
	mqttAdaptor = mqtt.NewAdaptor("tcp://iot.eclipse.org:1883", "arduino")
	firmataAdaptor := firmata.NewAdaptor("/dev/cu.usbmodem1411")
	led1 = gpio.NewLedDriver(firmataAdaptor, "10")
	led2 = gpio.NewLedDriver(firmataAdaptor, "13")
	robot := gobot.NewRobot("bot",
		[]gobot.Connection{firmataAdaptor, mqttAdaptor},
		[]gobot.Device{led1, led2},
		work,
	)
	// Start blocks while the robot runs; log.Fatal exits the process if
	// startup fails.
	go func() {
		err := robot.Start()
		if err != nil {
			log.Fatal(err)
		}
	}()
}
|
package cmd
import (
"fmt"
"github.com/fugue/fugue-client/client/scans"
"github.com/fugue/fugue-client/format"
"github.com/spf13/cobra"
)
// listScansOptions holds the flag values for "list scans": paging
// (Offset/MaxItems), sorting (OrderBy/OrderDirection), a scan-status
// filter, and a time-range filter (RangeFrom/RangeTo — presumably unix
// timestamps, matching format.Unix used for display; verify).
type listScansOptions struct {
	Offset int64
	MaxItems int64
	OrderBy string
	OrderDirection string
	Status []string
	RangeFrom int64
	RangeTo int64
}
// listScansViewItem is one row of the rendered scans table; the
// timestamps are pre-formatted strings (see format.Unix).
type listScansViewItem struct {
	ScanID string
	EnvironmentID string
	CreatedAt string
	FinishedAt string
	Status string
	Message string
}
// NewListScansCommand returns a command that lists scans in Fugue for a
// given environment and prints them as a table (ScanID, CreatedAt,
// FinishedAt, Status).
func NewListScansCommand() *cobra.Command {
	var opts listScansOptions
	cmd := &cobra.Command{
		Use: "scans [environment_id]",
		Short: "List scans belonging to an environment",
		Aliases: []string{"scan"},
		Args: cobra.ExactArgs(1),
		Run: func(cmd *cobra.Command, args []string) {
			client, auth := getClient()
			params := scans.NewListScansParams()
			params.EnvironmentID = args[0]
			// Only forward filters the user actually set; zero/empty
			// values mean "use the API default".
			if opts.Offset > 0 {
				params.Offset = &opts.Offset
			}
			if opts.MaxItems > 0 {
				params.MaxItems = &opts.MaxItems
			}
			if opts.RangeFrom > 0 {
				params.RangeFrom = &opts.RangeFrom
			}
			if opts.RangeTo > 0 {
				params.RangeTo = &opts.RangeTo
			}
			if opts.OrderBy != "" {
				params.OrderBy = &opts.OrderBy
			}
			if opts.OrderDirection != "" {
				params.OrderDirection = &opts.OrderDirection
			}
			if len(opts.Status) > 0 {
				params.Status = opts.Status
			}
			resp, err := client.Scans.ListScans(params, auth)
			CheckErr(err)
			// Map API items to display rows with formatted timestamps.
			scans := resp.Payload.Items
			rows := make([]interface{}, len(scans))
			for i, scan := range scans {
				rows[i] = listScansViewItem{
					ScanID: scan.ID,
					EnvironmentID: scan.EnvironmentID,
					CreatedAt: format.Unix(scan.CreatedAt),
					FinishedAt: format.Unix(scan.FinishedAt),
					Status: scan.Status,
					Message: scan.Message,
				}
			}
			table, err := format.Table(format.TableOpts{
				Rows: rows,
				Columns: []string{"ScanID", "CreatedAt", "FinishedAt", "Status"},
				ShowHeader: true,
			})
			CheckErr(err)
			for _, tableRow := range table {
				fmt.Println(tableRow)
			}
		},
	}
	cmd.Flags().Int64Var(&opts.Offset, "offset", 0, "offset into results")
	cmd.Flags().Int64Var(&opts.MaxItems, "max-items", 20, "max items to return")
	cmd.Flags().StringVar(&opts.OrderBy, "order-by", "", "order by attribute")
	cmd.Flags().StringVar(&opts.OrderDirection, "order-direction", "", "order by direction [asc | desc]")
	cmd.Flags().StringSliceVar(&opts.Status, "status", nil, "Scan status filter [IN_PROGRESS | SUCCESS | ERROR]")
	cmd.Flags().Int64Var(&opts.RangeFrom, "range-from", 0, "Range from time filter")
	cmd.Flags().Int64Var(&opts.RangeTo, "range-to", 0, "Range to time filter")
	return cmd
}
// init registers the "scans" command under the parent "list" command.
func init() {
	listCmd.AddCommand(NewListScansCommand())
}
|
// Copyright 2019 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package protectedts
import (
"time"
"github.com/cockroachdb/cockroach/pkg/settings"
)
// Records and their spans are stored in memory on every host so it's best
// not to let this data size be unbounded.

// MaxBytes controls the maximum number of bytes worth of spans and metadata
// which can be protected by all protected timestamp records.
var MaxBytes = settings.RegisterIntSetting(
	"kv.protectedts.max_bytes",
	"if non-zero the limit of the number of bytes of spans and metadata which can be protected",
	1<<20, // 1 MiB
	settings.NonNegativeInt,
)

// MaxSpans controls the maximum number of spans which can be protected
// by all protected timestamp records.
var MaxSpans = settings.RegisterIntSetting(
	"kv.protectedts.max_spans",
	"if non-zero the limit of the number of spans which can be protected",
	32768,
	settings.NonNegativeInt,
)

// PollInterval defines how frequently the protectedts state is polled by the
// Tracker.
var PollInterval = settings.RegisterDurationSetting(
	"kv.protectedts.poll_interval",
	// TODO(ajwerner): better description.
	"the interval at which the protectedts subsystem state is polled",
	2*time.Minute, settings.NonNegativeDuration)

// init marks the size-limit settings as reserved-visibility: they remain
// functional but are treated as internal tuning knobs.
func init() {
	MaxBytes.SetVisibility(settings.Reserved)
	MaxSpans.SetVisibility(settings.Reserved)
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package slstorage
import (
"context"
"math/rand"
"time"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/security"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/settings/cluster"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sqlliveness"
"github.com/cockroachdb/cockroach/pkg/sql/sqlutil"
"github.com/cockroachdb/cockroach/pkg/util/cache"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/syncutil/singleflight"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/errors"
)
// GCInterval specifies duration between attempts to delete extant
// sessions that have expired.
var GCInterval = settings.RegisterDurationSetting(
	"server.sqlliveness.gc_interval",
	"duration between attempts to delete extant sessions that have expired",
	20*time.Second,
	settings.NonNegativeDuration,
)

// GCJitter specifies the jitter fraction on the interval between attempts to
// delete extant sessions that have expired.
//
// The effective interval is drawn from
// [(1-GCJitter) * GCInterval, (1+GCJitter) * GCInterval].
var GCJitter = settings.RegisterFloatSetting(
	"server.sqlliveness.gc_jitter",
	"jitter fraction on the duration between attempts to delete extant sessions that have expired",
	.15,
	// Validation: the jitter fraction must stay within [0, 1].
	func(f float64) error {
		if f < 0 || f > 1 {
			return errors.Errorf("%f is not in [0, 1]", f)
		}
		return nil
	},
)

// CacheSize is the size of the entries to store in the cache.
// In general this should be larger than the number of nodes in the cluster.
//
// TODO(ajwerner): thread memory monitoring to this level and consider
// increasing the cache size dynamically. The entries are just bytes each so
// this should not be a big deal.
var CacheSize = settings.RegisterIntSetting(
	"server.sqlliveness.storage_session_cache_size",
	"number of session entries to store in the LRU",
	1024)
// Storage implements sqlliveness.Storage.
//
// It persists session rows in the sqlliveness table and keeps in-memory
// caches of sessions known to be live and known to be dead.
type Storage struct {
	settings *cluster.Settings
	stopper  *stop.Stopper
	clock    *hlc.Clock
	db       *kv.DB
	ex       sqlutil.InternalExecutor
	metrics  Metrics
	// gcInterval returns the (jittered) wait between deletion sweeps.
	gcInterval func() time.Duration
	// g deduplicates concurrent database lookups for the same session ID.
	g  singleflight.Group
	sd sessiondata.InternalExecutorOverride
	// newTimer constructs the sweep timer; injected via NewTestingStorage.
	newTimer func() timeutil.TimerI
	mu       struct {
		syncutil.Mutex
		started bool
		// liveSessions caches the current view of expirations of live sessions.
		liveSessions *cache.UnorderedCache
		// deadSessions caches the IDs of sessions which have not been found. This
		// package makes an assumption that a session which is queried at some
		// point was alive (otherwise, how would one know the ID to query?).
		// Furthermore, this package assumes that once a sessions no longer exists,
		// it will never exist again in the future.
		deadSessions *cache.UnorderedCache
	}
}
// NewTestingStorage constructs a new storage with control for the database
// in which the `sqlliveness` table should exist.
func NewTestingStorage(
	stopper *stop.Stopper,
	clock *hlc.Clock,
	db *kv.DB,
	ie sqlutil.InternalExecutor,
	settings *cluster.Settings,
	database string,
	newTimer func() timeutil.TimerI,
) *Storage {
	// Both caches share one LRU policy: evict once the entry count exceeds
	// the configured session cache size.
	evictPolicy := cache.Config{
		Policy: cache.CacheLRU,
		ShouldEvict: func(size int, key, value interface{}) bool {
			return size > int(CacheSize.Get(&settings.SV))
		},
	}
	storage := &Storage{
		settings: settings,
		stopper:  stopper,
		clock:    clock,
		db:       db,
		ex:       ie,
		sd: sessiondata.InternalExecutorOverride{
			User:     security.NodeUserName(),
			Database: database,
		},
		newTimer: newTimer,
		// Jitter the GC interval uniformly within
		// [(1-GCJitter)*GCInterval, (1+GCJitter)*GCInterval].
		gcInterval: func() time.Duration {
			interval := GCInterval.Get(&settings.SV)
			jitter := GCJitter.Get(&settings.SV)
			scale := 1 + (2*rand.Float64()-1)*jitter
			return time.Duration(scale * float64(interval.Nanoseconds()))
		},
		metrics: makeMetrics(),
	}
	storage.mu.liveSessions = cache.NewUnorderedCache(evictPolicy)
	storage.mu.deadSessions = cache.NewUnorderedCache(evictPolicy)
	return storage
}
// NewStorage creates a new storage struct backed by the system database and
// the default (real-time) timer source.
func NewStorage(
	stopper *stop.Stopper,
	clock *hlc.Clock,
	db *kv.DB,
	ie sqlutil.InternalExecutor,
	settings *cluster.Settings,
) *Storage {
	const systemDatabase = "system"
	return NewTestingStorage(
		stopper, clock, db, ie, settings, systemDatabase,
		timeutil.DefaultTimeSource{}.NewTimer,
	)
}
// Metrics returns the associated metrics struct.
// The pointer refers to Storage-owned state; callers should only read/Inc it.
func (s *Storage) Metrics() *Metrics {
	return &s.metrics
}
// Start runs the delete sessions loop.
// It is idempotent: calls after the first are no-ops.
func (s *Storage) Start(ctx context.Context) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.mu.started {
		return
	}
	// The returned error (task refused because the stopper is quiescing) is
	// deliberately ignored; in that case there is nothing left to clean up.
	_ = s.stopper.RunAsyncTask(ctx, "slstorage", s.deleteSessionsLoop)
	s.mu.started = true
}
// IsAlive determines whether a given session is alive. If this method returns
// true, the session may no longer be alive, but if it returns false, the
// session definitely is not alive.
//
// Answers are served from the deadSessions/liveSessions caches when possible;
// otherwise a singleflight trip to the database (which may delete the expired
// row) repopulates the caches.
func (s *Storage) IsAlive(ctx context.Context, sid sqlliveness.SessionID) (alive bool, err error) {
	s.mu.Lock()
	if !s.mu.started {
		s.mu.Unlock()
		return false, sqlliveness.NotStartedError
	}
	// A session known to be dead can never come back (package invariant).
	if _, ok := s.mu.deadSessions.Get(sid); ok {
		s.mu.Unlock()
		s.metrics.IsAliveCacheHits.Inc(1)
		return false, nil
	}
	var prevExpiration hlc.Timestamp
	if expiration, ok := s.mu.liveSessions.Get(sid); ok {
		expiration := expiration.(hlc.Timestamp)
		// The record exists and is valid.
		if s.clock.Now().Less(expiration) {
			s.mu.Unlock()
			s.metrics.IsAliveCacheHits.Inc(1)
			return true, nil
		}
		// The record exists in the cache but seems expired according to our clock.
		// If we returned that the session was alive regardless of the expiration
		// then we'd never update the cache. Go fetch the session and pass in the
		// current view of the expiration. If the expiration has not changed, then
		// the session is expired and should be deleted. If it has, get the new
		// expiration for the cache.
		prevExpiration = expiration
	}

	// Launch singleflight to go read from the database and maybe delete the
	// entry. If it is found, we can add it and its expiration to the liveSessions
	// cache. If it isn't found, we know it's dead and we can add that to the
	// deadSessions cache.
	resChan, _ := s.g.DoChan(string(sid), func() (interface{}, error) {
		// store the result underneath the singleflight to avoid the need
		// for additional synchronization.
		live, expiration, err := s.deleteOrFetchSession(ctx, sid, prevExpiration)
		if err != nil {
			return nil, err
		}
		s.mu.Lock()
		defer s.mu.Unlock()
		if live {
			s.mu.liveSessions.Add(sid, expiration)
		} else {
			s.mu.deadSessions.Del(sid)
			s.mu.deadSessions.Add(sid, nil)
		}
		return live, nil
	})
	s.mu.Unlock()
	res := <-resChan
	if res.Err != nil {
		// Bug fix: this previously returned the named return value `err`,
		// which is always nil here, so database errors were silently dropped
		// and reported as (false, nil). Propagate the singleflight error.
		return false, res.Err
	}
	s.metrics.IsAliveCacheMisses.Inc(1)
	return res.Val.(bool), nil
}
// deleteOrFetchSession returns whether the queried session currently exists by
// reading from the database. If the passed expiration is non-zero and the
// existing record has the same expiration, the record will be deleted and
// false will be returned, indicating that it no longer exists. If the record
// exists and has a differing expiration timestamp, true and the associated
// expiration will be returned.
func (s *Storage) deleteOrFetchSession(
	ctx context.Context, sid sqlliveness.SessionID, prevExpiration hlc.Timestamp,
) (alive bool, expiration hlc.Timestamp, err error) {
	if err := s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
		row, err := s.ex.QueryRowEx(ctx, "fetch-single-session", txn, s.sd, `
SELECT expiration FROM sqlliveness WHERE session_id = $1
`, sid.UnsafeBytes())
		if err != nil {
			return err
		}
		// The session is not alive.
		if row == nil {
			return nil
		}
		// The session is alive if the read expiration differs from prevExpiration.
		expiration, err = tree.DecimalToHLC(&row[0].(*tree.DDecimal).Decimal)
		if err != nil {
			return errors.Wrapf(err, "failed to parse expiration for session")
		}
		if !expiration.Equal(prevExpiration) {
			alive = true
			return nil
		}
		// The session is expired and needs to be deleted.
		expiration = hlc.Timestamp{}
		_, err = s.ex.ExecEx(ctx, "delete-expired-session", txn, s.sd, `
DELETE FROM sqlliveness WHERE session_id = $1
`, sid.UnsafeBytes())
		return err
	}); err != nil {
		return false, hlc.Timestamp{}, errors.Wrapf(err,
			"could not query session id: %s", sid)
	}
	return alive, expiration, nil
}
// deleteSessionsLoop is launched in start and periodically deletes sessions.
// It runs until the context is cancelled (tied to stopper quiescence),
// sweeping expired rows on every (jittered) gcInterval tick.
func (s *Storage) deleteSessionsLoop(ctx context.Context) {
	ctx, cancel := s.stopper.WithCancelOnQuiesce(ctx)
	defer cancel()
	t := s.newTimer()
	t.Reset(s.gcInterval())
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.Ch():
			// MarkRead before Reset, per the timeutil timer usage pattern.
			t.MarkRead()
			s.deleteExpiredSessions(ctx)
			t.Reset(s.gcInterval())
		}
	}
}
// TODO(ajwerner): find a way to utilize this table scan to update the
// expirations stored in the in-memory cache or remove it altogether. As it
// stand, this scan will run more frequently than sessions expire but it won't
// propagate that fact to IsAlive. It seems like the lazy session deletion
// which has been added should be sufficient to delete expired sessions which
// matter. This would closer align with the behavior in node-liveness.

// deleteExpiredSessions deletes every sqlliveness row whose expiration
// precedes the current clock time, then records sweep metrics. Errors are
// logged (unless the context was cancelled) rather than returned.
func (s *Storage) deleteExpiredSessions(ctx context.Context) {
	now := s.clock.Now()
	row, err := s.ex.QueryRowEx(ctx, "delete-sessions", nil /* txn */, s.sd,
		`
WITH deleted_sessions AS (
    DELETE FROM sqlliveness
          WHERE expiration < $1
      RETURNING session_id
)
SELECT count(*)
  FROM deleted_sessions;`,
		tree.TimestampToDecimalDatum(now),
	)
	if err != nil {
		// Suppress noise when the error is due to shutdown/cancellation.
		if ctx.Err() == nil {
			log.Errorf(ctx, "could not delete expired sessions: %+v", err)
		}
		return
	}
	if row == nil {
		if ctx.Err() == nil {
			log.Error(ctx, "could not delete expired sessions")
		}
		return
	}
	deleted := int64(*row[0].(*tree.DInt))
	s.metrics.SessionDeletionsRuns.Inc(1)
	s.metrics.SessionsDeleted.Inc(deleted)
	if log.V(2) || deleted > 0 {
		log.Infof(ctx, "deleted %d expired SQL liveness sessions", deleted)
	}
}
// Insert inserts the input Session in table `system.sqlliveness`.
// A client must never call this method with a session which was previously
// used! The contract of IsAlive is that once a session becomes not alive, it
// must never become alive again.
//
// Write success/failure is recorded in the storage metrics.
func (s *Storage) Insert(
	ctx context.Context, sid sqlliveness.SessionID, expiration hlc.Timestamp,
) (err error) {
	if err := s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
		_, err := s.ex.QueryRowEx(
			ctx, "insert-session", txn, s.sd,
			`INSERT INTO sqlliveness VALUES ($1, $2)`,
			sid.UnsafeBytes(), tree.TimestampToDecimalDatum(expiration),
		)
		return err
	}); err != nil {
		s.metrics.WriteFailures.Inc(1)
		return errors.Wrapf(err, "could not insert session %s", sid)
	}
	log.Infof(ctx, "inserted sqlliveness session %s", sid)
	s.metrics.WriteSuccesses.Inc(1)
	return nil
}
// Update updates the row in table `system.sqlliveness` with the given input
// if the row exists and in that case returns true. Otherwise it returns
// false. A missing row is counted as a write failure (the caller's session
// has been deleted out from under it).
func (s *Storage) Update(
	ctx context.Context, sid sqlliveness.SessionID, expiration hlc.Timestamp,
) (sessionExists bool, err error) {
	err = s.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
		data, err := s.ex.QueryRowEx(
			ctx, "update-session", txn, s.sd, `
UPDATE sqlliveness SET expiration = $1 WHERE session_id = $2 RETURNING session_id`,
			tree.TimestampToDecimalDatum(expiration), sid.UnsafeBytes(),
		)
		if err != nil {
			return err
		}
		// RETURNING yields a row only when the UPDATE matched one.
		sessionExists = data != nil
		return nil
	})
	if err != nil || !sessionExists {
		s.metrics.WriteFailures.Inc(1)
	}
	if err != nil {
		return false, errors.Wrapf(err, "could not update session %s", sid)
	}
	s.metrics.WriteSuccesses.Inc(1)
	return sessionExists, nil
}
|
package lru
// GetCalltracking partitions phones into those whose virtual number is cached
// and those absent from the cache. When calltracking is disabled, every phone
// is reported as a cache miss.
func (c *Cache) GetCalltracking(phones []RealPhone) (phonesInCache map[RealPhone]VirtualPhone, phonesNotFoundInCache []RealPhone) {
	phonesInCache = make(map[RealPhone]VirtualPhone, len(phones))
	phonesNotFoundInCache = make([]RealPhone, 0, len(phones))
	if !config.calltracking.enabled {
		phonesNotFoundInCache = phones
		return
	}
	for _, phone := range phones {
		cached, err := c.calltracking.Get(phone)
		if err != nil {
			// Treat any lookup error as a miss.
			phonesNotFoundInCache = append(phonesNotFoundInCache, phone)
			continue
		}
		phonesInCache[phone] = cached.(VirtualPhone)
	}
	return
}
// SetCalltracking caches the realPhone -> virtualPhone mapping with the
// configured TTL. It is a no-op when calltracking is disabled.
func (c *Cache) SetCalltracking(realPhone RealPhone, virtualPhone VirtualPhone) error {
	if !config.calltracking.enabled {
		return nil
	}
	return c.calltracking.SetWithExpire(realPhone, virtualPhone, config.calltracking.ttl)
}
// Stat reports the calltracking cache hit rate.
func (c *Cache) Stat() float64 {
	return c.calltracking.HitRate()
}
// Len returns the number of entries in the calltracking cache.
// NOTE(review): the `false` argument presumably means "do not purge expired
// entries while counting" — confirm against the cache library's Len docs.
func (c *Cache) Len() int {
	return c.calltracking.Len(false)
}
|
package lc
import "math"
// findNumbers returns how many values in nums contain an even number of
// decimal digits.
//
// The original implementation used int(math.Log10(float64(n)))+1, which is
// undefined for n == 0 (Log10 returns -Inf) and n < 0 (NaN), and relies on
// floating-point precision. This version counts digits with exact integer
// division; the sign is ignored and 0 counts as one digit.
// Time: O(n * digits-per-value), no floating point in the hot path.
func findNumbers(nums []int) int {
	var c int
	for _, n := range nums {
		if n < 0 {
			if n == math.MinInt {
				// -math.MinInt overflows; +1 keeps the digit count of this
				// magnitude unchanged (…808 -> …807, still 19 digits).
				n++
			}
			n = -n
		}
		digits := 1
		for n >= 10 {
			n /= 10
			digits++
		}
		if digits%2 == 0 {
			c++
		}
	}
	return c
}
|
package main
import (
"flag"
"fmt"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
cliFlag "k8s.io/component-base/cli/flag"
"k8s.io/klog"
"multidim-pod-autoscaler/pkg/admission/config"
"multidim-pod-autoscaler/pkg/admission/logic"
podPatch "multidim-pod-autoscaler/pkg/admission/pod/patch"
admissionUtil "multidim-pod-autoscaler/pkg/admission/util"
clientSet "multidim-pod-autoscaler/pkg/client/clientset/versioned"
"multidim-pod-autoscaler/pkg/target"
"multidim-pod-autoscaler/pkg/util"
"multidim-pod-autoscaler/pkg/util/limitrange"
metricsUtil "multidim-pod-autoscaler/pkg/util/metrics"
mpaUtil "multidim-pod-autoscaler/pkg/util/mpa"
"multidim-pod-autoscaler/pkg/util/recommendation"
"net/http"
"os"
"time"
)
const (
	// defaultResyncPeriod is the default resync interval for the shared
	// informer cache.
	defaultResyncPeriod = 10 * time.Minute
)
// Command-line flags and environment configuration for the admission
// controller. Flag description strings are preserved verbatim (Chinese);
// English summary:
//   - client-ca-file / tls-cert-file / tls-private-key: TLS material paths
//   - port: webhook server listen port
//   - address: address where Prometheus metrics are exposed
//   - kubeconfig: path to kubeconfig for out-of-cluster use
//   - kube-api-qps / kube-api-burst: API-server client rate limits
//   - webhook-service: service name used when registering the webhook
//   - webhook-timeout-seconds: API-server timeout waiting on the webhook
//   - mpa-object-namespace: namespace searched for MPA objects
var (
	certsConfiguration = &config.CertsConfig{
		ClientCaFile:  flag.String("client-ca-file", "/etc/mpa-tls-certs/caCert.pem", "CA证书的路径"),
		TlsCertFile:   flag.String("tls-cert-file", "/etc/mpa-tls-certs/serverCert.pem", "server证书的路径"),
		TlsPrivateKey: flag.String("tls-private-key", "/etc/mpa-tls-certs/serverKey.pem", "server秘钥的路径"),
	}
	port               = flag.Int("port", 8000, "webhook server 监听的端口号")
	prometheusAddress  = flag.String("address", ":8944", "Prometheus metrics对外暴露的地址")
	kubeconfig         = flag.String("kubeconfig", "", "Path to kubeconfig. 使用out-cluster配置时指定")
	kubeApiQps         = flag.Float64("kube-api-qps", 5.0, "访问API-Server的 QPS 限制")
	kubeApiBurst       = flag.Int("kube-api-burst", 10, "访问API-Server的 QPS 峰值限制")
	namespace          = os.Getenv("NAMESPACE")
	serviceName        = flag.String("webhook-service", "mpa-webhook", "当不使用url注册webhook时,需要指定webhook的服务名")
	webhookTimeout     = flag.Int("webhook-timeout-seconds", 30, "API-Server等待webhook响应的超时时间")
	mpaObjectNamespace = flag.String("mpa-object-namespace", corev1.NamespaceAll, "搜索MPA Objects的命名空间")
)
// main wires up and runs the MPA admission webhook server: metrics, TLS
// certificates, Kubernetes clients, the MPA matcher/patch calculators, and
// finally the HTTPS endpoint plus webhook registration.
func main() {
	klog.InitFlags(nil)
	cliFlag.InitFlags()
	// Bug fix: klog's Info does not interpret printf verbs, so the original
	// printed the literal "%s". Infof is required for formatting.
	klog.V(1).Infof("Multidim Pod Autoscaler(%s) Admission Controller", mpaUtil.MultidimPodAutoscalerVersion)
	// Initialize the Prometheus metrics endpoint.
	metricsUtil.InitializeMetrics(*prometheusAddress)
	// Register the metrics used by the admission controller.
	admissionUtil.RegisterMetrics()
	// Load the TLS certificate configuration.
	certs := config.InitCerts(*certsConfiguration)
	// Build the kubeconfig (in-cluster or from --kubeconfig).
	kubeconfig := util.CreateKubeConfig(*kubeconfig, float32(*kubeApiQps), *kubeApiBurst)
	// Create the MPA lister (watches MPA objects in the target namespace).
	mpaClientset := clientSet.NewForConfigOrDie(kubeconfig)
	mpaLister := mpaUtil.NewMpasLister(mpaClientset, *mpaObjectNamespace, make(chan struct{}))
	// Informer factory and the fetcher for MPA target selectors.
	kubeClient := kubernetes.NewForConfigOrDie(kubeconfig)
	informerFactory := informers.NewSharedInformerFactory(kubeClient, defaultResyncPeriod)
	mpaTargetSelectorFetcher := target.NewMpaTargetSelectorFetcher(kubeconfig, kubeClient, informerFactory)
	// Build the recommendation provider.
	limitRangeCalculator, err := limitrange.NewCalculator(informerFactory)
	if err != nil {
		// Preserved best-effort behavior: log and continue with a nil
		// calculator rather than aborting startup.
		klog.Errorf("failed to create limitRangeCalculator, err: %v", err)
	}
	recommendationProcessor := recommendation.NewProcessor(limitRangeCalculator)
	recommendationProvider := admissionUtil.NewRecommendationProvider(limitRangeCalculator, recommendationProcessor)
	// Build the MPA matcher and the pod patch calculators.
	mpaMatcher := mpaUtil.NewMatcher(mpaLister, mpaTargetSelectorFetcher)
	patchesCalculators := []admissionUtil.PatchCalculator{
		podPatch.NewObservedPodPatchCalculator(),
		podPatch.NewResourceUpdatesPatchCalculator(recommendationProvider),
	}
	admissionServer := logic.NewAdmissionServer(mpaMatcher, patchesCalculators)
	http.HandleFunc("/", admissionServer.Serve)
	webhookServer := &http.Server{
		Addr:      fmt.Sprintf(":%d", *port),
		TLSConfig: config.ConfigTLS(kubeClient, certs.ServerCert, certs.ServerKey),
	}
	go config.WebhookRegistration(kubeClient, certs.CaCert, namespace, *serviceName, "", false, int32(*webhookTimeout))
	// Bug fix: the ListenAndServeTLS error was silently discarded, so a
	// failed bind/TLS setup exited 0 with no diagnostics.
	if err := webhookServer.ListenAndServeTLS("", ""); err != nil {
		klog.Fatalf("admission webhook server failed: %v", err)
	}
}
|
/*
* Flow CLI
*
* Copyright 2019-2021 Dapper Labs, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package accounts
import (
"fmt"
"github.com/spf13/cobra"
"github.com/onflow/flow-cli/internal/command"
"github.com/onflow/flow-cli/pkg/flowcli/services"
)
// flagsRemoveContract holds the CLI flags for the remove-contract command.
type flagsRemoveContract struct {
	// Signer selects the configured account that signs the removal transaction.
	Signer string `default:"emulator-account" flag:"signer" info:"Account name from configuration used to sign the transaction"`
	// Results is deprecated; results are always included in output.
	Results bool `default:"false" flag:"results" info:"⚠️  Deprecated: results are provided by default"`
}

// flagsRemove is the shared flag instance bound to RemoveCommand.
var flagsRemove = flagsRemoveContract{}
// RemoveCommand implements `flow accounts remove-contract <name>`: it removes
// a deployed contract from the signer's account and prints the updated
// account (without contract code).
var RemoveCommand = &command.Command{
	Cmd: &cobra.Command{
		Use:     "remove-contract <name>",
		Short:   "Remove a contract deployed to an account",
		Example: `flow accounts remove-contract FungibleToken`,
		Args:    cobra.ExactArgs(1),
	},
	Flags: &flagsRemove,
	Run: func(
		cmd *cobra.Command,
		args []string,
		globalFlags command.GlobalFlags,
		services *services.Services,
	) (command.Result, error) {
		// Warn users still passing the deprecated --results flag.
		if flagsRemove.Results {
			fmt.Println("⚠️  DEPRECATION WARNING: results flag is deprecated, results are by default included in all executions")
		}
		account, err := services.Accounts.RemoveContract(
			args[0], // name
			flagsRemove.Signer,
		)
		if err != nil {
			return nil, err
		}
		return &AccountResult{
			Account:  account,
			showCode: false,
		}, nil
	},
}
|
package forum
import (
"github.com/kil0meters/acolyte/pkg/authorization"
"github.com/kil0meters/acolyte/pkg/database"
"log"
"time"
)
// Comment represents one row of the comments table together with its lazily
// loaded reply tree.
type Comment struct {
	ID        string                 `db:"comment_id" valid:"printableascii,required"`
	Account   *authorization.Account `db:"-" valid:"-"`
	AccountID string                 `db:"account_id" valid:"-"`
	Username  string                 `db:"username" valid:"-"`
	ParentID  string                 `db:"parent_id" valid:"-"`
	// Fixed: the tag was valid:"_", which is not a recognized validator name;
	// every other non-validated field in this struct uses valid:"-" (skip).
	PostID    string    `db:"post_id" valid:"-"`
	Body      string    `db:"body" valid:"type(string),optional"`
	CreatedAt time.Time `db:"created_at" valid:"-"`
	Removed   bool      `db:"removed" valid:"-"`
	Upvotes   int       `db:"upvotes" valid:"-"`
	Downvotes int       `db:"downvotes" valid:"-"`
	// HasMoreChildren is set when replies exist below the requested depth.
	HasMoreChildren bool       `db:"-" valid:"-"`
	Replies         []*Comment `db:"-" valid:"-"`
}
// CreateComment inserts a new comment row authored by account under parentID
// in postID and returns the generated comment ID.
func CreateComment(account *authorization.Account, parentID string, postID string, body string) (string, error) {
	id := authorization.GenerateID("c", 6)
	if _, err := database.DB.Exec(
		"INSERT INTO comments (comment_id, parent_id, post_id, account_id, username, body) VALUES ($1, $2, $3, $4, $5, $6)",
		id, parentID, postID, account.ID, account.Username, body,
	); err != nil {
		return "", err
	}
	return id, nil
}
// CommentsFromUsername returns all comments posted by username, newest first,
// recursively loading replies down to depth levels; at the cutoff the
// HasMoreChildren flag is set instead. Returns nil on any database error
// (errors are logged, matching the package's existing style).
func CommentsFromUsername(username string, depth int) []*Comment {
	rows, err := database.DB.Queryx("SELECT * FROM comments WHERE username = $1 ORDER BY created_at DESC", username)
	if err != nil {
		log.Println(err)
		return nil
	}
	// Bug fix: release the result set even when StructScan fails
	// mid-iteration; the original leaked rows on that early return.
	defer rows.Close()
	comments := make([]*Comment, 0)
	for rows.Next() {
		comment := new(Comment)
		comment.HasMoreChildren = false
		if err := rows.StructScan(comment); err != nil {
			log.Println(err)
			return nil
		}
		if depth != 1 {
			comment.Replies = GetCommentChildren(comment.ID, depth-1)
		} else {
			comment.HasMoreChildren = true
		}
		comments = append(comments, comment)
	}
	// Bug fix: surface errors that terminated iteration prematurely.
	if err := rows.Err(); err != nil {
		log.Println(err)
		return nil
	}
	return comments
}
// GetComment fetches a single comment by ID, with its reply tree loaded to
// the requested depth. Returns nil when depth is 0 or on a database error
// (errors are logged).
func GetComment(commentID string, depth int) *Comment {
	if depth == 0 {
		return nil
	}
	var comment Comment
	err := database.DB.QueryRowx("SELECT * FROM comments WHERE comment_id = $1", commentID).StructScan(&comment)
	if err != nil {
		log.Println(err)
		return nil
	}
	comment.Replies = GetCommentChildren(commentID, depth)
	return &comment
}
// GetCommentChildren returns the direct replies to commentID, recursing until
// depth is exhausted; at the cutoff the HasMoreChildren flag is set instead.
// Returns nil when depth <= 0 or on a database error (errors are logged).
func GetCommentChildren(commentID string, depth int) []*Comment {
	log.Println("Getting children of", commentID, "at depth", depth)
	if depth <= 0 {
		return nil
	}
	rows, err := database.DB.Queryx("SELECT * FROM comments WHERE parent_id = $1", commentID)
	if err != nil {
		log.Println(err)
		return nil
	}
	// Bug fix: release the result set even when StructScan fails
	// mid-iteration; the original leaked rows on that early return.
	defer rows.Close()
	comments := make([]*Comment, 0)
	for rows.Next() {
		comment := new(Comment)
		comment.HasMoreChildren = false
		if err := rows.StructScan(comment); err != nil {
			log.Println(err)
			return nil
		}
		if depth != 1 {
			comment.Replies = GetCommentChildren(comment.ID, depth-1)
		} else {
			comment.HasMoreChildren = true
		}
		comments = append(comments, comment)
	}
	// Bug fix: surface errors that terminated iteration prematurely.
	if err := rows.Err(); err != nil {
		log.Println(err)
		return nil
	}
	return comments
}
|
package config
import (
"github.com/kosotd/go-microservice-skeleton/config"
"gotest.tools/assert"
"testing"
)
// testConfig wraps the skeleton's base Config so it satisfies the interface
// expected by config.InitConfig.
type testConfig struct {
	config config.Config
}

// GetBaseConfig returns a pointer to the embedded base configuration.
func (c *testConfig) GetBaseConfig() *config.Config {
	return &c.config
}
// TestConfigEnv initializes the skeleton config and verifies the expected
// base-config values (server port and cache timings).
// NOTE(review): the expected values presumably come from the test
// environment/defaults — confirm against the skeleton's config sources.
func TestConfigEnv(t *testing.T) {
	config.InitConfig(&testConfig{}, func(helper config.EnvHelper) {})
	assert.Equal(t, "9090", config.GetConfig().ServerPort)
	assert.Equal(t, "1s", config.GetConfig().CacheExpiration)
	assert.Equal(t, "2s", config.GetConfig().CacheUpdatePeriod)
}
|
package openrtb_ext
import (
"errors"
"github.com/buger/jsonparser"
)
// ExtSite defines the contract for bidrequest.site.ext
type ExtSite struct {
	// AMP should be 1 if the request comes from an AMP page, and 0 if not.
	AMP int8 `json:"amp"`
}
// UnmarshalJSON decodes bidrequest.site.ext, accepting only an "amp" value
// that is the single digit 0 or 1; anything else (including empty input)
// yields an error.
func (es *ExtSite) UnmarshalJSON(b []byte) error {
	if len(b) == 0 {
		return errors.New("request.site.ext must have some data in it")
	}
	value, dataType, _, _ := jsonparser.Get(b, "amp")
	// The value must be numeric (or absent) and exactly one byte long.
	if (dataType != jsonparser.NotExist && dataType != jsonparser.Number) || len(value) != 1 {
		return errors.New(`request.site.ext.amp must be either 1, 0, or undefined`)
	}
	switch value[0] {
	case '0':
		es.AMP = 0
	case '1':
		es.AMP = 1
	default:
		return errors.New(`request.site.ext.amp must be either 1, 0, or undefined`)
	}
	return nil
}
|
package testutil
import (
"github.com/codeskyblue/go-sh"
"github.com/stretchr/testify/require"
"testing"
)
// MustRun executes the external command name with arguments a and fails the
// calling test if the command returns a non-nil error.
func MustRun(t *testing.T, name string, a ...interface{}) {
	require.Nil(t, sh.Command(name, a...).Run())
}
|
package reader
import (
"adventure_book/model"
"encoding/json"
"os"
)
// ReadJsonStory loads a Story from the JSON file at filename.
// It returns nil and the underlying error if the file cannot be opened or
// its contents cannot be decoded.
func ReadJsonStory(filename string) (storyRes model.Story, err error) {
	file, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	// Bug fix: the file handle was never closed, leaking a descriptor on
	// every call. Close on all paths; the decode error (if any) dominates.
	defer file.Close()
	d := json.NewDecoder(file)
	if err := d.Decode(&storyRes); err != nil {
		return nil, err
	}
	return storyRes, nil
}
|
package models
// Following records one account the current user follows, identified by
// username.
type Following struct {
	Username string
}
|
package request
import (
"github.com/z-ray/alipay/api/response"
)
// AlipayMobilePublicMessageCustomSendRequest is the request for the
// alipay.mobile.public.message.custom.send API.
type AlipayMobilePublicMessageCustomSendRequest struct {
	// BizContent carries the business parameters of the call, passed through
	// as the biz_content form field.
	BizContent string
}
// GetApiMethod returns the Alipay gateway method name for this request.
func (r *AlipayMobilePublicMessageCustomSendRequest) GetApiMethod() string {
	return "alipay.mobile.public.message.custom.send"
}
// GetTextParams returns the key/value form parameters sent with the request.
func (r *AlipayMobilePublicMessageCustomSendRequest) GetTextParams() map[string]string {
	// TODO: expose a hook for caller-supplied extra parameters,
	// e.g. utils.putAll(params, userParams).
	return map[string]string{
		"biz_content": r.BizContent,
	}
}
// GetResponse returns an empty response object of the matching type, ready to
// be populated when the API result is parsed.
func (r *AlipayMobilePublicMessageCustomSendRequest) GetResponse() response.AlipayResponse {
	resp := new(response.AlipayMobilePublicMessageCustomSendResponse)
	// The type name is recorded so the result can be located when parsing.
	resp.Name = "AlipayMobilePublicMessageCustomSendResponse"
	return resp
}
// GetApiVersion returns the API protocol version used for this request.
func (r *AlipayMobilePublicMessageCustomSendRequest) GetApiVersion() string {
	return "1.0"
}
|
package todo
import (
"github.com/codegangsta/cli"
)
// Commands returns the todo CLI subcommands: add, list, show, and remove.
func Commands() []cli.Command {
	commands := make([]cli.Command, 0, 4)
	commands = append(commands,
		cli.Command{
			Name:   "add",
			Usage:  "Add a new todo item.",
			Action: AddAction,
		},
		cli.Command{
			Name:   "list",
			Usage:  "List all active todo items.",
			Action: ListAction,
		},
		cli.Command{
			Name:   "show",
			Usage:  "Show a todo item by id",
			Action: ShowAction,
		},
		cli.Command{
			Name:   "remove",
			Usage:  "Remove a todo item by id",
			Action: RemoveAction,
		},
	)
	return commands
}
|
package torrent
const (
// Base API Endpoint
ENDPOINT_API = "%s/api/v2"
// Authentication Endpoints
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#authentication
ENDPOINT_AUTHENTICATION = ENDPOINT_API + "/auth/"
// [GET] Login
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#login
ENDPOINT_AUTHENTICATION_LOGIN = ENDPOINT_AUTHENTICATION + "login"
// [GET] Logout
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#logout
ENDPOINT_AUTHENTICATION_LOGOUT = ENDPOINT_AUTHENTICATION + "logout"
// Application endpoints
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#application
ENDPOINT_APPLICATION = ENDPOINT_API + "/app/"
// [GET] Get application version
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-application-version
ENDPOINT_APPLICATION_VERSION = ENDPOINT_APPLICATION + "version"
// [GET] Get API version
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-api-version
ENDPOINT_APPLICATION_API_VERSION = ENDPOINT_APPLICATION + "webapiVersion"
// [GET] Get build info
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-build-info
ENDPOINT_APPLICATION_BUILD_INFO = ENDPOINT_APPLICATION + "buildInfo"
// [GET] Shutdown application
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#shutdown-application
ENDPOINT_APPLICATION_SHUTDOWN = ENDPOINT_APPLICATION + "shutdown"
// [GET] Get application preferences
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-application-preferences
ENDPOINT_APPLICATION_PREFERENCES = ENDPOINT_APPLICATION + "preferences"
// [POST] Set application preferences
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#set-application-preferences
ENDPOINT_APPLICATION_SET_PREFERENCES = ENDPOINT_APPLICATION + "setPreferences"
// [GET] Get default save path
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-default-save-path
ENDPOINT_APPLICATION_DEFAULT_SAVE_PATH = ENDPOINT_APPLICATION + "defaultSavePath"
// Log Endpoints
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#log
ENDPOINT_LOG = ENDPOINT_API + "/log/"
// [GET] Get log
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-log
ENDPOINT_LOG_GET = ENDPOINT_LOG + "main"
// [GET] Get peer log
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-peer-log
ENDPOINT_LOG_PEERS = ENDPOINT_LOG + "peers"
// Sync Endpoints
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#sync
ENDPOINT_SYNC = ENDPOINT_API + "/sync/"
// [GET] Get main data
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-main-data
ENDPOINT_SYNC_MAIN = ENDPOINT_SYNC + "maindata"
// [GET] Get torrent peers data
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-torrent-peers-data
ENDPOINT_SYNC_TORRENT_PEERS = ENDPOINT_SYNC + "torrentPeers"
// Transfer info
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#transfer-info
ENDPOINT_TRANSFER = ENDPOINT_API + "transfer"
// [GET] Get global transfer info
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-global-transfer-info
ENDPOINT_TRANSFER_INFO = ENDPOINT_TRANSFER + "info"
// [GET] Get alternative speed limits state
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-alternative-speed-limits-state
ENDPOINT_TRANSFER_ALT_SPEED_LIMITS = ENDPOINT_TRANSFER + "speedLimitsMode"
// [GET] Toggle alternative speed limits
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#toggle-alternative-speed-limits
ENDPOINT_TRANSFER_TOGGLE_SPEED_LIMITS = ENDPOINT_TRANSFER + "toggleSpeedLimitsMode"
// [GET] Get global download limit
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-global-download-limit
ENDPOINT_TRANSFER_GET_DOWNLOAD_LIMIT = ENDPOINT_TRANSFER + "downloadLimit"
// [POST] Set global download limit
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#set-global-download-limit
ENDPOINT_TRANSFER_SET_DOWNLOAD_LIMIT = ENDPOINT_TRANSFER + "setDownloadLimit"
// [GET] Get global upload limit
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-global-upload-limit
ENDPOINT_TRANSFER_GET_UPLOAD_LIMIT = ENDPOINT_TRANSFER + "uploadLimit"
// [POST] Set global upload limit
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#set-global-upload-limit
ENDPOINT_TRANSFER_SET_UPLOAD_LIMIT = ENDPOINT_TRANSFER + "setUploadLimit"
// [ANY] Ban peers
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#ban-peers
ENDPOINT_TRANSFER_BAN_PEERS = ENDPOINT_TRANSFER + "banPeers"
// Torrent Management Endpoint
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#torrent-management
ENDPOINT_TORRENT = ENDPOINT_API + "/torrents/"
// Get torrent list
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-torrent-list
ENDPOINT_TORRENT_LIST = ENDPOINT_TORRENT + "info"
// [GET] Get torrent generic properties
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-torrent-generic-properties
ENDPOINT_TORRENT_PROPERTIES = ENDPOINT_TORRENT + "properties"
// [GET] Get torrent trackers
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-torrent-trackers
ENDPOINT_TORRENT_TRACKERS = ENDPOINT_TORRENT + "trackers"
// [GET] Get torrent web seeds
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-torrent-web-seeds
ENDPOINT_TORRENT_WEB_SEEDS = ENDPOINT_TORRENT + "webseeds"
// [GET] Get torrent contents
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-torrent-contents
ENDPOINT_TORRENT_CONTENTS = ENDPOINT_TORRENT + "files"
// [GET] Get torrent pieces' states
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-torrent-pieces-states
ENDPOINT_TORRENT_PIECES_STATES = ENDPOINT_TORRENT + "pieceStates"
// [GET] Get torrent pieces' hashes
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-torrent-pieces-hashes
ENDPOINT_TORRENT_PIECES_HASHES = ENDPOINT_TORRENT + "pieceHashes"
// [POST] Pause torrents
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#pause-torrents
ENDPOINT_TORRENT_PAUSE = ENDPOINT_TORRENT + "pause"
// [POST] Resume torrents
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#resume-torrents
ENDPOINT_TORRENT_RESUME = ENDPOINT_TORRENT + "resume"
// [POST] Delete torrents
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#delete-torrents
ENDPOINT_TORRENT_DELETE = ENDPOINT_TORRENT + "delete"
// [POST] Recheck torrents
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#recheck-torrents
ENDPOINT_TORRENT_RECHECK = ENDPOINT_TORRENT + "recheck"
// [POST] Reannounce torrents
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#reannounce-torrents
ENDPOINT_TORRENT_REANNOUNCE = ENDPOINT_TORRENT + "reannounce"
// [POST] Add new torrent
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#add-new-torrent
ENDPOINT_TORRENT_CREATE = ENDPOINT_TORRENT + "add"
// [POST] Add trackers to torrent
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#add-trackers-to-torrent
ENDPOINT_TORRENT_ADD_TRACKERS = ENDPOINT_TORRENT + "addTrackers"
// [POST] Edit trackers
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#edit-trackers
ENDPOINT_TORRENT_EDIT_TRACKERS = ENDPOINT_TORRENT + "editTracker"
// [POST] Remove trackers
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#remove-trackers
ENDPOINT_TORRENT_REMOVE_TRACKERS = ENDPOINT_TORRENT + "removeTrackers"
// [POST] Add peers
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#add-peers
ENDPOINT_TORRENT_ADD_PEERS = ENDPOINT_TORRENT + "addPeers"
// [POST] Increase torrent priority
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#increase-torrent-priority
ENDPOINT_TORRENT_INCREASE_PRIORITY = ENDPOINT_TORRENT + "increasePrio"
// [POST] Decrease torrent priority
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#decrease-torrent-priority
ENDPOINT_TORRENT_DECREASE_PRIORITY = ENDPOINT_TORRENT + "decreasePrio"
// [POST] Maximal torrent priority
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#maximal-torrent-priority
ENDPOINT_TORRENT_MAX_PRIORITY = ENDPOINT_TORRENT + "topPrio"
// [POST] Minimal torrent priority
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#minimal-torrent-priority
ENDPOINT_TORRENT_MIN_PRIORITY = ENDPOINT_TORRENT + "bottomPrio"
// [POST] Set file priority
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#set-file-priority
ENDPOINT_TORRENT_SET_FILE_PRIORITY = ENDPOINT_TORRENT + "filePrio"
// [POST] Get torrent download limit
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#get-torrent-download-limit
ENDPOINT_TORRENT_GET_DOWNLOAD_LIMIT = ENDPOINT_TORRENT + "downloadLimit"
// [POST] Set torrent download limit
// https://github.com/qbittorrent/qBittorrent/wiki/Web-API-Documentation#set-torrent-download-limit
ENDPOINT_TORRENT_SET_DOWNLOAD_LIMIT = ENDPOINT_TORRENT + "setDownloadLimit"
)
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package firmware
import (
"bufio"
"context"
"io"
"os"
"strings"
"time"
"chromiumos/tast/common/testexec"
"chromiumos/tast/local/bundles/cros/firmware/fwupd"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the FwupdInhibitSuspend test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: FwupdInhibitSuspend,
		Desc: "Ensures .lock file does not exist before, after update, does exist during",
		Contacts: []string{
			"binarynewts@google.org",    // Test Author — NOTE(review): ".org" looks like a typo for "@google.com"; confirm.
			"chromeos-fwupd@google.com", // CrOS FWUPD
		},
		Attr:         []string{"group:mainline", "informational"},
		SoftwareDeps: []string{"fwupd"},
		HardwareDeps: hwdep.D(
			hwdep.Battery(),  // Test doesn't run on ChromeOS devices without a battery.
			hwdep.ChromeEC(), // Test requires Chrome EC to set battery to charge via ectool.
		),
		// Allow time for the charging-state setup on top of the update itself.
		Timeout: fwupd.ChargingStateTimeout + 1*time.Minute,
	})
}
// streamOutput sends back messages as they occur
func streamOutput(rc io.ReadCloser) <-chan string {
ch := make(chan string)
scanner := bufio.NewScanner(rc)
go func() {
for scanner.Scan() {
if s := scanner.Text(); s != "" {
ch <- s
}
}
close(ch)
}()
return ch
}
// FwupdInhibitSuspend runs the fwupdtool utility and makes sure
// that the system can suspend before and after, but not during an update.
// The suspend inhibit is observed via the presence of
// /run/lock/power_override/fwupd.lock, which fwupd creates while writing.
func FwupdInhibitSuspend(ctx context.Context, s *testing.State) {
	// The lock file must not exist before the update starts.
	if _, err := os.Stat("/run/lock/power_override/fwupd.lock"); err == nil {
		s.Fatal("System cannot suspend but no update has started")
	}
	// Make sure the DUT battery is charging/charged; restore the original
	// charging state when the test finishes.
	if cleanup, err := fwupd.SetFwupdChargingState(ctx, true); err != nil {
		s.Fatal("Failed to set charging state: ", err)
	} else {
		defer func() {
			if err := cleanup(ctx); err != nil {
				s.Fatal("Failed to cleanup: ", err)
			}
		}()
	}
	// Reinstall the release firmware so an update is in flight.
	cmd := testexec.CommandContext(ctx, "/usr/bin/fwupdmgr", "install", "--allow-reinstall", "-v", fwupd.ReleaseURI)
	cmd.Env = append(os.Environ(), "CACHE_DIRECTORY=/var/cache/fwupd")
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		s.Fatalf("%q failed: %v", cmd.Args, err)
	}
	// Watch output until the update begins its write phase.
	outch := streamOutput(stdout)
	// Drain any remaining output so the reader goroutine can exit.
	defer func() {
		for range outch {
		}
	}()
	if err := cmd.Start(); err != nil {
		s.Fatalf("%q failed: %v", cmd.Args, err)
	}
	// Safety net for early s.Fatal exits: kill and reap the process.
	// NOTE(review): cmd.Wait is also called unconditionally near the end,
	// so this defer may call Wait a second time — confirm testexec tolerates that.
	defer func() {
		cmd.Kill()
		cmd.Wait()
	}()
	// Ensure the write phase is entered; stop reading output at this point.
	write := false
	for str := range outch {
		if strings.Contains(str, "Emitting ::status-changed() [device-write]") {
			write = true
			break
		}
	}
	if !write {
		s.Fatal("Write phase not entered by fwupd")
	}
	// The lock file must exist while the device write is in progress.
	if _, err := os.Stat("/run/lock/power_override/fwupd.lock"); os.IsNotExist(err) {
		s.Fatal("System can suspend but update is in progress")
	}
	// Wait for the update to complete, then verify the lock file is gone.
	cmd.Wait()
	if _, err := os.Stat("/run/lock/power_override/fwupd.lock"); err == nil {
		s.Fatal("System cannot suspend but update has finished")
	}
}
|
package main
import "fmt"
import "sync"
import "time"
var wg sync.WaitGroup
func main() {
wg.Add(2)
go foo()
go bar()
wg.Wait()
}
// foo prints 45 numbered lines with a 3ms pause between them, then marks
// this goroutine done on the package-level WaitGroup.
func foo() {
	// Defer the Done so the WaitGroup is released even if the loop body
	// is later changed to return early.
	defer wg.Done()
	for i := 0; i < 45; i++ {
		fmt.Println("Foo: ", i)
		// 3 * time.Millisecond is already a time.Duration; the extra
		// time.Duration(...) conversion was redundant.
		time.Sleep(3 * time.Millisecond)
	}
}
// bar prints 45 numbered lines with a 20ms pause between them, then marks
// this goroutine done on the package-level WaitGroup.
func bar() {
	// Defer the Done so the WaitGroup is released even if the loop body
	// is later changed to return early.
	defer wg.Done()
	for i := 0; i < 45; i++ {
		fmt.Println("bar: ", i)
		// 20 * time.Millisecond is already a time.Duration; the extra
		// time.Duration(...) conversion was redundant.
		time.Sleep(20 * time.Millisecond)
	}
}
// To run methods concurrently, define a WaitGroup, add the number of
// goroutines to it, and call wg.Wait(). Each goroutine must call wg.Done()
// when it completes so it is removed from the wait group; once the counter
// reaches zero, wg.Wait() returns and the main function can finish.
// concurrency - independently executing lots of things at once
// parallelism - simultaneous execution of (possibly related) computations
// go run -race main.go ---> to check for race conditions
|
package filter
import (
"strconv"
"github.com/layer5io/meshkit/errors"
)
// Error codes used by this package when reporting failures through meshkit.
const (
	// ErrInvalidAuthTokenCode: no usable authentication token was supplied.
	ErrInvalidAuthTokenCode = "1000"
	// ErrInvalidAPICallCode: the server responded with a non-success status.
	ErrInvalidAPICallCode = "1001"
	// ErrReadAPIResponseCode: the response body could not be read.
	ErrReadAPIResponseCode = "1002"
	// ErrUnmarshalCode: the response body could not be unmarshalled.
	ErrUnmarshalCode = "1003"
)
// ErrInvalidAuthToken reports that no valid user token was provided.
func ErrInvalidAuthToken() error {
	return errors.New(ErrInvalidAuthTokenCode, errors.Alert,
		[]string{"authentication token not found. please supply a valid user token with the --token (or -t) flag"},
		[]string{}, []string{}, []string{})
}
// ErrInvalidAPICall reports a non-success HTTP status code from the server.
func ErrInvalidAPICall(statusCode int) error {
	code := strconv.Itoa(statusCode)
	return errors.New(ErrInvalidAPICallCode, errors.Alert,
		[]string{"Response Status Code ", code, " possible Server Error"},
		[]string{}, []string{}, []string{})
}
// ErrReadAPIResponse reports a failure while reading the API response body.
func ErrReadAPIResponse(err error) error {
	return errors.New(ErrReadAPIResponseCode, errors.Alert,
		[]string{"failed to read response body"},
		[]string{err.Error()}, []string{}, []string{})
}
// ErrUnmarshal reports a failure while unmarshalling the API response.
func ErrUnmarshal(err error) error {
	return errors.New(ErrUnmarshalCode, errors.Alert,
		[]string{"Error unmarshalling response "},
		[]string{err.Error()}, []string{}, []string{})
}
|
package csv
import (
"context"
"log"
"testing"
"github.com/go-tamate/tamate"
"github.com/go-tamate/tamate/driver"
"github.com/stretchr/testify/assert"
)
// Test_GetSchema verifies that a CSV header row is exposed as string
// columns with the expected names and ordinal positions.
func Test_GetSchema(t *testing.T) {
	const (
		rootDir  = "./"
		fileName = "getSchema"
	)
	testData := `
(id),name,age
`
	// Create the fixture file and make sure it is removed afterwards.
	path := joinPath(rootDir, fileName)
	assert.NoError(t, createFile(path, testData))
	defer func() {
		assert.NoError(t, deleteFile(path))
	}()
	ds, err := tamate.Open(driverName, rootDir)
	if !assert.NoError(t, err) {
		return
	}
	schema, err := ds.GetSchema(context.Background(), fileName)
	if !assert.NoError(t, err) {
		return
	}
	// Every header cell becomes a string column at its CSV position.
	cols := schema.Columns
	for i, want := range []string{"id", "name", "age"} {
		assert.Equal(t, driver.ColumnTypeString, cols[i].Type)
		assert.Equal(t, want, cols[i].Name)
		assert.Equal(t, i, cols[i].OrdinalPosition)
	}
}
// Test_SetSchema is currently a stub: it only creates and removes the
// fixture file and logs afterData. NOTE(review): no SetSchema call or
// assertion on the updated schema is made yet — this test verifies nothing
// beyond fixture setup/teardown. TODO: exercise SetSchema with afterData.
func Test_SetSchema(t *testing.T) {
	var (
		rootDir    = "./"
		fileName   = "setSchema"
		beforeData = `
(id),name,age
`
		afterData = `
(id),name,from
`
	)
	// Create the fixture file and make sure it is removed afterwards.
	path := joinPath(rootDir, fileName)
	err := createFile(path, beforeData)
	assert.NoError(t, err)
	defer func() {
		cerr := deleteFile(path)
		assert.NoError(t, cerr)
	}()
	// Placeholder so afterData is used until the real assertions exist.
	log.Println(afterData)
}
// Test_GetRows is currently a stub: it only creates and removes the
// fixture file. NOTE(review): GetRows is never called and nothing is
// asserted about row contents. TODO: fetch rows and verify them.
func Test_GetRows(t *testing.T) {
	var (
		rootDir  = "./"
		fileName = "getRows"
		testData = `
(id),name,age
1,hana,16
`
	)
	// Create the fixture file and make sure it is removed afterwards.
	path := joinPath(rootDir, fileName)
	err := createFile(path, testData)
	assert.NoError(t, err)
	defer func() {
		cerr := deleteFile(path)
		assert.NoError(t, cerr)
	}()
}
// Test_SetRows is currently a stub: it only creates and removes the
// fixture file and logs afterData. NOTE(review): no SetRows call or
// assertion on the rewritten rows is made yet. TODO: exercise SetRows
// with afterData and verify the file contents.
func Test_SetRows(t *testing.T) {
	var (
		rootDir    = "./"
		fileName   = "setRows"
		beforeData = `
(id),name,age
1,hana,16
`
		afterData = `
(id),name,age
1,tamate,15
`
	)
	// Create the fixture file and make sure it is removed afterwards.
	path := joinPath(rootDir, fileName)
	err := createFile(path, beforeData)
	assert.NoError(t, err)
	defer func() {
		cerr := deleteFile(path)
		assert.NoError(t, cerr)
	}()
	// Placeholder so afterData is used until the real assertions exist.
	log.Println(afterData)
}
|
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package clusteragent
import (
"fmt"
"strconv"
securityv1 "github.com/openshift/api/security/v1"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apicommon "github.com/DataDog/datadog-operator/apis/datadoghq/common"
apicommonv1 "github.com/DataDog/datadog-operator/apis/datadoghq/common/v1"
"github.com/DataDog/datadog-operator/apis/datadoghq/v2alpha1"
apiutils "github.com/DataDog/datadog-operator/apis/utils"
"github.com/DataDog/datadog-operator/controllers/datadogagent/common"
"github.com/DataDog/datadog-operator/controllers/datadogagent/component"
"github.com/DataDog/datadog-operator/pkg/controller/utils"
"github.com/DataDog/datadog-operator/pkg/defaulting"
"github.com/DataDog/datadog-operator/pkg/kubernetes/rbac"
)
// NewDefaultClusterAgentDeployment return a new default cluster-agent deployment
func NewDefaultClusterAgentDeployment(dda metav1.Object) *appsv1.Deployment {
	deployment := component.NewDeployment(dda, apicommon.DefaultClusterAgentResourceSuffix, GetClusterAgentName(dda), GetClusterAgentVersion(dda), nil)

	// Propagate the deployment's labels and annotations onto the pod template.
	podTemplate := NewDefaultClusterAgentPodTemplateSpec(dda)
	for k, v := range deployment.GetLabels() {
		podTemplate.Labels[k] = v
	}
	for k, v := range deployment.GetAnnotations() {
		podTemplate.Annotations[k] = v
	}

	deployment.Spec.Template = *podTemplate
	deployment.Spec.Replicas = apiutils.NewInt32Pointer(apicommon.DefaultClusterAgentReplicas)
	return deployment
}
// NewDefaultClusterAgentPodTemplateSpec return a default PodTemplateSpec for the cluster-agent deployment.
// Labels and Annotations maps are initialized empty; callers are expected
// to fill them in (see NewDefaultClusterAgentDeployment).
func NewDefaultClusterAgentPodTemplateSpec(dda metav1.Object) *corev1.PodTemplateSpec {
	volumes := []corev1.Volume{
		component.GetVolumeInstallInfo(dda),
		component.GetVolumeForConfd(),
		component.GetVolumeForLogs(),
		component.GetVolumeForCertificates(),
		// /tmp is needed because some versions of the DCA (at least until
		// 1.19.0) write to it.
		// In some code paths, the klog lib writes to /tmp instead of using the
		// standard datadog logs path.
		// In some envs like Openshift, when running as non-root, the pod will
		// not have permissions to write on /tmp, that's why we need to mount
		// it with write perms.
		component.GetVolumeForTmp(),
	}
	// Mounts mirror the volume list above, one mount per volume.
	volumeMounts := []corev1.VolumeMount{
		component.GetVolumeMountForInstallInfo(),
		component.GetVolumeMountForConfd(),
		component.GetVolumeMountForLogs(),
		component.GetVolumeMountForCertificates(),
		component.GetVolumeMountForTmp(),
	}
	podTemplate := &corev1.PodTemplateSpec{
		ObjectMeta: metav1.ObjectMeta{
			Labels:      make(map[string]string),
			Annotations: make(map[string]string),
		},
		Spec: defaultPodSpec(dda, volumes, volumeMounts, defaultEnvVars(dda)),
	}
	return podTemplate
}
// GetDefaultServiceAccountName return the default Cluster-Agent ServiceAccountName
// in the form "<dda name>-<cluster-agent suffix>".
func GetDefaultServiceAccountName(dda metav1.Object) string {
	return dda.GetName() + "-" + apicommon.DefaultClusterAgentResourceSuffix
}
// defaultPodSpec assembles the cluster-agent PodSpec: a single container
// running the latest cluster-agent image with the given volumes, mounts,
// and environment, scheduled with the default pod anti-affinity.
func defaultPodSpec(dda metav1.Object, volumes []corev1.Volume, volumeMounts []corev1.VolumeMount, envVars []corev1.EnvVar) corev1.PodSpec {
	podSpec := corev1.PodSpec{
		ServiceAccountName: GetDefaultServiceAccountName(dda),
		Containers: []corev1.Container{
			{
				Name:  string(apicommonv1.ClusterAgentContainerName),
				Image: fmt.Sprintf("%s/%s:%s", apicommon.DefaultImageRegistry, apicommon.DefaultClusterAgentImageName, defaulting.ClusterAgentLatestVersion),
				Ports: []corev1.ContainerPort{
					{
						ContainerPort: 5005,
						Name:          "agentport",
						Protocol:      "TCP",
					},
				},
				Env:          envVars,
				VolumeMounts: volumeMounts,
				Command:      nil,
				Args:         nil,
				// Harden the container: read-only root FS and no privilege escalation.
				SecurityContext: &corev1.SecurityContext{
					ReadOnlyRootFilesystem:   apiutils.NewBoolPointer(true),
					AllowPrivilegeEscalation: apiutils.NewBoolPointer(false),
				},
			},
		},
		Affinity: DefaultAffinity(),
		Volumes:  volumes,
		// To be uncommented when the cluster-agent Dockerfile will be updated to use a non-root user by default
		// SecurityContext: &corev1.PodSecurityContext{
		// RunAsNonRoot: apiutils.NewBoolPointer(true),
		// },
	}
	return podSpec
}
// defaultEnvVars returns the baseline environment for the cluster-agent
// container: the pod's own name (downward API), the cluster-agent service
// name, leader election enabled, and the health-check port.
func defaultEnvVars(dda metav1.Object) []corev1.EnvVar {
	envVars := []corev1.EnvVar{
		{
			Name: apicommon.DDPodName,
			// Injected from the pod's own metadata via the downward API.
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{
					FieldPath: "metadata.name",
				},
			},
		},
		{
			Name:  apicommon.DDClusterAgentKubeServiceName,
			Value: GetClusterAgentServiceName(dda),
		},
		{
			Name:  apicommon.DDLeaderElection,
			Value: "true",
		},
		{
			Name:  apicommon.DDHealthPort,
			Value: strconv.Itoa(int(apicommon.DefaultAgentHealthPort)),
		},
	}
	return envVars
}
// DefaultAffinity returns the pod anti affinity of the cluster agent
// the default anti affinity prefers scheduling the runners on different nodes if possible
// for better checks stability in case of node failure.
func DefaultAffinity() *corev1.Affinity {
	return &corev1.Affinity{
		PodAntiAffinity: &corev1.PodAntiAffinity{
			// Soft (preferred) anti-affinity: pods matching the
			// cluster-agent component label avoid sharing a node,
			// but scheduling still succeeds if they must co-locate.
			PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{
				{
					Weight: 50,
					PodAffinityTerm: corev1.PodAffinityTerm{
						LabelSelector: &metav1.LabelSelector{
							MatchLabels: map[string]string{
								apicommon.AgentDeploymentComponentLabelKey: apicommon.DefaultClusterAgentResourceSuffix,
							},
						},
						// Spread across nodes.
						TopologyKey: "kubernetes.io/hostname",
					},
				},
			},
		},
	}
}
// GetDefaultClusterAgentRolePolicyRules returns the default policy rules for the Cluster Agent
// Can be used by the Agent if the Cluster Agent is disabled
func GetDefaultClusterAgentRolePolicyRules(dda metav1.Object) []rbacv1.PolicyRule {
	// Start from the leader-election rules, then allow managing the
	// datadog cluster-id ConfigMap.
	rules := append([]rbacv1.PolicyRule{}, GetLeaderElectionPolicyRule(dda)...)
	return append(rules, rbacv1.PolicyRule{
		APIGroups:     []string{rbac.CoreAPIGroup},
		Resources:     []string{rbac.ConfigMapsResource},
		ResourceNames: []string{common.DatadogClusterIDResourceName},
		Verbs:         []string{rbac.GetVerb, rbac.UpdateVerb, rbac.CreateVerb},
	})
}
// GetDefaultClusterAgentClusterRolePolicyRules returns the default policy rules for the Cluster Agent
// Can be used by the Agent if the Cluster Agent is disabled
func GetDefaultClusterAgentClusterRolePolicyRules(dda metav1.Object) []rbacv1.PolicyRule {
	return []rbacv1.PolicyRule{
		// Read-only access to the core resources the DCA watches cluster-wide.
		{
			APIGroups: []string{rbac.CoreAPIGroup},
			Resources: []string{
				rbac.ServicesResource,
				rbac.EventsResource,
				rbac.EndpointsResource,
				rbac.PodsResource,
				rbac.NodesResource,
				rbac.ComponentStatusesResource,
				rbac.ConfigMapsResource,
				rbac.NamespaceResource,
			},
			Verbs: []string{
				rbac.GetVerb,
				rbac.ListVerb,
				rbac.WatchVerb,
			},
		},
		// OpenShift cluster resource quotas (read-only).
		{
			APIGroups: []string{rbac.OpenShiftQuotaAPIGroup},
			Resources: []string{rbac.ClusterResourceQuotasResource},
			Verbs:     []string{rbac.GetVerb, rbac.ListVerb},
		},
		// Non-resource endpoints used for version and health probing.
		{
			NonResourceURLs: []string{rbac.VersionURL, rbac.HealthzURL},
			Verbs:           []string{rbac.GetVerb},
		},
		{
			// Horizontal Pod Autoscaling
			APIGroups: []string{rbac.AutoscalingAPIGroup},
			Resources: []string{rbac.HorizontalPodAutoscalersRecource},
			Verbs:     []string{rbac.ListVerb, rbac.WatchVerb},
		},
		// Read access to the kube-system namespace object.
		{
			APIGroups: []string{rbac.CoreAPIGroup},
			Resources: []string{rbac.NamespaceResource},
			ResourceNames: []string{
				common.KubeSystemResourceName,
			},
			Verbs: []string{rbac.GetVerb},
		},
	}
}
// GetLeaderElectionPolicyRule returns the policy rules for leader election:
// read/update on the election ConfigMaps and Leases, plus create so the
// first instance can establish the lock.
func GetLeaderElectionPolicyRule(dda metav1.Object) []rbacv1.PolicyRule {
	return []rbacv1.PolicyRule{
		{
			APIGroups: []string{rbac.CoreAPIGroup},
			Resources: []string{rbac.ConfigMapsResource},
			ResourceNames: []string{
				common.DatadogLeaderElectionOldResourceName, // Kept for backward compatibility with agent <7.37.0
				utils.GetDatadogLeaderElectionResourceName(dda),
			},
			Verbs: []string{rbac.GetVerb, rbac.UpdateVerb},
		},
		// Create cannot be restricted by resourceNames, hence the separate rule.
		{
			APIGroups: []string{rbac.CoreAPIGroup},
			Resources: []string{rbac.ConfigMapsResource},
			Verbs:     []string{rbac.CreateVerb},
		},
		{
			APIGroups: []string{rbac.CoordinationAPIGroup},
			Resources: []string{rbac.LeasesResource},
			Verbs:     []string{rbac.CreateVerb},
		},
		{
			APIGroups: []string{rbac.CoordinationAPIGroup},
			Resources: []string{rbac.LeasesResource},
			ResourceNames: []string{
				utils.GetDatadogLeaderElectionResourceName(dda),
			},
			Verbs: []string{rbac.GetVerb, rbac.UpdateVerb},
		},
	}
}
// GetDefaultSCC returns the default SCC for the cluster agent component:
// a restrictive OpenShift SecurityContextConstraints granted only to the
// cluster-agent service account.
func GetDefaultSCC(dda *v2alpha1.DatadogAgent) *securityv1.SecurityContextConstraints {
	return &securityv1.SecurityContextConstraints{
		// Only the cluster-agent service account may use this SCC.
		Users: []string{
			fmt.Sprintf("system:serviceaccount:%s:%s", dda.Namespace, v2alpha1.GetClusterAgentServiceAccount(dda)),
		},
		Priority: apiutils.NewInt32Pointer(8),
		// Host ports/network are only allowed when host networking is enabled
		// for the cluster-agent component in the DatadogAgent spec.
		AllowHostPorts:           v2alpha1.IsHostNetworkEnabled(dda, v2alpha1.ClusterAgentComponentName),
		AllowHostNetwork:         v2alpha1.IsHostNetworkEnabled(dda, v2alpha1.ClusterAgentComponentName),
		AllowHostDirVolumePlugin: false,
		AllowHostIPC:             false,
		AllowHostPID:             false,
		// AllowPrivilegeEscalation: false, // unavailable: https://github.com/openshift/api/issues/1281
		AllowPrivilegedContainer: false,
		FSGroup: securityv1.FSGroupStrategyOptions{
			Type: securityv1.FSGroupStrategyMustRunAs,
		},
		ReadOnlyRootFilesystem: false,
		RequiredDropCapabilities: []corev1.Capability{
			"KILL",
			"MKNOD",
			"SETUID",
			"SETGID",
		},
		RunAsUser: securityv1.RunAsUserStrategyOptions{
			Type: securityv1.RunAsUserStrategyMustRunAsRange,
		},
		SELinuxContext: securityv1.SELinuxContextStrategyOptions{
			Type: securityv1.SELinuxStrategyMustRunAs,
		},
		SupplementalGroups: securityv1.SupplementalGroupsStrategyOptions{
			Type: securityv1.SupplementalGroupsStrategyRunAsAny,
		},
		// Volume types the pod is allowed to mount.
		Volumes: []securityv1.FSType{
			securityv1.FSTypeConfigMap,
			securityv1.FSTypeDownwardAPI,
			securityv1.FSTypeEmptyDir,
			securityv1.FSTypePersistentVolumeClaim,
			securityv1.FSProjected,
			securityv1.FSTypeSecret,
		},
	}
}
|
package httpexpect
import (
"testing"
"github.com/gorilla/websocket"
)
// TestWebsocketFailed checks that every Websocket method is a safe no-op
// once the underlying chain has already failed: none of the calls below
// should panic, and derived values must carry the failed state forward.
func TestWebsocketFailed(t *testing.T) {
	chain := makeChain(newMockReporter(t))
	chain.fail("fail")
	ws := &Websocket{
		chain: chain,
	}
	ws.chain.assertFailed(t)
	// Accessors and timeout setters on a failed chain.
	ws.Raw()
	ws.WithReadTimeout(0)
	ws.WithoutReadTimeout()
	ws.WithWriteTimeout(0)
	ws.WithoutWriteTimeout()
	// Derived objects inherit the failed chain.
	ws.Subprotocol().chain.assertFailed(t)
	ws.Expect().chain.assertFailed(t)
	// Write and close operations must be inert on a failed chain.
	ws.WriteMessage(websocket.TextMessage, []byte("a"))
	ws.WriteBytesBinary([]byte("a"))
	ws.WriteBytesText([]byte("a"))
	ws.WriteText("a")
	ws.WriteJSON(map[string]string{"a": "b"})
	ws.Close()
	ws.CloseWithBytes([]byte("a"))
	ws.CloseWithJSON(map[string]string{"a": "b"})
	ws.CloseWithText("a")
	ws.Disconnect()
}
// TestWebsocketNil verifies that a Websocket constructed around a nil
// connection fails its chain instead of panicking when used.
func TestWebsocketNil(t *testing.T) {
	ws := NewWebsocket(Config{Reporter: newMockReporter(t)}, nil)
	msg := ws.Expect()
	msg.chain.assertFailed(t)
	ws.chain.assertFailed(t)
}
|
package goserver
// UserRepo is a general interface definition for getting persistent user data.
// NOTE(review): the Get* methods appear to fill the passed *User in place
// (they return only error) — confirm with the implementations.
type UserRepo interface {
	// CreateUser persists a new user record.
	CreateUser(user *User) error
	// UpdateUserPasswd updates the stored password of an existing user.
	UpdateUserPasswd(user *User) error
	// GetUserByID looks a user up by ID.
	GetUserByID(user *User) error
	// GetUserByUsername looks a user up by username.
	GetUserByUsername(user *User) error
}
|
package main
import "fmt"
// main demonstrates Go array value semantics: assigning one [5]string array
// to another copies every element.
func main() {
	var names [5]string
	friends := [5]string{"Luis", "Eduardo", "Martin", "Luis Fernando", "Carlos"}
	names = friends
	for i, name := range names {
		// &names[i] prints the address of each element of the copy.
		fmt.Println(name, &names[i])
	}
	fmt.Println(names)
}
// Markdown Table Generator
package md
import (
"strings"
)
type Cell struct {
Value string
}
func NewPlainTextCell(data string) Cell {
return Cell{
Value: data,
}
}
func (c *Cell) String() string {
value := c.Value
value = strings.Replace(value, "\n", "<br/>", -1)
value = strings.Replace(value, "\\n", "<br/>", -1)
return value
}
type Row struct {
Columns []Cell
}
func (r *Row) String() string {
scells := []string{}
for _, c := range r.Columns {
scells = append(scells, c.String())
}
return strings.Join(scells, "|")
}
type Table struct {
Headers []Cell
Rows []Row
}
func (t *Table) generateAlignmentLabel(length int) string {
if length < 2 {
length = 2
}
length -= 2
base := ":-"
for i := 0; i < length; i++ {
base += "-"
}
return base
}
func (t *Table) righpad(s string, length int) string {
ls := len(s)
if length < ls {
return s
}
length -= ls
for i := 0; i < length; i++ {
s += " "
}
return s
}
func (t *Table) String() string {
maxSize := len(t.Headers)
for _, r := range t.Rows {
if len(r.Columns) > maxSize {
maxSize = len(r.Columns)
}
}
colPadSizes := []int{}
for i := 0; i < maxSize; i++ {
colPadSizes = append(colPadSizes, 1)
}
for i, c := range t.Headers {
cl := len(c.String())
if colPadSizes[i] < cl {
colPadSizes[i] = cl
}
}
for _, r := range t.Rows {
for i, c := range r.Columns {
cl := len(c.String())
if colPadSizes[i] < cl {
colPadSizes[i] = cl
}
}
}
getHeader := func(index int) string {
if index >= len(t.Headers) || index < 0 {
return ""
} else {
return t.Headers[index].String()
}
}
sheaders := []string{}
alignments := []string{}
for i := 0; i < maxSize; i++ {
cHeader := getHeader(i)
alignment := t.generateAlignmentLabel(colPadSizes[i])
sheaders = append(sheaders, t.righpad(cHeader, colPadSizes[i]))
alignments = append(alignments, alignment)
}
sdata := []string{}
for _, r := range t.Rows {
srdata := []string{}
for i, c := range r.Columns {
srdata = append(srdata, t.righpad(c.String(), colPadSizes[i]))
}
// append empty cells
for i := len(r.Columns); i < maxSize; i++ {
srdata = append(srdata, t.righpad("", colPadSizes[i]))
}
sdata = append(sdata, "|"+strings.Join(srdata, "|")+"|")
}
return strings.Join(append([]string{
"|" + strings.Join(sheaders, "|") + "|",
"|" + strings.Join(alignments, "|") + "|",
}, sdata...), "\n")
}
|
package routes
import (
"fmt"
"github.com/kataras/iris/v12"
)
// testdata is the sample payload served by the content-negotiation routes.
type testdata struct {
	Name string `json:"name" xml:"Name"`
	Age  int    `json:"age" xml:"Age"`
	// NOTE(review): the xml tag "city" is lowercase while Name/Age use
	// capitalized element names — confirm whether that is intentional.
	City string `json:"city" xml:"city"`
}
// registerContentNegotiationRoute wires up two demo routes showing iris
// content negotiation: /resource declares the accepted types inline, while
// /resource2 attaches per-type payloads and negotiates with nil.
func registerContentNegotiationRoute(app *iris.Application) {
	// Render a resource with "gzip" encoding algorithm as application/json or text/xml or application/xml
	// when client's accept header contains one of them
	// or JSON (the first declared) if accept is empty,
	// and when client's accept-encoding header contains "gzip" or it's empty.
	app.Get("/resource", func(ctx iris.Context) {
		data := testdata{
			Name: "test name",
			Age:  26,
			City: "北京",
		}
		ctx.Negotiation().JSON().XML().EncodingGzip().Charset("gbk").Encoding("gbk")
		_, err := ctx.Negotiate(data)
		if err != nil {
			ctx.Writef("error: %v", err)
		}
	})
	// OR define them in a middleware and call Negotiate with nil in the final handler.
	app.Get("/resource2", func(ctx iris.Context) {
		data := testdata{
			Name: "test name",
			Age:  26,
			City: "北京",
		}
		ctx.Negotiation().
			JSON(data).
			XML(data).
			HTML(fmt.Sprintf("<h1>%s</h1><h2>Age %d</h2><h2>%s</h2>", data.Name, data.Age, data.City))
		// Check the negotiation error, consistent with the /resource handler
		// (it was previously silently ignored here).
		if _, err := ctx.Negotiate(nil); err != nil {
			ctx.Writef("error: %v", err)
		}
	})
}
|
package go3uparse
import (
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
)
// ChannelMerge folds one or more name->id channel maps into the set of
// normalized channels, recording every encountered name as an alias of its
// channel id. Channels that are switched off (Use is false) or that end up
// with no aliases are dropped from the result.
func ChannelMerge(channels ...map[string]int) map[int]*Normal {
	normals := GetListAvailableChannels()
	for _, channel := range channels {
		for name, id := range channel {
			// Skip ids that have no normalized channel entry.
			norm, ok := normals[id]
			if !ok {
				continue
			}
			// Record this name as a possible alias.
			norm.Various = append(norm.Various, name)
		}
	}
	// Sanitize: drop disabled channels and channels with no alias.
	// (Deleting during range is safe in Go; the two previous independent
	// deletes are folded into one condition.)
	for id, norm := range normals {
		if !norm.Use || len(norm.Various) == 0 {
			delete(normals, id)
		}
	}
	return normals
}
// GetListAvailableChannels loads the normalized channel table from
// ./channels/normilize.json, keyed by channel id.
// It panics if the path cannot be resolved or the file cannot be read or parsed.
func GetListAvailableChannels() map[int]*Normal {
	normals := map[int]*Normal{}
	// NOTE(review): "normilize.json" looks misspelled, but it must match the
	// file shipped on disk — do not rename one without the other.
	absPath, err := filepath.Abs("./channels/normilize.json")
	if err != nil {
		// Previously this error was silently discarded.
		panic(err)
	}
	plan, err := ioutil.ReadFile(absPath)
	if err != nil {
		panic(err)
	}
	if err := json.Unmarshal(plan, &normals); err != nil {
		panic(err)
	}
	return normals
}
// GetWhiteListChannels loads a name->id channel map from
// ./channels/<name>.json.
// It panics if the path cannot be resolved or the file cannot be read or parsed.
func GetWhiteListChannels(name string) map[string]int {
	result := map[string]int{}
	absPath, err := filepath.Abs(fmt.Sprintf("./channels/%s.json", name))
	if err != nil {
		// Previously this error was silently discarded.
		panic(err)
	}
	plan, err := ioutil.ReadFile(absPath)
	if err != nil {
		panic(err)
	}
	if err := json.Unmarshal(plan, &result); err != nil {
		panic(err)
	}
	return result
}
|
package main
import (
"fmt"
"github.com/bbalet/stopwords"
"github.com/wilcosheh/tfidf"
"github.com/wilcosheh/tfidf/similarity"
"regexp"
"strings"
)
// StopWords is a container for a custom stop-word list.
// NOTE(review): appears unused in this file — confirm before removing.
type StopWords struct {
	Words []string
}

// t1 and t2 are two sample text collections about the same show, used to
// demonstrate TF-IDF weighting and cosine similarity in main.
var t1 = "@BeautifulAtAll when's is season 6 out? Jimmy looks great, this role is definitely for him. Seriously, could any other show get away with what did in Free Churro? This show. God damn it. Smart. Relevant. Laugh out loud funny. I've just watched episode S05E12 of BeautifulAtAll! Also finished the new BeautifulAtAll the best thing out there. Last episode was awful, I don't undersand them at all. Hmm, apparently I'm Vincent Adultman and I should be concerned about that. I love this series"
var t2 = "I adore that! BeautifulAtAll — my love! Jimmy, what's wrong with your hair??? Free Churo is one of the best episodes I’ve ever seen on television. Yes ma'am! The BeautifulAtAll is in my top 3 shows ever, and that spot is hard to get! I don't understand why I should wait for new episode for so long. Don't worry – Vincent Adultman will be there for you soon. I may only be up to episode 7 but season 5, best season yet? Looking so. @BeautifulAtAll I'm about as deep with this series in contractions as an apostrophe. The Common joke in the new BeautifulAtAll killed me!"
// main cleans the two sample texts, weighs their terms with TF-IDF, pins a
// fixed weight on the shared show name, and prints their cosine similarity.
func main() {
	// Strip HTML tags and English stop words from both texts.
	cleanContent1 := stopwords.CleanString(t1, "en", true)
	cleanContent2 := stopwords.CleanString(t2, "en", true)
	cleanContent1 = strings.TrimSpace(cleanContent1)
	cleanContent2 = strings.TrimSpace(cleanContent2)

	// Remove runs of two or more whitespace characters.
	multiSpace := regexp.MustCompile(`[\s\p{Zs}]{2,}`)
	cleanContent1 = multiSpace.ReplaceAllString(cleanContent1, "")
	cleanContent2 = multiSpace.ReplaceAllString(cleanContent2, "")

	print("Test entity 1: ", cleanContent1)
	print("Test entity 2: ", cleanContent2)

	// Weigh each document's terms against the two-document corpus.
	f := tfidf.New()
	f.AddDocs(cleanContent1, cleanContent2)
	w1 := f.Cal(cleanContent1)
	w2 := f.Cal(cleanContent2)

	// Pin the show's name to a fixed weight in both documents.
	w1["beautifulatall"] = 0.3
	w2["beautifulatall"] = 0.3
	print("Entity 1 values with weight: ", fmt.Sprintf("weight of %s is %+v.\n", cleanContent1, w1))
	print("Entity 2 values with weight: ", fmt.Sprintf("weight of %s is %+v.\n", cleanContent2, w2))

	sim := similarity.Cosine(w1, w2)
	print("cosine similarity: ", fmt.Sprintf("%f\n", sim))
}
// WordCount returns how many times each whitespace-separated word occurs in s.
func WordCount(s string) map[string]float64 {
	fields := strings.Fields(s)
	counts := make(map[string]float64, len(fields))
	for _, w := range fields {
		counts[w]++
	}
	return counts
}
// print writes a red-colored label line followed by the plain text and a
// blank line (ANSI escape 31 = red foreground, 39 = default foreground).
// Note: this shadows the built-in print within the package.
func print(text1 string, text string) {
	fmt.Printf("\033[31m%s\033[39m\n%s\n\n", text1, text)
}
|
package models
// Email is the payload describing a templated outbound email message.
type Email struct {
	To           string            `json:"to"`
	From         string            `json:"from"`
	ReplyTo      string            `json:"replyTo"`
	ReplyToEmail string            `json:"replyToEmail"`
	Subject      string            `json:"subject"`
	Template     string            `json:"template"`
	Content      map[string]string `json:"content"`
}

// Class reports the model's class name.
func (e *Email) Class() string {
	return "Email"
}
|
package aggregates
import (
"github.com/fission/fission-workflows/pkg/api/events"
"github.com/fission/fission-workflows/pkg/fes"
"github.com/fission/fission-workflows/pkg/types"
"github.com/golang/protobuf/proto"
)
const (
	// TypeTaskInvocation is the aggregate type name for task invocations.
	TypeTaskInvocation = "task"
)

// TaskInvocation is the event-sourced entity for a single task invocation:
// fes.BaseEntity provides the aggregate bookkeeping while
// types.TaskInvocation holds the domain state.
type TaskInvocation struct {
	*fes.BaseEntity
	*types.TaskInvocation
}
// NewTaskInvocation wraps fi in a new event-sourced entity registered under
// the task-invocation aggregate identified by id.
func NewTaskInvocation(id string, fi *types.TaskInvocation) *TaskInvocation {
	entity := &TaskInvocation{TaskInvocation: fi}
	entity.BaseEntity = fes.NewBaseEntity(entity, *NewTaskInvocationAggregate(id))
	return entity
}
// NewTaskInvocationAggregate builds the aggregate key for a task invocation id.
func NewTaskInvocationAggregate(id string) *fes.Aggregate {
	agg := fes.Aggregate{
		Id:   id,
		Type: TypeTaskInvocation,
	}
	return &agg
}
// ApplyEvent advances the task invocation's state machine by one event:
// TaskStarted initializes the entity, TaskSucceeded/TaskFailed/TaskSkipped
// move it to a terminal status. Unknown event types are rejected.
// NOTE(review): the non-Started branches dereference ti.Status/ti.Metadata,
// so they assume a TaskStarted event was applied first — confirm the event
// store guarantees that ordering.
func (ti *TaskInvocation) ApplyEvent(event *fes.Event) error {
	if err := ti.ensureNextEvent(event); err != nil {
		return err
	}
	eventData, err := fes.ParseEventData(event)
	if err != nil {
		return err
	}
	switch m := eventData.(type) {
	case *events.TaskStarted:
		// First event: materialize the invocation from the event's spec.
		ti.TaskInvocation = &types.TaskInvocation{
			Metadata: &types.ObjectMetadata{
				Id:         m.GetSpec().TaskId,
				CreatedAt:  event.Timestamp,
				Generation: 1,
			},
			Spec: m.GetSpec(),
			Status: &types.TaskInvocationStatus{
				Status: types.TaskInvocationStatus_IN_PROGRESS,
			},
		}
	case *events.TaskSucceeded:
		ti.Status.Output = m.GetResult().Output
		ti.Status.OutputHeaders = m.GetResult().OutputHeaders
		ti.Status.Status = types.TaskInvocationStatus_SUCCEEDED
	case *events.TaskFailed:
		ti.Status.Error = m.GetError()
		ti.Status.Status = types.TaskInvocationStatus_FAILED
	case *events.TaskSkipped:
		// TODO ensure that object (spec/status) is present
		ti.Status.Status = types.TaskInvocationStatus_SKIPPED
	default:
		key := ti.Aggregate()
		return fes.ErrUnsupportedEntityEvent.WithAggregate(&key).WithEvent(event)
	}
	// Bump the generation and record when the entity last changed.
	ti.Metadata.Generation++
	ti.Status.UpdatedAt = event.GetTimestamp()
	return nil
}
// CopyEntity returns a deep copy of this entity, including its base-entity state.
func (ti *TaskInvocation) CopyEntity() fes.Entity {
	clone := &TaskInvocation{TaskInvocation: ti.Copy()}
	clone.BaseEntity = ti.CopyBaseEntity(clone)
	return clone
}

// Copy returns a deep copy of the underlying task invocation message.
func (ti *TaskInvocation) Copy() *types.TaskInvocation {
	clone := proto.Clone(ti.TaskInvocation)
	return clone.(*types.TaskInvocation)
}
// ensureNextEvent validates that event is well-formed and addressed to a
// task-invocation aggregate before it is applied.
func (ti *TaskInvocation) ensureNextEvent(event *fes.Event) error {
	if err := fes.ValidateEvent(event); err != nil {
		return err
	}
	if event.Aggregate.Type == TypeTaskInvocation {
		// TODO check sequence of event
		return nil
	}
	return fes.ErrUnsupportedEntityEvent.WithEntity(ti).WithEvent(event)
}
|
// Package main implements the pomerium-cli.
package main
import (
"crypto/tls"
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/pomerium/pomerium/pkg/cryptutil"
)
// rootCmd is the top-level cobra command for the pomerium-cli binary.
var rootCmd = &cobra.Command{
	Use: "pomerium-cli",
}
// main executes the root command and exits non-zero on failure.
func main() {
	if err := rootCmd.Execute(); err != nil {
		fatalf("%s", err.Error())
	}
}
// fatalf prints the formatted message to stderr, followed by a newline,
// then terminates the process with exit code 1.
func fatalf(msg string, args ...interface{}) {
	fmt.Fprintln(os.Stderr, fmt.Sprintf(msg, args...))
	os.Exit(1)
}
// tlsOptions holds the TLS-related flag values shared by subcommands;
// populated by addTLSFlags and consumed by getTLSConfig.
var tlsOptions struct {
	disableTLSVerification bool
	alternateCAPath string
	caCert string
}
// addTLSFlags registers the shared TLS flags on cmd, binding them to the
// package-level tlsOptions struct.
func addTLSFlags(cmd *cobra.Command) {
	fs := cmd.Flags()
	fs.BoolVar(&tlsOptions.disableTLSVerification, "disable-tls-verification", false,
		"disables TLS verification")
	fs.StringVar(&tlsOptions.alternateCAPath, "alternate-ca-path", "",
		"path to CA certificate to use for HTTP requests")
	fs.StringVar(&tlsOptions.caCert, "ca-cert", "",
		"base64-encoded CA TLS certificate to use for HTTP requests")
}
// getTLSConfig builds a *tls.Config from the global TLS flag values.
// It honors --disable-tls-verification, and installs a custom root CA
// pool when either --ca-cert or --alternate-ca-path is supplied. A
// failure to build the CA pool is fatal.
func getTLSConfig() *tls.Config {
	cfg := new(tls.Config)
	if tlsOptions.disableTLSVerification {
		cfg.InsecureSkipVerify = true
	}
	// Fix: the original only built the pool when --ca-cert was set, so a
	// lone --alternate-ca-path flag was silently ignored even though
	// GetCertPool accepts it as its second argument.
	if tlsOptions.caCert != "" || tlsOptions.alternateCAPath != "" {
		var err error
		cfg.RootCAs, err = cryptutil.GetCertPool(tlsOptions.caCert, tlsOptions.alternateCAPath)
		if err != nil {
			fatalf("%s", err)
		}
	}
	return cfg
}
// browserOptions holds the browser-related flag values; populated by
// addBrowserFlags.
var browserOptions struct {
	command string
}
// addBrowserFlags registers the browser-related flags on cmd, binding
// them to the package-level browserOptions struct.
func addBrowserFlags(cmd *cobra.Command) {
	fs := cmd.Flags()
	fs.StringVar(&browserOptions.command, "browser-cmd", "",
		"custom browser command to run when opening a URL")
}
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package identity
import (
"bytes"
"context"
"crypto"
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"io"
"os"
"sync"
"sync/atomic"
"github.com/zeebo/errs"
"storj.io/common/peertls"
"storj.io/common/peertls/extensions"
"storj.io/common/pkcrypto"
"storj.io/common/storj"
)
// minimumLoggableDifficulty is passed to GenerateKeys as its difficulty
// argument; presumably the floor below which candidate keys are not
// reported to the status logger — confirm against GenerateKeys.
const minimumLoggableDifficulty = 8
// PeerCertificateAuthority represents the CA which is used to validate peer identities.
// It carries no private key; compare FullCertificateAuthority.
type PeerCertificateAuthority struct {
	// RestChain is the remainder of the certificate chain above Cert.
	RestChain []*x509.Certificate
	// Cert is the x509 certificate of the CA
	Cert *x509.Certificate
	// The ID is calculated from the CA public key.
	ID storj.NodeID
}
// FullCertificateAuthority represents the CA which is used to author and validate full identities.
type FullCertificateAuthority struct {
	// RestChain is the remainder of the certificate chain above Cert.
	RestChain []*x509.Certificate
	// Cert is the x509 certificate of the CA
	Cert *x509.Certificate
	// The ID is calculated from the CA public key.
	ID storj.NodeID
	// Key is the private key of the CA
	Key crypto.PrivateKey
}
// CASetupConfig is for creating a CA. Field semantics are carried in the
// struct tags, which the config loader reads for defaults and help text.
type CASetupConfig struct {
	VersionNumber uint `default:"0" help:"which identity version to use (0 is latest)"`
	ParentCertPath string `help:"path to the parent authority's certificate chain"`
	ParentKeyPath string `help:"path to the parent authority's private key"`
	CertPath string `help:"path to the certificate chain for this identity" default:"$IDENTITYDIR/ca.cert"`
	KeyPath string `help:"path to the private key for this identity" default:"$IDENTITYDIR/ca.key"`
	Difficulty uint64 `help:"minimum difficulty for identity generation" default:"36"`
	Timeout string `help:"timeout for CA generation; golang duration string (0 no timeout)" default:"5m"`
	Overwrite bool `help:"if true, existing CA certs AND keys will overwritten" default:"false" setup:"true"`
	Concurrency uint `help:"number of concurrent workers for certificate authority generation" default:"4"`
}
// NewCAOptions is used to pass parameters to `NewCA`.
type NewCAOptions struct {
	// VersionNumber is the IDVersion to use for the identity
	VersionNumber storj.IDVersionNumber
	// Difficulty is the number of trailing zero-bits the nodeID must have
	Difficulty uint16
	// Concurrency is the number of go routines used to generate a CA of sufficient difficulty
	Concurrency uint
	// ParentCert, if provided will be prepended to the certificate chain
	ParentCert *x509.Certificate
	// ParentKey is the private key of the parent CA; when set, the new CA
	// certificate is signed by it rather than self-signed.
	ParentKey crypto.PrivateKey
	// Logger is used to log generation status updates
	Logger io.Writer
}
// PeerCAConfig is for locating a CA certificate without a private key.
type PeerCAConfig struct {
	CertPath string `help:"path to the certificate chain for this identity" default:"$IDENTITYDIR/ca.cert"`
}
// FullCAConfig is for locating a CA certificate and its private key.
type FullCAConfig struct {
	CertPath string `help:"path to the certificate chain for this identity" default:"$IDENTITYDIR/ca.cert"`
	KeyPath string `help:"path to the private key for this identity" default:"$IDENTITYDIR/ca.key"`
}
// NewCA creates a new full identity with the given difficulty.
// It runs opts.Concurrency workers over GenerateKeys until one produces
// a node ID whose difficulty reaches opts.Difficulty, then signs a CA
// certificate for the winning key (self-signed, or by opts.ParentKey if
// one is supplied).
func NewCA(ctx context.Context, opts NewCAOptions) (_ *FullCertificateAuthority, err error) {
	defer mon.Task()(&ctx)(&err)
	var (
		// highscore and i are shared across workers and accessed only
		// through the atomic package; highscore tracks the best difficulty
		// seen so far, i counts generated keys (both for logging).
		highscore = new(uint32)
		i = new(uint32)
		mu sync.Mutex
		selectedKey crypto.PrivateKey
		selectedID storj.NodeID
	)
	if opts.Concurrency < 1 {
		opts.Concurrency = 1
	}
	if opts.Logger != nil {
		fmt.Fprintf(opts.Logger, "Generating key with a minimum a difficulty of %d...\n", opts.Difficulty)
	}
	version, err := storj.GetIDVersion(opts.VersionNumber)
	if err != nil {
		return nil, err
	}
	// updateStatus rewrites the progress line in place (leading \r).
	updateStatus := func() {
		if opts.Logger != nil {
			count := atomic.LoadUint32(i)
			hs := atomic.LoadUint32(highscore)
			_, _ = fmt.Fprintf(opts.Logger, "\rGenerated %d keys; best difficulty so far: %d", count, hs)
		}
	}
	err = GenerateKeys(ctx, minimumLoggableDifficulty, int(opts.Concurrency), version,
		func(k crypto.PrivateKey, id storj.NodeID) (done bool, err error) {
			if opts.Logger != nil {
				if atomic.AddUint32(i, 1)%100 == 0 {
					updateStatus()
				}
			}
			difficulty, err := id.Difficulty()
			if err != nil {
				return false, err
			}
			if difficulty >= opts.Difficulty {
				// Candidate meets the target; the mutex ensures only the
				// first winner is recorded even if several workers succeed
				// at nearly the same time.
				mu.Lock()
				if selectedKey == nil {
					updateStatus()
					selectedKey = k
					selectedID = id
				}
				mu.Unlock()
				if opts.Logger != nil {
					atomic.SwapUint32(highscore, uint32(difficulty))
					updateStatus()
					_, _ = fmt.Fprintf(opts.Logger, "\nFound a key with difficulty %d!\n", difficulty)
				}
				// Returning true stops the generation workers.
				return true, nil
			}
			// Below target: try to raise the published highscore with a
			// CAS loop so the progress line stays accurate under contention.
			for {
				hs := atomic.LoadUint32(highscore)
				if uint32(difficulty) <= hs {
					return false, nil
				}
				if atomic.CompareAndSwapUint32(highscore, hs, uint32(difficulty)) {
					updateStatus()
					return false, nil
				}
			}
		})
	if err != nil {
		return nil, err
	}
	ct, err := peertls.CATemplate()
	if err != nil {
		return nil, err
	}
	// The identity version is embedded in the certificate as an extension.
	if err := extensions.AddExtraExtension(ct, storj.NewVersionExt(version)); err != nil {
		return nil, err
	}
	var cert *x509.Certificate
	if opts.ParentKey == nil {
		cert, err = peertls.CreateSelfSignedCertificate(selectedKey, ct)
	} else {
		var pubKey crypto.PublicKey
		pubKey, err = pkcrypto.PublicKeyFromPrivate(selectedKey)
		if err != nil {
			return nil, err
		}
		cert, err = peertls.CreateCertificate(pubKey, opts.ParentKey, ct, opts.ParentCert)
	}
	if err != nil {
		return nil, err
	}
	ca := &FullCertificateAuthority{
		Cert: cert,
		Key: selectedKey,
		ID: selectedID,
	}
	if opts.ParentCert != nil {
		ca.RestChain = []*x509.Certificate{opts.ParentCert}
	}
	return ca, nil
}
// Status returns the status of the CA cert/key files for the config.
// It only stats the files; it does not validate their contents.
func (caS CASetupConfig) Status() (TLSFilesStatus, error) {
	return statTLSFiles(caS.CertPath, caS.KeyPath)
}
// Create generates and saves a CA using the config. When both parent
// cert and key paths are configured, the parent CA is loaded and used to
// sign the new CA; otherwise an empty parent yields a self-signed CA.
// The new CA is written to caS.CertPath/caS.KeyPath before returning.
func (caS CASetupConfig) Create(ctx context.Context, logger io.Writer) (*FullCertificateAuthority, error) {
	var (
		err error
		parent *FullCertificateAuthority
	)
	if caS.ParentCertPath != "" && caS.ParentKeyPath != "" {
		parent, err = FullCAConfig{
			CertPath: caS.ParentCertPath,
			KeyPath: caS.ParentKeyPath,
		}.Load()
		if err != nil {
			return nil, err
		}
	}
	if parent == nil {
		// Zero-value parent: nil Cert/Key makes NewCA self-sign.
		parent = &FullCertificateAuthority{}
	}
	version, err := storj.GetIDVersion(storj.IDVersionNumber(caS.VersionNumber))
	if err != nil {
		return nil, err
	}
	ca, err := NewCA(ctx, NewCAOptions{
		VersionNumber: version.Number,
		Difficulty: uint16(caS.Difficulty),
		Concurrency: caS.Concurrency,
		ParentCert: parent.Cert,
		ParentKey: parent.Key,
		Logger: logger,
	})
	if err != nil {
		return nil, err
	}
	caC := FullCAConfig{
		CertPath: caS.CertPath,
		KeyPath: caS.KeyPath,
	}
	return ca, caC.Save(ca)
}
// FullConfig converts a `CASetupConfig` to `FullCAConfig`, keeping only
// the cert and key paths.
func (caS CASetupConfig) FullConfig() FullCAConfig {
	var cfg FullCAConfig
	cfg.CertPath = caS.CertPath
	cfg.KeyPath = caS.KeyPath
	return cfg
}
// Load loads a CA from the given configuration: the certificate chain is
// read via the peer config, and the private key is read from KeyPath.
func (fc FullCAConfig) Load() (*FullCertificateAuthority, error) {
	peer, err := fc.PeerConfig().Load()
	if err != nil {
		return nil, err
	}
	keyPEM, err := os.ReadFile(fc.KeyPath)
	if err != nil {
		return nil, peertls.ErrNotExist.Wrap(err)
	}
	key, err := pkcrypto.PrivateKeyFromPEM(keyPEM)
	if err != nil {
		return nil, err
	}
	ca := &FullCertificateAuthority{
		RestChain: peer.RestChain,
		Cert:      peer.Cert,
		Key:       key,
		ID:        peer.ID,
	}
	return ca, nil
}
// PeerConfig converts a full ca config to a peer ca config, dropping the
// key path.
func (fc FullCAConfig) PeerConfig() PeerCAConfig {
	var pc PeerCAConfig
	pc.CertPath = fc.CertPath
	return pc
}
// Save saves a CA with the given configuration: the certificate chain is
// written via the peer config, and the private key (if KeyPath is set)
// is PEM-encoded and written to KeyPath. The first failure aborts and is
// returned via the error group.
func (fc FullCAConfig) Save(ca *FullCertificateAuthority) error {
	var (
		keyData bytes.Buffer
		writeErrs errs.Group
	)
	if err := fc.PeerConfig().Save(ca.PeerCA()); err != nil {
		writeErrs.Add(err)
		return writeErrs.Err()
	}
	if fc.KeyPath != "" {
		// Encode to an in-memory buffer first so a failed encode never
		// leaves a truncated key file on disk.
		if err := pkcrypto.WritePrivateKeyPEM(&keyData, ca.Key); err != nil {
			writeErrs.Add(err)
			return writeErrs.Err()
		}
		if err := writeKeyData(fc.KeyPath, keyData.Bytes()); err != nil {
			writeErrs.Add(err)
			return writeErrs.Err()
		}
	}
	return writeErrs.Err()
}
// SaveBackup saves the certificate and key of the config with
// timestamped backup filenames.
func (fc FullCAConfig) SaveBackup(ca *FullCertificateAuthority) error {
	backup := FullCAConfig{
		CertPath: backupPath(fc.CertPath),
		KeyPath:  backupPath(fc.KeyPath),
	}
	return backup.Save(ca)
}
// Load loads a CA from the given configuration by reading and parsing
// the PEM chain at CertPath. The returned authority's Cert is the CA
// certificate and RestChain is everything above it.
func (pc PeerCAConfig) Load() (*PeerCertificateAuthority, error) {
	chainPEM, err := os.ReadFile(pc.CertPath)
	if err != nil {
		return nil, peertls.ErrNotExist.Wrap(err)
	}
	chain, err := pkcrypto.CertsFromPEM(chainPEM)
	if err != nil {
		return nil, errs.New("failed to load identity %#v: %v",
			pc.CertPath, err)
	}
	// NB: `CAIndex` is in the context of a complete chain (incl. leaf).
	// Here we're loading the CA chain (i.e. without leaf), so the CA cert
	// sits one position earlier at CAIndex-1.
	nodeID, err := NodeIDFromCert(chain[peertls.CAIndex-1])
	if err != nil {
		return nil, err
	}
	return &PeerCertificateAuthority{
		// NB: `CAIndex` is in the context of a complete chain (incl. leaf).
		// Here we're loading the CA chain (i.e. without leaf).
		RestChain: chain[peertls.CAIndex:],
		Cert: chain[peertls.CAIndex-1],
		ID: nodeID,
	}, nil
}
// Save saves a peer CA (cert, no key) with the given configuration: the
// full chain (Cert followed by RestChain) is PEM-encoded and written to
// CertPath. An empty CertPath is a no-op.
func (pc PeerCAConfig) Save(ca *PeerCertificateAuthority) error {
	var (
		certData bytes.Buffer
		writeErrs errs.Group
	)
	chain := []*x509.Certificate{ca.Cert}
	chain = append(chain, ca.RestChain...)
	if pc.CertPath != "" {
		// Encode into memory first so a failed encode never leaves a
		// truncated cert file on disk.
		if err := peertls.WriteChain(&certData, chain...); err != nil {
			writeErrs.Add(err)
			return writeErrs.Err()
		}
		if err := writeChainData(pc.CertPath, certData.Bytes()); err != nil {
			writeErrs.Add(err)
			return writeErrs.Err()
		}
	}
	// Consistency fix: return the error group (nil when empty) like
	// FullCAConfig.Save does, instead of a bare nil literal.
	return writeErrs.Err()
}
// SaveBackup saves the certificate of the config with a timestamped
// backup filename.
func (pc PeerCAConfig) SaveBackup(ca *PeerCertificateAuthority) error {
	backup := PeerCAConfig{CertPath: backupPath(pc.CertPath)}
	return backup.Save(ca)
}
// NewIdentity generates a new `FullIdentity` based on the CA. The CA
// cert is included in the identity's cert chain and the identity's leaf cert
// is signed by the CA. Extra extensions (exts) are embedded in the leaf.
func (ca *FullCertificateAuthority) NewIdentity(exts ...pkix.Extension) (*FullIdentity, error) {
	leafTemplate, err := peertls.LeafTemplate()
	if err != nil {
		return nil, err
	}
	// TODO: add test for this!
	// The leaf key type follows the CA's identity version.
	version, err := ca.Version()
	if err != nil {
		return nil, err
	}
	leafKey, err := version.NewPrivateKey()
	if err != nil {
		return nil, err
	}
	if err := extensions.AddExtraExtension(leafTemplate, exts...); err != nil {
		return nil, err
	}
	pubKey, err := pkcrypto.PublicKeyFromPrivate(leafKey)
	if err != nil {
		return nil, err
	}
	// Leaf is signed by the CA's private key.
	leafCert, err := peertls.CreateCertificate(pubKey, ca.Key, leafTemplate, ca.Cert)
	if err != nil {
		return nil, err
	}
	return &FullIdentity{
		RestChain: ca.RestChain,
		CA: ca.Cert,
		Leaf: leafCert,
		Key: leafKey,
		ID: ca.ID,
	}, nil
}
// Chain returns the CA's certificate chain: Cert first, then RestChain.
func (ca *FullCertificateAuthority) Chain() []*x509.Certificate {
	chain := make([]*x509.Certificate, 0, len(ca.RestChain)+1)
	chain = append(chain, ca.Cert)
	return append(chain, ca.RestChain...)
}
// RawChain returns the CA's certificate chain as a 2d byte slice of
// DER-encoded certificates.
func (ca *FullCertificateAuthority) RawChain() [][]byte {
	chain := ca.Chain()
	raw := make([][]byte, len(chain))
	for i := range chain {
		raw[i] = chain[i].Raw
	}
	return raw
}
// RawRestChain returns the "rest" (excluding `ca.Cert`) of the
// certificate chain as a 2d byte slice (nil when RestChain is empty).
func (ca *FullCertificateAuthority) RawRestChain() [][]byte {
	var raw [][]byte
	for i := range ca.RestChain {
		raw = append(raw, ca.RestChain[i].Raw)
	}
	return raw
}
// PeerCA converts a FullCertificateAuthority to a PeerCertificateAuthority,
// dropping the private key.
func (ca *FullCertificateAuthority) PeerCA() *PeerCertificateAuthority {
	peer := new(PeerCertificateAuthority)
	peer.Cert = ca.Cert
	peer.ID = ca.ID
	peer.RestChain = ca.RestChain
	return peer
}
// Sign signs the passed certificate template with this CA's certificate
// and private key.
func (ca *FullCertificateAuthority) Sign(cert *x509.Certificate) (*x509.Certificate, error) {
	signed, err := peertls.CreateCertificate(cert.PublicKey, ca.Key, cert, ca.Cert)
	if err != nil {
		return nil, errs.Wrap(err)
	}
	return signed, nil
}
// Version looks up the version based on the certificate's ID version extension.
func (ca *FullCertificateAuthority) Version() (storj.IDVersion, error) {
	return storj.IDVersionFromCert(ca.Cert)
}
// AddExtension adds extensions to certificate authority certificate. Extensions
// are serialized into the certificate's raw bytes and it is re-signed by itself.
// Only self-signed CAs are supported: re-signing a child CA would require
// the parent's private key, which is not available here.
func (ca *FullCertificateAuthority) AddExtension(exts ...pkix.Extension) error {
	// TODO: how to properly handle this?
	if len(ca.RestChain) > 0 {
		return errs.New("adding extensions requires parent certificate's private key")
	}
	if err := extensions.AddExtraExtension(ca.Cert, exts...); err != nil {
		return err
	}
	// Re-sign so the raw certificate bytes include the new extensions.
	updatedCert, err := peertls.CreateSelfSignedCertificate(ca.Key, ca.Cert)
	if err != nil {
		return err
	}
	ca.Cert = updatedCert
	return nil
}
// Revoke extends the certificate authority certificate with a
// certificate revocation extension.
func (ca *FullCertificateAuthority) Revoke() error {
	ext, err := extensions.NewRevocationExt(ca.Key, ca.Cert)
	if err == nil {
		return ca.AddExtension(ext)
	}
	return err
}
|
package routers
import (
"intra-hub/controllers"
"github.com/astaxie/beego"
)
// init registers every HTTP route of the application with beego.
// Routes are declared as "verb:ControllerMethod" mappings; registration
// order is preserved as written (fixed paths before their parameterized
// siblings, e.g. /users/search before /users/:id).
func init() {
	beego.Router("/", &controllers.HomeController{}, "get:HomeView")
	beego.Router("/me", &controllers.UserController{}, "get:MeView")
	beego.Router("/logout", &controllers.UserController{}, "get:Logout")
	beego.Router("/login", &controllers.UserController{}, "get:LoginView;post:Login")
	beego.Router("/forgot", &controllers.UserController{}, "get:ResetPasswordView;post:ResetPassword")
	beego.Router("/admin", &controllers.AdminController{})
	beego.Router("/admin/calendars", &controllers.CalendarController{}, "post:Add")
	beego.Router("/admin/calendars/delete/:id", &controllers.CalendarController{}, "post:Delete")
	beego.Router("/admin/users/add", &controllers.UserController{}, "get:AddView")
	beego.Router("/themes", &controllers.ThemeController{})
	beego.Router("/themes/:id", &controllers.ThemeController{})
	beego.Router("/skills", &controllers.SkillController{})
	beego.Router("/skills/:id", &controllers.SkillController{})
	beego.Router("/staff", &controllers.StaffController{}, "get:ListView")
	beego.Router("/students", &controllers.UserController{}, "get:ListStudentView")
	beego.Router("/users/activate/:id/:token", &controllers.UserController{}, "get:ActivateUserView;post:ActivateUser")
	beego.Router("/users/edit/:login", &controllers.UserController{}, "get:EditView;put:EditUser")
	beego.Router("/users/search", &controllers.UserController{}, "post:SearchUser")
	beego.Router("/users/:id", &controllers.UserController{}, "get:SingleView")
	beego.Router("/users", &controllers.UserController{}, "post:AddUser")
	beego.Router("/projects", &controllers.ProjectController{}, "get:ListView")
	beego.Router("/projects/checkname", &controllers.ProjectController{}, "get:CheckName")
	beego.Router("/projects/add", &controllers.ProjectController{}, "get:AddView;post:Add")
	beego.Router("/projects/edit", &controllers.ProjectController{}, "post:Edit")
	beego.Router("/projects/edit/:nameOrId", &controllers.ProjectController{}, "get:EditView")
	beego.Router("/projects/comments/edit", &controllers.ProjectController{}, "post:EditComment")
	beego.Router("/projects/:nameOrId", &controllers.ProjectController{}, "get:SingleView")
	beego.Router("/projects/:nameOrId/comments", &controllers.ProjectController{}, "get:CommentView;post:AddComment")
	beego.Router("/pedago/validate", &controllers.PedagoController{}, "post:ValidateProject")
	beego.Router("/pedago/validation/:validation", &controllers.PedagoController{}, "get:ValidateProjectView")
	beego.Router("/export/projects", &controllers.ExportController{}, "get:Projects")
	beego.Router("/api/login", &controllers.UserController{}, "post:Login")
	beego.Router("/api/users/me", &controllers.UserController{}, "get:GetMe")
}
|
package gobcnbicing
import "testing"
// TestGetStations verifies that fetching the station list does not error.
func TestGetStations(t *testing.T) {
	if _, err := GetStations(); err != nil {
		t.Error(err)
	}
}
|
../../../../../local/bundles/cros/ui/conference/room_type.go |
package pointer
// Of returns a pointer to a copy of v.
func Of[T any](v T) *T {
	p := v
	return &p
}
// Deref returns the value v points to; a nil pointer yields the zero
// value of T.
func Deref[T any](v *T) T {
	var zero T
	if v != nil {
		return *v
	}
	return zero
}
|
package cmd
import (
"context"
"errors"
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/Azure/azure-storage-azcopy/common"
)
type copyUploadEnumerator common.CopyJobPartOrderRequest
// enumerate accepts the list of files/directories to transfer (resolved
// by globbing cca.source), turns each into a CopyTransfer against the
// parsed destination URL, and dispatches the final job part. Single-file
// sources get special handling so the destination may be a blob rather
// than a container.
func (e *copyUploadEnumerator) enumerate(cca *cookedCopyCmdArgs) error {
	util := copyHandlerUtil{}
	ctx := context.TODO() // Ensure correct context is used
	// attempt to parse the destination url
	destinationURL, err := url.Parse(cca.destination)
	if err != nil {
		// the destination should have already been validated, it would be surprising if it cannot be parsed at this point
		panic(err)
	}
	// list the source files and directories
	listOfFilesAndDirectories, err := filepath.Glob(cca.source)
	if err != nil || len(listOfFilesAndDirectories) == 0 {
		return fmt.Errorf("cannot find source to upload")
	}
	// when a single file is being uploaded, we need to treat this case differently, as the destinationURL might be a blob
	if len(listOfFilesAndDirectories) == 1 {
		f, err := os.Stat(listOfFilesAndDirectories[0])
		if err != nil {
			return errors.New("cannot find source to upload")
		}
		if !f.IsDir() {
			// Check if the files are passed with include flag
			// then source needs to be directory, if it is a file
			// then error is returned
			if len(e.Include) > 0 {
				return fmt.Errorf("for the use of include flag, source needs to be a directory")
			}
			// append file name as blob name in case the given URL is a container
			if (e.FromTo == common.EFromTo.LocalBlob() && util.urlIsContainerOrShare(destinationURL)) ||
				(e.FromTo == common.EFromTo.LocalFile() && util.urlIsAzureFileDirectory(ctx, destinationURL)) {
				destinationURL.Path = util.generateObjectPath(destinationURL.Path, f.Name())
			}
			// append file name as blob name in case the given URL is a blob FS directory.
			if e.FromTo == common.EFromTo.LocalBlobFS() {
				// Create blob FS pipeline.
				p, err := createBlobFSPipeline(ctx, e.CredentialInfo)
				if err != nil {
					return err
				}
				if util.urlIsBFSFileSystemOrDirectory(ctx, destinationURL, p) {
					destinationURL.Path = util.generateObjectPath(destinationURL.Path, f.Name())
				}
			}
			err = e.addTransfer(common.CopyTransfer{
				Source: listOfFilesAndDirectories[0],
				Destination: destinationURL.String(),
				LastModifiedTime: f.ModTime(),
				SourceSize: f.Size(),
			}, cca)
			if err != nil {
				return err
			}
			// Single-file case is complete; dispatch and return here so the
			// directory-walking logic below never runs.
			return e.dispatchFinalPart()
		}
	}
	// if the user specifies a virtual directory ex: /container_name/extra_path
	// then we should use extra_path as a prefix while uploading
	// temporarily save the path of the container
	cleanContainerPath := destinationURL.Path
	// Get the source path without the wildcards
	// This is defined since the files mentioned with exclude flag
	// & include flag are relative to the Source
	// If the source has wildcards, then files are relative to the
	// parent source path which is the path of last directory in the source
	// without wildcards
	// For Example: src = "/home/user/dir1" parentSourcePath = "/home/user/dir1"
	// For Example: src = "/home/user/dir*" parentSourcePath = "/home/user"
	// For Example: src = "/home/*" parentSourcePath = "/home"
	parentSourcePath := cca.source
	wcIndex := util.firstIndexOfWildCard(parentSourcePath)
	if wcIndex != -1 {
		parentSourcePath = parentSourcePath[:wcIndex]
		pathSepIndex := strings.LastIndex(parentSourcePath, common.AZCOPY_PATH_SEPARATOR_STRING)
		parentSourcePath = parentSourcePath[:pathSepIndex]
	}
	// walk through every file and directory
	// upload every file
	// upload directory recursively if recursive option is on
	for _, fileOrDirectoryPath := range listOfFilesAndDirectories {
		f, err := os.Stat(fileOrDirectoryPath)
		// NOTE(review): stat errors are silently skipped here — the entry
		// is neither uploaded nor reported; confirm this is intentional.
		if err == nil {
			// directories are uploaded only if recursive is on
			if f.IsDir() && cca.recursive {
				// walk goes through the entire directory tree
				err = filepath.Walk(fileOrDirectoryPath, func(pathToFile string, f os.FileInfo, err error) error {
					if err != nil {
						return err
					}
					if f.IsDir() {
						// For Blob and Azure Files, empty directories are not uploaded
						// For BlobFs, empty directories are to be uploaded as well
						// If the directory is not empty, then uploading a file inside the directory path
						// will create the parent directory of file, so transfer is not required to create
						// a directory
						// TODO: Currently not implemented the upload of empty directories for BlobFS
						return nil
					} else {
						// replace the OS path separator in pathToFile string with AZCOPY_PATH_SEPARATOR
						// this replacement is done to handle the windows file paths where path separator "\\"
						pathToFile = strings.Replace(pathToFile, common.OS_PATH_SEPARATOR, common.AZCOPY_PATH_SEPARATOR_STRING, -1)
						// replace the OS path separator in fileOrDirectoryPath string with AZCOPY_PATH_SEPARATOR
						// this replacement is done to handle the windows file paths where path separator "\\"
						fileOrDirectoryPath = strings.Replace(fileOrDirectoryPath, common.OS_PATH_SEPARATOR, common.AZCOPY_PATH_SEPARATOR_STRING, -1)
						// check if the file should be included or not
						if !util.resourceShouldBeIncluded(parentSourcePath, e.Include, pathToFile) {
							return nil
						}
						// Check if the file should be excluded or not.
						if util.resourceShouldBeExcluded(parentSourcePath, e.Exclude, pathToFile) {
							return nil
						}
						// upload the files
						// the path in the blob name started at the given fileOrDirectoryPath
						// example: fileOrDirectoryPath = "/dir1/dir2/dir3" pathToFile = "/dir1/dir2/dir3/file1.txt" result = "dir3/file1.txt"
						destinationURL.Path = util.generateObjectPath(cleanContainerPath,
							util.getRelativePath(fileOrDirectoryPath, pathToFile))
						err = e.addTransfer(common.CopyTransfer{
							Source: pathToFile,
							Destination: destinationURL.String(),
							LastModifiedTime: f.ModTime(),
							SourceSize: f.Size(),
						}, cca)
						if err != nil {
							return err
						}
					}
					return nil
				})
			} else if !f.IsDir() {
				// replace the OS path separator in fileOrDirectoryPath string with AZCOPY_PATH_SEPARATOR
				// this replacement is done to handle the windows file paths where path separator "\\"
				fileOrDirectoryPath = strings.Replace(fileOrDirectoryPath, common.OS_PATH_SEPARATOR, common.AZCOPY_PATH_SEPARATOR_STRING, -1)
				// check if the file should be included or not
				if !util.resourceShouldBeIncluded(parentSourcePath, e.Include, fileOrDirectoryPath) {
					continue
				}
				// Check if the file should be excluded or not.
				if util.resourceShouldBeExcluded(parentSourcePath, e.Exclude, fileOrDirectoryPath) {
					continue
				}
				// files are uploaded using their file name as blob name
				destinationURL.Path = util.generateObjectPath(cleanContainerPath, f.Name())
				err = e.addTransfer(common.CopyTransfer{
					Source: fileOrDirectoryPath,
					Destination: destinationURL.String(),
					LastModifiedTime: f.ModTime(),
					SourceSize: f.Size(),
				}, cca)
				if err != nil {
					return err
				}
			}
		}
	}
	if e.PartNum == 0 && len(e.Transfers) == 0 {
		return errors.New("nothing can be uploaded, please use --recursive to upload directories")
	}
	return e.dispatchFinalPart()
}
// addTransfer delegates to the shared addTransfer helper, treating the
// enumerator as the underlying CopyJobPartOrderRequest it aliases.
func (e *copyUploadEnumerator) addTransfer(transfer common.CopyTransfer, cca *cookedCopyCmdArgs) error {
	return addTransfer((*common.CopyJobPartOrderRequest)(e), transfer, cca)
}
// dispatchFinalPart delegates to the shared dispatchFinalPart helper,
// sending whatever transfers remain queued on the job part order.
func (e *copyUploadEnumerator) dispatchFinalPart() error {
	return dispatchFinalPart((*common.CopyJobPartOrderRequest)(e))
}
// partNum returns the current job part number of the enumerator.
func (e *copyUploadEnumerator) partNum() common.PartNumber {
	return e.PartNum
}
|
package main
import (
"bufio"
"fmt"
"io"
"log"
"os"
"strings"
)
func checkSum(data io.Reader) {
scanner := bufio.NewScanner(data)
twice := 0
thrice := 0
for scanner.Scan() {
x := scanner.Text()
var count int
twie := 0
trie := 0
for _, char := range x {
count = strings.Count(x, string(char))
if count == 2 {
twie++
if twie < 2 {
twice++
}
} else if count == 3 {
trie++
if trie < 2 {
thrice++
}
}
count = 0
}
fmt.Println(thrice * twice)
}
}
func main() {
f, err := os.Open("input.txt")
if err != nil {
log.Fatal(err)
}
scanner := bufio.NewScanner(f)
arrayID := make([]string, 0)
for scanner.Scan() {
x := scanner.Text()
for _, val := range arrayID {
r, num := diff(x, val)
if num == 1 {
fmt.Println(string(r))
}
}
arrayID = append(arrayID, x)
}
f.Close()
}
// removeDifferentChars compares a and b position by position, returning
// the characters that match (in order) and the number of positions that
// differ.
// NOTE(review): indexes b by a's positions, so it assumes len(b) >= len(a)
// and byte-wide (ASCII) characters — confirm against callers.
func removeDifferentChars(a string, b string) ([]rune, int) {
	diffs := 0
	matching := make([]rune, 0)
	for i := 0; i < len(a); i++ {
		if a[i] == b[i] {
			matching = append(matching, rune(a[i]))
			continue
		}
		diffs++
	}
	return matching, diffs
}
|
package main
import "fmt"
// main demonstrates slice length vs capacity and append's growth behavior.
func main() {
	nums := make([]int, 10) // an int slice with 10 positions
	nums[9] = 12
	fmt.Println(nums)
	nums = make([]int, 10, 20) // 10 elements with room for 20
	// len is the element count; cap is the real size of the backing array
	fmt.Println(nums, len(nums), cap(nums))
	nums = append(nums, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0) // append 10 more elements at the end
	fmt.Println(nums)
	fmt.Println(nums, len(nums), cap(nums)) // now 20 elements and capacity 20
	nums = append(nums, 1) // append one more element to a full slice
	fmt.Println(nums, len(nums), cap(nums)) // the capacity doubles, from 20 to 40
}
|
package model
import (
pb "github.com/eriklupander/tradfri-go/grpc_server/golang"
"time"
)
// ToDeviceResponse maps a Device carrying blind-control data onto a
// BlindResponse; devices without blind controls yield the zero value.
func ToDeviceResponse(device Device) BlindResponse {
	if len(device.BlindControl) == 0 {
		return BlindResponse{}
	}
	meta := DeviceMetadata{
		Name:    device.Name,
		Id:      device.DeviceId,
		Type:    device.Metadata.TypeName,
		Vendor:  device.Metadata.Vendor,
		Battery: device.Metadata.Battery,
	}
	return BlindResponse{
		DeviceMetadata: meta,
		Position:       device.BlindControl[0].Position,
	}
}
// ToDeviceResponseProto maps a Device carrying blind-control data onto
// its gRPC representation; devices without blind controls yield an empty
// message.
func ToDeviceResponseProto(device Device) *pb.Device {
	if len(device.BlindControl) == 0 {
		return &pb.Device{}
	}
	meta := &pb.DeviceMetadata{
		Name:    device.Name,
		Id:      int32(device.DeviceId),
		Type:    device.Metadata.TypeName,
		Vendor:  device.Metadata.Vendor,
		Battery: int(device.Metadata.Battery),
	}
	return &pb.Device{
		Metadata: meta,
		Position: float32(device.BlindControl[0].Position),
	}
}
// ToGroupResponse maps a Group onto a GroupResponse, formatting the
// creation timestamp (Num9002, seconds since epoch) as RFC 3339.
func ToGroupResponse(group Group) GroupResponse {
	created := time.Unix(int64(group.Num9002), 0).Format(time.RFC3339)
	return GroupResponse{
		Id:         group.DeviceId,
		Created:    created,
		DeviceList: group.Content.DeviceList.DeviceIds,
	}
}
// ToGroupResponseProto maps a Group onto its gRPC representation,
// converting member device ids to int32.
func ToGroupResponseProto(group Group) *pb.Group {
	deviceIDs := group.Content.DeviceList.DeviceIds
	ids := make([]int32, len(deviceIDs))
	for i, id := range deviceIDs {
		ids[i] = int32(id)
	}
	return &pb.Group{
		Id:      int32(group.DeviceId),
		Created: time.Unix(int64(group.Num9002), 0).Format(time.RFC3339),
		Devices: ids,
	}
}
|
package revocation
import (
"io/ioutil"
"net/http"
"github.com/dbogatov/dac-lib/dac"
"github.com/dbogatov/fabric-amcl/amcl"
"github.com/dbogatov/fabric-amcl/amcl/FP256BN"
)
var skRevoke dac.SK
var ys []interface{}
var epoch *FP256BN.BIG
// RunServer ...
func RunServer() {
logger.Notice("Server starting. Ctl+C to stop")
ys, _, skRevoke, _, epoch = generateAuthority()
http.HandleFunc("/", handleRevocationRequest)
http.ListenAndServe(":8765", nil)
}
// handleRevocationRequest reads a user public key from the request body
// and responds with a non-revocation signature for the current epoch.
// Malformed requests receive a 400 instead of panicking the handler.
func handleRevocationRequest(w http.ResponseWriter, r *http.Request) {
	// static PRG... OK for simulations
	prg := amcl.NewRAND()
	prg.Seed(1, []byte{0x15})
	defer r.Body.Close()
	// Fix: the read error was discarded, letting a truncated body flow
	// into PointFromBytes.
	userPKBytes, err := ioutil.ReadAll(r.Body)
	if err != nil {
		http.Error(w, "cannot read request body", http.StatusBadRequest)
		return
	}
	userPK, e := dac.PointFromBytes(userPKBytes)
	if e != nil {
		// Fix: previously panicked on a malformed key; a bad client input
		// should yield a 400, not a handler panic.
		http.Error(w, "malformed user public key", http.StatusBadRequest)
		return
	}
	signature := dac.SignNonRevoke(prg, skRevoke, userPK, epoch, ys)
	signatureBytes := signature.ToBytes()
	logger.Debug("Granting handle.")
	if _, err := w.Write(signatureBytes); err != nil {
		logger.Debug("failed to write response")
	}
}
|
/*
* @Author: Sy.
* @Create: 2019-11-01 20:54:15
* @LastTime: 2019-11-16 18:36:07
* @LastEdit: Sy.
* @FilePath: \server\models\admin.go
* @Description: 管理员
*/
package models
import (
"github.com/astaxie/beego/orm"
)
// Admin is the ORM model for a back-office administrator account.
type Admin struct {
	Id int
	LoginName string // login identifier; queried by AdminGetByName
	RealName string
	Password string // NOTE(review): storage format (hash vs plaintext) not visible here — Salt suggests hashed; confirm
	RoleIds string
	Phone string
	Email string
	Salt string
	LastLogin int64
	LastIp string
	Status int
	CreateId int
	UpdateId int
	Delete int // soft-delete flag, presumably — confirm against queries
	CreateTime int64
	UpdateTime int64
}
// TABLE_ADMIN is the unprefixed table name for Admin rows.
const TABLE_ADMIN = "admin_user"
// TableName returns the fully-prefixed table name used by the ORM.
func (a *Admin) TableName() string {
	return TableName(TABLE_ADMIN)
}
// AdminAdd inserts a new admin row and returns the generated id.
func AdminAdd(a *Admin) (int64, error) {
	o := orm.NewOrm()
	return o.Insert(a)
}
// AdminGetByName fetches the admin whose login_name matches loginName.
func AdminGetByName(loginName string) (*Admin, error) {
	admin := &Admin{}
	query := orm.NewOrm().QueryTable(TableName(TABLE_ADMIN))
	if err := query.Filter("login_name", loginName).One(admin); err != nil {
		return nil, err
	}
	return admin, nil
}
// AdminGetList returns one page of admins ordered by descending id,
// together with the total matching row count. filters are consumed as
// (column, value) pairs.
func AdminGetList(page, pageSize int, filters ...interface{}) ([]*Admin, int64) {
	query := orm.NewOrm().QueryTable(TableName(TABLE_ADMIN))
	for i := 0; i < len(filters); i += 2 {
		query = query.Filter(filters[i].(string), filters[i+1])
	}
	total, _ := query.Count()
	admins := make([]*Admin, 0)
	query.OrderBy("-id").Limit(pageSize, (page-1)*pageSize).All(&admins)
	return admins, total
}
// AdminGetById fetches the admin with the given primary key.
func AdminGetById(id int) (*Admin, error) {
	admin := &Admin{}
	query := orm.NewOrm().QueryTable(TableName(TABLE_ADMIN))
	if err := query.Filter("id", id).One(admin); err != nil {
		return nil, err
	}
	return admin, nil
}
// Update persists the receiver, limited to the named fields when any are
// given.
func (a *Admin) Update(fields ...string) error {
	_, err := orm.NewOrm().Update(a, fields...)
	return err
}
// func RoleAuthDelete(id int) (int64, error) {
// query := orm.NewOrm().QueryTable(TableName("role_auth"))
// return query.Filter("role_id", id).Delete()
// }
// func RoleAuthMultiAdd(ras []*RoleAuth) (n int, err error) {
// query := orm.NewOrm().QueryTable(TableName("role_auth"))
// i, _ := query.PrepareInsert()
// for _, ra := range ras {
// _, err := i.Insert(ra)
// if err == nil {
// n = n + 1
// }
// }
// i.Close() // 别忘记关闭 statement
// return n, err
// }
|
package core
import (
"fmt"
)
// Int64 is an extended int64 that tracks whether it holds a valid value
// (similar in spirit to sql.NullInt64) and carries an optional Context.
type Int64 struct {
	int64
	Valid bool
	Context *Context
}
// String renders the value in decimal, or "" when the value is invalid.
func (n Int64) String() string {
	if !n.Valid {
		return ""
	}
	return fmt.Sprint(n.int64)
}
// Parse stores i and marks the value as valid.
func (n *Int64) Parse(i int64) {
	n.int64, n.Valid = i, true
}
// Int64 returns the wrapped value regardless of the Valid flag.
func (this *Int64) Int64() int64 {
	return this.int64
}
// Int64Ptr returns a pointer to the wrapped value, or nil when invalid.
// Note the pointer aliases the receiver's field, so writes through it
// mutate the receiver.
func (n *Int64) Int64Ptr() *int64 {
	if !n.Valid {
		return nil
	}
	return &n.int64
}
// Add adds i's value in place (nil i is a no-op) and returns the
// receiver for chaining. The Valid flag is left untouched.
func (n *Int64) Add(i *Int64) *Int64 {
	if i == nil {
		return n
	}
	n.int64 += i.int64
	return n
}
// Sub subtracts i's value in place (nil i is a no-op) and returns the
// receiver for chaining. The Valid flag is left untouched.
func (n *Int64) Sub(i *Int64) *Int64 {
	if i == nil {
		return n
	}
	n.int64 -= i.int64
	return n
}
// Div divides by i's value in place (nil i is a no-op) and returns the
// receiver for chaining. NOTE(review): a zero divisor panics, matching
// the original behavior — confirm callers guard against it.
func (n *Int64) Div(i *Int64) *Int64 {
	if i == nil {
		return n
	}
	n.int64 /= i.int64
	return n
}
// Mul multiplies by i's value in place (nil i is a no-op) and returns
// the receiver for chaining. The Valid flag is left untouched.
func (n *Int64) Mul(i *Int64) *Int64 {
	if i == nil {
		return n
	}
	n.int64 *= i.int64
	return n
}
|
package spudo
import (
"errors"
"flag"
"math/rand"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"time"
"github.com/BurntSushi/toml"
"github.com/bwmarrin/discordgo"
"github.com/robfig/cron/v3"
)
// Config contains all options for the config file (loaded from TOML by
// loadConfig).
type Config struct {
	Token string // bot authentication token
	CommandPrefix string // prefix that marks a message as a command
	CooldownTimer int // per-user command cooldown — presumably seconds; confirm against usage
	CooldownMessage string // reply sent while a user is on cooldown
	UnknownCommandMessage string // reply sent for unrecognized commands
	AudioEnabled bool
	RESTEnabled bool
	RESTPort string
}
// Spudo contains everything about the bot itself
type Spudo struct {
	sync.Mutex   // NOTE(review): neither mutex is locked in this file — confirm intended use
	CommandMutex sync.Mutex
	*session     // embedded Discord session wrapper (provides SendMessage, AddHandler, ...)
	Config Config
	CooldownList map[string]time.Time // last command time per user ID
	TimersStarted bool                // set once timed messages have been scheduled
	logger *spudoLogger
	spudoCommands map[string]*spudoCommand // commands executed with author+channel+args
	commands map[string]*command           // plugin commands executed with author+args
	startupPlugins []*startupPlugin        // run once when the session reports ready
	timedMessages []*timedMessage          // cron-scheduled messages
	userReactions []*userReaction          // auto-reactions keyed by message author
	messageReactions []*messageReaction    // auto-reactions keyed by trigger words
	audioSessions map[string]*spAudio      // active voice sessions (only when audio enabled)
}
type unknownCommand string
// Initialize will initialize everything Spudo needs to run.
func Initialize() *Spudo {
	bot := newSpudo()
	configPath := flag.String("config", "./config.toml", "TODO")
	flag.Parse()
	// When no config file exists yet, write a minimal one before
	// attempting to load it.
	_, statErr := os.Stat(*configPath)
	if os.IsNotExist(statErr) {
		bot.logger.info("Config not detected, creating minimal config...")
		if err := bot.createMinimalConfig(); err != nil {
			bot.logger.fatal("Failed to create minimal config", err)
		}
	}
	if err := bot.loadConfig(*configPath); err != nil {
		bot.logger.fatal(err.Error())
	}
	return bot
}
// newSpudo setups up the logger and maps for the plugins.
func newSpudo() *Spudo {
	return &Spudo{
		CooldownList:     make(map[string]time.Time),
		logger:           newLogger(),
		commands:         make(map[string]*command),
		timedMessages:    make([]*timedMessage, 0),
		userReactions:    make([]*userReaction, 0),
		messageReactions: make([]*messageReaction, 0),
		spudoCommands:    make(map[string]*spudoCommand),
	}
}
// Returns the default config settings for the bot
// (audio/REST options default to their zero values: disabled).
func getDefaultConfig() Config {
	var c Config
	c.Token = ""
	c.CommandPrefix = "!"
	c.CooldownTimer = 10
	c.CooldownMessage = "Too many commands at once!"
	c.UnknownCommandMessage = "Invalid command!"
	return c
}
// createMinimalConfig prompts the user to enter a Token and for the
// Config. This is used if no config is found.
func (sp *Spudo) createMinimalConfig() error {
	sp.Config = getDefaultConfig()
	const path = "./config.toml"
	file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600)
	if err != nil {
		return err
	}
	defer file.Close()
	return toml.NewEncoder(file).Encode(sp.Config)
}
// loadConfig applies the defaults, then overlays values from the TOML
// file at configPath. A missing token is treated as a config error.
func (sp *Spudo) loadConfig(configPath string) error {
	// Set default config
	sp.Config = getDefaultConfig()
	_, err := toml.DecodeFile(configPath, &sp.Config)
	if err != nil {
		return errors.New("Failed to read config - " + err.Error())
	}
	if sp.Config.Token == "" {
		return errors.New("no token in config")
	}
	return nil
}
// Start will add handler functions to the Session and open the
// websocket connection
//
// It blocks until SIGINT/SIGTERM arrives, then runs quit for a clean
// shutdown. Any failure to create the session or open the websocket is
// fatal.
func (sp *Spudo) Start() {
	rand.Seed(time.Now().UnixNano())
	var err error
	// The Discord session must exist before anything else is wired up.
	if sp.session, err = newSession(sp.Config.Token, sp.logger); err != nil {
		sp.logger.fatal(err.Error())
	}
	if sp.Config.AudioEnabled {
		sp.addAudioCommands()
		sp.audioSessions = make(map[string]*spAudio)
		// Background watcher for dropped voice connections.
		go sp.watchForDisconnect()
		sp.logger.info("Audio commands added")
	}
	if sp.Config.RESTEnabled {
		go sp.startRESTApi()
	}
	sp.AddHandler(sp.onReady)
	sp.AddHandler(sp.onMessageCreate)
	if err := sp.Open(); err != nil {
		sp.logger.fatal("Error opening websocket connection -", err)
	}
	sp.logger.info("Bot is now running. Press CTRL-C to exit.")
	// Block until an interrupt/terminate signal, then shut down.
	c := make(chan os.Signal, 1)
	signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
	<-c
	sp.quit()
}
// quit handles everything that needs to occur for the bot to shutdown cleanly.
//
// Cleanup errors are logged but do not abort the remaining steps: the
// previous code called logger.fatal on the first failed voice
// disconnect, which stopped mid-cleanup and skipped closing the other
// voice sessions and the Discord session itself. A requested shutdown
// also now exits with status 0 instead of 1.
func (sp *Spudo) quit() {
	sp.logger.info("Bot is now shutting down")
	for _, as := range sp.audioSessions {
		if err := as.Voice.Disconnect(); err != nil {
			sp.logger.error("Error disconnecting from voice channel:", err)
		}
	}
	if err := sp.Close(); err != nil {
		sp.logger.error("Error closing discord session:", err)
	}
	os.Exit(0)
}
// onReady runs once the Discord session reports ready: execute every
// startup plugin, then schedule timed messages exactly once.
func (sp *Spudo) onReady(s *discordgo.Session, r *discordgo.Ready) {
	for _, plugin := range sp.startupPlugins {
		plugin.Exec()
	}
	if sp.TimersStarted {
		return
	}
	sp.startTimedMessages()
}
// onMessageCreate fans each incoming (non-bot) message out to the
// command and reaction handlers, each on its own goroutine.
func (sp *Spudo) onMessageCreate(s *discordgo.Session, m *discordgo.MessageCreate) {
	// Always ignore bot users (including itself)
	if m.Author.Bot {
		return
	}
	go sp.handleCommand(m)
	go sp.handleUserReaction(m)
	go sp.handleMessageReaction(m)
}
// sendPrivateMessage creates a UserChannel before attempting to send
// a message directly to a user rather than in the server channel.
// Accepted message types: string and *discordgo.MessageEmbed; anything
// else is silently dropped.
func (sp *Spudo) sendPrivateMessage(userID string, message interface{}) {
	channel, err := sp.UserChannelCreate(userID)
	if err != nil {
		sp.logger.error("Error creating private channel -", err)
		return
	}
	if text, ok := message.(string); ok {
		sp.SendMessage(channel.ID, text)
		return
	}
	if embed, ok := message.(*discordgo.MessageEmbed); ok {
		sp.SendEmbed(channel.ID, embed)
	}
}
// respondToUser is a helper method around SendMessage that will
// mention the user who created the message.
func (sp *Spudo) respondToUser(m *discordgo.MessageCreate, response string) {
	reply := m.Author.Mention() + " " + response
	sp.SendMessage(m.ChannelID, reply)
}
// attemptCommand will check if comStr is in the commands map. If it
// is, it will return the command response as resp and whether or not
// the message should be sent privately as private.
// Built-in spudoCommands take precedence over plugin commands; an
// unmatched name yields the configured unknown-command reply.
func (sp *Spudo) attemptCommand(author, channel, comStr string, args []string) (resp interface{}, private bool) {
	if spCom, ok := sp.spudoCommands[comStr]; ok {
		return spCom.Exec(author, channel, args...), false
	}
	if com, ok := sp.commands[comStr]; ok {
		return com.Exec(author, args), com.PrivateResponse
	}
	return unknownCommand(sp.Config.UnknownCommandMessage), false
}
// handleCommand parses a prefixed message into a command name plus
// arguments, runs it via attemptCommand, and delivers the response in
// whatever form the command returned (text, embed, complex message,
// voice status, or the unknown-command reply).
func (sp *Spudo) handleCommand(m *discordgo.MessageCreate) {
	if !strings.HasPrefix(m.Content, sp.Config.CommandPrefix) {
		return
	}
	// Per-user rate limit: refuse and notify while still on cooldown.
	if !sp.canPost(m.Author.ID) {
		sp.respondToUser(m, sp.Config.CooldownMessage)
		return
	}
	// First word (lowercased) is the command; the rest are its args.
	commandText := strings.Split(strings.TrimPrefix(m.Content, sp.Config.CommandPrefix), " ")
	com := strings.ToLower(commandText[0])
	args := commandText[1:]
	commandResp, isPrivate := sp.attemptCommand(m.Author.ID, m.ChannelID, com, args)
	switch v := commandResp.(type) {
	case nil: // For commands that do not need a response
	case string:
		if isPrivate {
			sp.sendPrivateMessage(m.Author.ID, v)
		} else {
			sp.respondToUser(m, v)
		}
		sp.startCooldown(m.Author.ID)
	case *Embed:
		if isPrivate {
			sp.sendPrivateMessage(m.Author.ID, v.MessageEmbed)
		} else {
			sp.SendEmbed(m.ChannelID, v.MessageEmbed)
		}
		sp.startCooldown(m.Author.ID)
	case *Complex:
		// Complex responses carry an open file attachment; close it
		// after the message is sent.
		defer v.file.Close()
		sp.SendComplex(m.ChannelID, v.MessageSend)
		sp.startCooldown(m.Author.ID)
	case voiceCommand:
		sp.SendMessage(m.ChannelID, string(v))
	case unknownCommand:
		// NOTE(review): unknown and voice commands do not start the
		// cooldown — presumably intentional; confirm.
		sp.respondToUser(m, string(v))
	}
}
// handleUserReaction adds every configured reaction to messages posted
// by authors on a userReaction's watch list.
func (sp *Spudo) handleUserReaction(m *discordgo.MessageCreate) {
	for _, ur := range sp.userReactions {
		for _, id := range ur.UserIDs {
			if id != m.Author.ID {
				continue
			}
			for _, reaction := range ur.ReactionIDs {
				sp.AddReaction(m, reaction)
			}
		}
	}
}
// handleMessageReaction adds reactions to any message whose content
// contains one of a messageReaction's trigger words (case-insensitive).
func (sp *Spudo) handleMessageReaction(m *discordgo.MessageCreate) {
	content := strings.ToLower(m.Content)
	for _, mr := range sp.messageReactions {
		for _, trigger := range mr.TriggerWords {
			if !strings.Contains(content, strings.ToLower(trigger)) {
				continue
			}
			for _, reaction := range mr.ReactionIDs {
				sp.AddReaction(m, reaction)
			}
		}
	}
}
// Returns whether or not the user can issue a command based on a timer.
// Users never seen before may always post.
// NOTE(review): CooldownList is read here from goroutines spawned per
// message with no visible locking — confirm synchronization elsewhere.
func (sp *Spudo) canPost(user string) bool {
	lastPost, known := sp.CooldownList[user]
	if !known {
		return true
	}
	return time.Since(lastPost).Seconds() > float64(sp.Config.CooldownTimer)
}
// Adds user to cooldown list.
// NOTE(review): this map write happens on per-message goroutines with
// no visible locking — confirm synchronization elsewhere.
func (sp *Spudo) startCooldown(user string) {
	now := time.Now()
	sp.CooldownList[user] = now
}
// Starts all TimedMessages.
//
// Each timed message gets its own UTC cron scheduler; a message whose
// cron spec fails to parse is logged and skipped.
func (sp *Spudo) startTimedMessages() {
	for _, p := range sp.timedMessages {
		// Shadow the loop variable: the closure registered with cron
		// outlives this iteration. Before Go 1.22 every closure would
		// otherwise share one variable and all timers would fire the
		// final timedMessage in the slice.
		p := p
		c := cron.New(cron.WithLocation(time.UTC))
		if _, err := c.AddFunc(p.CronString, func() {
			timerFunc := p.Exec()
			switch v := timerFunc.(type) {
			case string:
				for _, chanID := range p.Channels {
					sp.SendMessage(chanID, v)
				}
			case *Embed:
				for _, chanID := range p.Channels {
					sp.SendEmbed(chanID, v.MessageEmbed)
				}
			}
		}); err != nil {
			sp.logger.error("Error starting "+p.Name+" timed message - ", err)
			continue
		}
		c.Start()
	}
	sp.TimersStarted = true
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package main
import (
"io"
"strings"
"text/template"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
)
const ordSyncTmpl = "pkg/sql/colexec/ordered_synchronizer_tmpl.go"
// genOrderedSynchronizer renders the ordered synchronizer template: it
// rewrites the placeholder tokens in inputFileContents into Go
// template directives, parses the result, and executes it into wr.
func genOrderedSynchronizer(inputFileContents string, wr io.Writer) error {
	replacer := strings.NewReplacer(
		"_CANONICAL_TYPE_FAMILY", "{{.CanonicalTypeFamilyStr}}",
		"_TYPE_WIDTH", typeWidthReplacement,
		"_GOTYPESLICE", "{{.GoTypeSliceName}}",
		"_TYPE", "{{.VecMethod}}",
	)
	body := replaceManipulationFuncs(replacer.Replace(inputFileContents))
	tmpl, err := template.New("ordered_synchronizer").Parse(body)
	if err != nil {
		return err
	}
	// It doesn't matter that we're passing in all overloads of Equality
	// comparison operator - we simply need to iterate over all supported
	// types.
	return tmpl.Execute(wr, sameTypeComparisonOpToOverloads[tree.EQ])
}
// init registers this generator so the build emits
// ordered_synchronizer.eg.go from the template at ordSyncTmpl.
func init() {
	registerGenerator(genOrderedSynchronizer, "ordered_synchronizer.eg.go", ordSyncTmpl)
}
|
///
/// Websocket connection to send and receive data
/// through a web interface
///
package main
import (
"io"
"log"
"net/http"
"time"
"github.com/gorilla/websocket"
)
const (
	// Time allowed to write a message.
	writeWait = 10 * time.Second
	// Time allowed to read the next message
	readWaitTime = 60 * time.Second
	// Send pings to peer with this period. Must be less than readWaitTime.
	pingPeriod = (readWaitTime * 9) / 10
	// Maximum message size allowed from peer.
	maxMessageSize = 512
)
// upgrader sets the buffer sizes for the websocket.
// No CheckOrigin is set, so gorilla's default same-origin policy applies.
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
}
// websocketConn struct keeps the Websocket connection.
// It also holds the buffer for the data incoming
// from the websocket.
type websocketConn struct {
	// The websocket connection.
	ws *websocket.Conn
	// Buffered channel of outbound messages.
	send chan []byte
}
// reader is a Websocket reader
//
// Runs until the peer closes the connection or a read fails, pushing
// every received message onto echo.serialBroadcast. On exit it
// unregisters the connection from echo and closes the socket.
func (wsConn *websocketConn) reader() {
	defer func() {
		log.Print("Close the websocket connection from Reader")
		echo.unregister <- wsConn
		wsConn.ws.Close()
	}()
	// Init the websocket reader
	wsConn.ws.SetReadLimit(maxMessageSize)
	// NOTE(review): the initial read deadline is commented out, so the
	// pong handler below only extends a deadline that was never armed
	// — reads can block indefinitely. Confirm whether intentional.
	//wsConn.ws.SetReadDeadline(time.Now().Add(readWaitTime))
	wsConn.ws.SetPongHandler(func(string) error { wsConn.ws.SetReadDeadline(time.Now().Add(readWaitTime)); return nil })
	for {
		// Block until a message is received from the websocket
		_, message, err := wsConn.ws.ReadMessage()
		if err != nil {
			if err == io.EOF {
				// Connection is closed with EOF so return
				log.Println("EOF in ws")
				break
			}
			log.Println("Error reading data from ws. " + err.Error())
			break
		}
		log.Println("Websocket message: " + string(message))
		echo.serialBroadcast <- message
	}
}
// write writes a message with the given message type and payload.
// A write deadline of writeWait is armed before each send.
func (wsConn *websocketConn) write(mt int, payload []byte) error {
	log.Print("Send data to websocket.")
	deadline := time.Now().Add(writeWait)
	wsConn.ws.SetWriteDeadline(deadline)
	return wsConn.ws.WriteMessage(mt, payload)
}
// writer is a Websocket writer
//
// Drains the send channel onto the websocket, and pings the peer every
// pingPeriod so the reader's pong handler can keep extending the read
// deadline. (pingPeriod was declared — with a "send pings" comment —
// but never used, so no ping was ever sent and the pong handler could
// never fire.) A closed send channel or any write failure terminates
// the writer and closes the connection.
func (wsConn *websocketConn) writer() {
	ticker := time.NewTicker(pingPeriod)
	defer func() {
		log.Print("Close the websocket connection from writer")
		ticker.Stop()
		wsConn.ws.Close()
	}()
	for {
		select {
		// Block until a message is received to write to the websocket
		case message, ok := <-wsConn.send:
			if !ok {
				log.Println("Message for ws is not OK. ")
				wsConn.write(websocket.CloseMessage, []byte{})
				return
			}
			if err := wsConn.write(websocket.TextMessage, message); err != nil {
				log.Println("Error writing. " + err.Error())
				return
			}
		case <-ticker.C:
			// Keepalive; the peer's default ping handler replies with
			// a pong automatically.
			if err := wsConn.write(websocket.PingMessage, []byte{}); err != nil {
				return
			}
		}
	}
}
// wsHandler is the Websocket handler in the HTTP server.
// This will start websocket connection. It will then
// start the reader and writer for the websocket.
func wsHandler(w http.ResponseWriter, r *http.Request) {
	log.Print("Started a new websocket handler")
	if r.Method != "GET" {
		http.Error(w, "Method not allowed", 405)
		return
	}
	// Create a websocket and check it was created properly
	ws, err := upgrader.Upgrade(w, r, nil)
	if _, isHandshake := err.(websocket.HandshakeError); isHandshake {
		http.Error(w, "Not a websocket handshake", 400)
		return
	}
	if err != nil {
		log.Println("Error opening socket: " + err.Error())
		return
	}
	// Make a async channel to create the websocket connection
	// This will block until the buffer is full
	conn := &websocketConn{send: make(chan []byte, 256*10), ws: ws}
	// Register the connection with echo
	echo.register <- conn
	log.Println("Create Websocket")
	// GoRoutine for the writer
	go conn.writer()
	log.Println("Create Websocket writer")
	// Reader
	conn.reader()
	log.Println("Create Websocket reader")
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.