text stringlengths 11 4.05M |
|---|
package repo
// UsersRepoStruct is the users repository implementation. Database query
// functions belong here; calls that consume another service must not be
// defined on this type. (Reworded from the original author's comment.)
type UsersRepoStruct struct {
}
// UsersInterface lists the users-repository operations exposed to callers.
type UsersInterface interface {
	// CreateUsers persists a new user record.
	CreateUsers() error
}
// UsersTokenRepoImp constructs the users repository and hands it back
// behind the UsersInterface abstraction.
func UsersTokenRepoImp() UsersInterface {
	return new(UsersRepoStruct)
}
// CreateUsers persists a new user record.
// NOTE(review): currently a stub that always reports success — confirm a
// real implementation is planned before relying on it.
func (g *UsersRepoStruct) CreateUsers() error {
	return nil
}
|
package generator_test
import (
"fmt"
"github.com/bmdelacruz/generator"
)
// ExampleGenerator_Next demonstrates Generator#Next against generator
// funcs that return with and without yielding. The // Output: block below
// is verified by "go test", so the printed lines must match exactly.
func ExampleGenerator_Next() {
	// Generator func that doesn't call any of the controller
	// functions and returns a nil value
	fmt.Println("Case 1:")
	g1 := generator.New(
		func(gc *generator.Controller) (interface{}, error) {
			return nil, nil
		},
	)
	v, r, e := g1.Next(nil) // will receive (<nil> true <nil>)
	fmt.Println("Generator#Next(nil) returns (", v, r, e, ")")
	v, r, e = g1.Next(nil) // will receive (<nil> true <nil>)
	fmt.Println("Generator#Next(nil) returns (", v, r, e, ")")
	// Generator func that doesn't call any of the controller
	// functions and returns a non-nil value
	fmt.Println("Case 2:")
	g2 := generator.New(
		func(gc *generator.Controller) (interface{}, error) {
			return "yay!", nil
		},
	)
	v, r, e = g2.Next(nil) // will receive (yay! true <nil>)
	fmt.Println("Generator#Next(nil) returns (", v, r, e, ")")
	v, r, e = g2.Next(nil) // will receive (<nil> true <nil>)
	fmt.Println("Generator#Next(nil) returns (", v, r, e, ")")
	// Generator func that calls yields `1`, receives `"b"`,
	// and then returns a nil value
	fmt.Println("Case 3:")
	g3 := generator.New(
		func(gc *generator.Controller) (interface{}, error) {
			v, r, e := gc.Yield(1) // will receive (b false <nil>)
			fmt.Println("Controller#Yield(1) returns (", v, r, e, ")")
			return nil, nil
		},
	)
	v, r, e = g3.Next("a") // will receive (1 false <nil>)
	fmt.Println("Generator#Next(\"a\") returns (", v, r, e, ")")
	v, r, e = g3.Next("b") // will receive (<nil> true <nil>)
	fmt.Println("Generator#Next(\"b\") returns (", v, r, e, ")")
	v, r, e = g3.Next("c") // will receive (<nil> true <nil>)
	fmt.Println("Generator#Next(\"c\") returns (", v, r, e, ")")
	// Output:
	// Case 1:
	// Generator#Next(nil) returns ( <nil> true <nil> )
	// Generator#Next(nil) returns ( <nil> true <nil> )
	// Case 2:
	// Generator#Next(nil) returns ( yay! true <nil> )
	// Generator#Next(nil) returns ( <nil> true <nil> )
	// Case 3:
	// Generator#Next("a") returns ( 1 false <nil> )
	// Controller#Yield(1) returns ( b false <nil> )
	// Generator#Next("b") returns ( <nil> true <nil> )
	// Generator#Next("c") returns ( <nil> true <nil> )
}
// ExampleGenerator_Return demonstrates Generator#Return before and after a
// Next call. The // Output: block is verified by "go test".
func ExampleGenerator_Return() {
	fmt.Println("Case 1:")
	// `Generator#Return` will wait until the generator function stops
	// from executing which in turn execute all the `Println`s within
	// the generator `Func` before the `Generator#Return`'s result is
	// printed.
	//
	// Note that this does not have the same behaviour as the generator
	// in JS. In JS, calling iterator's return will immediately return
	// from the current yield statement; the statements that come after
	// that won't be executed.
	g1 := generator.New(
		func(gc *generator.Controller) (interface{}, error) {
			v, r, e := gc.Yield(1)
			fmt.Println("Controller#Yield(1) returns (", v, r, e, ")")
			v, r, e = gc.Yield(2)
			fmt.Println("Controller#Yield(2) returns (", v, r, e, ")")
			return nil, nil
		},
	)
	v, r, e := g1.Return("a")
	fmt.Println("Generator#Return(\"a\") returns (", v, r, e, ")")
	v, r, e = g1.Next("b")
	fmt.Println("Generator#Next(\"b\") returns (", v, r, e, ")")
	fmt.Println("Case 2:")
	g2 := generator.New(
		func(gc *generator.Controller) (interface{}, error) {
			v, r, e := gc.Yield(1)
			fmt.Println("Controller#Yield(1) returns (", v, r, e, ")")
			v, r, e = gc.Yield(2)
			fmt.Println("Controller#Yield(2) returns (", v, r, e, ")")
			return nil, nil
		},
	)
	v, r, e = g2.Next("a")
	fmt.Println("Generator#Next(\"a\") returns (", v, r, e, ")")
	v, r, e = g2.Return("b")
	fmt.Println("Generator#Return(\"b\") returns (", v, r, e, ")")
	v, r, e = g2.Next("c")
	fmt.Println("Generator#Next(\"c\") returns (", v, r, e, ")")
	// Output:
	// Case 1:
	// Controller#Yield(1) returns ( a true <nil> )
	// Controller#Yield(2) returns ( <nil> true <nil> )
	// Generator#Return("a") returns ( <nil> true <nil> )
	// Generator#Next("b") returns ( <nil> true <nil> )
	// Case 2:
	// Generator#Next("a") returns ( 1 false <nil> )
	// Controller#Yield(1) returns ( b true <nil> )
	// Controller#Yield(2) returns ( <nil> true <nil> )
	// Generator#Return("b") returns ( <nil> true <nil> )
	// Generator#Next("c") returns ( <nil> true <nil> )
}
// ExampleGenerator_Error demonstrates Generator#Error before and after a
// Next call; the injected error surfaces at the generator's pending Yield.
// The // Output: block is verified by "go test".
func ExampleGenerator_Error() {
	fmt.Println("Case 1:")
	g1 := generator.New(
		func(gc *generator.Controller) (interface{}, error) {
			v, r, e := gc.Yield(1)
			fmt.Println("Controller#Yield(1) returns (", v, r, e, ")")
			v, r, e = gc.Yield(2)
			fmt.Println("Controller#Yield(2) returns (", v, r, e, ")")
			return nil, nil
		},
	)
	v, r, e := g1.Error(
		fmt.Errorf("some_error"),
	)
	fmt.Println("Generator#Error(fmt.Errorf(\"some_error\")) returns (", v, r, e, ")")
	v, r, e = g1.Next("b")
	fmt.Println("Generator#Next(\"b\") returns (", v, r, e, ")")
	v, r, e = g1.Next("c")
	fmt.Println("Generator#Next(\"c\") returns (", v, r, e, ")")
	fmt.Println("Case 2:")
	g2 := generator.New(
		func(gc *generator.Controller) (interface{}, error) {
			v, r, e := gc.Yield(1)
			fmt.Println("Controller#Yield(1) returns (", v, r, e, ")")
			v, r, e = gc.Yield(2)
			fmt.Println("Controller#Yield(2) returns (", v, r, e, ")")
			return nil, nil
		},
	)
	v, r, e = g2.Next("a")
	fmt.Println("Generator#Next(\"a\") returns (", v, r, e, ")")
	v, r, e = g2.Error(
		fmt.Errorf("some_error"),
	)
	fmt.Println("Generator#Error(fmt.Errorf(\"some_error\")) returns (", v, r, e, ")")
	v, r, e = g2.Next("b")
	fmt.Println("Generator#Next(\"b\") returns (", v, r, e, ")")
	// Output:
	// Case 1:
	// Generator#Error(fmt.Errorf("some_error")) returns ( <nil> false <nil> )
	// Controller#Yield(1) returns ( <nil> false some_error )
	// Generator#Next("b") returns ( 2 false <nil> )
	// Controller#Yield(2) returns ( c false <nil> )
	// Generator#Next("c") returns ( <nil> true <nil> )
	// Case 2:
	// Generator#Next("a") returns ( 1 false <nil> )
	// Controller#Yield(1) returns ( <nil> false some_error )
	// Generator#Error(fmt.Errorf("some_error")) returns ( 2 false <nil> )
	// Controller#Yield(2) returns ( b false <nil> )
	// Generator#Next("b") returns ( <nil> true <nil> )
}
|
// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package jobs
import (
"context"
"github.com/cockroachdb/cockroach/pkg/jobs/jobspb"
"github.com/cockroachdb/cockroach/pkg/kv"
)
// FakeResumer calls optional callbacks during the job lifecycle.
// Any nil callback is simply skipped by the corresponding method.
type FakeResumer struct {
	OnResume     func(context.Context) error // invoked by Resume before Success
	FailOrCancel func(context.Context) error // invoked by OnFailOrCancel
	Success      func() error                // invoked by Resume after OnResume succeeds
	PauseRequest onPauseRequestFunc          // invoked by OnPauseRequest
}
// Compile-time check that FakeResumer satisfies the Resumer interface.
var _ Resumer = FakeResumer{}

// Resume runs the OnResume callback (when set) and, if that succeeds,
// the Success callback (when set), returning the first error seen.
func (d FakeResumer) Resume(ctx context.Context, execCtx interface{}) error {
	if d.OnResume != nil {
		err := d.OnResume(ctx)
		if err != nil {
			return err
		}
	}
	if d.Success == nil {
		return nil
	}
	return d.Success()
}
// OnFailOrCancel delegates to the FailOrCancel callback, or is a no-op
// when none was provided.
func (d FakeResumer) OnFailOrCancel(ctx context.Context, _ interface{}) error {
	if d.FailOrCancel == nil {
		return nil
	}
	return d.FailOrCancel(ctx)
}
// OnPauseRequestFunc forwards the definition for use in tests.
type OnPauseRequestFunc = onPauseRequestFunc

// Compile-time check that FakeResumer satisfies PauseRequester.
var _ PauseRequester = FakeResumer{}

// OnPauseRequest delegates to the configured PauseRequest callback, or is
// a no-op when none was provided.
func (d FakeResumer) OnPauseRequest(
	ctx context.Context, execCtx interface{}, txn *kv.Txn, details *jobspb.Progress,
) error {
	if fn := d.PauseRequest; fn != nil {
		return fn(ctx, execCtx, txn, details)
	}
	return nil
}
// Started is a wrapper around the internal function that moves a job to the
// started state. Exported for tests; runs outside any transaction.
func (j *Job) Started(ctx context.Context) error {
	return j.started(ctx, nil /* txn */)
}
// Created is a test only function that inserts a new jobs table row via the
// deprecated insert path, with no txn, lease, or session.
func (j *Job) Created(ctx context.Context) error {
	return j.deprecatedInsert(ctx, nil /* txn */, j.ID(), nil /* lease */, nil /* session */)
}
// Paused is a wrapper around the internal function that moves a job to the
// paused state. Exported for tests; no txn and no state-change callback.
func (j *Job) Paused(ctx context.Context) error {
	return j.paused(ctx, nil /* txn */, nil /* fn */)
}
// Failed is a wrapper around the internal function that moves a job to the
// failed state, recording causingErr as the failure reason.
func (j *Job) Failed(ctx context.Context, causingErr error) error {
	return j.failed(ctx, nil /* txn */, causingErr, nil /* fn */)
}
// Succeeded is a wrapper around the internal function that moves a job to the
// succeeded state. Exported for tests; no txn and no state-change callback.
func (j *Job) Succeeded(ctx context.Context) error {
	return j.succeeded(ctx, nil /* txn */, nil /* fn */)
}
|
package db
import (
_ "github.com/lib/pq"
"github.com/stretchr/testify/mock"
)
// MockEmployeeRepo is a testify-based mock of the employee repository,
// driven by expectations registered through the embedded mock.Mock.
type MockEmployeeRepo struct {
	mock.Mock
}
// List returns all employee records, as configured through the mock's
// expectations. A nil first return value maps to (nil, error).
func (m *MockEmployeeRepo) List() ([]Employee, error) {
	args := m.Called()
	ret := args.Get(0)
	if ret == nil {
		return nil, args.Error(1)
	}
	return ret.([]Employee), args.Error(1)
}
|
package main
import "fmt"
// main masks a sample phone number and prints the result.
func main() {
	phone := "13564663499"
	masked, err := maskPhone(phone)
	if err != nil {
		// The original printed "invalid" and then fell through to the
		// slicing below, which would panic for any phone shorter than
		// 11 digits. Bail out instead.
		fmt.Println("invalid")
		return
	}
	fmt.Println(masked)
}

// maskPhone hides the middle digits of an 11-digit phone number, keeping
// the first three and last four digits visible (e.g. "135****3499").
// It returns an error when the input is not exactly 11 bytes long.
func maskPhone(phone string) (string, error) {
	if len(phone) != 11 {
		return "", fmt.Errorf("phone number must be 11 digits, got %d", len(phone))
	}
	return fmt.Sprintf("%s****%s", phone[0:3], phone[7:11]), nil
}
|
package main
import (
"fmt"
"log"
"io/ioutil"
"os"
// "strings"
"regexp"
"mustard/utils/page_analysis"
"mustard/internal/github.com/PuerkitoBio/goquery"
"strings"
"mustard/internal/gopkg.in/mgo.v2"
)
func check(e error) {
if e != nil {
panic(e)
}
}
// Fang is one rental listing scraped from a saved page.
// (Original comment: 标题,小区,租金,户型,地址,配置,经纬度 — title,
// community, rent, layout, address, configuration, coordinates.)
type Fang struct {
	Url            string // original listing URL, decoded from the saved file name
	Title          string // page <title>, whitespace-trimmed
	Community      string // residential community (div.xiaoqu)
	Cost           string // rent amount (em.house-price)
	PayMethod      string // payment scheme (span.pay-method)
	HouseType      string // layout summary (div.house-type)
	HouseType2     string // layout line from the detail list (contains 房屋)
	Address        string // address line from the detail list (contains 地址)
	Configuration  string // furnishing line from the detail list (contains 配置)
	BaiduLongitude string // longitude matched from the page's baidulon:'...' script
	BaiduLatitude  string // latitude matched from the page's baidulat:'...' script
	Longitude      string // longitude matched from the page's lon:'...' script
	Latitude       string // latitude matched from the page's lat:'...' script
	UpdateTime     string // span.pl10 line containing 更新时间 (update time)
}
// parse reconstructs the listing URL from the saved file name, loads the
// page, and extracts the listing fields (title, community, price, layout,
// address, configuration, coordinates) into a Fang record.
//
// The crawler saves pages under names where "://" was encoded as "___"
// and "/" as "_", so the last path component decodes back into the URL.
func parse(s string) Fang {
	var fa Fang

	// Decode the original URL from the file name.
	path := strings.Split(s, "/")
	rawURL := path[len(path)-1]
	rawURL = strings.Replace(rawURL, "___", "://", 1)
	rawURL = strings.Replace(rawURL, "_", "/", -1)
	fa.Url = rawURL

	dat, err := ioutil.ReadFile(s)
	check(err)
	page := string(dat) // convert once; the original re-converted per regexp

	var parser page_analysis.HtmlParser
	parser.Parse(rawURL, page)
	doc := parser.GetDocument()

	// clean collapses a selection's text by stripping newlines, tabs and
	// spaces — previously copy-pasted six times.
	clean := func(sel *goquery.Selection) string {
		str := strings.Replace(sel.Text(), "\n", "", -1)
		str = strings.Replace(str, "\t", "", -1)
		str = strings.Replace(str, " ", "", -1)
		return str
	}

	doc.Find("title").Each(func(i int, s *goquery.Selection) {
		fa.Title = strings.TrimSpace(s.Text())
	})
	doc.Find("div.xiaoqu").Each(func(i int, s *goquery.Selection) {
		str := clean(s)
		fmt.Println(str) // debug trace, kept from the original
		fa.Community = str
	})
	doc.Find("span.pay-method").Each(func(i int, s *goquery.Selection) {
		str := clean(s)
		fmt.Println(str)
		fa.PayMethod = str
	})
	doc.Find("em.house-price").Each(func(i int, s *goquery.Selection) {
		str := clean(s)
		fmt.Println(str)
		fa.Cost = str
	})
	doc.Find("div.house-type").Each(func(i int, s *goquery.Selection) {
		str := clean(s)
		fmt.Println(str)
		fa.HouseType = str
	})
	doc.Find("span.pl10").Each(func(i int, s *goquery.Selection) {
		str := clean(s)
		fmt.Println(str)
		if strings.Contains(str, "更新时间") {
			fa.UpdateTime = str
		}
	})
	doc.Find("li.house-primary-content-li").Each(func(i int, s *goquery.Selection) {
		str := clean(s)
		fmt.Println(str)
		if strings.Contains(str, "租金") {
			// rent line intentionally unused (was fa.CostFull)
		} else if strings.Contains(str, "房屋") {
			fa.HouseType2 = str
		} else if strings.Contains(str, "地址") {
			fa.Address = str
		} else if strings.Contains(str, "配置") {
			fa.Configuration = str
		}
	})

	// extract returns the first capture group of pattern within the page,
	// or "" when there is no match. The patterns are constant and valid,
	// so MustCompile cannot panic here; the original discarded the
	// Compile error entirely.
	extract := func(pattern string) string {
		if m := regexp.MustCompile(pattern).FindStringSubmatch(page); len(m) > 1 {
			return m[1]
		}
		return ""
	}
	fa.BaiduLongitude = extract(`baidulon:'(\d+\.\d+)'`)
	fa.BaiduLatitude = extract(`baidulat:'(\d+\.\d+)'`)
	fa.Longitude = extract(`,lon:'(\d+\.\d+)'`)
	fa.Latitude = extract(`,lat:'(\d+\.\d+)'`)
	return fa
}
// save2Mongo stores a parsed listing in the "crawl_58" collection of the
// local MongoDB "crawl" database. It panics if the dial fails and exits
// the process if the insert fails.
func save2Mongo(fa *Fang) {
	sess, err := mgo.Dial("127.0.0.1")
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	// Optional. Switch the session to a monotonic behavior.
	sess.SetMode(mgo.Monotonic, true)

	if err = sess.DB("crawl").C("crawl_58").Insert(fa); err != nil {
		log.Fatal(err)
	}
}
// main parses the saved page named on the command line and stores the
// resulting listing in MongoDB.
func main() {
	// The original indexed os.Args[1] unconditionally and panicked with an
	// index-out-of-range when run without an argument.
	if len(os.Args) < 2 {
		log.Fatal("usage: crawler <saved-page-file>")
	}
	fa := parse(os.Args[1])
	save2Mongo(&fa)
}
|
package usecases
import (
"github.com/falcosecurity/cloud-native-security-hub/pkg/vendor"
)
// RetrieveOneVendor is the use case for fetching a single vendor by ID.
type RetrieveOneVendor struct {
	VendorID         string            // identifier passed to the repository lookup
	VendorRepository vendor.Repository // data source queried by Execute
}
// Execute looks up the vendor identified by VendorID in the configured
// repository and returns it, or the repository's error.
func (useCase *RetrieveOneVendor) Execute() (res *vendor.Vendor, err error) {
	return useCase.VendorRepository.FindById(useCase.VendorID)
}
|
package caster
import "testing"
// BenchmarkToInt16 measures ToInt16 conversions across the supported input
// kinds, including a value (a channel) that cannot be converted to int16.
func BenchmarkToInt16(b *testing.B) {
	c := NewCaster()
	// Allocate the channel once, outside the timed loop: the original
	// called make(chan int) on every iteration, so the benchmark largely
	// measured channel allocation rather than ToInt16 itself.
	ch := make(chan int)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		c.ToInt16(123, -1)
		c.ToInt16(int32(123), -1)
		c.ToInt16(int64(123), -1)
		c.ToInt16(int16(12), -1)
		c.ToInt16(int8(1), -1)
		c.ToInt16(float32(123), -1)
		c.ToInt16(float64(123), -1)
		c.ToInt16("123", -1)
		c.ToInt16(ch, -1)
	}
}
|
package main
import (
//"log"
"net/http"
"time"
//"github.com/gorilla/context"
"github.com/gorilla/mux"
//"github.com/gorilla/sessions"
"io"
"os"
)
// init wires up the application subsystems before main runs: app state,
// logging, database, app config, and the minifier. Mail sending is
// currently disabled.
// NOTE(review): the call order presumably matters (e.g. logging before
// DB) — confirm before reordering.
func init() {
	InitApp()
	InitLog()
	InitDb()
	InitAppCfg()
	//InitSendMail()
	InitMinify()
}
// main builds the HTTP router, registers the page and AJAX handlers (each
// wrapped in LogReq for request logging), then blocks serving on the
// configured port.
func main() {
	r := mux.NewRouter()
	// Static assets under /public are served straight from disk.
	r.PathPrefix("/public/").Handler(http.StripPrefix("/public/", http.FileServer(http.Dir("./public"))))
	r.HandleFunc("/favicon.ico", LogReq(http_static_favicon_ico))
	//r.HandleFunc("/main", http_sess_handler)
	r.HandleFunc("/", LogReq(http_main))
	r.HandleFunc("/main", LogReq(http_main))
	r.HandleFunc("/admin", LogReq(http_admin))
	r.HandleFunc("/s", LogReq(http_search))   // search
	r.HandleFunc("/sq", LogReq(http_searchq)) // search ajax data
	r.HandleFunc("/p", LogReq(http_post_view)) // view post
	r.HandleFunc("/e", LogReq(http_post_edit)) // edit post
	r.HandleFunc("/e_ajax", LogReq(http_post_edit_ajax))
	r.HandleFunc("/login", LogReq(http_login))
	r.HandleFunc("/logout", LogReq(http_logout))
	r.HandleFunc("/auth_vk", LogReq(http_auth_vk))
	r.HandleFunc("/auth_google", LogReq(http_auth_google))
	r.HandleFunc("/publish", LogReq(http_publish))
	// output (translated from Russian "вывод"): custom 404 for unmatched routes
	r.NotFoundHandler = MakeHttpHandler(LogReq(http_404))
	srv := &http.Server{
		Handler:      r,
		Addr:         ":" + gcfg_webserver_port,
		WriteTimeout: 400 * time.Second,
		ReadTimeout:  400 * time.Second,
	}
	LogPrint("start listening port: " + gcfg_webserver_port)
	err := srv.ListenAndServe()
	// ListenAndServe only returns on failure: log the error and exit.
	LogPrintErrAndExit("ERROR start listening port: "+gcfg_webserver_port+" ", err)
}
// http_static_favicon_ico streams the favicon from the public directory.
func http_static_favicon_ico(w http.ResponseWriter, r *http.Request) {
	filename := apppath + "/public/favicon.ico"
	f, err := os.Open(filename) // equivalent to the original OpenFile(..., O_RDONLY, 0000)
	if err != nil {
		ShowError("http_static_favicon_ico: OpenFile error", err, w, r)
		return
	}
	// The original leaked the file handle on every request.
	defer f.Close()
	io.Copy(w, f)
}
// http_main renders the landing page with an empty template data map.
func http_main(w http.ResponseWriter, r *http.Request) {
	d := map[string]interface{}{}
	RenderTemplate(w, r, d, "maintemplate.html", "main.html")
	//w.Write([]byte("привет ворлд"))
	//context.Get(r, "nextfunc").(func(http.ResponseWriter, *http.Request))(w, r)
}
// http_404 renders the custom "page not found" template; it is installed
// as the router's NotFoundHandler.
func http_404(w http.ResponseWriter, r *http.Request) {
	//w.Write([]byte("404\npage not found\n\n\n"))
	d := map[string]interface{}{}
	RenderTemplate(w, r, d, "maintemplate.html", "error404.html")
}
// user_check_access is an access-control hook for handlers.
// NOTE(review): currently a stub that always grants access — confirm
// whether real checks are still pending.
func user_check_access(w http.ResponseWriter, r *http.Request, d map[string]interface{}) error {
	return nil
}
|
package exchange
import (
"context"
"encoding/json"
"encoding/xml"
"fmt"
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"testing"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/exchange/entities"
"github.com/prebid/prebid-server/openrtb_ext"
"github.com/prebid/prebid-server/prebid_cache_client"
"github.com/prebid/prebid-server/util/ptrutil"
"github.com/stretchr/testify/assert"
)
func TestMakeVASTGiven(t *testing.T) {
const expect = `<VAST version="3.0"></VAST>`
bid := &openrtb2.Bid{
AdM: expect,
}
vast := makeVAST(bid)
assert.Equal(t, expect, vast)
}
func TestMakeVASTNurl(t *testing.T) {
const url = "http://domain.com/win-notify/1"
const expect = `<VAST version="3.0"><Ad><Wrapper>` +
`<AdSystem>prebid.org wrapper</AdSystem>` +
`<VASTAdTagURI><![CDATA[` + url + `]]></VASTAdTagURI>` +
`<Impression></Impression><Creatives></Creatives>` +
`</Wrapper></Ad></VAST>`
bid := &openrtb2.Bid{
NURL: url,
}
vast := makeVAST(bid)
assert.Equal(t, expect, vast)
}
// TestBuildCacheString checks that DebugLog.BuildCacheString wraps the raw
// request/headers/response strings in XML-style tags, strips characters
// matched by Regexp, and assembles the combined CacheString.
func TestBuildCacheString(t *testing.T) {
	testCases := []struct {
		description      string
		debugLog         DebugLog
		expectedDebugLog DebugLog
	}{
		{
			description: "DebugLog strings should have tags and be formatted",
			debugLog: DebugLog{
				Data: DebugData{
					Request:  "test request string",
					Headers:  "test headers string",
					Response: "test response string",
				},
				Regexp: regexp.MustCompile(`[<>]`),
			},
			expectedDebugLog: DebugLog{
				Data: DebugData{
					Request:  "<Request>test request string</Request>",
					Headers:  "<Headers>test headers string</Headers>",
					Response: "<Response>test response string</Response>",
				},
				Regexp: regexp.MustCompile(`[<>]`),
			},
		},
		{
			description: "DebugLog strings should have no < or > characters",
			debugLog: DebugLog{
				Data: DebugData{
					Request:  "<test>test request string</test>",
					Headers:  "test <headers string",
					Response: "test <response> string",
				},
				Regexp: regexp.MustCompile(`[<>]`),
			},
			expectedDebugLog: DebugLog{
				Data: DebugData{
					Request:  "<Request>testtest request string/test</Request>",
					Headers:  "<Headers>test headers string</Headers>",
					Response: "<Response>test response string</Response>",
				},
				Regexp: regexp.MustCompile(`[<>]`),
			},
		},
	}
	for _, test := range testCases {
		// `test` is a per-iteration copy, so mutating it does not touch the
		// testCases slice; both sides of the assert below use this copy.
		test.expectedDebugLog.CacheString = fmt.Sprintf("%s<Log>%s%s%s</Log>", xml.Header, test.expectedDebugLog.Data.Request, test.expectedDebugLog.Data.Headers, test.expectedDebugLog.Data.Response)
		test.debugLog.BuildCacheString()
		assert.Equal(t, test.expectedDebugLog, test.debugLog, test.description)
	}
}
// TestCacheJSON executes tests for all the *.json files in cachetest.
// customcachekey.json test here verifies custom cache key not used for non-vast video
func TestCacheJSON(t *testing.T) {
	// Each directory holds JSON spec files; every file becomes a subtest
	// named after its path so failures identify the exact spec.
	for _, dir := range []string{"cachetest", "customcachekeytest", "impcustomcachekeytest", "eventscachetest"} {
		if specFiles, err := os.ReadDir(dir); err == nil {
			for _, specFile := range specFiles {
				fileName := filepath.Join(dir, specFile.Name())
				fileDisplayName := "exchange/" + fileName
				t.Run(fileDisplayName, func(t *testing.T) {
					specData, err := loadCacheSpec(fileName)
					if assert.NoError(t, err, "Failed to load contents of file %s: %v", fileDisplayName, err) {
						runCacheSpec(t, fileDisplayName, specData)
					}
				})
			}
		} else {
			t.Fatalf("Failed to read contents of directory exchange/%s: %v", dir, err)
		}
	}
}
// TestIsDebugOverrideEnabled covers the header/token matrix: the override
// is enabled only when both values are non-empty and exactly equal
// (case-sensitive).
func TestIsDebugOverrideEnabled(t *testing.T) {
	testCases := []struct {
		desc        string
		debugHeader string
		configToken string
		want        bool
	}{
		{"test debug header is empty, config token is empty", "", "", false},
		{"test debug header is present, config token is empty", "TestToken", "", false},
		{"test debug header is empty, config token is present", "", "TestToken", false},
		{"test debug header is present, config token is present, not equal", "TestToken123", "TestToken", false},
		{"test debug header is present, config token is present, equal", "TestToken", "TestToken", true},
		{"test debug header is present, config token is present, not case equal", "TestTokeN", "TestToken", false},
	}
	for _, tc := range testCases {
		got := IsDebugOverrideEnabled(tc.debugHeader, tc.configToken)
		assert.Equal(t, tc.want, got, tc.desc)
	}
}
// loadCacheSpec reads and parses a file as a test case. If something goes
// wrong, it returns an error. (The original comment named the function
// "LoadCacheSpec"; it is unexported.)
func loadCacheSpec(filename string) (*cacheSpec, error) {
	specData, err := os.ReadFile(filename)
	if err != nil {
		// Lowercase, %w-wrapped errors per Go convention (were capitalized
		// and used %v, which breaks errors.Is/As chains).
		return nil, fmt.Errorf("reading file %s: %w", filename, err)
	}
	var spec cacheSpec
	if err := json.Unmarshal(specData, &spec); err != nil {
		// Include the filename here too — the original message omitted it.
		return nil, fmt.Errorf("unmarshaling JSON from file %s: %w", filename, err)
	}
	return &spec, nil
}
// runCacheSpec cycles through the bids found in the json test cases and
// finds the highest bid of every Imp, then tests doCache() with resulting auction object.
// After caching it verifies, in O(n^2), that the cached items match the
// spec's ExpectedCacheables in count, content, type, TTL and key prefix.
func runCacheSpec(t *testing.T, fileDisplayName string, specData *cacheSpec) {
	var bid *entities.PbsOrtbBid
	winningBidsByImp := make(map[string]*entities.PbsOrtbBid)
	winningBidsByBidder := make(map[string]map[openrtb_ext.BidderName][]*entities.PbsOrtbBid)
	roundedPrices := make(map[*entities.PbsOrtbBid]string)
	bidCategory := make(map[string]string)
	// Traverse through the bid list found in the parsed in Json file
	for _, pbsBid := range specData.PbsBids {
		bid = &entities.PbsOrtbBid{
			Bid:     pbsBid.Bid,
			BidType: pbsBid.BidType,
		}
		cpm := bid.Bid.Price
		// Map this bid if it's the highest we've seen from this Imp so far
		wbid, ok := winningBidsByImp[bid.Bid.ImpID]
		if !ok || cpm > wbid.Bid.Price {
			winningBidsByImp[bid.Bid.ImpID] = bid
		}
		// Map this bid if it's the highest we've seen from this bidder so far
		if bidMap, ok := winningBidsByBidder[bid.Bid.ImpID]; ok {
			bidMap[pbsBid.Bidder] = append(bidMap[pbsBid.Bidder], bid)
		} else {
			winningBidsByBidder[bid.Bid.ImpID] = map[openrtb_ext.BidderName][]*entities.PbsOrtbBid{
				pbsBid.Bidder: {bid},
			}
		}
		// NOTE(review): this re-sorts every bidder's list on every outer
		// iteration; hoisting it below the loop would presumably give the
		// same final ordering — confirm before changing.
		for _, topBidsPerBidder := range winningBidsByBidder {
			for _, topBids := range topBidsPerBidder {
				sort.Slice(topBids, func(i, j int) bool {
					return isNewWinningBid(topBids[i].Bid, topBids[j].Bid, true)
				})
			}
		}
		if len(pbsBid.Bid.Cat) == 1 {
			bidCategory[pbsBid.Bid.ID] = pbsBid.Bid.Cat[0]
		}
		roundedPrices[bid] = strconv.FormatFloat(bid.Bid.Price, 'f', 2, 64)
	}
	ctx := context.Background()
	cache := &mockCache{}
	// Fixed three-range price granularity used by every spec.
	targData := &targetData{
		priceGranularity: openrtb_ext.PriceGranularity{
			Precision: ptrutil.ToPtr(2),
			Ranges: []openrtb_ext.GranularityRange{
				{
					Min:       0,
					Max:       5,
					Increment: 0.05,
				},
				{
					Min:       5,
					Max:       10,
					Increment: 0.1,
				},
				{
					Min:       10,
					Max:       20,
					Increment: 0.5,
				},
			},
		},
		includeWinners:    specData.TargetDataIncludeWinners,
		includeBidderKeys: specData.TargetDataIncludeBidderKeys,
		includeCacheBids:  specData.TargetDataIncludeCacheBids,
		includeCacheVast:  specData.TargetDataIncludeCacheVast,
	}
	testAuction := &auction{
		winningBids:         winningBidsByImp,
		winningBidsByBidder: winningBidsByBidder,
		roundedPrices:       roundedPrices,
	}
	evTracking := &eventTracking{
		accountID:          "TEST_ACC_ID",
		enabledForAccount:  specData.EventsDataEnabledForAccount,
		enabledForRequest:  specData.EventsDataEnabledForRequest,
		externalURL:        "http://localhost",
		auctionTimestampMs: 1234567890,
	}
	_ = testAuction.doCache(ctx, cache, targData, evTracking, &specData.BidRequest, 60, &specData.DefaultTTLs, bidCategory, &specData.DebugLog)
	if len(specData.ExpectedCacheables) > len(cache.items) {
		t.Errorf("%s: [CACHE_ERROR] Less elements were cached than expected \n", fileDisplayName)
	} else if len(specData.ExpectedCacheables) < len(cache.items) {
		t.Errorf("%s: [CACHE_ERROR] More elements were cached than expected \n", fileDisplayName)
	} else { // len(specData.ExpectedCacheables) == len(cache.items)
		// We cached the exact number of elements we expected, now we compare them side by side in n^2
		var matched int = 0
		for i, expectedCacheable := range specData.ExpectedCacheables {
			found := false
			var expectedData interface{}
			if err := json.Unmarshal(expectedCacheable.Data, &expectedData); err != nil {
				t.Fatalf("Failed to decode expectedCacheables[%d].value: %v", i, err)
			}
			if s, ok := expectedData.(string); ok && expectedCacheable.Type == prebid_cache_client.TypeJSON {
				// decode again if we have pre-encoded json string values
				if err := json.Unmarshal([]byte(s), &expectedData); err != nil {
					t.Fatalf("Failed to re-decode expectedCacheables[%d].value :%v", i, err)
				}
			}
			for j, cachedItem := range cache.items {
				var actualData interface{}
				if err := json.Unmarshal(cachedItem.Data, &actualData); err != nil {
					t.Fatalf("Failed to decode actual cache[%d].value: %s", j, err)
				}
				// Keys match on prefix only: the cached key may carry a
				// generated suffix beyond the expected portion.
				if assert.ObjectsAreEqual(expectedData, actualData) &&
					expectedCacheable.TTLSeconds == cachedItem.TTLSeconds &&
					expectedCacheable.Type == cachedItem.Type &&
					len(expectedCacheable.Key) <= len(cachedItem.Key) &&
					expectedCacheable.Key == cachedItem.Key[:len(expectedCacheable.Key)] {
					found = true
					cache.items = append(cache.items[:j], cache.items[j+1:]...) // remove matched item
					break
				}
			}
			if found {
				matched++
			} else {
				t.Errorf("%s: [CACHE_ERROR] Did not see expected cacheable #%d: type=%s, ttl=%d, value=%s", fileDisplayName, i, expectedCacheable.Type, expectedCacheable.TTLSeconds, string(expectedCacheable.Data))
			}
		}
		if matched != len(specData.ExpectedCacheables) {
			for i, item := range cache.items {
				t.Errorf("%s: [CACHE_ERROR] Got unexpected cached item #%d: type=%s, ttl=%d, value=%s", fileDisplayName, i, item.Type, item.TTLSeconds, string(item.Data))
			}
			t.FailNow()
		}
	}
}
// TestNewAuction exercises newAuction over several seat-bid maps. Fixture
// names encode the imp and the price: bid1p077 is a bid on "imp1" priced
// 0.77; a trailing "d" marks a bid carrying a DealID.
func TestNewAuction(t *testing.T) {
	bid1p077 := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID: "imp1",
			Price: 0.77,
		},
	}
	bid1p123 := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID: "imp1",
			Price: 1.23,
		},
	}
	bid1p230 := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID: "imp1",
			Price: 2.30,
		},
	}
	bid1p088d := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID:  "imp1",
			Price:  0.88,
			DealID: "SpecialDeal",
		},
	}
	bid1p166d := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID:  "imp1",
			Price:  1.66,
			DealID: "BigDeal",
		},
	}
	bid2p123 := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID: "imp2",
			Price: 1.23,
		},
	}
	bid2p144 := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID: "imp2",
			Price: 1.44,
		},
	}
	bid2p155 := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID: "imp2",
			Price: 1.55,
		},
	}
	bid2p166 := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID: "imp2",
			Price: 1.66,
		},
	}
	tests := []struct {
		description     string
		seatBids        map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid
		numImps         int
		preferDeals     bool
		expectedAuction auction
	}{
		{
			description: "Basic auction test",
			seatBids: map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid{
				"appnexus": {
					Bids: []*entities.PbsOrtbBid{&bid1p123},
				},
				"rubicon": {
					Bids: []*entities.PbsOrtbBid{&bid1p230},
				},
			},
			numImps:     1,
			preferDeals: false,
			expectedAuction: auction{
				winningBids: map[string]*entities.PbsOrtbBid{
					"imp1": &bid1p230,
				},
				winningBidsByBidder: map[string]map[openrtb_ext.BidderName][]*entities.PbsOrtbBid{
					"imp1": {
						"appnexus": []*entities.PbsOrtbBid{&bid1p123},
						"rubicon":  []*entities.PbsOrtbBid{&bid1p230},
					},
				},
			},
		},
		{
			description: "Multi-imp auction",
			seatBids: map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid{
				"appnexus": {
					Bids: []*entities.PbsOrtbBid{&bid1p230, &bid2p123},
				},
				"rubicon": {
					Bids: []*entities.PbsOrtbBid{&bid1p077, &bid2p144},
				},
				"openx": {
					Bids: []*entities.PbsOrtbBid{&bid1p123},
				},
			},
			numImps:     2,
			preferDeals: false,
			expectedAuction: auction{
				winningBids: map[string]*entities.PbsOrtbBid{
					"imp1": &bid1p230,
					"imp2": &bid2p144,
				},
				winningBidsByBidder: map[string]map[openrtb_ext.BidderName][]*entities.PbsOrtbBid{
					"imp1": {
						"appnexus": []*entities.PbsOrtbBid{&bid1p230},
						"rubicon":  []*entities.PbsOrtbBid{&bid1p077},
						"openx":    []*entities.PbsOrtbBid{&bid1p123},
					},
					"imp2": {
						"appnexus": []*entities.PbsOrtbBid{&bid2p123},
						"rubicon":  []*entities.PbsOrtbBid{&bid2p144},
					},
				},
			},
		},
		{
			// Without preferDeals the higher price wins even over a deal.
			description: "Basic auction with deals, no preference",
			seatBids: map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid{
				"appnexus": {
					Bids: []*entities.PbsOrtbBid{&bid1p123},
				},
				"rubicon": {
					Bids: []*entities.PbsOrtbBid{&bid1p088d},
				},
			},
			numImps:     1,
			preferDeals: false,
			expectedAuction: auction{
				winningBids: map[string]*entities.PbsOrtbBid{
					"imp1": &bid1p123,
				},
				winningBidsByBidder: map[string]map[openrtb_ext.BidderName][]*entities.PbsOrtbBid{
					"imp1": {
						"appnexus": []*entities.PbsOrtbBid{&bid1p123},
						"rubicon":  []*entities.PbsOrtbBid{&bid1p088d},
					},
				},
			},
		},
		{
			// With preferDeals the lower-priced deal bid beats the open bid.
			description: "Basic auction with deals, prefer deals",
			seatBids: map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid{
				"appnexus": {
					Bids: []*entities.PbsOrtbBid{&bid1p123},
				},
				"rubicon": {
					Bids: []*entities.PbsOrtbBid{&bid1p088d},
				},
			},
			numImps:     1,
			preferDeals: true,
			expectedAuction: auction{
				winningBids: map[string]*entities.PbsOrtbBid{
					"imp1": &bid1p088d,
				},
				winningBidsByBidder: map[string]map[openrtb_ext.BidderName][]*entities.PbsOrtbBid{
					"imp1": {
						"appnexus": []*entities.PbsOrtbBid{&bid1p123},
						"rubicon":  []*entities.PbsOrtbBid{&bid1p088d},
					},
				},
			},
		},
		{
			// Between two deal bids the higher price wins.
			description: "Auction with 2 deals",
			seatBids: map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid{
				"appnexus": {
					Bids: []*entities.PbsOrtbBid{&bid1p166d},
				},
				"rubicon": {
					Bids: []*entities.PbsOrtbBid{&bid1p088d},
				},
			},
			numImps:     1,
			preferDeals: true,
			expectedAuction: auction{
				winningBids: map[string]*entities.PbsOrtbBid{
					"imp1": &bid1p166d,
				},
				winningBidsByBidder: map[string]map[openrtb_ext.BidderName][]*entities.PbsOrtbBid{
					"imp1": {
						"appnexus": []*entities.PbsOrtbBid{&bid1p166d},
						"rubicon":  []*entities.PbsOrtbBid{&bid1p088d},
					},
				},
			},
		},
		{
			// A deal beats even a higher-priced non-deal when preferDeals is set.
			description: "Auction with 3 bids and 2 deals",
			seatBids: map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid{
				"appnexus": {
					Bids: []*entities.PbsOrtbBid{&bid1p166d},
				},
				"rubicon": {
					Bids: []*entities.PbsOrtbBid{&bid1p088d},
				},
				"openx": {
					Bids: []*entities.PbsOrtbBid{&bid1p230},
				},
			},
			numImps:     1,
			preferDeals: true,
			expectedAuction: auction{
				winningBids: map[string]*entities.PbsOrtbBid{
					"imp1": &bid1p166d,
				},
				winningBidsByBidder: map[string]map[openrtb_ext.BidderName][]*entities.PbsOrtbBid{
					"imp1": {
						"appnexus": []*entities.PbsOrtbBid{&bid1p166d},
						"rubicon":  []*entities.PbsOrtbBid{&bid1p088d},
						"openx":    []*entities.PbsOrtbBid{&bid1p230},
					},
				},
			},
		},
		{
			// NOTE(review): numImps is 1 here although the bids cover two
			// imps and the expectation includes "imp2" — presumably numImps
			// is only a sizing hint to newAuction; confirm.
			description: "Auction with 3 bids and 2 deals - multiple bids under each seatBids",
			seatBids: map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid{
				"appnexus": {
					Bids: []*entities.PbsOrtbBid{&bid1p166d, &bid1p077, &bid2p123, &bid2p144},
				},
				"pubmatic": {
					Bids: []*entities.PbsOrtbBid{&bid1p088d, &bid1p123, &bid2p155, &bid2p166},
				},
			},
			numImps:     1,
			preferDeals: true,
			expectedAuction: auction{
				winningBids: map[string]*entities.PbsOrtbBid{
					"imp1": &bid1p166d,
					"imp2": &bid2p166,
				},
				winningBidsByBidder: map[string]map[openrtb_ext.BidderName][]*entities.PbsOrtbBid{
					"imp1": {
						"appnexus": []*entities.PbsOrtbBid{&bid1p166d, &bid1p077},
						"pubmatic": []*entities.PbsOrtbBid{&bid1p088d, &bid1p123},
					},
					"imp2": {
						"appnexus": []*entities.PbsOrtbBid{&bid2p123, &bid2p144},
						"pubmatic": []*entities.PbsOrtbBid{&bid2p155, &bid2p166},
					},
				},
			},
		},
	}
	for _, test := range tests {
		auc := newAuction(test.seatBids, test.numImps, test.preferDeals)
		assert.Equal(t, test.expectedAuction, *auc, test.description)
	}
}
// TestValidateAndUpdateMultiBid exercises auction.validateAndUpdateMultiBid,
// which (a) re-orders each bidder's per-imp bids (deals first when preferDeals
// is set, then descending price) and (b) truncates both winningBidsByBidder
// and the adapters' seat bids to accountDefaultBidLimit bids per imp per
// bidder; a limit of 0 means "no limit" (ordering is still applied).
func TestValidateAndUpdateMultiBid(t *testing.T) {
	// create new bids for new test cases since the last one changes a few bids. Ex marks bid1p001.Bid = nil
	// Naming scheme: bid<imp>p<price>[d] — e.g. bid1p166d is a 1.66 deal bid on imp1.
	bid1p001 := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID: "imp1",
			Price: 0.01,
		},
	}
	bid1p077 := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID: "imp1",
			Price: 0.77,
		},
	}
	bid1p123 := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID: "imp1",
			Price: 1.23,
		},
	}
	bid1p088d := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID:  "imp1",
			Price:  0.88,
			DealID: "SpecialDeal",
		},
	}
	bid1p166d := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID:  "imp1",
			Price:  1.66,
			DealID: "BigDeal",
		},
	}
	bid2p123 := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID: "imp2",
			Price: 1.23,
		},
	}
	bid2p144 := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID: "imp2",
			Price: 1.44,
		},
	}
	bid2p155 := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID: "imp2",
			Price: 1.55,
		},
	}
	bid2p166 := entities.PbsOrtbBid{
		Bid: &openrtb2.Bid{
			ImpID: "imp2",
			Price: 1.66,
		},
	}
	// fields is the pre-existing auction state the method mutates.
	type fields struct {
		winningBids         map[string]*entities.PbsOrtbBid
		winningBidsByBidder map[string]map[openrtb_ext.BidderName][]*entities.PbsOrtbBid
		roundedPrices       map[*entities.PbsOrtbBid]string
		cacheIds            map[*openrtb2.Bid]string
		vastCacheIds        map[*openrtb2.Bid]string
	}
	// args are the inputs to validateAndUpdateMultiBid.
	type args struct {
		adapterBids            map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid
		preferDeals            bool
		accountDefaultBidLimit int
	}
	// want captures both mutated outputs: the auction's per-imp/per-bidder
	// bid lists and the (possibly truncated) adapter seat bids.
	type want struct {
		winningBidsByBidder map[string]map[openrtb_ext.BidderName][]*entities.PbsOrtbBid
		adapterBids         map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid
	}
	tests := []struct {
		description string
		fields      fields
		args        args
		want        want
	}{
		{
			// Limit 0: nothing is dropped, but per-imp lists are re-sorted
			// (deal bids first, then by price descending).
			description: "DefaultBidLimit is 0 (default value)",
			fields: fields{
				winningBids: map[string]*entities.PbsOrtbBid{
					"imp1": &bid1p166d,
					"imp2": &bid2p166,
				},
				winningBidsByBidder: map[string]map[openrtb_ext.BidderName][]*entities.PbsOrtbBid{
					"imp1": {
						"appnexus": []*entities.PbsOrtbBid{&bid1p001, &bid1p166d, &bid1p077},
						"pubmatic": []*entities.PbsOrtbBid{&bid1p088d, &bid1p123},
					},
					"imp2": {
						"appnexus": []*entities.PbsOrtbBid{&bid2p123, &bid2p144},
						"pubmatic": []*entities.PbsOrtbBid{&bid2p155, &bid2p166},
					},
				},
			},
			args: args{
				adapterBids: map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid{
					"appnexus": {
						Bids: []*entities.PbsOrtbBid{&bid1p001, &bid1p166d, &bid1p077, &bid2p123, &bid2p144},
					},
					"pubmatic": {
						Bids: []*entities.PbsOrtbBid{&bid1p088d, &bid1p123, &bid2p155, &bid2p166},
					},
				},
				accountDefaultBidLimit: 0,
				preferDeals:            true,
			},
			want: want{
				winningBidsByBidder: map[string]map[openrtb_ext.BidderName][]*entities.PbsOrtbBid{
					"imp1": {
						"appnexus": []*entities.PbsOrtbBid{&bid1p166d, &bid1p077, &bid1p001},
						"pubmatic": []*entities.PbsOrtbBid{&bid1p088d, &bid1p123},
					},
					"imp2": {
						"appnexus": []*entities.PbsOrtbBid{&bid2p144, &bid2p123},
						"pubmatic": []*entities.PbsOrtbBid{&bid2p166, &bid2p155},
					},
				},
				adapterBids: map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid{
					"appnexus": {
						Bids: []*entities.PbsOrtbBid{&bid1p001, &bid1p166d, &bid1p077, &bid2p123, &bid2p144},
					},
					"pubmatic": {
						Bids: []*entities.PbsOrtbBid{&bid1p088d, &bid1p123, &bid2p155, &bid2p166},
					},
				},
			},
		},
		{
			// Limit 3: appnexus has exactly 3 bids on imp1, so nothing is
			// dropped — seat bids are unchanged, lists just re-sorted.
			description: "Adapters bid count per imp within DefaultBidLimit",
			fields: fields{
				winningBids: map[string]*entities.PbsOrtbBid{
					"imp1": &bid1p166d,
					"imp2": &bid2p166,
				},
				winningBidsByBidder: map[string]map[openrtb_ext.BidderName][]*entities.PbsOrtbBid{
					"imp1": {
						"appnexus": []*entities.PbsOrtbBid{&bid1p001, &bid1p166d, &bid1p077},
						"pubmatic": []*entities.PbsOrtbBid{&bid1p088d, &bid1p123},
					},
					"imp2": {
						"appnexus": []*entities.PbsOrtbBid{&bid2p123, &bid2p144},
						"pubmatic": []*entities.PbsOrtbBid{&bid2p155, &bid2p166},
					},
				},
			},
			args: args{
				adapterBids: map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid{
					"appnexus": {
						Bids: []*entities.PbsOrtbBid{&bid1p001, &bid1p166d, &bid1p077, &bid2p123, &bid2p144},
					},
					"pubmatic": {
						Bids: []*entities.PbsOrtbBid{&bid1p088d, &bid1p123, &bid2p155, &bid2p166},
					},
				},
				accountDefaultBidLimit: 3,
				preferDeals:            true,
			},
			want: want{
				winningBidsByBidder: map[string]map[openrtb_ext.BidderName][]*entities.PbsOrtbBid{
					"imp1": {
						"appnexus": []*entities.PbsOrtbBid{&bid1p166d, &bid1p077, &bid1p001},
						"pubmatic": []*entities.PbsOrtbBid{&bid1p088d, &bid1p123},
					},
					"imp2": {
						"appnexus": []*entities.PbsOrtbBid{&bid2p144, &bid2p123},
						"pubmatic": []*entities.PbsOrtbBid{&bid2p166, &bid2p155},
					},
				},
				adapterBids: map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid{
					"appnexus": {
						Bids: []*entities.PbsOrtbBid{&bid1p001, &bid1p166d, &bid1p077, &bid2p123, &bid2p144},
					},
					"pubmatic": {
						Bids: []*entities.PbsOrtbBid{&bid1p088d, &bid1p123, &bid2p155, &bid2p166},
					},
				},
			},
		},
		{
			// Limit 2: appnexus's lowest imp1 bid (0.01) is dropped from both
			// winningBidsByBidder and the appnexus seat bids.
			description: "Adapters bid count per imp more than DefaultBidLimit",
			fields: fields{
				winningBids: map[string]*entities.PbsOrtbBid{
					"imp1": &bid1p166d,
					"imp2": &bid2p166,
				},
				winningBidsByBidder: map[string]map[openrtb_ext.BidderName][]*entities.PbsOrtbBid{
					"imp1": {
						"appnexus": []*entities.PbsOrtbBid{&bid1p001, &bid1p166d, &bid1p077},
						"pubmatic": []*entities.PbsOrtbBid{&bid1p088d, &bid1p123},
					},
					"imp2": {
						"appnexus": []*entities.PbsOrtbBid{&bid2p123, &bid2p144},
						"pubmatic": []*entities.PbsOrtbBid{&bid2p155, &bid2p166},
					},
				},
			},
			args: args{
				adapterBids: map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid{
					"appnexus": {
						Bids: []*entities.PbsOrtbBid{&bid1p001, &bid1p166d, &bid1p077, &bid2p123, &bid2p144},
					},
					"pubmatic": {
						Bids: []*entities.PbsOrtbBid{&bid1p088d, &bid1p123, &bid2p155, &bid2p166},
					},
				},
				accountDefaultBidLimit: 2,
				preferDeals:            true,
			},
			want: want{
				winningBidsByBidder: map[string]map[openrtb_ext.BidderName][]*entities.PbsOrtbBid{
					"imp1": {
						"appnexus": []*entities.PbsOrtbBid{&bid1p166d, &bid1p077},
						"pubmatic": []*entities.PbsOrtbBid{&bid1p088d, &bid1p123},
					},
					"imp2": {
						"appnexus": []*entities.PbsOrtbBid{&bid2p144, &bid2p123},
						"pubmatic": []*entities.PbsOrtbBid{&bid2p166, &bid2p155},
					},
				},
				adapterBids: map[openrtb_ext.BidderName]*entities.PbsOrtbSeatBid{
					"appnexus": {
						Bids: []*entities.PbsOrtbBid{&bid1p166d, &bid1p077, &bid2p123, &bid2p144},
					},
					"pubmatic": {
						Bids: []*entities.PbsOrtbBid{&bid1p088d, &bid1p123, &bid2p155, &bid2p166},
					},
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.description, func(t *testing.T) {
			a := &auction{
				winningBids:         tt.fields.winningBids,
				winningBidsByBidder: tt.fields.winningBidsByBidder,
				roundedPrices:       tt.fields.roundedPrices,
				cacheIds:            tt.fields.cacheIds,
				vastCacheIds:        tt.fields.vastCacheIds,
			}
			a.validateAndUpdateMultiBid(tt.args.adapterBids, tt.args.preferDeals, tt.args.accountDefaultBidLimit)
			// The method mutates both its receiver's maps and adapterBids in place.
			assert.Equal(t, tt.want.winningBidsByBidder, tt.fields.winningBidsByBidder, tt.description)
			assert.Equal(t, tt.want.adapterBids, tt.args.adapterBids, tt.description)
		})
	}
}
// cacheSpec is the JSON fixture format for cache tests: a bid request, the
// bids produced for it, the cache entries expected to be written, plus the
// targeting/eventing switches that influence what gets cached.
type cacheSpec struct {
	BidRequest                  openrtb2.BidRequest             `json:"bidRequest"`
	PbsBids                     []pbsBid                        `json:"pbsBids"`
	ExpectedCacheables          []prebid_cache_client.Cacheable `json:"expectedCacheables"`
	DefaultTTLs                 config.DefaultTTLs              `json:"defaultTTLs"`
	TargetDataIncludeWinners    bool                            `json:"targetDataIncludeWinners"`
	TargetDataIncludeBidderKeys bool                            `json:"targetDataIncludeBidderKeys"`
	TargetDataIncludeCacheBids  bool                            `json:"targetDataIncludeCacheBids"`
	TargetDataIncludeCacheVast  bool                            `json:"targetDataIncludeCacheVast"`
	EventsDataEnabledForAccount bool                            `json:"eventsDataEnabledForAccount"`
	EventsDataEnabledForRequest bool                            `json:"eventsDataEnabledForRequest"`
	DebugLog                    DebugLog                        `json:"debugLog,omitempty"`
}
// pbsBid pairs an OpenRTB bid with its media type and the bidder it came
// from, as deserialized from cacheSpec fixtures.
type pbsBid struct {
	Bid     *openrtb2.Bid           `json:"bid"`
	BidType openrtb_ext.BidType     `json:"bidType"`
	Bidder  openrtb_ext.BidderName  `json:"bidder"`
}
// mockCache is a test double for the prebid cache client: it records the
// cacheables handed to PutJson so assertions can inspect them.
type mockCache struct {
	scheme string
	host   string
	path   string
	items  []prebid_cache_client.Cacheable
}

// GetExtCacheData returns the configured external cache location parts.
func (c *mockCache) GetExtCacheData() (scheme string, host string, path string) {
	return c.scheme, c.host, c.path
}

// GetPutUrl satisfies the client interface; the mock has no real endpoint.
func (c *mockCache) GetPutUrl() string {
	return ""
}

// PutJson captures values for later inspection and returns five empty IDs
// with no errors (enough for the tests that use this mock).
func (c *mockCache) PutJson(ctx context.Context, values []prebid_cache_client.Cacheable) ([]string, []error) {
	c.items = values
	return []string{"", "", "", "", ""}, nil
}
|
package repository
import (
"errors"
"time"
"github.com/darren-west/app/user-service/models"
"gopkg.in/mgo.v2"
"github.com/hashicorp/errwrap"
)
// EmptyMatcher matches every document.
//
// NOTE(review): Matcher is a map, so EmptyMatcher is shared mutable package
// state — chaining WithID/WithFirstName off it would permanently mutate the
// package-wide matcher. Callers must treat it as read-only; confirm no call
// site chains off EmptyMatcher.
var EmptyMatcher = NewMatcher()

// Matcher is a mongo query document: field name -> required value.
type Matcher map[string]interface{}

// WithID adds an "id" equality constraint. The receiver map itself is
// mutated; m is returned only for chaining.
func (m Matcher) WithID(id string) Matcher {
	m["id"] = id
	return m
}

// WithFirstName adds a first-name equality constraint. It filters on the
// "name" field — presumably the stored document's first-name key; verify
// against models.UserInfo's bson tags.
func (m Matcher) WithFirstName(name string) Matcher {
	m["name"] = name
	return m
}

// NewMatcher returns a fresh, empty Matcher ready for chaining.
func NewMatcher() Matcher {
	return Matcher(make(map[string]interface{}))
}
// Options holds the mongo connection settings for MongoUserRepository.
type Options struct {
	ConnectionString string // mgo dial string, e.g. "mongodb://host:27017"
	DatabaseName     string
	CollectionName   string
}

// MongoUserRepository stores users in a mongo collection. Each operation
// runs on a clone of the root session (see run), so value receivers are
// safe for concurrent use.
type MongoUserRepository struct {
	options Options
	session *mgo.Session
}
// IsErrDuplicateUser reports whether err is a mongo duplicate-key error,
// i.e. an insert collided with the unique "id" index.
func IsErrDuplicateUser(err error) bool {
	return mgo.IsDup(err)
}

// IsErrUserNotFound reports whether err (or any error it wraps) is the
// "user not found" error produced by this package's lookups.
func IsErrUserNotFound(err error) bool {
	return errwrap.Contains(err, "user not found")
}
// FindUser returns the first user matching m. When no document matches,
// the returned error wraps "user not found" and satisfies IsErrUserNotFound.
func (r MongoUserRepository) FindUser(m Matcher) (models.UserInfo, error) {
	var found models.UserInfo
	err := r.run(func(c *mgo.Collection) error {
		return c.Find(m).One(&found)
	})
	if err == mgo.ErrNotFound {
		return found, errwrap.Wrap(errors.New("user not found"), err)
	}
	return found, err
}
// ListUsers returns every user matching m. A nil slice with a nil error
// means no documents matched.
func (r MongoUserRepository) ListUsers(m Matcher) ([]models.UserInfo, error) {
	var result []models.UserInfo
	err := r.run(func(c *mgo.Collection) error {
		return c.Find(m).All(&result)
	})
	return result, err
}

// CreateUser inserts user into the collection. Inserting a duplicate id
// yields an error satisfying IsErrDuplicateUser.
func (r MongoUserRepository) CreateUser(user models.UserInfo) error {
	return r.run(func(c *mgo.Collection) error {
		return c.Insert(&user)
	})
}
// RemoveUser deletes a single user matching m. A missing user yields an
// error satisfying IsErrUserNotFound.
func (r MongoUserRepository) RemoveUser(m Matcher) error {
	err := r.run(func(c *mgo.Collection) error {
		return c.Remove(m)
	})
	if err == mgo.ErrNotFound {
		return errwrap.Wrap(errors.New("user not found"), err)
	}
	return err
}

// UpdateUser replaces the stored document whose id equals user.ID.
// A missing user yields an error satisfying IsErrUserNotFound.
func (r MongoUserRepository) UpdateUser(user models.UserInfo) error {
	selector := NewMatcher().WithID(user.ID)
	err := r.run(func(c *mgo.Collection) error {
		return c.Update(selector, &user)
	})
	if err == mgo.ErrNotFound {
		return errwrap.Wrap(errors.New("user not found"), err)
	}
	return err
}
// Options returns a copy of the repository's configuration.
func (r MongoUserRepository) Options() Options {
	return r.options
}

// run executes f against the configured collection on a clone of the root
// session. The clone is always closed, returning its socket to the pool,
// so callers never leak connections.
func (r MongoUserRepository) run(f func(c *mgo.Collection) error) error {
	session := r.session.Clone()
	defer session.Close()
	return f(session.DB(r.options.DatabaseName).C(r.options.CollectionName))
}
// NewMongoUserRepository applies opts, dials mongo with a 30s timeout,
// ensures a unique index on "id" (backing IsErrDuplicateUser), and returns
// the ready repository. Any error leaves the returned repo unusable.
func NewMongoUserRepository(opts ...Option) (repo MongoUserRepository, err error) {
	repo = MongoUserRepository{
		options: Options{},
	}
	for _, opt := range opts {
		// Bug fix: option errors were previously discarded even though the
		// Option type returns one.
		if err = opt(&repo.options); err != nil {
			return
		}
	}
	session, err := mgo.DialWithTimeout(repo.options.ConnectionString, time.Second*30)
	if err != nil {
		return
	}
	repo.session = session
	session.SetMode(mgo.Monotonic, true)
	err = session.DB(repo.options.DatabaseName).C(repo.options.CollectionName).EnsureIndex(mgo.Index{
		Key:    []string{"id"},
		Unique: true,
	})
	return
}
// Option mutates Options during repository construction.
type Option func(*Options) error

// WithConnectionString sets the mongo dial string.
func WithConnectionString(connectionString string) Option {
	return func(o *Options) error {
		o.ConnectionString = connectionString
		return nil
	}
}

// WithDatabaseName sets the database the repository operates on.
func WithDatabaseName(db string) Option {
	return func(o *Options) error {
		o.DatabaseName = db
		return nil
	}
}

// WithCollectionName sets the collection the repository operates on.
func WithCollectionName(c string) Option {
	return func(o *Options) error {
		o.CollectionName = c
		return nil
	}
}
|
// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package network
import (
"context"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"chromiumos/tast/common/shillconst"
"chromiumos/tast/local/network"
"chromiumos/tast/local/shill"
"chromiumos/tast/local/upstart"
"chromiumos/tast/testing"
"chromiumos/tast/testing/hwdep"
)
// init registers the DefaultProfile test with the tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: DefaultProfile,
		Desc: "Checks shill's default network profile",
		Contacts: []string{
			"stevenjb@chromium.org", // Connectivity team
			"cros-networking@google.com",
			"nya@chromium.org", // Tast port author
		},
		Attr:         []string{"group:mainline"},
		HardwareDeps: hwdep.D(hwdep.SkipOnModel("winky")), // b/182293895: winky DUTs are having USB Ethernet issues that surface during `restart shill`
	})
}
// DefaultProfile deletes shill's on-disk default profile, restarts shill so
// a fresh one is generated, waits for the new profile to appear both on disk
// and in shill's profile stack, then verifies it contains the expected
// default settings.
func DefaultProfile(ctx context.Context, s *testing.State) {
	expectedSettings := []string{
		"CheckPortalList=ethernet,wifi,cellular",
		"IgnoredDNSSearchPaths=gateway.2wire.net",
	}
	// We lose connectivity briefly. Tell recover_duts not to worry.
	unlock, err := network.LockCheckNetworkHook(ctx)
	if err != nil {
		s.Fatal("Failed to lock the check network hook: ", err)
	}
	defer unlock()
	// Stop shill temporarily and remove the default profile.
	if err := upstart.StopJob(ctx, "shill"); err != nil {
		s.Fatal("Failed stopping shill: ", err)
	}
	// Best effort: the profile may legitimately not exist yet, so the
	// error is intentionally ignored.
	os.Remove(shillconst.DefaultProfilePath)
	if err := upstart.RestartJob(ctx, "shill"); err != nil {
		s.Fatal("Failed starting shill: ", err)
	}
	manager, err := shill.NewManager(ctx)
	if err != nil {
		s.Fatal("Failed creating shill manager proxy: ", err)
	}
	// Wait for default profile creation (bounded to 5s).
	func() {
		ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
		defer cancel()
		isDefaultProfileReady := func() bool {
			// The profile file must exist on disk...
			if _, err := os.Stat(shillconst.DefaultProfilePath); err != nil {
				return false
			}
			// ...and shill must have pushed it onto its profile stack.
			paths, err := manager.ProfilePaths(ctx)
			if err != nil {
				s.Fatal("Failed getting profiles: ", err)
			}
			for _, p := range paths {
				if p == shillconst.DefaultProfileObjectPath {
					return true
				}
			}
			return false
		}
		for !isDefaultProfileReady() {
			if err := testing.Sleep(ctx, 100*time.Millisecond); err != nil {
				s.Fatal("Timed out waiting for the default profile to get ready: ", err)
			}
		}
	}()
	// Read the default profile and check expected settings.
	b, err := ioutil.ReadFile(shillconst.DefaultProfilePath)
	if err != nil {
		s.Fatal("Failed reading the default profile: ", err)
	}
	// Bug fix: the WriteFile error was silently discarded. Failing to save
	// the debugging artifact is reported but does not abort the test.
	if err := ioutil.WriteFile(filepath.Join(s.OutDir(), "default.profile"), b, 0644); err != nil {
		s.Error("Failed to save the default profile: ", err)
	}
	profile := string(b)
	for _, es := range expectedSettings {
		if !strings.Contains(profile, es) {
			s.Error("Expected setting not found: ", es)
		}
	}
}
|
package main
import (
"context"
"flag"
"fmt"
"os"
"github.com/google/subcommands"
"github.com/malice-plugins/go-plugin-utils/utils"
)
// crackCmd implements subcommands.Command: it runs john-the-ripper against
// a password file supplied as the single positional argument.
type crackCmd struct {
}

// Name returns the subcommand name used on the command line.
func (p *crackCmd) Name() string {
	return "crack"
}

// Synopsis returns the one-line description shown in help output.
func (p *crackCmd) Synopsis() string {
	return "crack a password file"
}

// Usage returns the usage string shown in help output.
func (p *crackCmd) Usage() string {
	return "crack <password file>"
}

// SetFlags registers command-specific flags; crack takes none.
func (p *crackCmd) SetFlags(f *flag.FlagSet) {
}
// Execute validates the single password-file argument, probes that the file
// is openable, and runs john-the-ripper against it, printing the tool's
// output. Returns ExitUsageError for a bad argument; john failures are
// printed but still yield ExitSuccess.
func (p *crackCmd) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
	args := f.Args()
	if len(args) != 1 {
		fmt.Println("target password file is must", len(args))
		return subcommands.ExitUsageError
	}
	file := args[0]
	// Probe that the target file exists and is readable.
	fd, err := os.Open(file)
	// Bug fix: the old check "os.IsNotExist(err) || err != nil" was
	// redundant — os.IsNotExist(err) already implies err != nil.
	if err != nil {
		fmt.Println("target password file is must", err)
		return subcommands.ExitUsageError
	}
	fd.Close()
	/*
		docker run -it -v `pwd`/yourfiletocrack:/crackme.txt adamoss/john-the-ripper /crackme.txt
	*/
	ctx := context.TODO()
	r, err := utils.RunCommand(ctx, "/usr/bin/john", file)
	if err != nil {
		fmt.Println(r, err)
	} else {
		fmt.Println(r)
	}
	return subcommands.ExitSuccess
}
|
package main
import "fmt"
// Integer is a named int demonstrating methods defined on a basic type.
type Integer int

// Add returns the sum of the receiver's value and b. The pointer receiver
// means the method set of *Integer (not Integer) contains Add.
func (a *Integer) Add(b Integer) Integer {
	sum := *a + b
	return sum
}
// main shows calling a pointer-receiver method through a type assertion on
// an interface value holding *Integer. Prints "3".
func main() {
	var x Integer = 1
	var y Integer = 2
	var boxed interface{} = &x
	// The assertion panics if boxed does not hold *Integer — intentional
	// for this demo.
	result := boxed.(*Integer).Add(y)
	fmt.Println(result)
}
|
package public
import (
"github.com/google/go-querystring/query"
"github.com/pkg/errors"
"github.com/potix/gobitflyer/api/types"
"github.com/potix/gobitflyer/client"
)
const (
	// getBoardPath is the REST path of bitFlyer's public order-book endpoint.
	getBoardPath string = "/v1/getboard"
)
// GetBoardResponse is the order book returned by /v1/getboard: the mid
// price plus bid and ask levels.
type GetBoardResponse struct {
	MidPrice float64         `json:"mid_price"`
	Bids     []*GetBoardBook `json:"bids"`
	Asks     []*GetBoardBook `json:"asks"`
}

// Clone returns a deep copy of r, or nil when r is nil. Mutating the
// clone's books never affects the original (and vice versa). Nil Bids/Asks
// slices stay nil in the copy.
func (r *GetBoardResponse) Clone() (*GetBoardResponse) {
	if r == nil {
		return nil
	}
	clone := &GetBoardResponse{MidPrice: r.MidPrice}
	if r.Bids != nil {
		clone.Bids = make([]*GetBoardBook, len(r.Bids))
		for i, book := range r.Bids {
			clone.Bids[i] = &GetBoardBook{Price: book.Price, Size: book.Size}
		}
	}
	if r.Asks != nil {
		clone.Asks = make([]*GetBoardBook, len(r.Asks))
		for i, book := range r.Asks {
			clone.Asks[i] = &GetBoardBook{Price: book.Price, Size: book.Size}
		}
	}
	return clone
}

// GetBoardBook is a single price level in the order book.
type GetBoardBook struct {
	Price float64 `json:"price"`
	Size  float64 `json:"size"`
}
// GetBoardRequest describes one getboard API call. Path is excluded from
// the generated query string (`url:"-"`); ProductCode becomes the
// product_code query parameter when non-empty.
type GetBoardRequest struct {
	Path        string            `url:"-"`
	ProductCode types.ProductCode `url:"product_code,omitempty"`
}
// CreateHTTPRequest encodes the request's fields as a query string and
// builds the unauthenticated GET request for the getboard endpoint.
func (b *GetBoardRequest) CreateHTTPRequest(endpoint string) (*client.HTTPRequest, error) {
	v, err := query.Values(b)
	if err != nil {
		return nil, errors.Wrapf(err, "can not create query of get board request")
	}
	// Idiom fix: the local was previously named "query", shadowing the
	// imported go-querystring package used just above.
	encoded := v.Encode()
	pathQuery := b.Path + "?" + encoded
	return &client.HTTPRequest{
		PathQuery: pathQuery,
		URL:       endpoint + pathQuery,
		Method:    "GET",
		Headers:   nil,
		Body:      nil,
	}, nil
}
// NewGetBoardRequest builds a GetBoardRequest for productCode targeting the
// fixed getboard endpoint path.
func NewGetBoardRequest(productCode types.ProductCode) (*GetBoardRequest) {
	req := &GetBoardRequest{Path: getBoardPath}
	req.ProductCode = productCode
	return req
}
|
package helper
import (
"encoding/csv"
"log"
"os"
"time"
"bitbucket.org/babulal107/go-app/config"
)
// GenerateCSV writes data as CSV rows to an export file named
// "<fileName>_<timestamp><ext>" under the configured export path, creating
// or replacing the file. The file is closed via the package Close helper.
//
// Bug fixes vs. the previous version:
//   - os.O_TRUNC added: without it, overwriting a longer pre-existing file
//     left trailing bytes of the old content behind.
//   - log.Fatal removed: it called os.Exit, killing the whole process and
//     making the error returns unreachable; errors are now logged and
//     returned to the caller.
func GenerateCSV(fileName string, data [][]string) error {
	path := config.FileExportPath + GetFileName(fileName) + config.FileExtenuationCSV
	file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		log.Println("Cannot create file", err)
		return err
	}
	defer Close(file)
	writer := csv.NewWriter(file)
	// WriteAll flushes on success; the deferred Flush covers early returns.
	defer writer.Flush()
	if err = writer.WriteAll(data); err != nil {
		log.Println("Cannot write to file", err)
		return err
	}
	return nil
}
// CheckError logs message and err and terminates the process when err is
// non-nil. Note: log.Fatal calls os.Exit(1), so deferred functions do not
// run — use only where aborting is acceptable.
func CheckError(message string, err error) {
	if err != nil {
		log.Fatal(message, err)
	}
}

// GetFileName returns fileName suffixed with the current timestamp rendered
// in config.TimeStampFormat, e.g. "export_20060102150405".
func GetFileName(fileName string) string {
	var timeStamp = time.Now().Format(config.TimeStampFormat)
	return fileName + "_" + timeStamp
}
|
package go_dj
import (
"testing"
)
// TestContainer wires a controller that depends on a service through the
// container, verifies the dependency is injected and usable, and checks
// that resolving an unregistered name fails.
func TestContainer(t *testing.T) {
	c := NewContainer()
	s := &tService{}
	// "controller" is registered before its "service" dependency: resolution
	// is deferred to Provide, so registration order must not matter.
	c.Register("controller", func(args ...interface{}) interface{} {
		return &tController{args[0].(*tService)}
	}, "service")
	c.Register("service", func(args ...interface{}) interface{} {
		return s
	})
	ctrl1, err := c.Provide("controller")
	if err != nil {
		// Bug fix: this error was previously discarded; a failing Provide
		// would have surfaced as a confusing nil type-assertion panic below.
		t.Fatal(err)
	}
	ctrl := ctrl1.(*tController)
	ctrl.Send("Test!")
	if s.LatestText != "Test!" {
		t.Fail()
	}
	_, err = c.Provide("NonExisten")
	if err == nil {
		t.Fail()
	}
}
// ======

// tController is a test double that forwards messages to its service,
// exercising constructor-injected dependencies.
type tController struct {
	Service *tService
}

// Send relays text to the underlying service.
func (c *tController) Send(text string) {
	c.Service.Serve(text)
}

// ======

// tService is a test double that records the last message it served.
type tService struct {
	LatestText string
}

// Serve stores text as the most recently served message.
func (s *tService) Serve(text string) {
	s.LatestText = text
}
|
package load
/*
#cgo LDFLAGS: -lperfstat
#include <libperfstat.h>
*/
import "C"
import (
"github.com/elastic/beats/libbeat/common"
"github.com/elastic/beats/libbeat/common/cfgwarn"
"github.com/elastic/beats/metricbeat/mb"
)
// init registers the MetricSet with the central registry as soon as the program
// starts. The New function will be called later to instantiate an instance of
// the MetricSet for each host defined in the module's configuration. After the
// MetricSet has been created then Fetch will begin to be called periodically.
// init registers the system/load MetricSet; New is invoked later for each
// configured host.
func init() {
	mb.Registry.MustAddMetricSet("system", "load", New)
}
// MetricSet holds any configuration or state information. It must implement
// the mb.MetricSet interface. And this is best achieved by embedding
// mb.BaseMetricSet because it implements all of the required mb.MetricSet
// interface methods except for Fetch.
type MetricSet struct {
	mb.BaseMetricSet
	// cpustat is the scratch buffer perfstat_cpu_total() fills on each Fetch.
	cpustat *C.perfstat_cpu_total_t
}
// New creates a new instance of the MetricSet. New is responsible for unpacking
// any MetricSet specific configuration options if there are any.
func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
	cfgwarn.Experimental("The system load metricset is experimental.")
	// The metricset currently takes no options; unpacking still validates
	// that no unknown keys were configured.
	config := struct{}{}
	if err := base.Module().UnpackConfig(&config); err != nil {
		return nil, err
	}
	return &MetricSet{
		BaseMetricSet: base,
		cpustat:       new(C.perfstat_cpu_total_t),
	}, nil
}
// Fetch methods implements the data gathering and data conversion to the right
// format. It publishes the event which is then forwarded to the output. In case
// of an error set the Error field of mb.Event or simply call report.Error().
//
// AIX reports load averages as fixed-point integers with SBITS fractional
// bits, hence the division by 1<<SBITS; "norm" values are per-core.
func (m *MetricSet) Fetch(report mb.ReporterV2) {
	C.perfstat_cpu_total(nil, m.cpustat, C.sizeof_perfstat_cpu_total_t, 1)
	// Idiom fix: locals renamed from snake_case to Go-conventional camelCase.
	loadFactor := float64(1 << C.SBITS)
	coresFactor := float64(m.cpustat.ncpus)
	load1 := float64(m.cpustat.loadavg[0]) / loadFactor
	load5 := float64(m.cpustat.loadavg[1]) / loadFactor
	load15 := float64(m.cpustat.loadavg[2]) / loadFactor
	report.Event(mb.Event{
		MetricSetFields: common.MapStr{
			"1":     load1,
			"5":     load5,
			"15":    load15,
			"cores": m.cpustat.ncpus,
			"norm": common.MapStr{
				"1":  load1 / coresFactor,
				"5":  load5 / coresFactor,
				"15": load15 / coresFactor,
			},
		},
	})
}
|
package odoo
import (
"fmt"
)
// MailAliasMixin represents mail.alias.mixin model.
//
// Bug fix: every xmlrpc tag spelled the option "omptempty"; corrected to
// "omitempty" so that nil fields are actually omitted from marshaled
// requests instead of the option being silently ignored.
type MailAliasMixin struct {
	LastUpdate          *Time      `xmlrpc:"__last_update,omitempty"`
	AliasContact        *Selection `xmlrpc:"alias_contact,omitempty"`
	AliasDefaults       *String    `xmlrpc:"alias_defaults,omitempty"`
	AliasDomain         *String    `xmlrpc:"alias_domain,omitempty"`
	AliasForceThreadId  *Int       `xmlrpc:"alias_force_thread_id,omitempty"`
	AliasId             *Many2One  `xmlrpc:"alias_id,omitempty"`
	AliasModelId        *Many2One  `xmlrpc:"alias_model_id,omitempty"`
	AliasName           *String    `xmlrpc:"alias_name,omitempty"`
	AliasParentModelId  *Many2One  `xmlrpc:"alias_parent_model_id,omitempty"`
	AliasParentThreadId *Int       `xmlrpc:"alias_parent_thread_id,omitempty"`
	AliasUserId         *Many2One  `xmlrpc:"alias_user_id,omitempty"`
	CreateDate          *Time      `xmlrpc:"create_date,omitempty"`
	CreateUid           *Many2One  `xmlrpc:"create_uid,omitempty"`
	DisplayName         *String    `xmlrpc:"display_name,omitempty"`
	Id                  *Int       `xmlrpc:"id,omitempty"`
	WriteDate           *Time      `xmlrpc:"write_date,omitempty"`
	WriteUid            *Many2One  `xmlrpc:"write_uid,omitempty"`
}
// MailAliasMixins represents array of mail.alias.mixin model.
type MailAliasMixins []MailAliasMixin

// MailAliasMixinModel is the odoo model name.
const MailAliasMixinModel = "mail.alias.mixin"

// Many2One convert MailAliasMixin to *Many2One.
// NOTE(review): panics if mam.Id is nil (i.e. the record was never read
// back from odoo) — confirm callers only use persisted records.
func (mam *MailAliasMixin) Many2One() *Many2One {
	return NewMany2One(mam.Id.Get(), "")
}
// CreateMailAliasMixin creates a new mail.alias.mixin model and returns its id.
func (c *Client) CreateMailAliasMixin(mam *MailAliasMixin) (int64, error) {
	ids, err := c.CreateMailAliasMixins([]*MailAliasMixin{mam})
	if err != nil {
		return -1, err
	}
	// No error but no id either: nothing was created; -1 signals "no id".
	if len(ids) == 0 {
		return -1, nil
	}
	return ids[0], nil
}

// CreateMailAliasMixins creates new mail.alias.mixin models and returns their ids.
// (Doc fix: the comment previously repeated the singular function's name.)
func (c *Client) CreateMailAliasMixins(mams []*MailAliasMixin) ([]int64, error) {
	var vv []interface{}
	for _, v := range mams {
		vv = append(vv, v)
	}
	return c.Create(MailAliasMixinModel, vv)
}
// UpdateMailAliasMixin updates an existing mail.alias.mixin record.
// NOTE(review): panics if mam.Id is nil — confirm callers pass persisted records.
func (c *Client) UpdateMailAliasMixin(mam *MailAliasMixin) error {
	return c.UpdateMailAliasMixins([]int64{mam.Id.Get()}, mam)
}

// UpdateMailAliasMixins updates existing mail.alias.mixin records.
// All records (represented by ids) will be updated by mam values.
func (c *Client) UpdateMailAliasMixins(ids []int64, mam *MailAliasMixin) error {
	return c.Update(MailAliasMixinModel, ids, mam)
}

// DeleteMailAliasMixin deletes an existing mail.alias.mixin record.
func (c *Client) DeleteMailAliasMixin(id int64) error {
	return c.DeleteMailAliasMixins([]int64{id})
}

// DeleteMailAliasMixins deletes existing mail.alias.mixin records.
func (c *Client) DeleteMailAliasMixins(ids []int64) error {
	return c.Delete(MailAliasMixinModel, ids)
}
// GetMailAliasMixin gets mail.alias.mixin existing record.
// Returns an error when the id does not exist.
func (c *Client) GetMailAliasMixin(id int64) (*MailAliasMixin, error) {
	mams, err := c.GetMailAliasMixins([]int64{id})
	if err != nil {
		return nil, err
	}
	// NOTE(review): the nil check is redundant (GetMailAliasMixins never
	// returns a nil pointer with a nil error) but harmless.
	if mams != nil && len(*mams) > 0 {
		return &((*mams)[0]), nil
	}
	return nil, fmt.Errorf("id %v of mail.alias.mixin not found", id)
}

// GetMailAliasMixins gets mail.alias.mixin existing records.
func (c *Client) GetMailAliasMixins(ids []int64) (*MailAliasMixins, error) {
	mams := &MailAliasMixins{}
	if err := c.Read(MailAliasMixinModel, ids, nil, mams); err != nil {
		return nil, err
	}
	return mams, nil
}

// FindMailAliasMixin finds mail.alias.mixin record by querying it with criteria.
// Only the first match is returned (Limit(1)).
func (c *Client) FindMailAliasMixin(criteria *Criteria) (*MailAliasMixin, error) {
	mams := &MailAliasMixins{}
	if err := c.SearchRead(MailAliasMixinModel, criteria, NewOptions().Limit(1), mams); err != nil {
		return nil, err
	}
	if mams != nil && len(*mams) > 0 {
		return &((*mams)[0]), nil
	}
	return nil, fmt.Errorf("mail.alias.mixin was not found with criteria %v", criteria)
}

// FindMailAliasMixins finds mail.alias.mixin records by querying it
// and filtering it with criteria and options.
func (c *Client) FindMailAliasMixins(criteria *Criteria, options *Options) (*MailAliasMixins, error) {
	mams := &MailAliasMixins{}
	if err := c.SearchRead(MailAliasMixinModel, criteria, options, mams); err != nil {
		return nil, err
	}
	return mams, nil
}

// FindMailAliasMixinIds finds records ids by querying it
// and filtering it with criteria and options.
func (c *Client) FindMailAliasMixinIds(criteria *Criteria, options *Options) ([]int64, error) {
	ids, err := c.Search(MailAliasMixinModel, criteria, options)
	if err != nil {
		return []int64{}, err
	}
	return ids, nil
}

// FindMailAliasMixinId finds record id by querying it with criteria.
// Returns an error when no record matches.
func (c *Client) FindMailAliasMixinId(criteria *Criteria, options *Options) (int64, error) {
	ids, err := c.Search(MailAliasMixinModel, criteria, options)
	if err != nil {
		return -1, err
	}
	if len(ids) > 0 {
		return ids[0], nil
	}
	return -1, fmt.Errorf("mail.alias.mixin was not found with criteria %v and options %v", criteria, options)
}
|
/*
Copyright 2019 The Skaffold Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package debug
import (
"strings"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/GoogleContainerTools/skaffold/v2/pkg/skaffold/debug/types"
"github.com/GoogleContainerTools/skaffold/v2/testutil"
)
// TestExtractInspectArg checks parsing of node --inspect/--inspect-brk
// flags into an inspectSpec: nil for non-matching args, default port 9229
// when no value is given, and host:port splitting otherwise.
func TestExtractInspectArg(t *testing.T) {
	tests := []struct {
		in     string
		result *inspectSpec
	}{
		{"", nil},
		{"foo", nil},
		{"--foo", nil},
		{"-inspect", nil}, // single dash is not accepted
		{"-inspect=9329", nil},
		{"--inspect", &inspectSpec{port: 9229, brk: false}},
		{"--inspect=9329", &inspectSpec{port: 9329, brk: false}},
		{"--inspect=:9329", &inspectSpec{port: 9329, brk: false}},
		{"--inspect=foo:9329", &inspectSpec{host: "foo", port: 9329, brk: false}},
		{"--inspect-brk", &inspectSpec{port: 9229, brk: true}},
		{"--inspect-brk=9329", &inspectSpec{port: 9329, brk: true}},
		{"--inspect-brk=:9329", &inspectSpec{port: 9329, brk: true}},
		{"--inspect-brk=foo:9329", &inspectSpec{host: "foo", port: 9329, brk: true}},
	}
	for _, test := range tests {
		testutil.Run(t, test.in, func(t *testutil.T) {
			if test.result == nil {
				t.CheckDeepEqual(test.result, extractInspectArg(test.in))
			} else {
				// Dereference to compare values; inspectSpec fields are unexported.
				t.CheckDeepEqual(*test.result, *extractInspectArg(test.in), cmp.AllowUnexported(inspectSpec{}))
			}
		})
	}
}
// TestNodeTransformer_IsApplicable checks the heuristics that decide whether
// a container image is a NodeJS app eligible for debug rewriting: an explicit
// runtime type, node-related env vars, or node/nodemon/npm appearing in the
// entrypoint or arguments (including behind a known launcher script).
func TestNodeTransformer_IsApplicable(t *testing.T) {
	tests := []struct {
		description string
		source      ImageConfiguration
		launcher    string
		result      bool
	}{
		{
			description: "user specified",
			source:      ImageConfiguration{RuntimeType: types.Runtimes.NodeJS},
			result:      true,
		},
		{
			description: "NODE_VERSION",
			source:      ImageConfiguration{Env: map[string]string{"NODE_VERSION": "10"}},
			result:      true,
		},
		{
			description: "NODEJS_VERSION",
			source:      ImageConfiguration{Env: map[string]string{"NODEJS_VERSION": "12"}},
			result:      true,
		},
		{
			description: "NODE_ENV",
			source:      ImageConfiguration{Env: map[string]string{"NODE_ENV": "production"}},
			result:      true,
		},
		{
			description: "entrypoint node",
			source:      ImageConfiguration{Entrypoint: []string{"node", "init.js"}},
			result:      true,
		},
		{
			description: "entrypoint /usr/bin/node",
			source:      ImageConfiguration{Entrypoint: []string{"/usr/bin/node", "init.js"}},
			result:      true,
		},
		{
			description: "no entrypoint, args node",
			source:      ImageConfiguration{Arguments: []string{"node", "init.js"}},
			result:      true,
		},
		{
			description: "no entrypoint, arguments /usr/bin/node",
			source:      ImageConfiguration{Arguments: []string{"/usr/bin/node", "init.js"}},
			result:      true,
		},
		{
			description: "entrypoint nodemon",
			source:      ImageConfiguration{Entrypoint: []string{"nodemon", "init.js"}},
			result:      true,
		},
		{
			description: "entrypoint /usr/bin/nodemon",
			source:      ImageConfiguration{Entrypoint: []string{"/usr/bin/nodemon", "init.js"}},
			result:      true,
		},
		{
			description: "no entrypoint, args nodemon",
			source:      ImageConfiguration{Arguments: []string{"nodemon", "init.js"}},
			result:      true,
		},
		{
			description: "no entrypoint, arguments /usr/bin/nodemon",
			source:      ImageConfiguration{Arguments: []string{"/usr/bin/nodemon", "init.js"}},
			result:      true,
		},
		{
			description: "entrypoint npm",
			source:      ImageConfiguration{Entrypoint: []string{"npm", "run", "dev"}},
			result:      true,
		},
		{
			description: "entrypoint /usr/bin/npm",
			source:      ImageConfiguration{Entrypoint: []string{"/usr/bin/npm", "run", "dev"}},
			result:      true,
		},
		{
			description: "no entrypoint, args npm",
			source:      ImageConfiguration{Arguments: []string{"npm", "run", "dev"}},
			result:      true,
		},
		{
			// NOTE(review): this case duplicates "no entrypoint, args npm"
			// above (identical source and expectation); consider removing.
			description: "no entrypoint, arguments npm",
			source:      ImageConfiguration{Arguments: []string{"npm", "run", "dev"}},
			result:      true,
		},
		{
			description: "no entrypoint, arguments /usr/bin/npm",
			source:      ImageConfiguration{Arguments: []string{"/usr/bin/npm", "run", "dev"}},
			result:      true,
		},
		{
			description: "entrypoint /bin/sh",
			source:      ImageConfiguration{Entrypoint: []string{"/bin/sh"}},
			result:      false,
		},
		{
			description: "entrypoint launcher", // `node` image docker-entrypoint.sh"
			source:      ImageConfiguration{Entrypoint: []string{"docker-entrypoint.sh"}, Arguments: []string{"npm", "run", "dev"}},
			launcher:    "docker-entrypoint.sh",
			result:      true,
		},
		{
			description: "nothing",
			source:      ImageConfiguration{},
			result:      false,
		},
	}
	for _, test := range tests {
		testutil.Run(t, test.description, func(t *testutil.T) {
			// Treat the per-case launcher as the only known launcher script.
			t.Override(&entrypointLaunchers, []string{test.launcher})
			result := nodeTransformer{}.IsApplicable(test.source)
			t.CheckDeepEqual(test.result, result)
		})
	}
}
// TestRewriteNodeCommandLine checks that the --inspect flag is inserted
// immediately after the "node" executable, before any script arguments.
func TestRewriteNodeCommandLine(t *testing.T) {
	tests := []struct {
		in     []string
		result []string
	}{
		{[]string{"node", "index.js"}, []string{"node", "--inspect=9226", "index.js"}},
		{[]string{"node"}, []string{"node", "--inspect=9226"}},
	}
	for _, test := range tests {
		testutil.Run(t, strings.Join(test.in, " "), func(t *testutil.T) {
			result := rewriteNodeCommandLine(test.in, inspectSpec{port: 9226})
			t.CheckDeepEqual(test.result, result)
		})
	}
}
// TestRewriteNpmCommandLine checks that --node-options=--inspect is added to
// npm invocations, and is placed before the "--" script-argument separator
// when one is present.
func TestRewriteNpmCommandLine(t *testing.T) {
	tests := []struct {
		in     []string
		result []string
	}{
		{[]string{"npm", "run", "server"}, []string{"npm", "run", "server", "--node-options=--inspect=9226"}},
		{[]string{"npm", "run", "server", "--", "option"}, []string{"npm", "run", "server", "--node-options=--inspect=9226", "--", "option"}},
	}
	for _, test := range tests {
		testutil.Run(t, strings.Join(test.in, " "), func(t *testutil.T) {
			result := rewriteNpmCommandLine(test.in, inspectSpec{port: 9226})
			t.CheckDeepEqual(test.result, result)
		})
	}
}
|
/*
Created on 2018/11/30 10:26
author: ChenJinLong
Content:
*/
package main
import (
"fmt"
"strings"
)
// Normal is the function signature shared with Afunc: one int followed by
// variadic strings.
type Normal func(a int, b ...string)

// Afunc prints first on its own line, then the variadic strings joined
// with ";" on the next.
func Afunc(first int, second ...string) {
	fmt.Println(first)
	fmt.Println(strings.Join(second, ";"))
}

// main demonstrates converting a compatible function to the named function
// type Normal and calling it through that type.
func main() {
	var fn Normal = Afunc
	fn(1, "wwww", "cccc")
}
|
package server
import (
"context"
"encoding/json"
"log"
"math"
"math/big"
"net/http"
"strconv"
"time"
"github.com/NotSoFancyName/SimpleWebServer/persistance"
"github.com/gorilla/mux"
)
const (
	// HTTP server tuning parameters.
	readTimeout    = 10 * time.Second
	writeTimeout   = 10 * time.Second
	maxHeaderBytes = 1 << 20
	// apiURL is the mux routing pattern; {block} is a decimal block number.
	apiURL = "/api/block/{block}/total"
)

const (
	// In-memory cache capacity and entry lifetime.
	cacheMaxSize   = 10000
	expirationTime = 24 * time.Hour
)

var (
	// wieToEthRatio converts wei to ether (1 ETH = 10^18 wei).
	// NOTE(review): presumably a typo for "weiToEthRatio"; renaming would
	// touch other files, so it is only flagged here.
	wieToEthRatio = big.NewFloat(math.Pow10(-18))
)
// Server serves block-total queries over HTTP, backed by an in-memory cache
// and a DB querier. stop doubles as the shutdown-request and shutdown-done
// channel (see Run).
type Server struct {
	server  *http.Server
	cache   *cache
	querier *persistance.DBQuerier
	stop    chan struct{}
}
// NewServer builds a Server listening on the given port with a single GET
// route (apiURL) handled by getBlockInfo. stop is the bidirectional
// shutdown channel consumed by Run.
func NewServer(stop chan struct{}, port int) *Server {
	r := mux.NewRouter()
	server := &Server{
		server: &http.Server{
			Addr:           ":" + strconv.FormatInt(int64(port), 10),
			Handler:        r,
			ReadTimeout:    readTimeout,
			WriteTimeout:   writeTimeout,
			MaxHeaderBytes: maxHeaderBytes,
		},
		cache:   NewCache(cacheMaxSize, expirationTime),
		querier: persistance.NewDBQuerier(),
		stop:    stop,
	}
	r.HandleFunc(apiURL, server.getBlockInfo).Methods(http.MethodGet)
	return server
}
// Run starts the HTTP server and blocks until it exits, forwarding the
// ListenAndServe error on errs. A value received on s.stop triggers a
// graceful shutdown of the server and the DB querier; once both are down,
// Run acknowledges by sending back on the same channel.
//
// NOTE(review): s.stop is used bidirectionally (stop request in, done ack
// out); the stopping side must receive the ack or this goroutine blocks
// forever.
func (s *Server) Run(errs chan<- error) {
	log.Printf("Running server on port %v", s.server.Addr)
	go func() {
		<-s.stop
		if err := s.server.Shutdown(context.Background()); err != nil {
			log.Printf("Failed to shutdown server properly: %v", err)
		}
		if err := s.querier.Shutdown(); err != nil {
			log.Printf("Failed to shutdown DB properly: %v", err)
		}
		log.Println("Server is shut")
		s.stop <- struct{}{}
	}()
	errs <- s.server.ListenAndServe()
}
// blockInfo is the JSON payload served by getBlockInfo: the number of
// transactions in a block and their total amount.
type blockInfo struct {
	Transactions int     `json:"transactions"`
	Amount       float64 `json:"amount"`
}
// getBlockInfo handles GET requests on apiURL. It resolves the transaction
// count and total amount for the requested block, serving from the in-memory
// cache when fresh, then falling back to the DB, and finally to a direct
// query; both cache and DB are refreshed on a miss or a stale count.
func (s *Server) getBlockInfo(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	blockNum, err := strconv.Atoi(mux.Vars(r)["block"])
	if err != nil {
		log.Printf("Failed to parse block number: %v \n", err)
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	if blockNum < 0 {
		log.Println("Block number should be unsigned int")
		w.WriteHeader(http.StatusBadRequest)
		return
	}
	log.Printf("Trying to calculate total transactions value for block number: %v\n", blockNum)
	count, err := queryTransactionsCount(blockNum)
	if err != nil {
		log.Printf("Failed to get transaction count %v: %v", blockNum, err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	bi, presentInCache := s.cache.get(blockNum)
	// `!p || (p && x)` simplifies to `!p || x`: refresh when the cache entry
	// is missing or its transaction count is stale.
	if !presentInCache || bi.Transactions != count {
		dbInfo, presentInDB := s.querier.Get(blockNum)
		if !presentInDB || dbInfo.Transactions != count {
			// DB is also missing or stale: query the source of truth and
			// persist the fresh result in both layers.
			bi, err = queryBlockInfo(blockNum)
			if err != nil {
				log.Printf("Failed to get info for block number %v: %v \n", blockNum, err)
				w.WriteHeader(http.StatusInternalServerError)
				return
			}
			s.cache.put(blockNum, bi)
			s.querier.Put(blockNum, bi.Transactions, bi.Amount)
		} else {
			// DB has a fresh entry: promote it to the cache.
			bi = &blockInfo{
				Transactions: dbInfo.Transactions,
				Amount:       dbInfo.Amount,
			}
			s.cache.put(blockNum, bi)
		}
	}
	rawResp, err := json.Marshal(bi)
	if err != nil {
		log.Printf("Failed to marshal block info. Error: %v \n", err)
		w.WriteHeader(http.StatusInternalServerError)
		return
	}
	w.WriteHeader(http.StatusOK)
	w.Write(rawResp)
}
|
package cachemocks
import (
"github.com/stretchr/testify/mock"
"github.com/Skipor/memcached/cache"
)
// Cache is an autogenerated mock type for the Cache type. It embeds
// mock.Mock, so expectations are set with c.On(...) and verified with
// c.AssertExpectations(t).
type Cache struct {
	mock.Mock
}
// Delete provides a mock function with given fields: key.
// If the expectation supplies a return function it is invoked; otherwise the
// stubbed bool is returned directly.
func (c *Cache) Delete(key []byte) bool {
	ret := c.Called(key)
	if rf, ok := ret.Get(0).(func([]byte) bool); ok {
		return rf(key)
	}
	return ret.Get(0).(bool)
}
// Get provides a mock function with given fields: key.
// A configured return function takes precedence; otherwise the stubbed slice
// is returned (nil stays nil rather than panicking on the type assertion).
func (c *Cache) Get(key ...[]byte) []cache.ItemView {
	ret := c.Called(key)
	if rf, ok := ret.Get(0).(func(...[]byte) []cache.ItemView); ok {
		return rf(key...)
	}
	if stub := ret.Get(0); stub != nil {
		return stub.([]cache.ItemView)
	}
	return nil
}
// Touch provides a mock function with given fields: key.
func (c *Cache) Touch(key ...[]byte) { c.Called(key) }

// Set provides a mock function with given fields: i.
func (c *Cache) Set(i cache.Item) { c.Called(i) }

// Lock/Unlock/RLock/RUnlock record the call so locking behavior can be asserted.
func (c *Cache) Lock() { c.Called() }
func (c *Cache) Unlock() { c.Called() }
func (c *Cache) RLock() { c.Called() }
func (c *Cache) RUnlock() { c.Called() }

// The New*er constructors return the mock itself, which implements all three
// command interfaces.
func (c *Cache) NewGetter(rawCommand []byte) cache.Getter { return c }
func (c *Cache) NewSetter(rawCommand []byte) cache.Setter { return c }
func (c *Cache) NewDeleter(rawCommand []byte) cache.Deleter { return c }

// Compile-time checks that the mock satisfies the cache interfaces.
var _ cache.Cache = (*Cache)(nil)
var _ cache.View = (*Cache)(nil)
var _ cache.RWCache = (*Cache)(nil)
|
// Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"encoding/json"
"net/http"
"net/url"
"strings"
"github.com/pkg/errors"
)
// dockerImages mirrors the registry's /v2/<image>/tags/list response body.
type dockerImages struct {
	Tags []string `json:"tags"`
}
// CheckDockerImageExists checks if a docker image exists by issuing a GET
// against the registry's manifest endpoint. It returns nil when the registry
// answers 200 OK, and an error on transport failure or any other status.
func CheckDockerImageExists(image, tag string) error {
	// Build hostname/v2/<image>/manifests/<tag> to directly check if the image exists.
	splitImage := strings.Split(image, "/")
	tail := splitImage[1:]
	reqPath := append(append([]string{"v2"}, tail...), "manifests", tag)
	u := &url.URL{
		Scheme: "https",
		Host:   splitImage[0],
		Path:   strings.Join(reqPath, "/"),
	}
	res, err := http.Get(u.String())
	if err != nil {
		return err
	}
	// Close the body on every path; the original leaked it, preventing
	// connection reuse by the transport.
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return errors.New("tag does not exist")
	}
	return nil
}
// GetDockerImageFromCommit searches all tags of an image and tries to match
// the commit (e.g. .10.0-dev-<commit>). The image tag is returned if an
// applicable tag can be found.
// todo: use pagination if gcr will support it someday
func GetDockerImageFromCommit(image, commit string) (string, error) {
	// Construct api call with the form hostname/v2/<image>/tags/list.
	splitImage := strings.Split(image, "/")
	tail := splitImage[1:]
	reqPath := append(append([]string{"v2"}, tail...), "tags", "list")
	u := &url.URL{
		Scheme: "https",
		Host:   splitImage[0],
		Path:   strings.Join(reqPath, "/"),
	}
	res, err := http.Get(u.String())
	if err != nil {
		return "", err
	}
	// Deferred before the status check: the original only closed the body on
	// the success path, leaking the connection on non-200 responses.
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return "", errors.New("no tag found")
	}
	decoder := json.NewDecoder(res.Body)
	var images dockerImages
	if err := decoder.Decode(&images); err != nil {
		return "", err
	}
	for _, tag := range images.Tags {
		if strings.Contains(tag, commit) {
			return tag, nil
		}
	}
	return "", errors.New("no tag found")
}
|
// Copyright 2014 Dirk Jablonowski. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Implementation of a connector interface type with buffered channels.
// A buffered working connector. The buffer sizes for incoming and outgoing events can be given as parameters.
package buffered
import (
"io"
"time"
"github.com/dirkjabl/bricker/connector"
"github.com/dirkjabl/bricker/event"
"github.com/dirkjabl/bricker/net"
"github.com/dirkjabl/bricker/net/packet"
)
// ConnectorBuffered is the connector with two buffered channels,
// one for reading (In) and one for writing (Out).
// Each channel is serviced by its own goroutine.
// The connector puts all of the packets it reads into events on the In channel.
// The connector waits on the Out channel for packets to write out to the hardware.
// Closing the Quit channel makes the bricker stop all goroutines and disconnect from the hardware.
type ConnectorBuffered struct {
	conn *net.Net            // internal, the real connection
	seq  *connector.Sequence // internal, actual sequence number
	In   chan *event.Event   // input channel, here the bricker puts the read packets as events
	Out  chan *event.Event   // output channel, here the bricker reads the events which should be sent
	Quit chan struct{}       // quit channel, if closed, the bricker stops working and releases resources
}
// New creates the connector object with a connection to the given address (addr).
// The two integers give the capacities of the input and output buffers (channels).
// The reader and writer goroutines are started before returning.
func New(addr string, inbuf, outbuf int) (*ConnectorBuffered, error) {
	conn, err := net.Dial(addr)
	if err != nil {
		return nil, err
	}
	conn.Conn.SetKeepAlive(true)
	cb := &ConnectorBuffered{
		conn: conn,
		seq:  new(connector.Sequence),
		In:   make(chan *event.Event, inbuf),
		Out:  make(chan *event.Event, outbuf),
		Quit: make(chan struct{}),
	}
	go cb.read()
	go cb.write()
	return cb, nil
}
// NewUnbuffered creates a connector without buffered channels.
// It is a buffered connector with zero-capacity buffers.
func NewUnbuffered(addr string) (*ConnectorBuffered, error) {
	return New(addr, 0, 0)
}
// Send puts the given event into the channel for writing packets to the
// hardware. It blocks when the Out buffer is full.
func (cb *ConnectorBuffered) Send(ev *event.Event) {
	cb.Out <- ev
}
// Receive reads an event out of the channel of packets read from the hardware.
// If Receive returns a nil event, no more events will follow: the channel is closed.
func (cb *ConnectorBuffered) Receive() *event.Event {
	if ev, open := <-cb.In; open {
		return ev
	}
	return nil
}
// Done stops the bricker and releases all connections: it signals the
// goroutines via Quit, then closes the hardware connection.
func (cb *ConnectorBuffered) Done() {
	close(cb.Quit)
	cb.conn.Close()
}
// read is an internal method run on its own goroutine. It repeatedly reads
// packets from the hardware connection (under a 2s read deadline), wraps each
// result in an event, and delivers it on the In channel. On io.EOF it
// re-dials the connection and backs off for 2s. In is closed when the loop exits.
//
// NOTE(review): `done` is written inside the spawned delivery goroutines and
// read by this loop without synchronization — a data race. The per-result
// delivery goroutines may also deliver events out of order when In is full.
// Confirm whether ordering matters to consumers.
func (cb *ConnectorBuffered) read() {
	defer close(cb.In)
	var err error
	var pck *packet.Packet
	done := false
	for {
		if done {
			return
		}
		cb.conn.Conn.SetReadDeadline(time.Now().Add(2 * time.Second))
		pck, err = cb.conn.ReadPacket()
		if err == io.EOF {
			// Connection dropped by the peer: try to re-establish it.
			cb.conn.Dial()
			time.Sleep(2 * time.Second)
		}
		// Hand the result off asynchronously so a full In channel does not
		// block further reads; abort delivery when Quit is closed.
		go func(e error, p *packet.Packet) {
			if !done {
				ev := event.NewSimple(e, p)
				select {
				case cb.In <- ev:
				case <-cb.Quit:
					done = true
				}
			}
		}(err, pck)
	}
}
// write is an internal method run on its own goroutine. It drains the Out
// channel, stamps each outgoing packet with the next sequence number and its
// computed length, and writes it to the hardware under a 2s write deadline.
// On io.EOF it re-dials and backs off for 2s. The loop exits (and Out is
// closed) when Quit is closed.
func (cb *ConnectorBuffered) write() {
	defer close(cb.Out)
	var ev *event.Event
	for {
		select {
		case ev = <-cb.Out:
			// Only well-formed events are written: non-nil, no error, with a packet.
			if ev != nil && ev.Err == nil && ev.Packet != nil {
				ev.Packet.Head.SetSequence(cb.seq.GetSequence())
				ev.Packet.Head.Length = ev.Packet.ComputeLength()
				cb.conn.Conn.SetWriteDeadline(time.Now().Add(2 * time.Second))
				err := cb.conn.WritePacket(ev.Packet)
				if err == io.EOF {
					// Connection dropped: try to re-establish it.
					cb.conn.Dial()
					time.Sleep(2 * time.Second)
				}
			}
		case <-cb.Quit:
			return
		}
	}
}
|
package bitfield
// BitField stores a sequence of bits packed into a byte slice.
type BitField struct {
	fields []byte
}
|
// Copyright 2019 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package gomarshal implements the go_marshal code generator. See README.md.
package gomarshal
import (
"bytes"
"fmt"
"go/ast"
"go/parser"
"go/token"
"os"
"sort"
"strings"
"gvisor.dev/gvisor/tools/constraintutil"
)
// List of identifiers we use in generated code that may conflict with a
// similarly-named source identifier. Abort gracefully when we see these to
// avoid potentially confusing compilation failures in generated code.
//
// This only applies to import aliases at the moment. All other identifiers
// are qualified by a receiver argument, since they're struct fields.
//
// All receivers are single letters, so we don't allow import aliases to be a
// single letter.
var badIdents = []string{
	"addr", "blk", "buf", "cc", "dst", "dsts", "count", "err", "hdr", "idx",
	"inner", "length", "limit", "ptr", "size", "src", "srcs", "val",
	// All single-letter identifiers.
}

// Constructed from badIdents in init().
var badIdentsMap map[string]struct{}
func init() {
badIdentsMap = make(map[string]struct{})
for _, ident := range badIdents {
badIdentsMap[ident] = struct{}{}
}
}
// Generator drives code generation for a single invocation of the go_marshal
// utility.
//
// The Generator holds arguments passed to the tool, and drives parsing,
// processing and code Generator for all types marked with +marshal declared in
// the input files.
//
// See Generator.run() as the entry point.
type Generator struct {
	// Paths to input go source files.
	inputs []string
	// Output file to write generated go source.
	output *os.File
	// Output file to write generated tests.
	outputTest *os.File
	// Output file to write unconditionally generated tests.
	outputTestUC *os.File
	// Package name for the generated file.
	pkg string
	// Set of extra packages to import in the generated file.
	imports *importTable
}
// NewGenerator creates a new code Generator.
//
// srcs are the input source files; out, outTest and outTestUnconditional are
// the paths of the generated code, the build-constrained tests, and the
// unconditionally compiled tests, respectively. imports are extra packages
// to unconditionally import in the generated file.
func NewGenerator(srcs []string, out, outTest, outTestUnconditional, pkg string, imports []string) (*Generator, error) {
	f, err := os.OpenFile(out, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		return nil, fmt.Errorf("couldn't open output file %q: %w", out, err)
	}
	fTest, err := os.OpenFile(outTest, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		// Best-effort cleanup of the already-opened file; the original
		// leaked it and reported the wrong path (out) in the error.
		f.Close()
		return nil, fmt.Errorf("couldn't open test output file %q: %w", outTest, err)
	}
	fTestUC, err := os.OpenFile(outTestUnconditional, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
	if err != nil {
		f.Close()
		fTest.Close()
		return nil, fmt.Errorf("couldn't open unconditional test output file %q: %w", outTestUnconditional, err)
	}
	g := Generator{
		inputs:       srcs,
		output:       f,
		outputTest:   fTest,
		outputTestUC: fTestUC,
		pkg:          pkg,
		imports:      newImportTable(),
	}
	for _, i := range imports {
		// All imports on the extra imports list are unconditionally marked as
		// used, so that they're always added to the generated code.
		g.imports.add(i).markUsed()
	}
	// The following imports may or may not be used by the generated code,
	// depending on what's required for the target types. Don't mark these as
	// used by default.
	g.imports.add("io")
	g.imports.add("reflect")
	g.imports.add("runtime")
	g.imports.add("unsafe")
	g.imports.add("gvisor.dev/gvisor/pkg/gohacks")
	g.imports.add("gvisor.dev/gvisor/pkg/hostarch")
	g.imports.add("gvisor.dev/gvisor/pkg/marshal")
	return &g, nil
}
// writeHeader writes the header for the generated source file. The header
// includes the package name, package level comments and import statements.
// Build constraints are aggregated from all input files so the generated
// file compiles under exactly the same configurations as its sources.
func (g *Generator) writeHeader() error {
	var b sourceBuffer
	b.emit("// Automatically generated marshal implementation. See tools/go_marshal.\n\n")
	bcexpr, err := constraintutil.CombineFromFiles(g.inputs)
	if err != nil {
		return err
	}
	if bcexpr != nil {
		// Emit build constraints.
		b.emit("// If there are issues with build constraint aggregation, see\n")
		b.emit("// tools/go_marshal/gomarshal/generator.go:writeHeader(). The constraints here\n")
		b.emit("// come from the input set of files used to generate this file. This input set\n")
		b.emit("// is filtered based on pre-defined file suffixes related to build constraints,\n")
		b.emit("// see tools/defs.bzl:calculate_sets().\n\n")
		b.emit(constraintutil.Lines(bcexpr))
	}
	// Package header.
	b.emit("package %s\n\n", g.pkg)
	if err := b.write(g.output); err != nil {
		return err
	}
	return g.imports.write(g.output)
}
// writeTypeChecks writes a statement to force the compiler to perform a type
// check for all Marshallable types referenced by the generated code. The
// names are emitted in sorted order so the output is deterministic.
func (g *Generator) writeTypeChecks(ms map[string]struct{}) error {
	if len(ms) == 0 {
		return nil
	}
	names := make([]string, 0, len(ms))
	for name := range ms {
		names = append(names, name)
	}
	sort.Strings(names)
	var b bytes.Buffer
	b.WriteString("// Marshallable types used by this file.\n")
	for _, name := range names {
		fmt.Fprintf(&b, "var _ marshal.Marshallable = (*%s)(nil)\n", name)
	}
	b.WriteString("\n")
	_, err := g.output.WriteString(b.String())
	return err
}
// parse processes all input files passed to this generator and produces a set
// of parsed go ASTs. The returned slices are parallel: files[i] was parsed
// with fsets[i]. Parsing stops at the first invalid input file.
func (g *Generator) parse() ([]*ast.File, []*token.FileSet, error) {
	debugf("go_marshal invoked with %d input files:\n", len(g.inputs))
	for _, path := range g.inputs {
		debugf("  %s\n", path)
	}
	files := make([]*ast.File, 0, len(g.inputs))
	fsets := make([]*token.FileSet, 0, len(g.inputs))
	for _, path := range g.inputs {
		fset := token.NewFileSet()
		// Comments must be parsed since the +marshal directives live in them.
		f, err := parser.ParseFile(fset, path, nil, parser.ParseComments)
		if err != nil {
			// Not a valid input file?
			return nil, nil, fmt.Errorf("input %q can't be parsed: %w", path, err)
		}
		if debugEnabled() {
			debugf("AST for %q:\n", path)
			ast.Print(fset, f)
		}
		files = append(files, f)
		fsets = append(fsets, fset)
	}
	return files, fsets, nil
}
// sliceAPI carries information about the '+marshal slice' directive.
type sliceAPI struct {
	// Comment node in the AST containing the +marshal tag.
	comment *ast.Comment
	// Identifier fragment to use when naming generated functions for the slice
	// API.
	ident string
	// Whether the generated functions should reference the newtype name, or the
	// inner type name. Only meaningful on newtype declarations on primitives.
	inner bool
}
// marshallableType carries information about a type marked with the '+marshal'
// directive.
type marshallableType struct {
	spec       *ast.TypeSpec // the type declaration itself
	slice      *sliceAPI     // non-nil iff a 'slice:' clause was present
	recv       string        // receiver name used in generated methods
	dynamic    bool          // '+marshal dynamic': type marshals itself
	boundCheck bool          // '+marshal boundCheck': also emit Checked methods
}
// newMarshallableType parses the clauses of a '+marshal' tag line into a
// marshallableType for spec. Recognized clauses are 'slice:<IDENT>[:inner]',
// 'dynamic' and 'boundCheck'; any other clause aborts with a diagnostic at
// the tag's position.
func newMarshallableType(fset *token.FileSet, tagLine *ast.Comment, spec *ast.TypeSpec) *marshallableType {
	mt := &marshallableType{
		spec:  spec,
		slice: nil,
	}
	var unhandledTags []string
	// Each whitespace-separated token after "// +marshal" is one clause.
	for _, tag := range strings.Fields(strings.TrimPrefix(tagLine.Text, "// +marshal")) {
		if strings.HasPrefix(tag, "slice:") {
			tokens := strings.Split(tag, ":")
			if len(tokens) < 2 || len(tokens) > 3 {
				abortAt(fset.Position(tagLine.Slash), fmt.Sprintf("+marshal directive has invalid 'slice' clause. Expecting format 'slice:<IDENTIFIER>[:inner]', got '%v'", tag))
			}
			if len(tokens[1]) == 0 {
				abortAt(fset.Position(tagLine.Slash), "+marshal slice directive has empty identifier argument. Expecting '+marshal slice:identifier'")
			}
			sa := &sliceAPI{
				comment: tagLine,
				ident:   tokens[1],
			}
			mt.slice = sa
			if len(tokens) == 3 {
				if tokens[2] != "inner" {
					abortAt(fset.Position(tagLine.Slash), "+marshal slice directive has an invalid argument. Expecting '+marshal slice:<IDENTIFIER>[:inner]'")
				}
				sa.inner = true
			}
			continue
		} else if tag == "dynamic" {
			mt.dynamic = true
			continue
		} else if tag == "boundCheck" {
			mt.boundCheck = true
			continue
		}
		unhandledTags = append(unhandledTags, tag)
	}
	if len(unhandledTags) > 0 {
		abortAt(fset.Position(tagLine.Slash), fmt.Sprintf("+marshal directive contained the following unknown clauses: %v", strings.Join(unhandledTags, " ")))
	}
	return mt
}
// collectMarshallableTypes walks the parsed AST and collects a list of type
// declarations for which we need to generate the Marshallable interface.
// It also records method receiver names seen in the file so generated methods
// reuse the same receiver name as hand-written ones.
func (g *Generator) collectMarshallableTypes(a *ast.File, f *token.FileSet) map[*ast.TypeSpec]*marshallableType {
	recv := make(map[string]string) // Type name to receiver name.
	types := make(map[*ast.TypeSpec]*marshallableType)
	for _, decl := range a.Decls {
		gdecl, ok := decl.(*ast.GenDecl)
		// Type declaration?
		if !ok || gdecl.Tok != token.TYPE {
			// Is this a function declaration? We remember receiver names.
			d, ok := decl.(*ast.FuncDecl)
			if ok && d.Recv != nil && len(d.Recv.List) == 1 {
				// Accept concrete methods & pointer methods.
				ident, ok := d.Recv.List[0].Type.(*ast.Ident)
				if !ok {
					var st *ast.StarExpr
					st, ok = d.Recv.List[0].Type.(*ast.StarExpr)
					if ok {
						ident, ok = st.X.(*ast.Ident)
					}
				}
				// The receiver name may be not present.
				if ok && len(d.Recv.List[0].Names) == 1 {
					// Recover the type receiver name in this case.
					recv[ident.Name] = d.Recv.List[0].Names[0].Name
				}
			}
			debugfAt(f.Position(decl.Pos()), "Skipping declaration since it's not a type declaration.\n")
			continue
		}
		// Does it have a comment?
		if gdecl.Doc == nil {
			debugfAt(f.Position(gdecl.Pos()), "Skipping declaration since it doesn't have a comment.\n")
			continue
		}
		// Does the comment contain a "+marshal" line?
		marked := false
		var tagLine *ast.Comment
		for _, c := range gdecl.Doc.List {
			if strings.HasPrefix(c.Text, "// +marshal") {
				marked = true
				tagLine = c
				break
			}
		}
		if !marked {
			debugfAt(f.Position(gdecl.Pos()), "Skipping declaration since it doesn't have a comment containing +marshal line.\n")
			continue
		}
		for _, spec := range gdecl.Specs {
			// We already confirmed we're in a type declaration earlier, so this
			// cast will succeed.
			t := spec.(*ast.TypeSpec)
			switch t.Type.(type) {
			case *ast.StructType:
				debugfAt(f.Position(t.Pos()), "Collected marshallable struct %s.\n", t.Name.Name)
			case *ast.Ident: // Newtype on primitive.
				debugfAt(f.Position(t.Pos()), "Collected marshallable newtype on primitive %s.\n", t.Name.Name)
			case *ast.ArrayType: // Newtype on array.
				debugfAt(f.Position(t.Pos()), "Collected marshallable newtype on array %s.\n", t.Name.Name)
			default:
				// A user specifically requested marshalling on this type, but we
				// don't support it.
				abortAt(f.Position(t.Pos()), fmt.Sprintf("Marshalling codegen was requested on type '%s', but go-marshal doesn't support this kind of declaration.\n", t.Name))
			}
			types[t] = newMarshallableType(f, tagLine, t)
		}
	}
	// Update the types with the last seen receiver. As long as the
	// receiver name is consistent for the type, then we will generate
	// code that is still consistent with itself.
	for t, mt := range types {
		r, ok := recv[t.Name.Name]
		if !ok {
			mt.recv = receiverName(t) // Default.
			continue
		}
		mt.recv = r // Last seen.
	}
	return types
}
// collectImports collects all imports from all input source files. Some of
// these imports are copied to the generated output, if they're referenced by
// the generated code.
//
// collectImports de-duplicates imports while building the list, and ensures
// identifiers in the generated code don't conflict with any imported package
// names.
//
// NOTE(review): the returned map is declared but never populated — the loop
// records each import only into g.imports, so callers always receive an
// empty map. Run's aggregation of this result is therefore dead code.
// Confirm whether `is[i.name] = i` was intended inside the inner loop.
func (g *Generator) collectImports(a *ast.File, f *token.FileSet) map[string]importStmt {
	is := make(map[string]importStmt)
	for _, decl := range a.Decls {
		gdecl, ok := decl.(*ast.GenDecl)
		// Import statement?
		if !ok || gdecl.Tok != token.IMPORT {
			continue
		}
		for _, spec := range gdecl.Specs {
			i := g.imports.addFromSpec(spec.(*ast.ImportSpec), f)
			debugf("Collected import '%s' as '%s'\n", i.path, i.name)
			// Make sure we have an import that doesn't use any local names that
			// would conflict with identifiers in the generated code.
			if len(i.name) == 1 && i.name != "_" {
				abortAt(f.Position(spec.Pos()), fmt.Sprintf("Import has a single character local name '%s'; this may conflict with code generated by go_marshal, use a multi-character import alias", i.name))
			}
			if _, ok := badIdentsMap[i.name]; ok {
				abortAt(f.Position(spec.Pos()), fmt.Sprintf("Import name '%s' is likely to conflict with code generated by go_marshal, use a different import alias", i.name))
			}
		}
	}
	return is
}
// generateOne emits the Marshallable implementation for a single collected
// type. Dynamic types are handled specially (they marshal themselves);
// structs, primitive newtypes and array newtypes each get their own code
// paths, plus optional Checked methods and slice APIs.
func (g *Generator) generateOne(t *marshallableType, fset *token.FileSet) *interfaceGenerator {
	i := newInterfaceGenerator(t.spec, t.recv, fset)
	if t.dynamic {
		if t.slice != nil {
			abortAt(fset.Position(t.slice.comment.Slash), "Slice API is not supported for dynamic types because it assumes that each slice element is statically sized.")
		}
		if t.boundCheck {
			// Anchor the diagnostic at the type itself: t.slice is nil
			// whenever no slice clause was given, so the original's
			// t.slice.comment.Slash dereference would panic here.
			abortAt(fset.Position(t.spec.Pos()), "Can not generate Checked methods for dynamic types. Has to be implemented manually.")
		}
		// No validation needed, assume the user knows what they are doing.
		i.emitMarshallableForDynamicType()
		return i
	}
	switch ty := t.spec.Type.(type) {
	case *ast.StructType:
		i.validateStruct(t.spec, ty)
		i.emitMarshallableForStruct(ty)
		if t.boundCheck {
			i.emitCheckedMarshallableForStruct()
		}
		if t.slice != nil {
			i.emitMarshallableSliceForStruct(ty, t.slice)
		}
	case *ast.Ident:
		i.validatePrimitiveNewtype(ty)
		i.emitMarshallableForPrimitiveNewtype(ty)
		if t.boundCheck {
			i.emitCheckedMarshallableForPrimitiveNewtype()
		}
		if t.slice != nil {
			i.emitMarshallableSliceForPrimitiveNewtype(ty, t.slice)
		}
	case *ast.ArrayType:
		i.validateArrayNewtype(t.spec.Name, ty)
		// After validate, we can safely call arrayLen.
		i.emitMarshallableForArrayNewtype(t.spec.Name, ty, ty.Elt.(*ast.Ident))
		if t.boundCheck {
			i.emitCheckedMarshallableForArrayNewtype()
		}
		if t.slice != nil {
			abortAt(fset.Position(t.slice.comment.Slash), "Array type marked as '+marshal slice:...', but this is not supported. Perhaps fold one of the dimensions?")
		}
	default:
		// This should've been filtered out by collectMarshallabeTypes.
		panic(fmt.Sprintf("Unexpected type %+v", ty))
	}
	return i
}
// generateOneTestSuite generates a test suite for the automatically generated
// implementations of type t, covering the slice API and Checked methods when
// they were requested.
func (g *Generator) generateOneTestSuite(t *marshallableType) *testGenerator {
	i := newTestGenerator(t.spec, t.recv)
	i.emitTests(t.slice, t.boundCheck)
	return i
}
// Run is the entry point to code generation using g.
//
// Run parses all input source files specified in g and emits generated code:
// the implementation file (header, type checks, interface implementations)
// and the test files.
func (g *Generator) Run() error {
	// Parse our input source files into ASTs and token sets.
	asts, fsets, err := g.parse()
	if err != nil {
		return err
	}
	if len(asts) != len(fsets) {
		panic("ASTs and FileSets don't match")
	}
	// Map of imports in source files; key = local package name, value = import
	// path.
	is := make(map[string]importStmt)
	for i, a := range asts {
		// Collect all imports from the source files. We may need to copy some
		// of these to the generated code if they're referenced. This has to be
		// done before the loop below because we need to process all ASTs before
		// we start requesting imports to be copied one by one as we encounter
		// them in each generated source.
		for name, i := range g.collectImports(a, fsets[i]) {
			is[name] = i
		}
	}
	var impls []*interfaceGenerator
	var ts []*testGenerator
	// Set of Marshallable types referenced by generated code.
	ms := make(map[string]struct{})
	for i, a := range asts {
		// Collect type declarations marked for code generation and generate
		// Marshallable interfaces.
		var sortedTypes []*marshallableType
		for _, t := range g.collectMarshallableTypes(a, fsets[i]) {
			sortedTypes = append(sortedTypes, t)
		}
		// Sorting makes the emitted output deterministic across runs.
		sort.Slice(sortedTypes, func(x, y int) bool {
			// Sort by type name, which should be unique within a package.
			return sortedTypes[x].spec.Name.String() < sortedTypes[y].spec.Name.String()
		})
		for _, t := range sortedTypes {
			impl := g.generateOne(t, fsets[i])
			// Collect Marshallable types referenced by the generated code.
			for ref := range impl.ms {
				ms[ref] = struct{}{}
			}
			impls = append(impls, impl)
			// Collect imports referenced by the generated code and add them to
			// the list of imports we need to copy to the generated code.
			for name := range impl.is {
				if !g.imports.markUsed(name) {
					panic(fmt.Sprintf("Generated code for '%s' referenced a non-existent import with local name '%s'. Either go-marshal needs to add an import to the generated file, or a package in an input source file has a package name differ from the final component of its path, which go-marshal doesn't know how to detect; use an import alias to work around this limitation.", impl.typeName(), name))
				}
			}
			// Do not generate tests for dynamic types because they inherently
			// violate some go_marshal requirements.
			if !t.dynamic {
				ts = append(ts, g.generateOneTestSuite(t))
			}
		}
	}
	// Write output file header. These include things like package name and
	// import statements.
	if err := g.writeHeader(); err != nil {
		return err
	}
	// Write type checks for referenced marshallable types to output file.
	if err := g.writeTypeChecks(ms); err != nil {
		return err
	}
	// Write generated interfaces to output file.
	for _, i := range impls {
		if err := i.write(g.output); err != nil {
			return err
		}
	}
	// Write generated tests to test file.
	return g.writeTests(ts)
}
// writeTests outputs tests for the generated interface implementations to a go
// source file. It writes two files: an unconditional file containing a single
// empty Example (so the test package is never empty), and the real test file
// guarded by the aggregated build constraints.
func (g *Generator) writeTests(ts []*testGenerator) error {
	var b sourceBuffer
	// Write the unconditional test file. This file is always compiled,
	// regardless of what build tags were specified on the original input
	// files. We use this file to guarantee we never end up with an empty test
	// file, as that causes the build to fail with "no tests/benchmarks/examples
	// found".
	//
	// There's no easy way to determine ahead of time if we'll end up with an
	// empty build file since build constraints can arbitrarily cause some of
	// the original types to be not defined. We also have no way to tell bazel
	// to omit the entire test suite since the output files are already defined
	// before go-marshal is called.
	b.emit("// Automatically generated marshal tests. See tools/go_marshal.\n\n")
	b.emit("package %s\n\n", g.pkg)
	b.emit("func Example() {\n")
	b.inIndent(func() {
		b.emit("// This example is intentionally empty, and ensures this package contains at\n")
		b.emit("// least one testable entity. go-marshal is forced to emit a test package if the\n")
		b.emit("// input package is marked marshallable, but emitting no testable entities \n")
		b.emit("// results in a build failure.\n")
	})
	b.emit("}\n")
	if err := b.write(g.outputTestUC); err != nil {
		return err
	}
	// Now generate the real test file that contains the real types we
	// processed. These need to be conditionally compiled according to the build
	// tags, as the original types may not be defined under all build
	// configurations.
	b.reset()
	b.emit("// Automatically generated marshal tests. See tools/go_marshal.\n\n")
	// Emit build constraints.
	bcexpr, err := constraintutil.CombineFromFiles(g.inputs)
	if err != nil {
		return err
	}
	b.emit(constraintutil.Lines(bcexpr))
	b.emit("package %s\n\n", g.pkg)
	if err := b.write(g.outputTest); err != nil {
		return err
	}
	// Collect and write test import statements.
	imports := newImportTable()
	for _, t := range ts {
		imports.merge(t.imports)
	}
	if err := imports.write(g.outputTest); err != nil {
		return err
	}
	// Write test functions.
	for _, t := range ts {
		if err := t.write(g.outputTest); err != nil {
			return err
		}
	}
	return nil
}
|
package loadtester
import (
"fmt"
"time"
"github.com/project-flogo/core/data/metadata"
"github.com/project-flogo/core/support/log"
"github.com/project-flogo/core/trigger"
)
// triggerMd describes this trigger's settings and output schema to flogo.
var triggerMd = trigger.NewMetadata(&Settings{}, &Output{})

// init registers the trigger and its factory with the flogo runtime.
func init() {
	trigger.Register(&Trigger{}, &Factory{})
}
// Factory creates load-tester Trigger instances for the flogo runtime.
type Factory struct {
}
// Metadata implements trigger.Factory.Metadata.
func (*Factory) Metadata() *trigger.Metadata {
	return triggerMd
}
// New implements trigger.Factory.New. It maps the raw config settings over a
// set of defaults (30s start delay, 5 concurrent workers, 120s duration).
func (*Factory) New(config *trigger.Config) (trigger.Trigger, error) {
	settings := &Settings{StartDelay: 30, Concurrency: 5, Duration: 120}
	if err := metadata.MapToStruct(config.Settings, settings, true); err != nil {
		return nil, err
	}
	return &Trigger{id: config.Id, settings: settings}, nil
}
// Trigger runs a load test against one of its configured handlers.
type Trigger struct {
	id              string
	settings        *Settings
	handler         trigger.Handler // handler targeted by the load test
	logger          log.Logger
	statsAggregator chan *RequesterStats
}
// Initialize selects the handler to drive: the one named in settings if
// present, otherwise the first configured handler. It returns an error when
// no handlers are configured (the original warned and then panicked indexing
// an empty slice).
func (t *Trigger) Initialize(ctx trigger.InitContext) error {
	t.logger = ctx.Logger()
	handlers := ctx.GetHandlers()
	if len(handlers) == 0 {
		// Fail fast instead of panicking on handlers[0] below.
		return fmt.Errorf("no handlers specified for load trigger: %s", t.id)
	}
	t.handler = handlers[0]
	if t.settings.Handler == "" {
		return nil
	}
	for _, handler := range handlers {
		if handler.Name() == t.settings.Handler {
			t.handler = handler
			return nil
		}
	}
	ctx.Logger().Warnf("Handler '%s' not found, using first handler", t.settings.Handler)
	return nil
}
// Start implements util.Managed.Start. The load test runs on its own
// goroutine so Start returns immediately.
func (t *Trigger) Start() error {
	go t.runLoadTest()
	return nil
}
// Stop implements util.Managed.Stop. It is a no-op: the load test goroutine
// is not cancelled here.
func (t *Trigger) Stop() error {
	return nil
}
// runLoadTest waits for the configured start delay, then drives the selected
// handler with the configured payload for the configured duration/concurrency.
func (t *Trigger) runLoadTest() {
	fmt.Printf("Starting load test in %d seconds\n", t.settings.StartDelay)
	delay := time.Duration(t.settings.StartDelay) * time.Second
	time.Sleep(delay)
	payload := &Output{Data: t.settings.Data}
	loadTest := NewLoadTest(t.settings.Duration, t.settings.Concurrency)
	loadTest.Run(t.handler, payload)
}
|
package datasourcetest
import (
"github.com/kdimak/go_datasource/datasource"
"testing"
)
// TestConfig carries the data source implementation under test.
type TestConfig struct {
	DS datasource.WritableDataSource
}
// TestWritableDataSource runs the shared conformance suite against the data
// source supplied in tc. Each case runs as its own subtest.
func TestWritableDataSource(t *testing.T, tc TestConfig) {
	cases := []struct {
		title string
		run   func(t *testing.T, c TestConfig)
	}{
		{"should store and read the key", testStoreAndGet},
		{"should return nil on not existent key", testGetNotExistent},
	}
	for _, c := range cases {
		c := c
		t.Run(c.title, func(t *testing.T) {
			c.run(t, tc)
		})
	}
}
// testStoreAndGet stores a value and verifies it reads back unchanged.
func testStoreAndGet(t *testing.T, tc TestConfig) {
	if err := tc.DS.Store("foo", "bar"); err != nil {
		t.Errorf("unexpected error: %v", err)
		return
	}
	got, err := tc.DS.Value("foo")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
		return
	}
	if got != "bar" {
		t.Errorf("reading existent key from cache. expected: %v, actual: %v", "bar", got)
	}
}
// testGetNotExistent verifies that an absent key reads back as nil.
func testGetNotExistent(t *testing.T, tc TestConfig) {
	got, err := tc.DS.Value("unknown")
	if err != nil {
		t.Errorf("unexpected error: %v", err)
		return
	}
	if got != nil {
		t.Errorf("reading not existent key from cache. expected: %v, actual: %v", nil, got)
	}
}
|
package docs
import _ "embed"
// SwaggerSpecs holds the raw contents of swagger.yml, embedded at build time.
//
//go:embed swagger.yml
var SwaggerSpecs []byte
|
package worker
import (
"github.com/orbs-network/orbs-contract-sdk/go/context"
"github.com/orbs-network/orbs-network-javascript-plugin/test"
"github.com/stretchr/testify/require"
"testing"
)
// TestMethodDispatcher checks that the dispatcher routes address and env SDK
// calls to the fake handler and unpacks their results correctly.
func TestMethodDispatcher(t *testing.T) {
handler := test.AFakeSdkFor([]byte("signer"), []byte("caller"))
dispatcher := NewMethodDispatcher(handler)
// Address object: the signer address set on the fake must round-trip.
packedSignerAddress, err := dispatcher.Dispatch(context.ContextId("test"), context.PERMISSION_SCOPE_SERVICE,
ArgsToArgumentArray(SDK_OBJECT_ADDRESS, SDK_METHOD_GET_SIGNER_ADDRESS))
require.NoError(t, err)
signerAddress := packedSignerAddress.ArgumentsIterator().NextArguments().BytesValue()
require.EqualValues(t, []byte("signer"), signerAddress)
// Env object: the mocked block height must round-trip.
handler.MockEnvBlockHeight(1221)
packedBlockHeight, err := dispatcher.Dispatch(context.ContextId("test"), context.PERMISSION_SCOPE_SERVICE,
ArgsToArgumentArray(SDK_OBJECT_ENV, SDK_METHOD_GET_BLOCK_HEIGHT))
require.NoError(t, err)
blockHeight := packedBlockHeight.ArgumentsIterator().NextArguments().Uint64Value()
require.EqualValues(t, 1221, blockHeight)
}
// TestMethodDispatcherWithState checks that bytes written via the state SDK
// object can be read back through the dispatcher.
func TestMethodDispatcherWithState(t *testing.T) {
	handler := test.AFakeSdkFor([]byte("signer"), []byte("caller"))
	dispatcher := NewMethodDispatcher(handler)
	// Fix: the error of the write Dispatch was silently discarded, so a
	// failing write would surface only as a confusing read mismatch.
	_, err := dispatcher.Dispatch(context.ContextId("test"), context.PERMISSION_SCOPE_SERVICE,
		ArgsToArgumentArray(SDK_OBJECT_STATE, SDK_METHOD_WRITE_BYTES, []byte("album"), []byte("Diamond Dogs")))
	require.NoError(t, err)
	packedStateValue, err := dispatcher.Dispatch(context.ContextId("test"), context.PERMISSION_SCOPE_SERVICE,
		ArgsToArgumentArray(SDK_OBJECT_STATE, SDK_METHOD_READ_BYTES, []byte("album")))
	require.NoError(t, err)
	album := packedStateValue.ArgumentsIterator().NextArguments().BytesValue()
	require.EqualValues(t, []byte("Diamond Dogs"), album)
}
|
package main
import (
"fmt"
"math/rand"
)
// matrix builds a 10x10 matrix filled with pseudo-random ints in [0, 100).
func matrix() [][]int {
	const size = 10
	m := make([][]int, size)
	for row := range m {
		cells := make([]int, size)
		for col := range cells {
			cells[col] = rand.Intn(100)
		}
		m[row] = cells
	}
	return m
}
// main generates a random 10x10 matrix and prints it to stdout.
func main(){
m := matrix()
fmt.Println(m)
}
|
package routers
import (
entities "github.com/JIeeiroSst/togo/internal/storages"
"github.com/JIeeiroSst/togo/models"
"github.com/gin-gonic/gin"
"time"
)
// TaskList responds with all tags/tasks. On a lookup error it answers with
// an empty "no data found" payload (status 200 in both cases).
func TaskList(c *gin.Context) {
	tags, err := models.TagsList()
	if err != nil {
		c.JSON(200, entities.RestResponse{
			Code:    200,
			Message: "no data found",
			Data:    nil,
		})
		return
	}
	c.JSON(200, entities.RestResponse{
		Code:    200,
		Message: "found Data",
		Data:    tags,
	})
}
// TagById looks up a single tag by the :id path parameter. On a lookup error
// it answers with an empty "no data found" payload (status 200 in both cases).
func TagById(c *gin.Context) {
	tagID := c.Param("id")
	tag, err := models.TagById(tagID)
	if err != nil {
		c.JSON(200, entities.RestResponse{
			Code:    200,
			Message: "no data found",
			Data:    nil,
		})
		return
	}
	c.JSON(200, entities.RestResponse{
		Code:    200,
		Message: "found Data",
		Data:    tag,
	})
}
// CreateTag builds a task from the request's query parameters and persists
// it, answering 201 with the created task on success.
//
// Fix: the failure path previously answered 204 (No Content) WITH a JSON
// body, which is invalid HTTP; persistence failures now answer 500.
func CreateTag(c *gin.Context) {
	tag := entities.Tasks{
		Id:          c.Query("id"),
		Content:     c.Query("content"),
		UserId:      c.Query("user_id"),
		CreatedDate: time.Now().String(),
	}
	if err := models.CreateTask(tag); err != nil {
		c.JSON(500, entities.RestResponse{
			Code:    500,
			Message: "create failure",
			Data:    nil,
		})
		return
	}
	c.JSON(201, entities.RestResponse{
		Code:    201,
		Message: "create successfully",
		Data:    tag,
	})
}
// DeleteTag removes the tag identified by the :id path parameter and echoes
// the id back on success.
//
// Fix: the failure path previously answered 204 (No Content) WITH a JSON
// body, which is invalid HTTP; deletion failures now answer 500.
func DeleteTag(c *gin.Context) {
	id := c.Param("id")
	if err := models.DeleteTags(id); err != nil {
		c.JSON(500, entities.RestResponse{
			Code:    500,
			Message: "delete failure",
			Data:    nil,
		})
		return
	}
	c.JSON(200, entities.RestResponse{
		Code:    200,
		Message: "Delete successfully",
		Data:    id,
	})
}
func UpdateTags(c *gin.Context){
id:=c.Param("id")
tag:=entities.Tasks{
Id: id,
Content: c.Query("content"),
}
err:=models.UpdateTask(id,tag)
if err!=nil{
c.JSON(204,entities.RestResponse{
Code: 204,
Message: "update failure",
Data: nil,
})
return
}
c.JSON(200,entities.RestResponse{
Code: 200,
Message: "Update successfully",
Data: tag,
})
} |
package cmd
import (
"ktmall/bootstrap"
"github.com/spf13/cobra"
)
// serverCmd is the `server` subcommand: it initializes the database,
// guarantees it is closed on shutdown, and runs the HTTP server.
var serverCmd = &cobra.Command{
Use: "server",
Short: "run app server",
Run: func(cmd *cobra.Command, args []string) {
// init db
bootstrap.SetupDB()
defer bootstrap.CloseDB()
// run echo server (blocks until the server stops)
bootstrap.RunServer()
},
}
// init registers the server subcommand on the root command.
func init() {
rootCmd.AddCommand(serverCmd)
}
|
/*****************************************************************************
* file name : STable.go
* author : Wu Yinghao
* email : wyh817@gmail.com
*
* file description : 数据层
*
******************************************************************************/
package DataLayer
import (
"encoding/binary"
"fmt"
"utils"
)
const BT_TAIL string = ".bt"
const TB_DTL_TAIL string = ".tb.detail"
// FieldMeta describes one table column: its name, fixed byte length, type
// code, default value, and whether a B+ tree index is built for it.
type FieldMeta struct {
Fieldname string `json:"fieldname"`
FieldLen int `json:"fieldlen"`
FieldType int `json:"fieldtype"`
Default string `json:"default"`
MkIdx bool `json:"makeindex"`
}
// STable is a fixed-record-length table stored in an mmap'ed detail file,
// with an optional B+ tree index file for fields flagged MkIdx.
type STable struct {
Tablename string `json:"tablename"`
Fields map[string]FieldMeta `json:"fields"` // field metadata keyed by name
FieldInfos []FieldMeta `json:"fieldinfos"` // fields in declaration (record layout) order
RecordLen int `json:"RecordLen"` // bytes per record: sum of FieldLen+4 over all fields
MaxCount int64 `json:"maxcount"` // number of records stored so far
Pathname string `json:"pathname"`
btreeName string // path of the B+ tree index file
bt *utils.BTreedb // B+ tree index handle
detail *utils.Mmap // mmap'ed record data file
Logger *utils.Log4FE
}
// NewSTable creates a new table under pathname. It returns nil (after
// logging) when the table already exists, a field name is duplicated, or the
// index/detail files cannot be created.
func NewSTable(tablename, pathname string, fieldsinfos []FieldMeta, logger *utils.Log4FE) *STable {
	st := &STable{
		MaxCount:   0,
		Logger:     logger,
		Pathname:   pathname,
		Tablename:  tablename,
		FieldInfos: fieldsinfos,
		Fields:     make(map[string]FieldMeta),
	}
	if utils.IsExist(pathname + "/" + tablename + TB_DTL_TAIL) {
		st.Logger.Error("[ERROR] STable[%v] is exist", tablename)
		return nil
	}
	for _, field := range fieldsinfos {
		if _, dup := st.Fields[field.Fieldname]; dup {
			st.Logger.Error("[ERROR] Field[%v] exist", field.Fieldname)
			return nil
		}
		st.Fields[field.Fieldname] = field
	}
	// Build the table index (B+ tree).
	if err := st.createIndex(); err != nil {
		st.Logger.Error("[ERROR] createIndex %v", err)
		return nil
	}
	// Create the detail (record data) file.
	if err := st.createDetail(); err != nil {
		st.Logger.Error("[ERROR] createDetail %v", err)
		return nil
	}
	st.Logger.Info("[INFO] STable[%v] Create ok", tablename)
	return st
}
// createIndex initializes the table's B+ tree index file and registers one
// tree per field flagged MkIdx. As a side effect it accumulates RecordLen
// (each field occupies FieldLen bytes plus a 4-byte length prefix).
func (this *STable) createIndex() error {
	this.btreeName = this.Pathname + "/" + this.Tablename + BT_TAIL
	this.bt = utils.NewBTDB(this.btreeName)
	if this.bt == nil {
		this.Logger.Error("[ERROR] make b+tree error %v", this.btreeName)
		return fmt.Errorf("[ERROR] make b+tree error %v", this.btreeName)
	}
	for name, meta := range this.Fields {
		this.RecordLen += meta.FieldLen + 4
		if meta.MkIdx {
			this.bt.AddBTree(name)
		}
	}
	return nil
}
// createDetail creates the mmap-backed detail file that holds the raw
// record bytes.
func (this *STable) createDetail() error {
	detail, err := utils.NewMmap(this.Pathname+"/"+this.Tablename+TB_DTL_TAIL, utils.MODE_CREATE)
	if err != nil {
		this.Logger.Error("[ERROR] make detail error %v", err)
		return err
	}
	this.detail = detail
	return nil
}
// AddData appends one record assembled from content to the detail file.
// Each field is encoded as a 4-byte little-endian length prefix followed by
// the value bytes, padded out to the field's fixed FieldLen slot. Fields
// missing from content fall back to their declared default.
//
// Fix: a value longer than FieldLen previously overran its slot, corrupting
// the following fields (or panicking on the last one); oversized values are
// now truncated to FieldLen bytes.
func (this *STable) AddData(content map[string]string) error {
	inbytes := make([]byte, this.RecordLen)
	point := uint32(0)
	for _, fvalue := range this.FieldInfos {
		value, ok := content[fvalue.Fieldname]
		if !ok {
			value = fvalue.Default
		}
		// Keep the record layout intact for oversized values.
		if len(value) > fvalue.FieldLen {
			value = value[:fvalue.FieldLen]
		}
		lens := uint32(len(value))
		binary.LittleEndian.PutUint32(inbytes[point:point+4], lens)
		point += 4
		copy(inbytes[point:point+lens], value)
		point += uint32(fvalue.FieldLen)
		// TODO: index maintenance for fvalue.MkIdx fields is not implemented yet.
	}
	this.detail.AppendRecord(inbytes)
	this.MaxCount++
	return nil
}
// DeleteData removes the record identified by docid.
// TODO: not implemented yet; currently a no-op that always returns nil.
func (this *STable) DeleteData(docid utils.DocID) error {
return nil
}
// UpdateData replaces the record identified by docid with content.
// TODO: not implemented yet; currently a no-op that always returns nil.
func (this *STable) UpdateData(docid utils.DocID, content map[string]string) error {
return nil
}
// FindData looks up document ids whose field equals value.
// TODO: not implemented yet; currently always returns nil.
func (this *STable) FindData(field, value string) []utils.DocID {
return nil
}
// FindDocId reads the record stored at docid and decodes it into a
// field-name -> value map. It returns nil when docid is out of range.
//
// Fix: a negative docid previously produced a negative file offset and a
// panic/garbage read; it is now rejected together with docid >= MaxCount.
func (this *STable) FindDocId(docid int64) map[string]string {
	if docid < 0 || docid >= this.MaxCount {
		return nil
	}
	res := make(map[string]string)
	offset := docid * int64(this.RecordLen)
	outbytes := this.detail.ReadRecord(offset, uint32(this.RecordLen))
	point := uint32(0)
	for _, fvalue := range this.FieldInfos {
		// Each slot: 4-byte little-endian length, then the value bytes,
		// padded up to FieldLen (mirrors the encoding in AddData).
		reallen := binary.LittleEndian.Uint32(outbytes[point : point+4])
		point += 4
		value := string(outbytes[point : point+reallen])
		point += uint32(fvalue.FieldLen)
		res[fvalue.Fieldname] = value
	}
	return res
}
|
package catalog
import (
"os"
"path/filepath"
"strings"
"gopkg.in/yaml.v3"
)
// LoadFromDirectory walks dir recursively and merges every *.yaml file
// (excluding cid-index.yaml files) into a single aggregated Config.
func LoadFromDirectory(dir string) (*Config, error) {
	var data Config
	loadErr := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// Only plain .yaml files are merged; the cid index is skipped.
		if info.IsDir() || !strings.HasSuffix(info.Name(), ".yaml") || strings.HasSuffix(info.Name(), "cid-index.yaml") {
			return nil
		}
		content, err := os.ReadFile(path)
		if err != nil {
			return err
		}
		var fileData Config
		if err := yaml.Unmarshal(content, &fileData); err != nil {
			return err
		}
		// Merge each section with append-spread instead of the original
		// element-by-element loops (same result, idiomatic Go).
		data.Actions = append(data.Actions, fileData.Actions...)
		data.ContainerImages = append(data.ContainerImages, fileData.ContainerImages...)
		data.Workflows = append(data.Workflows, fileData.Workflows...)
		return nil
	})
	if loadErr != nil {
		return nil, loadErr
	}
	return &data, nil
}
// LoadFromFile reads a single yaml file and unmarshals it into a Config.
func LoadFromFile(file string) (*Config, error) {
	content, err := os.ReadFile(file)
	if err != nil {
		return nil, err
	}
	var cfg Config
	if err := yaml.Unmarshal(content, &cfg); err != nil {
		return nil, err
	}
	return &cfg, nil
}
func SaveToFile(registry *Config, file string) error {
// marshal
data, err := yaml.Marshal(®istry)
if err != nil {
return err
}
// write to filesystem
err = os.WriteFile(file, data, os.ModePerm)
if err != nil {
return err
}
return nil
}
|
// 给定一个只包括 '(',')','{','}','[',']' 的字符串,判断字符串是否有效。
// 有效字符串需满足:
// 左括号必须用相同类型的右括号闭合。
// 左括号必须以正确的顺序闭合。
// 注意空字符串可被认为是有效字符串。
// 示例 1:
// 输入: "()"
// 输出: true
// 示例 2:
// 输入: "()[]{}"
// 输出: true
// 示例 3:
// 输入: "(]"
// 输出: false
// 示例 4:
// 输入: "([)]"
// 输出: false
// 示例 5:
// 输入: "{[]}"
// 输出: true
package main
// isValid reports whether s consists of correctly matched and nested
// brackets ('(', ')', '[', ']', '{', '}'). The empty string is valid.
func isValid(s string) bool {
	pending := new(stack)
	for _, ch := range s {
		if ch == '(' || ch == '[' || ch == '{' {
			pending.push(ch)
			continue
		}
		if ch == ')' || ch == ']' || ch == '}' {
			// A closer must match the most recent unclosed opener.
			top, ok := pending.pop()
			if !ok || top != mapping[ch] {
				return false
			}
		}
	}
	// Leftover openers mean the string is unbalanced.
	return len(*pending) == 0
}

// mapping pairs each closing bracket with its opening counterpart.
var mapping map[rune]rune = map[rune]rune{
	')': '(',
	']': '[',
	'}': '{',
}

// stack is a simple LIFO of runes.
type stack []rune

// push places a on top of the stack.
func (r *stack) push(a rune) {
	*r = append(*r, a)
}

// pop removes and returns the top element; ok is false when the stack is
// empty.
func (r *stack) pop() (rune, bool) {
	n := len(*r)
	if n == 0 {
		return 0, false
	}
	top := (*r)[n-1]
	*r = (*r)[:n-1]
	return top, true
}
package tracer
import (
"errors"
"fmt"
"io"
"os"
"strings"
"github.com/nkbai/tgo/debugapi"
"github.com/nkbai/tgo/tracee"
"golang.org/x/arch/x86/x86asm"
)
// chanBufferSize is the capacity of the controller's request channels.
const chanBufferSize = 64
// ErrInterrupted indicates the tracer is interrupted due to the Interrupt() call.
var ErrInterrupted = errors.New("interrupted")
// breakpointType classifies why a breakpoint was set; a hit is handled
// differently depending on this classification.
type breakpointType int
const (
// These types determine the handling of hit-breakpoints.
breakpointTypeUnknown breakpointType = iota
breakpointTypeCall
breakpointTypeDeferredFunc
breakpointTypeReturn
breakpointTypeReturnAndCall
)
// Controller controls the associated tracee process.
type Controller struct {
process *tracee.Process
firstModuleDataAddr uint64
statusStore map[int64]goRoutineStatus // per-go-routine call-stack tracking, keyed by go routine ID
callInstAddrCache map[uint64][]uint64 // function start addr -> CALL instruction addrs
breakpointTypes map[uint64]breakpointType
breakpoints Breakpoints
tracingPoints tracingPoints
traceLevel int
parseLevel int
// Use the buffered channels to handle the requests to the controller asynchronously.
// It's because the tracee process must be trapped to handle these requests, but the process may not
// be trapped when the requests are sent.
interruptCh chan bool
pendingStartTracePoint chan uint64
pendingEndTracePoint chan uint64
// The traced data is written to this writer.
outputWriter io.Writer
}
// goRoutineStatus tracks, per go routine, the functions currently on the
// call stack as observed via breakpoints.
type goRoutineStatus struct {
// This list include only the functions which hit the breakpoint before and so is not complete.
callingFunctions []callingFunction
}
// usedStackSize returns the stack usage recorded for the innermost tracked
// function, or 0 when no function is tracked.
func (status goRoutineStatus) usedStackSize() uint64 {
	n := len(status.callingFunctions)
	if n == 0 {
		return 0
	}
	return status.callingFunctions[n-1].usedStackSize
}
// lastFunctionAddr returns the start address of the innermost tracked
// function, or 0 when no function is tracked.
func (status goRoutineStatus) lastFunctionAddr() uint64 {
	n := len(status.callingFunctions)
	if n == 0 {
		return 0
	}
	return status.callingFunctions[n-1].StartAddr
}
// callingFunction is one entry of a go routine's observed call stack.
type callingFunction struct {
*tracee.Function
returnAddress uint64 // where execution resumes after this function returns
usedStackSize uint64 // stack usage when this function was entered; used for unwinding
setCallInstBreakpoints bool // whether CALL-instruction breakpoints were set inside this function
}
// NewController returns the new controller.
// Output goes to stdout by default; the request channels are buffered so
// requests can be queued while the tracee is running (see Controller).
func NewController() *Controller {
return &Controller{
outputWriter: os.Stdout,
statusStore: make(map[int64]goRoutineStatus),
breakpointTypes: make(map[uint64]breakpointType),
callInstAddrCache: make(map[uint64][]uint64),
interruptCh: make(chan bool, chanBufferSize),
pendingStartTracePoint: make(chan uint64, chanBufferSize),
pendingEndTracePoint: make(chan uint64, chanBufferSize),
}
}
// Attributes represents the tracee's attributes. It is an alias-style
// wrapper over tracee.Attributes so callers need not import that package.
type Attributes tracee.Attributes
// LaunchTracee launches the new tracee process to be controlled.
//
// Fix: on launch failure the original still built the Breakpoints around the
// nil process; now the error is returned before touching c.process.
func (c *Controller) LaunchTracee(name string, arg []string, attrs Attributes) error {
	var err error
	c.process, err = tracee.LaunchProcess(name, arg, tracee.Attributes(attrs))
	if err != nil {
		return err
	}
	c.breakpoints = NewBreakpoints(c.process.SetBreakpoint, c.process.ClearBreakpoint)
	return nil
}
// AttachTracee attaches to the existing process.
//
// Fix: on attach failure the original still built the Breakpoints around the
// nil process; now the error is returned before touching c.process.
func (c *Controller) AttachTracee(pid int, attrs Attributes) error {
	var err error
	c.process, err = tracee.AttachProcess(pid, tracee.Attributes(attrs))
	if err != nil {
		return err
	}
	c.breakpoints = NewBreakpoints(c.process.SetBreakpoint, c.process.ClearBreakpoint)
	return nil
}
// AddStartTracePoint adds the starting point of the tracing. The go routines which executed one of these addresses start to be traced.
func (c *Controller) AddStartTracePoint(startAddr uint64) error {
	select {
	case c.pendingStartTracePoint <- startAddr:
		return nil
	default:
		// The buffered channel is full; the request cannot be queued now.
		return errors.New("failed to add start trace point")
	}
}
// AddEndTracePoint adds the ending point of the tracing. The tracing is disabled when any go routine executes any of these addresses.
func (c *Controller) AddEndTracePoint(endAddr uint64) error {
	select {
	case c.pendingEndTracePoint <- endAddr:
		return nil
	default:
		// The buffered channel is full; the request cannot be queued now.
		return errors.New("failed to add end trace point")
	}
}
// SetTraceLevel set the tracing level, which determines whether to print the traced info of the functions.
// The traced info is printed if the function is (directly or indirectly) called by the trace point function AND
// the stack depth is within the `level`.
// The depth here is the relative value from the point the tracing starts.
func (c *Controller) SetTraceLevel(level int) {
c.traceLevel = level
}
// SetParseLevel sets the parsing level, which determines how deeply the parser parses the value of args
// (e.g. how many pointer indirections / nested fields to follow when printing).
func (c *Controller) SetParseLevel(level int) {
c.parseLevel = level
}
// MainLoop repeatedly lets the tracee continue and then wait an event. It returns ErrInterrupted error if
// the trace ends due to the interrupt.
func (c *Controller) MainLoop() error {
defer c.process.Detach() // the connection status is unknown at this point
event, err := c.continueAndWait()
if err == ErrInterrupted {
return err
} else if err != nil {
return fmt.Errorf("failed to trace: %v", err)
}
for {
switch event.Type {
case debugapi.EventTypeExited:
// Normal termination of the tracee.
return nil
case debugapi.EventTypeCoreDump:
return errors.New("the process exited due to core dump")
case debugapi.EventTypeTerminated:
return fmt.Errorf("the process exited due to signal %d", event.Data.(int))
case debugapi.EventTypeTrapped:
// One or more threads hit a breakpoint; handle them and resume.
trappedThreadIDs := event.Data.([]int)
event, err = c.handleTrapEvent(trappedThreadIDs)
if err == ErrInterrupted {
return err
} else if err != nil {
return fmt.Errorf("failed to trace: %v", err)
}
default:
return fmt.Errorf("unknown event: %v", event.Type)
}
}
}
// continueAndWait resumes the traced process and waits the process trapped again.
// It handles requests via channels before resuming: an interrupt request wins
// over resuming, and pending trace points are installed first.
func (c *Controller) continueAndWait() (debugapi.Event, error) {
select {
case <-c.interruptCh:
return debugapi.Event{}, ErrInterrupted
default:
if err := c.setPendingTracePoints(); err != nil {
return debugapi.Event{}, err
}
return c.process.ContinueAndWait()
}
}
// setPendingTracePoints drains both pending-trace-point channels, setting a
// breakpoint for each new address and recording it in tracingPoints.
// Addresses already registered are skipped. Returns on the first
// breakpoint-set failure.
func (c *Controller) setPendingTracePoints() error {
for {
select {
case startAddr := <-c.pendingStartTracePoint:
if c.tracingPoints.IsStartAddress(startAddr) {
continue // set already
}
if err := c.breakpoints.Set(startAddr); err != nil {
return err
}
c.tracingPoints.startAddressList = append(c.tracingPoints.startAddressList, startAddr)
case endAddr := <-c.pendingEndTracePoint:
if c.tracingPoints.IsEndAddress(endAddr) {
continue // set already
}
if err := c.breakpoints.Set(endAddr); err != nil {
return err
}
c.tracingPoints.endAddressList = append(c.tracingPoints.endAddressList, endAddr)
default:
return nil // no data
}
}
}
// handleTrapEvent processes every trapped thread in order, then resumes the
// tracee and waits for the next event.
func (c *Controller) handleTrapEvent(trappedThreadIDs []int) (debugapi.Event, error) {
	for _, threadID := range trappedThreadIDs {
		if err := c.handleTrapEventOfThread(threadID); err != nil {
			return debugapi.Event{}, fmt.Errorf("failed to handle trap event (thread id: %d): %v", threadID, err)
		}
	}
	return c.continueAndWait()
}
// handleTrapEventOfThread classifies why the given thread trapped and
// dispatches to the matching handler: system routine, unrelated breakpoint,
// tracing start/end point, or one of the typed breakpoints (call, deferred
// func, return).
func (c *Controller) handleTrapEventOfThread(threadID int) error {
goRoutineInfo, err := c.process.CurrentGoRoutineInfo(threadID)
if err != nil || goRoutineInfo.ID == 0 {
// Not a user go routine (or info unavailable): treat as a system routine.
return c.handleTrappedSystemRoutine(threadID)
}
// The PC is one past the trap instruction, so the breakpoint is at PC-1.
breakpointAddr := goRoutineInfo.CurrentPC - 1
if !c.breakpoints.Hit(breakpointAddr, goRoutineInfo.ID) {
return c.handleTrapAtUnrelatedBreakpoint(threadID, breakpointAddr)
}
if !c.tracingPoints.Inside(goRoutineInfo.ID) {
if !c.tracingPoints.IsStartAddress(breakpointAddr) {
return c.handleTrapAtUnrelatedBreakpoint(threadID, breakpointAddr)
}
if err := c.enterTracepoint(threadID, goRoutineInfo); err != nil {
return err
}
}
if c.tracingPoints.IsEndAddress(breakpointAddr) {
return c.exitTracepoint(threadID, goRoutineInfo.ID, goRoutineInfo.CurrentPC-1)
} else if c.tracingPoints.IsStartAddress(breakpointAddr) {
// the tracing point may be used as the break point as well. If not, return here.
if _, ok := c.breakpointTypes[breakpointAddr]; !ok {
return c.handleTrapAtUnrelatedBreakpoint(threadID, breakpointAddr)
}
}
switch c.breakpointTypes[breakpointAddr] {
case breakpointTypeCall, breakpointTypeReturnAndCall:
return c.handleTrapBeforeFunctionCall(threadID, goRoutineInfo)
case breakpointTypeDeferredFunc:
return c.handleTrapAtDeferredFuncCall(threadID, goRoutineInfo)
case breakpointTypeReturn:
return c.handleTrapAfterFunctionReturn(threadID, goRoutineInfo)
default:
return fmt.Errorf("unknown breakpoint: %#x", breakpointAddr)
}
}
// enterTracepoint marks the go routine as being traced: it sets breakpoints
// on the CALL instructions of the current function and on the next deferred
// function, then records the go routine in tracingPoints.
func (c *Controller) enterTracepoint(threadID int, goRoutineInfo tracee.GoRoutineInfo) error {
goRoutineID := goRoutineInfo.ID
if !c.tracingPoints.Inside(goRoutineID) {
if err := c.setCallInstBreakpoints(goRoutineID, goRoutineInfo.CurrentPC); err != nil {
return err
}
if err := c.setDeferredFuncBreakpoints(goRoutineInfo); err != nil {
return err
}
c.tracingPoints.Enter(goRoutineID)
}
// not single step here, because tracing point may be used as breakpoint as well.
return nil
}
// exitTracepoint stops tracing the go routine: it clears all breakpoints
// scoped to it and removes it from tracingPoints, then steps past the
// trapped instruction.
func (c *Controller) exitTracepoint(threadID int, goRoutineID int64, breakpointAddr uint64) error {
if c.tracingPoints.Inside(goRoutineID) {
if err := c.breakpoints.ClearAllByGoRoutineID(goRoutineID); err != nil {
return err
}
c.tracingPoints.Exit(goRoutineID)
}
return c.handleTrapAtUnrelatedBreakpoint(threadID, breakpointAddr)
}
// setCallInstBreakpoints sets go-routine-scoped breakpoints on every CALL
// instruction of the function containing pc.
func (c *Controller) setCallInstBreakpoints(goRoutineID int64, pc uint64) error {
return c.alterCallInstBreakpoints(true, goRoutineID, pc)
}
// clearCallInstBreakpoints clears the breakpoints set by setCallInstBreakpoints.
func (c *Controller) clearCallInstBreakpoints(goRoutineID int64, pc uint64) error {
return c.alterCallInstBreakpoints(false, goRoutineID, pc)
}
// alterCallInstBreakpoints sets (enable=true) or clears (enable=false) the
// conditional breakpoints on all CALL instructions of the function
// containing pc, for the given go routine.
func (c *Controller) alterCallInstBreakpoints(enable bool, goRoutineID int64, pc uint64) error {
f, err := c.process.FindFunction(pc)
if err != nil {
return err
}
callInstAddresses, err := c.findCallInstAddresses(f)
if err != nil {
return err
}
for _, callInstAddr := range callInstAddresses {
if enable {
err = c.breakpoints.SetConditional(callInstAddr, goRoutineID)
// Record the type so a later hit is handled as a call breakpoint.
c.breakpointTypes[callInstAddr] = breakpointTypeCall
} else {
err = c.breakpoints.ClearConditional(callInstAddr, goRoutineID)
}
if err != nil {
return err
}
}
return nil
}
// setDeferredFuncBreakpoints sets a go-routine-scoped breakpoint at the next
// deferred function (if any) so its call is traced as well.
func (c *Controller) setDeferredFuncBreakpoints(goRoutineInfo tracee.GoRoutineInfo) error {
nextAddr := goRoutineInfo.NextDeferFuncAddr
if nextAddr == 0x0 /* no deferred func */ || c.breakpoints.Hit(nextAddr, goRoutineInfo.ID) /* exist already */ {
return nil
}
if err := c.breakpoints.SetConditional(nextAddr, goRoutineInfo.ID); err != nil {
return err
}
c.breakpointTypes[nextAddr] = breakpointTypeDeferredFunc
return nil
}
// handleTrappedSystemRoutine steps a non-user (system) go routine past the
// breakpoint it trapped on, without tracing it.
func (c *Controller) handleTrappedSystemRoutine(threadID int) error {
threadInfo, err := c.process.CurrentThreadInfo(threadID)
if err != nil {
return err
}
// The PC is one past the trap instruction.
breakpointAddr := threadInfo.CurrentPC - 1
return c.process.SingleStep(threadID, breakpointAddr)
}
// handleTrapAtUnrelatedBreakpoint steps past a breakpoint that is not
// relevant to the current go routine's tracing.
func (c *Controller) handleTrapAtUnrelatedBreakpoint(threadID int, breakpointAddr uint64) error {
return c.process.SingleStep(threadID, breakpointAddr)
}
// handleTrapBeforeFunctionCall handles a trap at a CALL instruction: it
// first completes any pending return handling (for return-and-call
// breakpoints), steps into the callee, and then records/prints the call.
func (c *Controller) handleTrapBeforeFunctionCall(threadID int, goRoutineInfo tracee.GoRoutineInfo) error {
breakpointAddr := goRoutineInfo.CurrentPC - 1
var err error
if c.breakpointTypes[breakpointAddr] == breakpointTypeReturnAndCall {
// The address doubles as a return address; process the return first
// (it also single-steps past the breakpoint).
err = c.handleTrapAfterFunctionReturn(threadID, goRoutineInfo)
} else {
err = c.process.SingleStep(threadID, breakpointAddr)
}
if err != nil {
return err
}
// Now the go routine jumped to the beginning of the function.
goRoutineInfo, err = c.process.CurrentGoRoutineInfo(threadID)
if err != nil {
return err
}
if c.tracingPoints.IsEndAddress(goRoutineInfo.CurrentPC) {
return c.exitTracepoint(threadID, goRoutineInfo.ID, goRoutineInfo.CurrentPC)
}
return c.handleTrapAtFunctionCall(threadID, goRoutineInfo.CurrentPC, goRoutineInfo)
}
// handleTrapAtFunctionCall handles the trapped event at the function call.
// It needs `breakpointAddr` though it's usually same as the function's start address.
// It is because some function, such as runtime.duffzero, directly jumps to the middle of the function and
// the breakpoint address is not explicit in that case.
func (c *Controller) handleTrapAtFunctionCall(threadID int, breakpointAddr uint64, goRoutineInfo tracee.GoRoutineInfo) error {
// Missing key yields the zero-value status, which is fine for a new go routine.
status, _ := c.statusStore[goRoutineInfo.ID]
stackFrame, err := c.currentStackFrame(goRoutineInfo)
if err != nil {
return err
}
// unwinded here in some cases:
// * just recovered from panic.
// * the last function used 'JMP' to call the next function and didn't change the SP. e.g. runtime.deferreturn
remainingFuncs, _, err := c.unwindFunctions(status.callingFunctions, goRoutineInfo)
if err != nil {
return err
}
currStackDepth := len(remainingFuncs) + 1 // add the currently calling function
if goRoutineInfo.Panicking && goRoutineInfo.PanicHandler != nil {
// While panicking, functions between the panic site and the deferred
// handler are skipped; subtract them from the printed depth.
currStackDepth -= c.countSkippedFuncs(status.callingFunctions, goRoutineInfo.PanicHandler.UsedStackSizeAtDefer)
}
callingFunc := callingFunction{
Function: stackFrame.Function,
returnAddress: stackFrame.ReturnAddress,
usedStackSize: goRoutineInfo.UsedStackSize,
setCallInstBreakpoints: currStackDepth < c.traceLevel,
}
remainingFuncs, err = c.appendFunction(remainingFuncs, callingFunc, goRoutineInfo.ID)
if err != nil {
return err
}
if err := c.setDeferredFuncBreakpoints(goRoutineInfo); err != nil {
return err
}
if currStackDepth <= c.traceLevel && c.printableFunc(stackFrame.Function) {
if err := c.printFunctionInput(goRoutineInfo.ID, stackFrame, currStackDepth); err != nil {
return err
}
}
if err := c.process.SingleStep(threadID, breakpointAddr); err != nil {
return err
}
c.statusStore[goRoutineInfo.ID] = goRoutineStatus{callingFunctions: remainingFuncs}
return nil
}
// countSkippedFuncs returns how many of the innermost calling functions use
// at least usedStackSize of stack (i.e. were skipped, e.g. by a panic
// unwinding to a deferred handler).
func (c *Controller) countSkippedFuncs(callingFuncs []callingFunction, usedStackSize uint64) int {
	last := len(callingFuncs) - 1
	for i := last; i >= 0; i-- {
		if callingFuncs[i].usedStackSize < usedStackSize {
			return last - i
		}
	}
	return last
}
// unwindFunctions splits callingFuncs into the functions still on the stack
// and the functions already returned (judged by stack usage), clearing the
// breakpoints of the unwound ones. It returns (remaining, unwound, error).
func (c *Controller) unwindFunctions(callingFuncs []callingFunction, goRoutineInfo tracee.GoRoutineInfo) ([]callingFunction, []callingFunction, error) {
for i := len(callingFuncs) - 1; i >= 0; i-- {
if callingFuncs[i].usedStackSize < goRoutineInfo.UsedStackSize {
// Strictly smaller stack usage: still on the stack.
return callingFuncs[0 : i+1], callingFuncs[i+1:], nil
} else if callingFuncs[i].usedStackSize == goRoutineInfo.UsedStackSize {
// Equal stack usage: still on the stack only if it is the same function
// (e.g. a JMP-style tail call reuses the frame).
currFunction, err := c.process.FindFunction(goRoutineInfo.CurrentPC)
if err != nil {
return nil, nil, err
}
if callingFuncs[i].Name == currFunction.Name {
return callingFuncs[0 : i+1], callingFuncs[i+1:], nil
}
}
// This function has returned: drop its breakpoints.
unwindFunc := callingFuncs[i]
if err := c.breakpoints.ClearConditional(unwindFunc.returnAddress, goRoutineInfo.ID); err != nil {
return nil, nil, err
}
if unwindFunc.setCallInstBreakpoints {
if err := c.clearCallInstBreakpoints(goRoutineInfo.ID, unwindFunc.StartAddr); err != nil {
return nil, nil, err
}
}
}
return nil, callingFuncs, nil
}
// appendFunction pushes newFunc onto the tracked call stack: it sets a
// breakpoint at the return address (upgrading an existing call breakpoint to
// return-and-call) and, when requested, breakpoints on the callee's CALL
// instructions.
func (c *Controller) appendFunction(callingFuncs []callingFunction, newFunc callingFunction, goRoutineID int64) ([]callingFunction, error) {
if err := c.breakpoints.SetConditional(newFunc.returnAddress, goRoutineID); err != nil {
return nil, err
}
if typ, ok := c.breakpointTypes[newFunc.returnAddress]; ok && typ == breakpointTypeCall {
c.breakpointTypes[newFunc.returnAddress] = breakpointTypeReturnAndCall
} else {
c.breakpointTypes[newFunc.returnAddress] = breakpointTypeReturn
}
if newFunc.setCallInstBreakpoints {
if err := c.setCallInstBreakpoints(goRoutineID, newFunc.StartAddr); err != nil {
return nil, err
}
}
return append(callingFuncs, newFunc), nil
}
// handleTrapAtDeferredFuncCall records the call of a deferred function and
// then clears its one-shot breakpoint.
func (c *Controller) handleTrapAtDeferredFuncCall(threadID int, goRoutineInfo tracee.GoRoutineInfo) error {
if err := c.handleTrapAtFunctionCall(threadID, goRoutineInfo.CurrentPC-1, goRoutineInfo); err != nil {
return err
}
return c.breakpoints.ClearConditional(goRoutineInfo.CurrentPC-1, goRoutineInfo.ID)
}
// handleTrapAfterFunctionReturn handles a trap at a return address: it
// unwinds the tracked call stack, prints the returned function's output (when
// within the trace level), and steps past the breakpoint.
func (c *Controller) handleTrapAfterFunctionReturn(threadID int, goRoutineInfo tracee.GoRoutineInfo) error {
// Missing key yields the zero-value status, which is fine for a new go routine.
status, _ := c.statusStore[goRoutineInfo.ID]
remainingFuncs, unwindedFuncs, err := c.unwindFunctions(status.callingFunctions, goRoutineInfo)
if err != nil {
return err
}
// NOTE(review): this indexes unwindedFuncs[0] without a length check;
// presumably a return-type breakpoint implies at least one unwound
// function — confirm before relying on it.
returnedFunc := unwindedFuncs[0].Function
currStackDepth := len(remainingFuncs) + 1 // include returnedFunc for now
if goRoutineInfo.Panicking && goRoutineInfo.PanicHandler != nil {
currStackDepth -= c.countSkippedFuncs(remainingFuncs, goRoutineInfo.PanicHandler.UsedStackSizeAtDefer)
}
if currStackDepth <= c.traceLevel && c.printableFunc(returnedFunc) {
prevStackFrame, err := c.prevStackFrame(goRoutineInfo, returnedFunc.StartAddr)
if err != nil {
return err
}
if err := c.printFunctionOutput(goRoutineInfo.ID, prevStackFrame, currStackDepth); err != nil {
return err
}
}
if err := c.process.SingleStep(threadID, goRoutineInfo.CurrentPC-1); err != nil {
return err
}
c.statusStore[goRoutineInfo.ID] = goRoutineStatus{callingFunctions: remainingFuncs}
return nil
}
// currentStackFrame returns the stack frame at the current PC.
// It must be called at the beginning of the function due to the StackFrameAt's constraint.
func (c *Controller) currentStackFrame(goRoutineInfo tracee.GoRoutineInfo) (*tracee.StackFrame, error) {
return c.process.StackFrameAt(goRoutineInfo.CurrentStackAddr, goRoutineInfo.CurrentPC)
}
// prevStackFrame returns the frame of the function that just returned.
// It must be called at return address due to the StackFrameAt's constraint.
// The -8 presumably skips the pushed return address word (amd64) — confirm.
func (c *Controller) prevStackFrame(goRoutineInfo tracee.GoRoutineInfo, rip uint64) (*tracee.StackFrame, error) {
return c.process.StackFrameAt(goRoutineInfo.CurrentStackAddr-8, rip)
}
// printableFunc reports whether traced info for f should be printed.
// Unexported runtime-package functions are suppressed.
func (c *Controller) printableFunc(f *tracee.Function) bool {
	if !strings.HasPrefix(f.Name, "runtime.") {
		return true
	}
	// it may be ok to print runtime unexported functions, but
	// these functions tend to be verbose and confusing.
	return f.IsExported()
}
// printFunctionInput prints one "function entered" trace line, indented by
// the stack depth. Argument parsing is currently disabled (see the
// commented-out loop), so the argument list is always empty.
func (c *Controller) printFunctionInput(goRoutineID int64, stackFrame *tracee.StackFrame, depth int) error {
var args []string
//for _, arg := range stackFrame.InputArguments {
// args = append(args, arg.ParseValue(c.parseLevel))
//}
fmt.Fprintf(c.outputWriter, "%s\\ (#%02d) %s(%s)\n", strings.Repeat("|", depth-1), goRoutineID, stackFrame.Function.Name, strings.Join(args, ", "))
return nil
}
// printFunctionOutput prints one "function returned" trace line with the
// parsed output arguments, indented by the stack depth.
func (c *Controller) printFunctionOutput(goRoutineID int64, stackFrame *tracee.StackFrame, depth int) error {
var args []string
for _, arg := range stackFrame.OutputArguments {
args = append(args, arg.ParseValue(c.parseLevel))
}
fmt.Fprintf(c.outputWriter, "%s/ (#%02d) %s() (%s)\n", strings.Repeat("|", depth-1), goRoutineID, stackFrame.Function.Name, strings.Join(args, ", "))
return nil
}
// findCallInstAddresses disassembles f and returns the addresses of its
// CALL/LCALL instructions, memoized per function start address.
func (c *Controller) findCallInstAddresses(f *tracee.Function) ([]uint64, error) {
// this cache is not only efficient, but required because there are no call insts if breakpoints are set.
if cache, ok := c.callInstAddrCache[f.StartAddr]; ok {
return cache, nil
}
insts, err := c.process.ReadInstructions(f)
if err != nil {
return nil, err
}
// pos is the byte offset of the current instruction within f.
var pos int
var addresses []uint64
for _, inst := range insts {
if inst.Op == x86asm.CALL || inst.Op == x86asm.LCALL {
addresses = append(addresses, f.StartAddr+uint64(pos))
}
pos += inst.Len
}
c.callInstAddrCache[f.StartAddr] = addresses
return addresses, nil
}
// Interrupt interrupts the main loop.
// NOTE(review): this send blocks once the buffered channel (capacity
// chanBufferSize) is full of unconsumed interrupts — confirm callers never
// interrupt more times than the loop consumes.
func (c *Controller) Interrupt() {
c.interruptCh <- true
}
|
package azure_test
import (
"errors"
"github.com/genevieve/leftovers/azure"
"github.com/genevieve/leftovers/azure/fakes"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Ginkgo spec for azure.Groups: verifies listing, filtering, prompting, and
// error propagation using fake client/logger doubles.
var _ = Describe("Groups", func() {
var (
client *fakes.GroupsClient
logger *fakes.Logger
filter string
groups azure.Groups
)
BeforeEach(func() {
client = &fakes.GroupsClient{}
logger = &fakes.Logger{}
filter = "banana"
groups = azure.NewGroups(client, logger)
})
Describe("List", func() {
BeforeEach(func() {
// The fake returns two groups; only "banana-group" matches the filter.
logger.PromptWithDetailsCall.Returns.Proceed = true
client.ListGroupsCall.Returns.Groups = []string{"banana-group", "kiwi-group"}
})
It("returns a list of resource groups to delete", func() {
items, err := groups.List(filter, false)
Expect(err).NotTo(HaveOccurred())
Expect(client.ListGroupsCall.CallCount).To(Equal(1))
Expect(logger.PromptWithDetailsCall.CallCount).To(Equal(1))
Expect(items).To(HaveLen(1))
})
Context("when client fails to list resource groups", func() {
BeforeEach(func() {
client.ListGroupsCall.Returns.Err = errors.New("some error")
})
It("returns the error", func() {
_, err := groups.List(filter, false)
Expect(err).To(MatchError("Listing Resource Groups: some error"))
})
})
Context("when the user responds no to the prompt", func() {
BeforeEach(func() {
logger.PromptWithDetailsCall.Returns.Proceed = false
})
It("does not return it in the list", func() {
items, err := groups.List(filter, false)
Expect(err).NotTo(HaveOccurred())
Expect(logger.PromptWithDetailsCall.Receives.ResourceType).To(Equal("Resource Group"))
Expect(logger.PromptWithDetailsCall.Receives.ResourceName).To(Equal("banana-group"))
Expect(items).To(HaveLen(0))
})
})
Context("when the resource group name does not contain the filter", func() {
It("does not return it in the list", func() {
items, err := groups.List("grape", false)
Expect(err).NotTo(HaveOccurred())
Expect(logger.PromptWithDetailsCall.CallCount).To(Equal(0))
Expect(items).To(HaveLen(0))
})
})
})
})
|
package httpx
import (
"context"
"fmt"
"io"
"net"
"net/http"
"strings"
"time"
)
// Decorator wraps/decorate a http.Handler with additional functionality.
// Decorators compose: apply them around a base handler outermost-first.
type Decorator func(http.Handler) http.Handler
// AddHeaderDecorator returns a decorator that adds the given header to the
// HTTP response (Add appends; existing values for key are kept).
func AddHeaderDecorator(key, value string) Decorator {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Add(key, value)
			next.ServeHTTP(w, r)
		})
	}
}
// SetHeaderDecorator returns a decorator that sets the given header on the
// HTTP response (Set replaces any existing values for key).
func SetHeaderDecorator(key, value string) Decorator {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.Header().Set(key, value)
			next.ServeHTTP(w, r)
		})
	}
}
// CheckHeaderDecorator returns a decorator that checks whether the given
// request header matches the given value; when the header is missing or
// different it responds with statusCode and the matching status text as body.
func CheckHeaderDecorator(headerName, headerValue string, statusCode int) Decorator {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if r.Header.Get(headerName) == headerValue {
				next.ServeHTTP(w, r)
				return
			}
			w.WriteHeader(statusCode)
			// we don't care about the error if we can't write
			_, _ = w.Write([]byte(http.StatusText(statusCode)))
		})
	}
}
// RootDecorator decorates a handler so that the bare "/" path is
// distinguished from unmatched URLs (ServeMux routes both to "/").
// Technique: https://golang.org/pkg/net/http/#example_ServeMux_Handle
func RootDecorator() Decorator {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			switch r.URL.Path {
			case "/":
				next.ServeHTTP(w, r)
			default:
				http.NotFound(w, r)
			}
		})
	}
}
// StripPrefixDecorator removes prefix from the request URL path before
// invoking the decorated handler. Requests whose path does not start with
// prefix receive a 404. An empty prefix leaves requests untouched (the
// original implementation 404'd every request in that case, unlike the
// stdlib http.StripPrefix).
func StripPrefixDecorator(prefix string) Decorator {
	return func(h http.Handler) http.Handler {
		if prefix == "" {
			// Nothing to strip; behave like http.StripPrefix("", h).
			return h
		}
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) {
				r.URL.Path = p
				h.ServeHTTP(w, r)
			} else {
				http.NotFound(w, r)
			}
		})
	}
}
// EnableCORSDecorator adds the response headers required to enable CORS and
// short-circuits preflight OPTIONS requests with 200 OK.
func EnableCORSDecorator() Decorator {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			headers := w.Header()
			headers.Set("Access-Control-Allow-Origin", "*")
			headers.Set("Access-Control-Allow-Methods", "GET,POST,PUT,PATCH,DELETE,HEAD,OPTIONS")
			headers.Set("Access-Control-Allow-Headers", "Origin,Accept,Content-Type,Authorization")
			// Stop here if it is a preflighted OPTIONS request.
			if r.Method == http.MethodOptions {
				w.WriteHeader(http.StatusOK)
				return
			}
			next.ServeHTTP(w, r)
		})
	}
}
// Condition represents a predicate over the http.Request and the current
// state of the http.ResponseWriter.
type Condition func(w http.ResponseWriter, r *http.Request) bool

// IfDecorator routes the request to 'then' when cond holds at runtime, and
// to the decorated handler otherwise.
func IfDecorator(cond Condition, then http.Handler) Decorator {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			target := next
			if cond(w, r) {
				target = then
			}
			target.ServeHTTP(w, r)
		})
	}
}
// TimeoutDecorator returns a decorator that attaches a timeout to the
// request context. Child handlers are responsible for honoring the deadline.
func TimeoutDecorator(timeout time.Duration) Decorator {
	return func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			ctx, cancel := context.WithTimeout(r.Context(), timeout)
			defer cancel()
			next.ServeHTTP(w, r.WithContext(ctx))
		})
	}
}
// LoggingDecorator returns an adapter that logs each request to logWriter in
// a common-log-like format (see formatLogLine).
func LoggingDecorator(logWriter io.Writer) Decorator {
	return func(h http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			// Capture the arrival time up front: the log timestamp should
			// reflect when the request was received, not when the handler
			// finished (the original passed time.Now() after ServeHTTP).
			start := time.Now()
			resLogger := &responseLogger{w, 0, 0}
			h.ServeHTTP(resLogger, req)
			// Logging failures are deliberately ignored.
			_, _ = fmt.Fprintln(logWriter, formatLogLine(req, start, resLogger.Status(), resLogger.Size()))
		})
	}
}
func formatLogLine(req *http.Request, ts time.Time, status int, size int) string {
host, _, err := net.SplitHostPort(req.RemoteAddr)
if err != nil {
host = req.RemoteAddr
}
uri := req.URL.RequestURI()
formattedTime := ts.Format("02/Jan/2006:15:04:05 -0700")
return fmt.Sprintf("%s [%s] %s %s %s %d %d", host, formattedTime, req.Method, uri, req.Proto, status, size)
}
// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP
// status code and body size
type responseLogger struct {
http.ResponseWriter
status int
size int
}
func (l *responseLogger) Write(b []byte) (int, error) {
if l.status == 0 {
// The status will be StatusOK if WriteHeader has not been called yet
l.status = http.StatusOK
}
size, err := l.ResponseWriter.Write(b)
l.size += size
return size, err
}
func (l *responseLogger) WriteHeader(s int) {
l.ResponseWriter.WriteHeader(s)
l.status = s
}
func (l responseLogger) Status() int {
return l.status
}
func (l responseLogger) Size() int {
return l.size
}
|
package main
import (
"encoding/json"
"io"
"net/http"
"go.uber.org/zap"
)
// Colors is the response payload of the hexbot API: a list of objects each
// carrying a single hex color string under "value".
type Colors struct {
	Colors []struct {
		Value string `json:"value"`
	} `json:"colors"`
}
func decodeBody(body io.Reader, out interface{}) error {
decoder := json.NewDecoder(body)
return decoder.Decode(out)
}
// Get is function of example of http.Get
func Get(url string) {
var colors Colors
resp, err := http.Get(url)
_ = err
defer resp.Body.Close()
err = decodeBody(resp.Body, &colors)
_ = err
logger, _ := zap.NewDevelopment()
logger.Info("get colors", zap.Reflect("colors", colors))
}
// main fetches one random color from the hexbot service and logs it.
func main() {
	const url = "https://api.noopschallenge.com/hexbot"
	Get(url)
}
|
package main
import "fmt"
// main prints a multi-line sample text built from a raw string literal
// (raw strings preserve embedded newlines and double quotes verbatim).
func main() {
	text := `Lorem Ipsum is
simply "dummy text"
of the printing
and typesetting industry.`
	fmt.Println(text)
}
|
/*
Copyright © 2020 Yueming Xu <yxu@tibco.com>
This file is subject to the license terms contained in the license file that is distributed with this file.
*/
package main
import "github.com/yxuco/tcmdtool/cmd"
// main is the tcmdtool entry point; it delegates to the command tree
// defined in the cmd package.
func main() {
	cmd.Execute()
}
|
// Copyright 2019 Blues Inc. All rights reserved.
// Use of this source code is governed by licenses granted by the
// copyright holder including that found in the LICENSE file.
package noteutil
import (
"encoding/json"
"flag"
"fmt"
"github.com/blues/note-go/notecard"
"github.com/blues/note-go/notehub"
"io/ioutil"
"math/rand"
"os"
"strconv"
"time"
)
// ConfigSettings defines the config file that maintains the command processor's state.
// Field meanings mirror the command-line flags registered in ConfigFlagsRegister.
type ConfigSettings struct {
	When       string `json:"when,omitempty"`        // timestamp of the last reset/save
	Hub        string `json:"hub,omitempty"`         // notehub command service URL
	App        string `json:"app,omitempty"`         // AppUID (the Project UID)
	Device     string `json:"device,omitempty"`      // DeviceUID
	Product    string `json:"product,omitempty"`     // ProductUID
	Root       string `json:"root,omitempty"`        // path to the service's root CA certificate file
	Cert       string `json:"cert,omitempty"`        // path to local cert file
	Key        string `json:"key,omitempty"`         // path to local private key file
	Secure     bool   `json:"secure,omitempty"`      // use https instead of http
	Interface  string `json:"interface,omitempty"`   // "serial" or "i2c"
	Port       string `json:"port,omitempty"`        // serial or i2c port
	PortConfig int    `json:"port_config,omitempty"` // serial device speed or i2c address
}
// Staging for command-line flag values; merged into Config by
// ConfigFlagsProcess.
var flagConfigReset bool          // -config-reset: restore defaults before processing
var flagConfigSave bool           // -config-save: persist changes after processing
var flagConfigHTTP bool           // -http: force insecure transport
var flagConfigHTTPS bool          // -https: force secure transport
var flagConfig ConfigSettings     // flag-provided field values ("-" clears a field)

// Config are the master config settings — the active copy of our
// configuration file, never dirty.
var Config ConfigSettings
// ConfigRead reads the current info from the config file, falling back to
// defaults when the file is missing or unreadable.
func ConfigRead() error {

	// As a convenience to all tools, generate a new random seed for each
	// iteration. (The original seeded twice in a row; the first call was
	// immediately overwritten and therefore dead.)
	rand.Seed(time.Now().UnixNano())

	// Read the config file
	contents, err := ioutil.ReadFile(configSettingsPath())
	if os.IsNotExist(err) {
		// No config yet: start from defaults, and this is not an error.
		ConfigReset()
		err = nil
	} else if err == nil {
		err = json.Unmarshal(contents, &Config)
		if err != nil || Config.When == "" {
			// Corrupt or incomplete config: reset, but still report a
			// parse failure to the caller.
			ConfigReset()
			if err != nil {
				err = fmt.Errorf("can't read configuration: %s", err)
			}
		}
	}

	return err
}
// ConfigWrite updates the file with the current config info.
// Unlike the original, marshal and write errors are propagated instead of
// being silently dropped, and the file handle cannot leak.
func ConfigWrite() error {

	// Marshal it (indented so the file stays human-editable)
	configJSON, err := json.MarshalIndent(Config, "", "    ")
	if err != nil {
		return err
	}

	// Write the file; ioutil.WriteFile handles create/truncate/close and
	// reports the first error encountered.
	return ioutil.WriteFile(configSettingsPath(), configJSON, 0666)
}
// configResetInterface resets the comms settings to their defaults:
// serial transport with the notecard library's default port and speed.
func configResetInterface() {
	Config.Interface = "serial"
	Config.Port, Config.PortConfig = notecard.SerialDefaults()
}
// ConfigReset restores the in-memory config to its defaults: serial comms,
// the default notehub service, and a fresh "when" timestamp. (The trailing
// bare return in the original was redundant and has been removed.)
func ConfigReset() {
	configResetInterface()
	Config.Hub = notehub.DefaultAPIService
	Config.When = time.Now().UTC().Format("2006-01-02T15:04:05Z")
}
// ConfigShow displays all current config parameters, rendered as the
// command-line flags that would reproduce them. Only non-default fields are
// printed (Hub is suppressed when it equals the default API service).
func ConfigShow() error {

	fmt.Printf("\nCurrently saved values:\n")

	// Transport scheme: exactly one of -https / -http is always shown.
	if Config.Secure {
		fmt.Printf("   -https\n")
	} else {
		fmt.Printf("   -http\n")
	}
	if Config.Hub != "" && Config.Hub != notehub.DefaultAPIService {
		fmt.Printf("   -hub %s\n", Config.Hub)
	}
	if Config.App != "" {
		fmt.Printf("   -app %s\n", Config.App)
	}
	if Config.Product != "" {
		fmt.Printf("   -product %s\n", Config.Product)
	}
	if Config.Device != "" {
		fmt.Printf("   -device %s\n", Config.Device)
	}
	if Config.Root != "" {
		fmt.Printf("   -root %s\n", Config.Root)
	}
	if Config.Cert != "" {
		fmt.Printf("   -cert %s\n", Config.Cert)
	}
	if Config.Key != "" {
		fmt.Printf("   -key %s\n", Config.Key)
	}
	if Config.Interface != "" {
		fmt.Printf("   -interface %s\n", Config.Interface)
		// An empty port is rendered as "-" placeholders.
		if Config.Port == "" {
			fmt.Printf("      -port -\n")
			fmt.Printf("      -portconfig -\n")
		} else {
			fmt.Printf("      -port %s\n", Config.Port)
			fmt.Printf("      -portconfig %d\n", Config.PortConfig)
		}
	}

	return nil
}
// ConfigFlagsProcess merges the registered config flags into the master
// Config. For string fields the sentinel value "-" clears the field back to
// its default; a non-empty value overrides it; empty leaves it untouched.
// Env vars NOTE_INTERFACE / NOTE_PORT / NOTE_PORT_CONFIG override the result
// for this session only (they are applied after any -config-save).
func ConfigFlagsProcess() (err error) {

	// Read the config lazily on first use.
	if Config.When == "" {
		err = ConfigRead()
		if err != nil {
			return
		}
	}

	// Reset if requested
	if flagConfigReset {
		ConfigReset()
	}

	// Set the flags as desired
	if flagConfigHTTP {
		Config.Secure = false
	}
	if flagConfigHTTPS {
		Config.Secure = true
	}
	if flagConfig.Hub == "-" {
		Config.Hub = notehub.DefaultAPIService
	} else if flagConfig.Hub != "" {
		Config.Hub = flagConfig.Hub
	}
	if flagConfig.Root == "-" {
		Config.Root = ""
	} else if flagConfig.Root != "" {
		Config.Root = flagConfig.Root
	}
	if flagConfig.Key == "-" {
		Config.Key = ""
	} else if flagConfig.Key != "" {
		Config.Key = flagConfig.Key
	}
	if flagConfig.Cert == "-" {
		Config.Cert = ""
	} else if flagConfig.Cert != "" {
		Config.Cert = flagConfig.Cert
	}
	if flagConfig.App == "-" {
		Config.App = ""
	} else if flagConfig.App != "" {
		Config.App = flagConfig.App
	}
	if flagConfig.Device == "-" {
		Config.Device = ""
	} else if flagConfig.Device != "" {
		Config.Device = flagConfig.Device
	}
	if flagConfig.Product == "-" {
		Config.Product = ""
	} else if flagConfig.Product != "" {
		Config.Product = flagConfig.Product
	}
	if flagConfig.Interface == "-" {
		configResetInterface()
	} else if flagConfig.Interface != "" {
		Config.Interface = flagConfig.Interface
	}
	if flagConfig.Port != "" {
		Config.Port = flagConfig.Port
	}
	// -1 is the registered default, meaning "not specified".
	if flagConfig.PortConfig != -1 {
		Config.PortConfig = flagConfig.PortConfig
	}

	// Save if requested.
	// NOTE(review): errors from ConfigWrite/ConfigShow are silently
	// discarded here — confirm this best-effort behavior is intended.
	if flagConfigSave {
		ConfigWrite()
		ConfigShow()
	}

	// Override, just for this session, with env vars
	str := os.Getenv("NOTE_INTERFACE")
	if str != "" {
		Config.Interface = str
	}
	str = os.Getenv("NOTE_PORT")
	if str != "" {
		Config.Port = str
		// NOTE(review): NOTE_PORT_CONFIG is only consulted when NOTE_PORT is
		// also set (note the shadowed inner str) — confirm this nesting is
		// intentional.
		str := os.Getenv("NOTE_PORT_CONFIG")
		strint, err2 := strconv.Atoi(str)
		if err2 != nil {
			strint = Config.PortConfig
		}
		Config.PortConfig = strint
	}

	// Done
	return nil
}
// ConfigFlagsRegister registers the config-related flags with the standard
// flag package. Values land in the flagConfig staging struct and are merged
// into Config by ConfigFlagsProcess after flag.Parse.
func ConfigFlagsRegister() {

	// Start by setting to default if requested
	flag.BoolVar(&flagConfigReset, "config-reset", false, "reset the note tool config to its defaults")

	// Process the commands
	flag.StringVar(&flagConfig.Interface, "interface", "", "select 'serial' or 'i2c' interface")
	flag.StringVar(&flagConfig.Port, "port", "", "select serial or i2c port")
	// -1 means "not specified" (see ConfigFlagsProcess).
	flag.IntVar(&flagConfig.PortConfig, "portconfig", -1, "set serial device speed or i2c address")
	flag.BoolVar(&flagConfigHTTP, "http", false, "use http instead of https")
	flag.BoolVar(&flagConfigHTTPS, "https", false, "use https instead of http")
	flag.StringVar(&flagConfig.Hub, "hub", "", "set notehub command service URL")
	flag.StringVar(&flagConfig.Device, "device", "", "set DeviceUID")
	flag.StringVar(&flagConfig.Product, "product", "", "set ProductUID")
	flag.StringVar(&flagConfig.App, "app", "", "set AppUID (the Project UID)")
	flag.StringVar(&flagConfig.Root, "root", "", "set path to service's root CA certificate file")
	flag.StringVar(&flagConfig.Key, "key", "", "set path to local private key file")
	flag.StringVar(&flagConfig.Cert, "cert", "", "set path to local cert file")

	// Write the config if asked to do so
	flag.BoolVar(&flagConfigSave, "config-save", false, "save changes to note tool config")
}
// FlagParse is a wrapper around flag.Parse that registers the config flags
// first and merges their parsed values into Config afterwards.
func FlagParse() (err error) {
	ConfigFlagsRegister()
	flag.Parse()
	return ConfigFlagsProcess()
}
|
// Copyright 2017 YTD Authors. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.
// ytd program entry.
package main
import (
"flag"
"fmt"
_ "net/http/pprof"
"os"
"runtime"
"runtime/pprof"
"strings"
"github.com/Ch3ck/youtube-dl/api"
"github.com/Sirupsen/logrus"
)
const (
	// BANNER is the usage banner for ytd; it embeds a %s verb for the
	// version string (see flag.Usage and usageAndExit).
	BANNER = "ytd -ids 'videoId,videoId2' -format mp3 -bitrate 123 -path ~/Downloads/ videoUrl %s\n"
	// VERSION is the ytd version string.
	VERSION = "v0.1"
)
// Command-line options, populated in init.
var (
	ids     string // comma-separated YouTube video IDs
	version bool   // print version and exit
	format  string // output file format (mp3, webm, flv)
	path    string // output path
	bitrate uint   // audio bitrate
)

const (
	// defaultMaxDownloads caps the number of concurrent downloads.
	defaultMaxDownloads = 5
)

// cpuprofile optionally names a file to receive a CPU profile.
var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file")
// init registers and parses the command-line flags, and handles -version.
func init() {
	// parse flags
	flag.StringVar(&ids, "ids", "", "Youtube Video IDs. Separated then by using a comma.")
	flag.StringVar(&format, "format", "", "File Format(mp3, webm, flv)")
	flag.StringVar(&path, "path", ".", "Output Path")
	flag.BoolVar(&version, "version", false, "print version and exit")
	flag.UintVar(&bitrate, "bitrate", 192, "Audio Bitrate")

	flag.Usage = func() {
		// Fprintf formats BANNER directly; the original wrapped Sprintf in
		// Fprint, which is redundant (and flagged by vet/gocritic).
		fmt.Fprintf(os.Stderr, BANNER, VERSION)
		flag.PrintDefaults()
	}
	flag.Parse()

	if version {
		logrus.Infof("%s", VERSION)
		os.Exit(0)
	}
}
// main drives the download: optional CPU profiling, output-path defaulting,
// and dispatch of either the positional URL or the -ids list.
func main() {
	// Optional CPU profile, written until the program exits.
	if *cpuprofile != "" {
		f, err := os.Create(*cpuprofile)
		if err != nil {
			logrus.Fatalf("%v", err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}
	runtime.SetBlockProfileRate(20)

	// NOTE(review): path defaults to "." via its flag, so this branch looks
	// unreachable — confirm whether it is still needed.
	if path == "" {
		path, _ = os.Getwd()
	}

	// No arguments at all: print usage and exit.
	if len(os.Args) == 1 {
		usageAndExit(BANNER, -1)
	}

	//Get Video Id
	if ids == "" {
		// No -ids given: treat the first positional argument as a URL.
		url := os.Args[1]
		startProcessing([]string{url})
	} else {
		startProcessing(strings.Split(ids, ","))
	}
}
// startProcessing downloads the given video URLs/IDs with the configured
// format, path, and bitrate, logging any per-stream decode errors.
func startProcessing(urls []string) {
	for err := range api.DownloadStreams(defaultMaxDownloads, format, path, bitrate, urls) {
		//Extract Video data and decode
		if err != nil {
			logrus.Errorf("Error decoding Video stream: %v", err)
		}
	}
}
// usageAndExit prints message (if any) followed by the flag usage text, then
// terminates the process with exitCode.
func usageAndExit(message string, exitCode int) {
	if message != "" {
		// Print the message verbatim. The original used Fprintf with message
		// as the format string; since callers pass BANNER (which contains a
		// %s verb), that printed "%!s(MISSING)" and is a vet violation.
		fmt.Fprint(os.Stderr, message)
		fmt.Fprint(os.Stderr, "\n\n")
	}
	flag.Usage()
	fmt.Fprint(os.Stderr, "\n")
	os.Exit(exitCode)
}
|
package lfsapi
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
// testCert is a self-signed PEM certificate fixture issued for the host
// "git-lfs.local" (see the CN in the subject); the tests below install it
// via gitconfig keys or environment variables and check host matching.
var testCert = `-----BEGIN CERTIFICATE-----
MIIDyjCCArKgAwIBAgIJAMi9TouXnW+ZMA0GCSqGSIb3DQEBBQUAMEwxCzAJBgNV
BAYTAlVTMRMwEQYDVQQIEwpTb21lLVN0YXRlMRAwDgYDVQQKEwdnaXQtbGZzMRYw
FAYDVQQDEw1naXQtbGZzLmxvY2FsMB4XDTE2MDMwOTEwNTk1NFoXDTI2MDMwNzEw
NTk1NFowTDELMAkGA1UEBhMCVVMxEzARBgNVBAgTClNvbWUtU3RhdGUxEDAOBgNV
BAoTB2dpdC1sZnMxFjAUBgNVBAMTDWdpdC1sZnMubG9jYWwwggEiMA0GCSqGSIb3
DQEBAQUAA4IBDwAwggEKAoIBAQCXmsI2w44nOsP7n3kL1Lz04U5FMZRErBSXLOE+
dpd4tMpgrjOncJPD9NapHabsVIOnuVvMDuBbWYwU9PwbN4tjQzch8DRxBju6fCp/
Pm+QF6p2Ga+NuSHWoVfNFuF2776aF9gSLC0rFnBekD3HCz+h6I5HFgHBvRjeVyAs
PRw471Y28Je609SoYugxaQNzRvahP0Qf43tE74/WN3FTGXy1+iU+uXpfp8KxnsuB
gfj+Wi6mPt8Q2utcA1j82dJ0K8ZbHSbllzmI+N/UuRLsbTUEdeFWYdZ0AlZNd/Vc
PlOSeoExwvOHIuUasT/cLIrEkdXNud2QLg2GpsB6fJi3NEUhAgMBAAGjga4wgasw
HQYDVR0OBBYEFC8oVPRQbekTwfkntgdL7PADXNDbMHwGA1UdIwR1MHOAFC8oVPRQ
bekTwfkntgdL7PADXNDboVCkTjBMMQswCQYDVQQGEwJVUzETMBEGA1UECBMKU29t
ZS1TdGF0ZTEQMA4GA1UEChMHZ2l0LWxmczEWMBQGA1UEAxMNZ2l0LWxmcy5sb2Nh
bIIJAMi9TouXnW+ZMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBACIl
/CBLIhC3drrYme4cGArhWyXIyRpMoy9Z+9Dru8rSuOr/RXR6sbYhlE1iMGg4GsP8
4Cj7aIct6Vb9NFv5bGNyFJAmDesm3SZlEcWxU3YBzNPiJXGiUpQHCkp0BH+gvsXc
tb58XoiDZPVqrl0jNfX/nHpHR9c3DaI3Tjx0F/No0ZM6mLQ1cNMikFyEWQ4U0zmW
LvV+vvKuOixRqbcVnB5iTxqMwFG0X3tUql0cftGBgoCoR1+FSBOs0EXLODCck6ql
aW6vZwkA+ccj/pDTx8LBe2lnpatrFeIt6znAUJW3G8r6SFHKVBWHwmESZS4kxhjx
NpW5Hh0w4/5iIetCkJ0=
-----END CERTIFICATE-----`
// sslCAInfoConfigHostNames are host spellings (with and without a trailing
// slash) used to build http.<url>.sslcainfo gitconfig keys.
var sslCAInfoConfigHostNames = []string{
	"git-lfs.local",
	"git-lfs.local/",
}
// sslCAInfoMatchedHostTests lists lookup hosts and whether a cert configured
// for "git-lfs.local" should match them (a differing port or host must not).
var sslCAInfoMatchedHostTests = []struct {
	hostName    string
	shouldMatch bool
}{
	{"git-lfs.local", true},
	{"git-lfs.local:8443", false},
	{"wronghost.com", false},
}
// TestCertFromSSLCAInfoConfig verifies that a CA cert configured via a
// host-scoped http.<url>.sslcainfo key applies only to matching hosts,
// while the global http.sslcainfo key applies to every host.
func TestCertFromSSLCAInfoConfig(t *testing.T) {
	tempfile, err := ioutil.TempFile("", "testcert")
	assert.Nil(t, err, "Error creating temp cert file")
	defer os.Remove(tempfile.Name())

	_, err = tempfile.WriteString(testCert)
	assert.Nil(t, err, "Error writing temp cert file")
	tempfile.Close()

	// Test http.<url>.sslcainfo
	for _, hostName := range sslCAInfoConfigHostNames {
		hostKey := fmt.Sprintf("http.https://%v.sslcainfo", hostName)
		c, err := NewClient(nil, UniqTestEnv(map[string]string{
			hostKey: tempfile.Name(),
		}))
		assert.Nil(t, err)

		for _, matchedHostTest := range sslCAInfoMatchedHostTests {
			pool := getRootCAsForHost(c, matchedHostTest.hostName)

			var shouldOrShouldnt string
			if matchedHostTest.shouldMatch {
				shouldOrShouldnt = "should"
			} else {
				shouldOrShouldnt = "should not"
			}

			assert.Equal(t, matchedHostTest.shouldMatch, pool != nil,
				"Cert lookup for \"%v\" %v have succeeded with \"%v\"",
				matchedHostTest.hostName, shouldOrShouldnt, hostKey)
		}
	}

	// Test http.sslcainfo
	c, err := NewClient(nil, UniqTestEnv(map[string]string{
		"http.sslcainfo": tempfile.Name(),
	}))
	assert.Nil(t, err)

	// Should match any host at all
	for _, matchedHostTest := range sslCAInfoMatchedHostTests {
		pool := getRootCAsForHost(c, matchedHostTest.hostName)
		assert.NotNil(t, pool)
	}
}
// TestCertFromSSLCAInfoEnv verifies that a CA cert supplied through the
// GIT_SSL_CAINFO environment variable is applied to every host.
func TestCertFromSSLCAInfoEnv(t *testing.T) {
	certFile, err := ioutil.TempFile("", "testcert")
	assert.Nil(t, err, "Error creating temp cert file")
	defer os.Remove(certFile.Name())

	_, err = certFile.WriteString(testCert)
	assert.Nil(t, err, "Error writing temp cert file")
	certFile.Close()

	client, err := NewClient(UniqTestEnv(map[string]string{
		"GIT_SSL_CAINFO": certFile.Name(),
	}), nil)
	assert.Nil(t, err)

	// The env var is global, so a cert pool should resolve for any host.
	for _, tc := range sslCAInfoMatchedHostTests {
		assert.NotNil(t, getRootCAsForHost(client, tc.hostName))
	}
}
// TestCertFromSSLCAPathConfig verifies that a directory of CA certs
// configured via the global http.sslcapath gitconfig key applies to every
// host.
func TestCertFromSSLCAPathConfig(t *testing.T) {
	tempdir, err := ioutil.TempDir("", "testcertdir")
	assert.Nil(t, err, "Error creating temp cert dir")
	defer os.RemoveAll(tempdir)

	err = ioutil.WriteFile(filepath.Join(tempdir, "cert1.pem"), []byte(testCert), 0644)
	assert.Nil(t, err, "Error creating cert file")

	c, err := NewClient(nil, UniqTestEnv(map[string]string{
		"http.sslcapath": tempdir,
	}))
	assert.Nil(t, err)

	// Should match any host at all
	for _, matchedHostTest := range sslCAInfoMatchedHostTests {
		pool := getRootCAsForHost(c, matchedHostTest.hostName)
		assert.NotNil(t, pool)
	}
}
// TestCertFromSSLCAPathEnv verifies that a directory of CA certs supplied
// through the GIT_SSL_CAPATH environment variable applies to every host.
func TestCertFromSSLCAPathEnv(t *testing.T) {
	certDir, err := ioutil.TempDir("", "testcertdir")
	assert.Nil(t, err, "Error creating temp cert dir")
	defer os.RemoveAll(certDir)

	err = ioutil.WriteFile(filepath.Join(certDir, "cert1.pem"), []byte(testCert), 0644)
	assert.Nil(t, err, "Error creating cert file")

	client, err := NewClient(UniqTestEnv(map[string]string{
		"GIT_SSL_CAPATH": certDir,
	}), nil)
	assert.Nil(t, err)

	// The env var is global, so a cert pool should resolve for any host.
	for _, tc := range sslCAInfoMatchedHostTests {
		assert.NotNil(t, getRootCAsForHost(client, tc.hostName))
	}
}
// TestCertVerifyDisabledGlobalEnv verifies that GIT_SSL_NO_VERIFY=1 disables
// TLS certificate verification for every host, while a default client keeps
// verification enabled.
func TestCertVerifyDisabledGlobalEnv(t *testing.T) {
	// Baseline: a zero-value client must verify certificates.
	empty := &Client{}
	httpClient := empty.httpClient("anyhost.com")
	tr, ok := httpClient.Transport.(*http.Transport)
	if assert.True(t, ok) {
		assert.False(t, tr.TLSClientConfig.InsecureSkipVerify)
	}

	c, err := NewClient(UniqTestEnv(map[string]string{
		"GIT_SSL_NO_VERIFY": "1",
	}), nil)
	assert.Nil(t, err)

	// With the env override, verification is skipped.
	httpClient = c.httpClient("anyhost.com")
	tr, ok = httpClient.Transport.(*http.Transport)
	if assert.True(t, ok) {
		assert.True(t, tr.TLSClientConfig.InsecureSkipVerify)
	}
}
// TestCertVerifyDisabledGlobalConfig verifies that the global gitconfig key
// http.sslverify=false disables TLS verification for every host, while a
// default client keeps verification enabled.
func TestCertVerifyDisabledGlobalConfig(t *testing.T) {
	// Baseline: a zero-value client must verify certificates.
	def := &Client{}
	if tr, ok := def.httpClient("anyhost.com").Transport.(*http.Transport); assert.True(t, ok) {
		assert.False(t, tr.TLSClientConfig.InsecureSkipVerify)
	}

	client, err := NewClient(nil, UniqTestEnv(map[string]string{
		"http.sslverify": "false",
	}))
	assert.Nil(t, err)

	// With the global override, verification is skipped.
	if tr, ok := client.httpClient("anyhost.com").Transport.(*http.Transport); assert.True(t, ok) {
		assert.True(t, tr.TLSClientConfig.InsecureSkipVerify)
	}
}
// TestCertVerifyDisabledHostConfig verifies that a host-scoped
// http.https://<host>/.sslverify=false key disables TLS verification for
// that host only, leaving other hosts verified.
func TestCertVerifyDisabledHostConfig(t *testing.T) {
	// Baseline: a zero-value client verifies both hosts.
	def := &Client{}
	httpClient := def.httpClient("specifichost.com")
	tr, ok := httpClient.Transport.(*http.Transport)
	if assert.True(t, ok) {
		assert.False(t, tr.TLSClientConfig.InsecureSkipVerify)
	}

	httpClient = def.httpClient("otherhost.com")
	tr, ok = httpClient.Transport.(*http.Transport)
	if assert.True(t, ok) {
		assert.False(t, tr.TLSClientConfig.InsecureSkipVerify)
	}

	c, err := NewClient(nil, UniqTestEnv(map[string]string{
		"http.https://specifichost.com/.sslverify": "false",
	}))
	assert.Nil(t, err)

	// The configured host skips verification…
	httpClient = c.httpClient("specifichost.com")
	tr, ok = httpClient.Transport.(*http.Transport)
	if assert.True(t, ok) {
		assert.True(t, tr.TLSClientConfig.InsecureSkipVerify)
	}

	// …but any other host still verifies.
	httpClient = c.httpClient("otherhost.com")
	tr, ok = httpClient.Transport.(*http.Transport)
	if assert.True(t, ok) {
		assert.False(t, tr.TLSClientConfig.InsecureSkipVerify)
	}
}
|
package most_common_word
import (
"testing"
"github.com/stretchr/testify/assert"
)
// Test_mostCommonWord exercises mostCommonWord against LeetCode-style cases:
// banned words are excluded and matching is case/punctuation-insensitive.
func Test_mostCommonWord(t *testing.T) {
	tests := []struct {
		paragraph string
		banned    []string
		want      string
	}{
		{
			paragraph: "Bob hit a ball, the hit BALL flew far after it was hit.",
			banned:    []string{"hit"},
			want:      "ball",
		},
		{
			paragraph: "a.",
			banned:    []string{},
			want:      "a",
		},
	}
	for _, tt := range tests {
		t.Run("", func(t *testing.T) {
			got := mostCommonWord(tt.paragraph, tt.banned)
			// testify's assert.Equal takes (t, expected, actual); the
			// original passed (got, want), which swaps the expected/actual
			// labels in failure messages.
			assert.Equal(t, tt.want, got)
		})
	}
}
|
package cparser
import (
	"fmt"
	"sync"

	"ntoolkit/commands"
	"ntoolkit/errors"
	"ntoolkit/parser/tools"
)
// CommandParser is a high-level interface for dispatching text commands.
type CommandParser struct {
	Commands    *commands.Commands // command executor shared with callers
	blockParser *tools.BlockParser // tokenizes raw command strings
	factory     []CommandFactory   // registered handlers, tried in order
}
// New returns a new command parser with the attached commands object.
// When no (or a nil) commands object is supplied, a fresh blank one is
// created.
func New(cmd ...*commands.Commands) *CommandParser {
	var commander *commands.Commands
	switch {
	case len(cmd) > 0 && cmd[0] != nil:
		commander = cmd[0]
	default:
		commander = commands.New()
	}
	return &CommandParser{
		Commands:    commander,
		blockParser: tools.NewBlockParser(),
		factory:     make([]CommandFactory, 0),
	}
}
// Execute parses command and dispatches it to the first factory that accepts
// it, returning a DeferredCommand that resolves or rejects with the outcome.
// Panics raised while parsing or executing are converted into a rejected
// promise rather than crashing the caller.
func (p *CommandParser) Execute(command string, context interface{}) (promise *DeferredCommand) {
	defer func() {
		if r := recover(); r != nil {
			// The recovered value is not guaranteed to be an error; the
			// original's unchecked r.(error) re-panicked on e.g. panic("msg").
			err, ok := r.(error)
			if !ok {
				err = fmt.Errorf("panic: %v", r)
			}
			// Assign the named return: the original discarded p.failed's
			// result here, so callers received a nil promise after a panic.
			promise = p.failed(errors.Fail(ErrCommandFailed{}, err, err.Error()))
		}
	}()

	p.blockParser.Parse(command)
	tokens, err := p.blockParser.Finished()
	if err != nil {
		return p.failed(errors.Fail(ErrBadSyntax{}, err, "Invalid command string"))
	}

	// Try each registered factory in order; the first that yields a command
	// wins.
	for i := range p.factory {
		cmd, err := p.factory[i].Parse(tokens, context)
		if err != nil {
			return p.failed(errors.Fail(ErrCommandFailed{}, err, "Command syntax error"))
		}
		if cmd != nil {
			rtn := &DeferredCommand{}
			p.Commands.Execute(cmd).Then(func() {
				rtn.Resolve(cmd)
			}, func(err error) {
				rtn.Reject(errors.Fail(ErrCommandFailed{}, err, "Command failed to execute"))
			})
			return rtn
		}
	}
	return p.failed(errors.Fail(ErrNoHandler{}, nil, "No handler supported the given command"))
}
// Wait executes command and blocks until the resulting promise resolves or
// rejects, returning the command and nil, or nil and the error.
func (p *CommandParser) Wait(command string, context interface{}) (commands.Command, error) {
	var (
		cmd  commands.Command
		err  error
		done = make(chan struct{})
	)
	p.Execute(command, context).Then(func(c commands.Command) {
		cmd = c
		close(done)
	}, func(errRtn error) {
		err = errRtn
		close(done)
	})
	<-done
	return cmd, err
}
// Register adds a new command factory to handle some kind of input.
// Factories are consulted in registration order by Execute.
func (p *CommandParser) Register(factory CommandFactory) {
	p.factory = append(p.factory, factory)
}
// Command returns a new standard command factory. Each argument of the form
// "[name]" becomes a Token(); every other argument becomes a Word() —
// equivalent to calling .Word()/.Token() on the returned object yourself.
func (p *CommandParser) Command(words ...string) *StandardCommandFactory {
	factory := newStandardCommandFactory()
	for _, word := range words {
		isToken := len(word) > 2 && word[0] == '[' && word[len(word)-1] == ']'
		if isToken {
			factory.Token(word[1 : len(word)-1])
		} else {
			factory.Word(word)
		}
	}
	return factory
}
// failed returns a DeferredCommand that is already rejected with err.
func (p *CommandParser) failed(err error) *DeferredCommand {
	rtn := &DeferredCommand{}
	rtn.Reject(err)
	return rtn
}
|
package benchcore
import (
"crypto/tls"
"fmt"
"io"
"io/ioutil"
"net/http"
"sync"
"sync/atomic"
"time"
)
// BenchCore drives an HTTP load test: it replays Request at the configured
// concurrency and records per-status-code and per-error counters.
type BenchCore struct {
	Request          *http.Request     // go-http request struct to replay
	Concurrency      int               // number of concurrent clients
	start            time.Time         // benchmark start time
	end              time.Time         // benchmark end time
	benchDuration    time.Duration     // duration mode: total test time
	benchNumber      uint64            // count mode: total number of requests
	httpCodeStat     map[int]int       // status-code stats: http-code -> count
	httpCodeStatMu   sync.RWMutex      // guards httpCodeStat
	errorCountStat   map[string]int    // generic error stats: error -> count
	errorCountStatMu sync.RWMutex      // guards errorCountStat
	clients          chan *http.Client // http-client pool
	reqErrCount      uint64            // failed request count
	reqDoneCount     uint64            // completed request count
	wg               sync.WaitGroup    // synchronizes all worker goroutines
}
// BenchResult is the aggregated outcome of one benchmark run.
type BenchResult struct {
	TotalDuration time.Duration  // elapsed wall-clock time
	RequestCount  uint64         // number of completed requests
	CodeStat      map[int]int    // HTTP status-code histogram
	Errors        map[string]int // generic error histogram
}
// SendCountMod runs the benchmark in count mode: send exactly n requests.
func (b *BenchCore) SendCountMod(n uint64) *BenchResult {
	b.benchNumber = n
	b.init()
	return b.benchmark()
}
// SendDurationMod runs the benchmark in duration mode: keep sending
// requests for the given span of time.
func (b *BenchCore) SendDurationMod(t time.Duration) *BenchResult {
	b.benchDuration = t
	b.init()
	return b.benchmark()
}
// init prepares the stat maps and validates the request before a run.
func (b *BenchCore) init() {
	b.httpCodeStat = make(map[int]int)      // HTTP status-code stats
	b.errorCountStat = make(map[string]int) // generic error stats
	if b.Request == nil {
		panic("Request is nil")
	}
}
// benchmark is the main flow: build the client pool, fire the requests
// while timing the run, then aggregate the collected statistics.
func (b *BenchCore) benchmark() *BenchResult {
	// Create the connection pool
	b.createHttpClients()

	// Start the load test
	fmt.Printf("\n\n玩命测试中,你等会...\n")
	{
		/* hammer it */
		b.start = time.Now()
		b.sendRequests()
		b.end = time.Now()
	}
	// fmt.Printf("start_time:%d end_time:%d duration:%d", b.start, b.end, b.end.Sub(b.start))

	// Done: aggregate the statistics
	return b.genBenchResult()
}
// genBenchResult assembles the final BenchResult snapshot from the
// collected timers, counters, and stat maps.
func (b *BenchCore) genBenchResult() *BenchResult {
	result := &BenchResult{
		TotalDuration: b.end.Sub(b.start), // elapsed wall-clock time
		RequestCount:  b.reqDoneCount,
		CodeStat:      b.httpCodeStat,
		Errors:        b.errorCountStat,
	}
	return result
}
// PrintResults is the external interface for printing the benchmark result:
// totals, throughput, and the status-code / error histograms.
func (r *BenchResult) PrintResults() {
	// Sum responses and errors across the histograms.
	var respTotal int = 0
	var errorTotal int = 0
	for i := range r.CodeStat {
		respTotal += r.CodeStat[i]
	}
	for i := range r.Errors {
		errorTotal += r.Errors[i]
	}
	fmt.Printf("#############################################################################################################\n")
	fmt.Printf("           耗时: %0.3fs\n", r.TotalDuration.Seconds())
	fmt.Printf("     总请求数: %d (%0.1f/s) (%0.5fs/r)\n",
		r.RequestCount,
		float64(r.RequestCount)/r.TotalDuration.Seconds(),
		r.TotalDuration.Seconds()/float64(r.RequestCount),
	)
	fmt.Printf("     总响应数: %d (%0.1f/s) (%0.5fs/r)\n",
		respTotal,
		float64(respTotal)/r.TotalDuration.Seconds(),
		r.TotalDuration.Seconds()/float64(respTotal),
	)
	fmt.Printf("           错误: %d\n", errorTotal)
	fmt.Printf("     HTTP错误码分布(code->次数):\n")
	for code, count := range r.CodeStat {
		fmt.Printf("                 [http-%d]: %d\n", code, count)
	}
	fmt.Printf("常规错误码分布(次数->错误详细):\n")
	for err, count := range r.Errors {
		fmt.Printf("     %d 次:%s\n", count, err)
	}
	fmt.Printf("#############################################################################################################\n\n")
}
// packOne issues a single request using client c, returns the client to the
// pool when done, and records statistics for the outcome.
func (b *BenchCore) packOne(c *http.Client) {
	defer func() {
		b.clients <- c // return the client to the pool
		b.wg.Done()    // goroutine synchronization
	}()

	// In duration mode, cap the request timeout at the remaining test time.
	if b.benchDuration > 0 {
		c.Timeout = b.benchDuration - time.Since(b.start)
	}

	response, err := c.Do(b.Request)
	if err == nil {
		// Drain the body so the TCP connection can be reused; without this
		// read, keep-alive connection re-use does not happen.
		io.Copy(ioutil.Discard, response.Body)
		response.Body.Close()
	}

	// Requests finishing after the deadline are not counted.
	if b.isTimeout() {
		return
	}

	// Stats: atomics for counters, mutex-guarded maps for histograms.
	atomic.AddUint64(&b.reqDoneCount, 1)
	if err != nil {
		atomic.AddUint64(&b.reqErrCount, 1)
		b.errorCountStatMu.Lock()
		b.errorCountStat[err.Error()]++
		b.errorCountStatMu.Unlock()
		return
	}

	// Status-code histogram
	b.httpCodeStatMu.Lock()
	b.httpCodeStat[response.StatusCode]++
	b.httpCodeStatMu.Unlock()
}
// sendRequests spawns one goroutine per request until the count limit is
// reached (count mode, benchNumber > 0) or the deadline passes (duration
// mode), then waits for all in-flight requests.
func (b *BenchCore) sendRequests() {
	defer b.wg.Wait()
	for n := uint64(0); (b.benchNumber == 0 || b.benchNumber > n) && !b.isTimeout(); n++ {
		b.wg.Add(1)
		go b.packOne(<-b.clients) // new goroutine, pooled connection
	}
}
// isTimeout reports whether duration-mode benchmarking has exceeded its
// configured run time; it is always false in count mode (benchDuration 0).
func (b *BenchCore) isTimeout() bool {
	if b.benchDuration == 0 {
		return false
	}
	return time.Since(b.start) > b.benchDuration
}
// createHttpClients fills the pool with Concurrency http.Clients configured
// for TCP connection re-use and with redirects disabled.
func (b *BenchCore) createHttpClients() {
	// Sized by the number of concurrent workers
	b.clients = make(chan *http.Client, b.Concurrency)
	for i := 0; i < b.Concurrency; i++ {
		// Build one client
		b.clients <- &http.Client{
			CheckRedirect: func(*http.Request, []*http.Request) error {
				return fmt.Errorf("no redirects")
			},
			// Important: re-use connections
			Transport: &http.Transport{
				TLSClientConfig: &tls.Config{
					InsecureSkipVerify: true,
				},
				DisableCompression: true,
				DisableKeepAlives:  false, // re-use tcp
			},
		}
	}
}
|
package skeleton
type (
	// Context pairs a validity check with string retrieval.
	// NOTE(review): semantics inferred only from the method names —
	// confirm against the implementations elsewhere in the project.
	Context interface {
		Valid() bool
		GetString() string
	}
)
|
package main
import "fmt"
// main demonstrates slice literals, range iteration, and slicing
// expressions (hands-on exercise #3).
func main() { //0   1   2   3   4   5   6   7   8   9
	nums := []int{10, 11, 12, 42, 43, 44, 45, 46, 47, 48,
		// 10  11  12  13  14
		49, 50, 51, 18, 19}
	for idx, val := range nums {
		fmt.Println(idx, val)
	}
	fmt.Printf("The type is %T\n", nums)
	fmt.Println("Output should be: 42 43 44 45 46", nums[3:8])
	fmt.Println("Output should be: 47 48 49 50 51", nums[8:13])
	fmt.Println("Output should be: 44 45 46 47 48", nums[5:10])
	fmt.Println("Output should be: 43 44 45 46 47", nums[4:9])
	fmt.Println(nums)
}
//Hands-on exercise #3
//Using the code from the previous example, use SLICING to create the following new slices which are then printed:
//[42 43 44 45 46]
//[47 48 49 50 51]
//[44 45 46 47 48]
//[43 44 45 46 47]
//solution: https://play.golang.org/p/SGfiULXzAB
//video: 073 |
package handlers
// BaseResponse is the minimal JSON payload carrying a status-event name.
type BaseResponse struct {
	StatusEvent string `json:"status_event"`
}
// Event-name fragments and handler/queue identifiers used to compose
// status_event strings.
// NOTE(review): ALL_CAPS names are unidiomatic Go (MixedCaps preferred),
// but they are kept as-is because renaming would break external callers.
const (
	GET                 = ".get"
	POST                = ".post"
	PUT                 = ".put"
	DELETE              = ".delete"
	CALLED              = ".called"
	SUCCESS             = ".success"
	STARTED             = ".started"
	PROCESS_REDELIVERY  = ".process_redelivery"
	DELETE_REGISTRATION = ".delete_registration"
	NO_ENDPOINT         = ".no_registered_endpoint"
	HANDLE              = ".handle"
	DISPATCH            = ".dispatch"
	NOT_FOUND           = ".not_found"
	ERROR               = ".server_error"
	INVALID_REQUEST     = ".request.invalid_request"
	BAD_REQUEST         = ".request.bad_request"
	VALID_REQUEST       = ".request.valid"
	INVALID_TOKEN       = ".auth.invalid_token"
	NOT_AUTHORISED      = ".auth.not_authorised"
	TOKEN_OK            = ".auth.token_ok"
	HEALTH_HANDLER      = "event_sauce.health"
	EVENT_HANDLER       = "event_sauce.event"
	REGISTER_HANDLER    = "event_sauce.register"
	EVENT_QUEUE         = "event_sauce.event_queue"
	DEAD_LETTER_QUEUE   = "event_sauce.dead_letter_queue"
	WORKER              = ".worker"
)
|
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.
package sync2
import (
"context"
"runtime"
"testing"
"github.com/stretchr/testify/require"
"storj.io/common/testcontext"
)
// requireBlocked asserts that ch is still blocked: neither readable nor
// closed at the moment of the call.
func requireBlocked(t *testing.T, ch chan struct{}) {
	select {
	case <-ch:
		t.Fatal("channel read or close when it should not have been")
	default:
		// still blocked, as expected
	}
}
// TestReceiverClosableChan_Basic exercises the happy path: buffered sends,
// in-order receives, draining via StopReceiving, and the send-after-close
// failure.
func TestReceiverClosableChan_Basic(t *testing.T) {
	t.Parallel()
	ctx := testcontext.New(t)

	ch := MakeReceiverClosableChan[int](3)
	require.True(t, ch.BlockingSend(1))
	require.True(t, ch.BlockingSend(2))

	// Values come back in FIFO order.
	v, err := ch.Receive(ctx)
	require.NoError(t, err)
	require.Equal(t, v, 1)

	v, err = ch.Receive(ctx)
	require.NoError(t, err)
	require.Equal(t, v, 2)

	require.True(t, ch.BlockingSend(3))
	require.True(t, ch.BlockingSend(4))
	require.True(t, ch.BlockingSend(5))

	v, err = ch.Receive(ctx)
	require.NoError(t, err)
	require.Equal(t, v, 3)

	// StopReceiving drains whatever was still buffered…
	vs := ch.StopReceiving()
	require.Equal(t, vs, []int{4, 5})

	// …and subsequent sends report failure.
	require.False(t, ch.BlockingSend(6))
}
// TestReceiverClosableChan_BlockingSend checks that a send to a full channel
// blocks until a receiver frees a slot.
func TestReceiverClosableChan_BlockingSend(t *testing.T) {
	t.Parallel()
	ctx := testcontext.New(t)
	ch := MakeReceiverClosableChan[int](3)
	// Fill the channel to capacity.
	require.True(t, ch.BlockingSend(1))
	require.True(t, ch.BlockingSend(2))
	require.True(t, ch.BlockingSend(3))
	sending := make(chan struct{})
	sent := make(chan struct{})
	sentWithoutRace := false
	ctx.Go(func() error {
		close(sending)
		// This send must block: the buffer is full.
		require.True(t, ch.BlockingSend(4))
		close(sent)
		sentWithoutRace = true
		return nil
	})
	<-sending
	for i := 0; i < 10; i++ {
		// make sure the send is blocked
		runtime.Gosched()
	}
	requireBlocked(t, sent)
	require.False(t, sentWithoutRace)
	// Freeing one slot lets the blocked send complete.
	v, err := ch.Receive(ctx)
	require.NoError(t, err)
	require.Equal(t, v, 1)
	<-sent
}
// TestReceiverClosableChan_UnableToSend checks that a send blocked on a full
// channel returns false once the receiver calls StopReceiving, and that the
// drained values are exactly the buffered ones.
func TestReceiverClosableChan_UnableToSend(t *testing.T) {
	t.Parallel()
	ctx := testcontext.New(t)
	ch := MakeReceiverClosableChan[int](3)
	// Fill the channel to capacity.
	require.True(t, ch.BlockingSend(1))
	require.True(t, ch.BlockingSend(2))
	require.True(t, ch.BlockingSend(3))
	sending := make(chan struct{})
	sent := make(chan struct{})
	sentWithoutRace := false
	ctx.Go(func() error {
		close(sending)
		// The blocked send must fail once receiving stops.
		require.False(t, ch.BlockingSend(4))
		close(sent)
		sentWithoutRace = true
		return nil
	})
	<-sending
	for i := 0; i < 10; i++ {
		// make sure the send is blocked
		runtime.Gosched()
	}
	requireBlocked(t, sent)
	require.False(t, sentWithoutRace)
	vs := ch.StopReceiving()
	require.Equal(t, vs, []int{1, 2, 3})
	<-sent
}
// TestReceiverClosableChan_BlockingReceive checks that Receive blocks on an
// empty channel until a value is sent.
func TestReceiverClosableChan_BlockingReceive(t *testing.T) {
	t.Parallel()
	ctx := testcontext.New(t)
	ch := MakeReceiverClosableChan[int](3)
	receiving := make(chan struct{})
	received := make(chan struct{})
	receivedWithoutRace := false
	ctx.Go(func() error {
		close(receiving)
		ctx := context.Background()
		// This must block: nothing has been sent yet.
		v, err := ch.Receive(ctx)
		require.NoError(t, err)
		require.Equal(t, v, 1)
		close(received)
		receivedWithoutRace = true
		return nil
	})
	<-receiving
	for i := 0; i < 10; i++ {
		// make sure the receive is blocked
		runtime.Gosched()
	}
	requireBlocked(t, received)
	require.False(t, receivedWithoutRace)
	// The first send unblocks the receiver.
	require.True(t, ch.BlockingSend(1))
	<-received
}
// TestReceiverClosableChan_ContextCanceled checks that a blocked Receive
// returns context.Canceled when its context is canceled, and that the
// channel remains usable for sends afterwards.
func TestReceiverClosableChan_ContextCanceled(t *testing.T) {
	t.Parallel()
	ctx := testcontext.New(t)
	ch := MakeReceiverClosableChan[int](3)
	receiving := make(chan struct{})
	received := make(chan struct{})
	receivedWithoutRace := false
	cancelCtx, cancel := context.WithCancel(ctx)
	ctx.Go(func() error {
		close(receiving)
		// Blocks until cancelCtx is canceled; must surface the error.
		_, err := ch.Receive(cancelCtx)
		require.ErrorIs(t, err, context.Canceled)
		close(received)
		receivedWithoutRace = true
		return nil
	})
	<-receiving
	for i := 0; i < 10; i++ {
		// make sure the receive is blocked
		runtime.Gosched()
	}
	requireBlocked(t, received)
	require.False(t, receivedWithoutRace)
	cancel()
	// Sends still succeed after the canceled receive.
	toSend := 3
	var expected []int
	for i := 0; i < toSend; i++ {
		require.True(t, ch.BlockingSend(i))
		expected = append(expected, i)
	}
	<-received
	require.Equal(t, ch.StopReceiving(), expected)
}
|
package golog
// LogMsgCtx carries the context of a single log message: which logger
// produced it, at what level, and the message text itself.
type LogMsgCtx struct {
	// Name is the identifier of the logger that produced the message.
	Name string
	// Level is the name of the message's log level.
	Level string
	// Msg is the log message contents.
	Msg string
}

// NewLogMsgCtx builds a LogMsgCtx from a logger name, a level name and the
// message body.
func NewLogMsgCtx(name, lvl, msg string) LogMsgCtx {
	var ctx LogMsgCtx
	ctx.Name = name
	ctx.Level = lvl
	ctx.Msg = msg
	return ctx
}
|
// httping 0.9.1 - A tool to measure RTT on HTTP/S requests
// This software is distributed AS IS and has no warranty. This is merely a learning exercise and should not be used in production under any circumstances.
// This is my own work and not that of my employer, not is endorsed or supported by them in any conceivable way.
// Pedro Perez - pjperez@outlook.com
// Based on https://github.com/giigame/httping (Thanks!)
package main
import (
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"os/signal"
"strings"
"time"
"strconv"
"github.com/montanaflynn/stats"
)
const httpingVersion = "0.9.1"
//const jsonResults = true
// Reply is the data structure for listener-mode server replies: the server
// hostname, the caller's IP, and the server's current time.
type Reply struct {
	Hostname string
	ClientIP string
	Time     time.Time
}
// Result is the per-probe record emitted in -json mode.
type Result struct {
	Host        string  `json:"host"`
	HTTPVerb    string  `json:"httpVerb"`
	HostHeaders string  `json:"hostHeader"`
	Seq         int     `json:"seq"`
	HTTPStatus  int     `json:"httpStatus"`
	Bytes       int     `json:"bytes"`
	// RTT is the round-trip time in milliseconds.
	RTT float32 `json:"rtt"`
}
// main parses the command-line flags and dispatches either to listener mode
// (serving timestamp replies over HTTP) or to client mode (probing a URL).
func main() {
	// Available flags.
	urlPtr := flag.String("url", "", "Requested URL")
	httpverbPtr := flag.String("httpverb", "GET", "HTTP Verb: Only GET or HEAD supported at the moment")
	countPtr := flag.Int("count", 10, "Number of requests to send")
	listenPtr := flag.Int("listen", 0, "Enable listener mode on specified port, e.g. '-r 80'")
	hostHeaderPtr := flag.String("hostheader", "", "Optional: Host header")
	jsonResultsPtr := flag.Bool("json", false, "If true, produces output in json format")
	flag.Parse()

	urlStr := *urlPtr
	httpVerb := *httpverbPtr
	jsonResults := *jsonResultsPtr

	if !jsonResults {
		fmt.Println("\nhttping " + httpingVersion + " - A tool to measure RTT on HTTP/S requests")
		fmt.Println("Help: httping -h")
	}

	// If listener mode is selected, ignore the rest of the args.
	if *listenPtr > 0 {
		listenPort := strconv.Itoa(*listenPtr)
		fmt.Println("Listening on port " + listenPort)
		http.HandleFunc("/", serverRESPONSE)
		// ListenAndServe only returns on failure. Previously its error was
		// ignored and execution fell through to the URL validation below.
		log.Fatal(http.ListenAndServe(":"+listenPort, nil))
	}

	// Exit if URL is not specified, print usage.
	if len(urlStr) < 1 {
		flag.Usage()
		fmt.Printf("\nYou haven't specified a URL to test!\n\n")
		os.Exit(1)
	}

	// Exit if the number of probes is zero, print usage.
	if *countPtr < 1 {
		flag.Usage()
		fmt.Printf("\nNumber of probes has to be greater than 0!\n\n")
		os.Exit(1)
	}

	// Determine the protocol. strings.HasPrefix replaces the previous raw
	// slicing (urlStr[:8] panicked on 7-character URLs without a scheme).
	// If no protocol is specified, fall back to HTTP.
	if len(urlStr) > 6 {
		if !strings.HasPrefix(urlStr, "http://") && !strings.HasPrefix(urlStr, "https://") {
			if strings.Contains(urlStr, "://") {
				fmt.Println("\n\nWrong protocol specified, httping only supports HTTP and HTTPS")
				os.Exit(1)
			}
			fmt.Printf("\n\nNo protocol specified, falling back to HTTP\n\n")
			urlStr = "http://" + urlStr
		}
	} else {
		fmt.Println()
		os.Exit(1)
	}

	// Parse the URL and fail if it is malformed.
	url, err := url.Parse(urlStr)
	if err != nil {
		fmt.Println("Cannot resolve: " + urlStr)
		os.Exit(1)
	}

	// If a custom host header is specified, use it; otherwise use url.Host.
	hostHeader := url.Host
	if *hostHeaderPtr != "" {
		hostHeader = *hostHeaderPtr
	}

	if !jsonResults {
		fmt.Printf("HTTP %s to %s (%s):\n", httpVerb, url.Host, urlStr)
	}
	ping(httpVerb, url, *countPtr, hostHeader, jsonResults)
}
// ping sends up to count requests to url, printing per-probe results (plain
// text or JSON) and summary statistics when finished. An interrupt (Ctrl-C)
// stops the probe loop early via the fBreak flag.
func ping(httpVerb string, url *url.URL, count int, hostHeader string, jsonResults bool) {
	// Initialise needed variables.
	timeTotal := time.Duration(0)
	i := 1
	successfulProbes := 0
	var responseTimes []float64
	fBreak := 0

	// Change request timeout to 2 seconds.
	client := http.Client{
		Timeout: 2 * time.Second,
	}

	// Register the interrupt handler once, before the loop. Previously a new
	// channel and goroutine were created on every iteration, leaking both.
	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		for range c {
			// Stop the loop by enabling the fBreak flag.
			fBreak = 1
		}
	}()

	// Send requests for url, "count" times.
	for i = 1; count >= i && fBreak == 0; i++ {
		// Get the request ready - headers, verb, etc.
		request, err := http.NewRequest(httpVerb, url.String(), nil)
		if err != nil {
			// Previously a nil request would be handed to client.Do (panic);
			// report the probe as failed and move on instead.
			fmt.Println("Timeout when connecting to", url)
			time.Sleep(time.Second)
			continue
		}
		request.Host = hostHeader
		request.Header.Set("User-Agent", "httping "+httpingVersion)

		// Send the request and measure time to completion.
		timeStart := time.Now()
		result, errRequest := client.Do(request)
		responseTime := time.Since(timeStart)
		if errRequest != nil {
			fmt.Println("Timeout when connecting to", url)
		} else {
			// Add all the response times to calculate the average later.
			timeTotal += responseTime
			// Calculate the downloaded bytes.
			body, _ := ioutil.ReadAll(result.Body)
			// Close the body so the transport can reuse the connection
			// (previously leaked on every probe).
			result.Body.Close()
			bytes := len(body)
			// Print the result on screen.
			if jsonResults {
				// Get the json ready.
				results := &Result{
					Host:        url.Host,
					HTTPVerb:    httpVerb,
					HostHeaders: hostHeader,
					Seq:         i,
					HTTPStatus:  result.StatusCode,
					Bytes:       bytes,
					RTT:         float32(responseTime) / 1e6,
				}
				resultsMarshaled, _ := json.Marshal(results)
				fmt.Println(string(resultsMarshaled))
			} else {
				fmt.Printf("connected to %s, seq=%d, httpVerb=%s, httpStatus=%d, bytes=%d, RTT=%.2f ms\n", url, i, httpVerb, result.StatusCode, bytes, float32(responseTime)/1e6)
			}
			// Count successful probes (HTTP 200) and record their times.
			if result.StatusCode == 200 {
				successfulProbes++
				responseTimes = append(responseTimes, float64(responseTime))
			}
		}
		// Pause one second between probes.
		time.Sleep(time.Second)
	}

	// 1. Average response time.
	timeAverage := time.Duration(int64(0))
	if successfulProbes > 0 {
		timeAverage = time.Duration(int64(timeTotal) / int64(successfulProbes))
	} else {
		fmt.Println("All probes failed")
		os.Exit(1)
	}

	// 2. Min and max response times.
	var biggest float64
	smallest := float64(1000000000)
	for _, v := range responseTimes {
		if v > biggest {
			biggest = v
		}
		if v < smallest {
			smallest = v
		}
	}

	// 3. Median response time.
	median, _ := stats.Median(responseTimes)

	// 4. Percentiles.
	percentile90, _ := stats.Percentile(responseTimes, float64(90))
	percentile75, _ := stats.Percentile(responseTimes, float64(75))
	percentile50, _ := stats.Percentile(responseTimes, float64(50))
	percentile25, _ := stats.Percentile(responseTimes, float64(25))

	// Print it all!!!
	if !jsonResults {
		fmt.Println("\nProbes sent:", i-1, "\nSuccessful responses:", successfulProbes, "\n% of requests failed:", float64(100-(successfulProbes*100)/(i-1)), "\nMin response time:", time.Duration(smallest), "\nAverage response time:", timeAverage, "\nMedian response time:", time.Duration(median), "\nMax response time:", time.Duration(biggest))
		fmt.Println("\n90% of requests were faster than:", time.Duration(percentile90), "\n75% of requests were faster than:", time.Duration(percentile75), "\n50% of requests were faster than:", time.Duration(percentile50), "\n25% of requests were faster than:", time.Duration(percentile25))
	}
}
// serverRESPONSE handles listener-mode requests: it replies with a JSON
// document containing the server hostname, the caller's IP and the current
// server time.
func serverRESPONSE(w http.ResponseWriter, r *http.Request) {
	// Get the local hostname. Previously this error was silently discarded;
	// on failure hostname stays "" and the problem is logged.
	hostname, err := os.Hostname()
	if err != nil {
		log.Output(0, "hostname lookup failed")
	}
	// RemoteAddr returns the client IP address with the port after a colon.
	// We split the client IP + port based on colon(s) and only remove
	// after the last one, so we don't break IPv6.
	clientsocket := r.RemoteAddr
	clientipMap := strings.Split(clientsocket, ":")
	clientipMap = clientipMap[:len(clientipMap)-1]
	clientip := strings.Join(clientipMap, ":")
	// Construct the response with the gathered data.
	response := Reply{hostname, clientip, time.Now()}
	// Convert to json.
	jsonRESPONSE, err := json.Marshal(response)
	if err != nil {
		log.Output(0, "json conversion failed")
	}
	// Send the response back to the client.
	io.WriteString(w, string(jsonRESPONSE))
}
|
package postgres
import (
"chat/internal/chat"
"database/sql"
"fmt"
"strconv"
"github.com/lib/pq"
"github.com/pkg/errors"
)
// Compile-time check that *ChatStorage satisfies the chat.Chats interface.
var _ chat.Chats = &ChatStorage{}

// ChatStorage provides prepared-statement-backed persistence for chats.
// (The previous comment said "RobotStorage" — a copy/paste leftover.)
type ChatStorage struct {
	statementStorage

	// Prepared statements, initialised by NewChatStorage.
	createStmt   *sql.Stmt
	createUCStmt *sql.Stmt
	findStmt     *sql.Stmt
}
// NewChatStorage prepares all chat SQL statements against db and returns the
// ready-to-use storage. (The previous comment said "NewRobotStorage".)
func NewChatStorage(db *DB) (*ChatStorage, error) {
	s := &ChatStorage{statementStorage: newStatementsStorage(db)}

	// Statement texts paired with the struct fields they populate.
	stmts := []stmt{
		{Query: createChatQuery, Dst: &s.createStmt},
		{Query: findChatQuery, Dst: &s.findStmt},
		{Query: createChatUserQuery, Dst: &s.createUCStmt},
	}
	if err := s.initStatements(stmts); err != nil {
		return nil, errors.Wrap(err, "can't init statements")
	}
	return s, nil
}
// chatFields lists the columns scanned for a chat row.
const chatFields = "users, name, created_at"

// createChatQuery inserts a chat and returns its generated id.
const createChatQuery = "INSERT INTO public.chats (name, created_at) VALUES ($1, now()) RETURNING id"

// createChatUserQuery links a chat to every user id in the given array.
const createChatUserQuery = "INSERT INTO public.users_chats(chat_id, user_id) SELECT $1, unnest($2::integer[])"
// Create inserts a new chat row and its user associations inside a single
// transaction and returns the new chat id (also stored into c.ID).
func (s *ChatStorage) Create(c *chat.Chat) (int64, error) {
	tx, err := s.db.Session.Begin()
	if err != nil {
		return 0, err
	}
	{
		stmt := tx.Stmt(s.createStmt)
		defer stmt.Close()
		if err := stmt.QueryRow(&c.Name).Scan(&c.ID); err != nil {
			tx.Rollback()
			msg := fmt.Sprintf("can not exec query with chatID %v", c.ID)
			return 0, errors.WithMessage(err, msg)
		}
	}
	{
		stmt := tx.Stmt(s.createUCStmt)
		defer stmt.Close()
		userIDs := make([]int64, 0, len(c.Users))
		for _, v := range c.Users {
			// Previously the ParseInt error was discarded, silently linking
			// user id 0 for any malformed input; now it aborts the tx.
			num, err := strconv.ParseInt(v, 10, 64)
			if err != nil {
				tx.Rollback()
				msg := fmt.Sprintf("invalid user id %q for chat %v", v, c.ID)
				return 0, errors.WithMessage(err, msg)
			}
			userIDs = append(userIDs, num)
		}
		if _, err := stmt.Exec(c.ID, pq.Array(userIDs)); err != nil {
			tx.Rollback()
			msg := fmt.Sprintf("failed to exec users with chat id %v", c.ID)
			return 0, errors.WithMessage(err, msg)
		}
	}
	return c.ID, tx.Commit()
}
// findChatQuery selects every chat containing a given user id, ordered by
// the time of the chat's latest message (most recent first). The two string
// halves join as ")GROUP BY" without a space — SQL tokenizes this correctly
// because ')' is its own delimiter.
const findChatQuery = "SELECT chats.id, array_agg(users_chats.user_id), chats.name, chats.created_at FROM public.chats INNER JOIN users_chats ON (users_chats.chat_id = chats.id)" +
	"GROUP BY chats.id HAVING $1=ANY(array_agg(users_chats.user_id)) ORDER BY (SELECT MAX(created_at) FROM public.messages WHERE messages.chat = chats.id) DESC"
// Find returns every chat that the user identified by id participates in,
// most recently active first (see findChatQuery).
func (s *ChatStorage) Find(id int64) ([]*chat.Chat, error) {
	rows, err := s.findStmt.Query(id)
	if err != nil {
		msg := fmt.Sprintf("can't scan chats with user id %v", id)
		return nil, errors.WithMessage(err, msg)
	}
	defer rows.Close()

	var result []*chat.Chat
	for rows.Next() {
		item := new(chat.Chat)
		if err := scanChat(rows, item); err != nil {
			msg := fmt.Sprintf("failed to scan msgs with chat id %v", id)
			return nil, errors.WithMessage(err, msg)
		}
		result = append(result, item)
	}
	return result, nil
}
// scanChat scans one chat row (id, users array, name, created_at) into c.
func scanChat(scanner sqlScanner, c *chat.Chat) error {
	return scanner.Scan(&c.ID, pq.Array(&c.Users), &c.Name, &c.CreatedAt)
}
|
// SPDX-License-Identifier: MIT
package lsp
import (
"reflect"
"github.com/caixw/apidoc/v7/core"
"github.com/caixw/apidoc/v7/internal/ast"
"github.com/caixw/apidoc/v7/internal/lsp/protocol"
)
// Reflected interface types used when searching the AST for nodes that can
// be referenced (ast.Referencer) or that define something (ast.Definitioner).
var (
	referencerType   = reflect.TypeOf((*ast.Referencer)(nil)).Elem()
	definitionerType = reflect.TypeOf((*ast.Definitioner)(nil)).Elem()
)
// textDocumentReferences implements textDocument/references.
//
// https://microsoft.github.io/language-server-protocol/specifications/specification-current/#textDocument_references
func (s *server) textDocumentReferences(notify bool, in *protocol.ReferenceParams, out *[]core.Location) error {
	folder := s.findFolder(in.TextDocument.URI)
	if folder == nil {
		// The document does not belong to any open folder; nothing to do.
		return nil
	}

	folder.parsedMux.RLock()
	defer folder.parsedMux.RUnlock()

	*out = references(folder.doc, in.TextDocument.URI, in.Position, in.Context.IncludeDeclaration)
	return nil
}
// textDocumentDefinition implements textDocument/definition.
//
// https://microsoft.github.io/language-server-protocol/specifications/specification-current/#textDocument_definition
//
// NOTE: LSP 允许 out 的值是 null,而 jsonrpc 模块默认情况下是空值,而不是 nil,
// 所以在可能的情况下,都尽量将其返回类型改为数组,
// 或是像 protocol.Hover 一样为返回类型实现 json.Marshaler 接口。
func (s *server) textDocumentDefinition(notify bool, in *protocol.DefinitionParams, out *[]core.Location) error {
	folder := s.findFolder(in.TextDocument.URI)
	if folder == nil {
		return nil
	}

	folder.parsedMux.RLock()
	defer folder.parsedMux.RUnlock()

	found := folder.doc.Search(in.TextDocument.URI, in.TextDocumentPositionParams.Position, definitionerType)
	if found != nil {
		*out = []core.Location{found.(ast.Definitioner).Definition().Location}
	}
	return nil
}
// references collects the locations that reference the node found at pos in
// uri. When include is true, the declaration's own location is prepended.
// Returns nil when nothing referencable is at the position.
func references(doc *ast.APIDoc, uri core.URI, pos core.Position, include bool) (locations []core.Location) {
	found := doc.Search(uri, pos, referencerType)
	if found == nil {
		return nil
	}

	referencer := found.(ast.Referencer)
	var locs []core.Location
	if include {
		locs = append(locs, referencer.Loc())
	}
	for _, ref := range referencer.References() {
		locs = append(locs, ref.Location)
	}
	return locs
}
|
package lambda
import (
"context"
"fmt"
"os"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/service/s3"
"golang.org/x/sync/errgroup"
"github.com/Clever/ci-scripts/internal/environment"
)
// Lambda wraps s3 to provide a simple API for building and publishing
// lambdas. The stored AWS config is cloned per region at publish time.
type Lambda struct {
	awsCfg aws.Config
}
// New initializes a new Lambda handling wrapper with an AWS config built
// from the lambda-publishing credentials in the environment.
func New(ctx context.Context) *Lambda {
	return &Lambda{
		awsCfg: environment.AWSCfg(ctx, environment.LambdaAccessKeyID, environment.LambdaSecretAccessKey),
	}
}
// Publish uploads an already built lambda artifact archive to s3 using the
// artifact name as the key. The archive is pushed to each configured aws
// region, each in its own goroutine; the first error cancels the rest.
func (l *Lambda) Publish(ctx context.Context, binaryPath, artifactName string) error {
	grp, grpCtx := errgroup.WithContext(ctx)
	for _, region := range environment.Regions {
		region := region // per-iteration copy for the closure below
		bucket := fmt.Sprintf("%s-%s", environment.LambdaArtifactBucketPrefix, region)
		key := s3Key(artifactName)
		s3uri := fmt.Sprintf("s3://%s/%s", bucket, key)
		fmt.Println("uploading lambda artifact", binaryPath, "to", s3uri, "...")
		grp.Go(func() error {
			f, err := os.Open(binaryPath)
			if err != nil {
				return fmt.Errorf("unable to open lambda artifact archive %s: %v", binaryPath, err)
			}
			// Close the archive when this upload finishes; previously the
			// file handle leaked on every region of every call.
			defer f.Close()
			cfg := l.awsCfg.Copy()
			cfg.Region = region
			_, err = s3.NewFromConfig(cfg).PutObject(grpCtx, &s3.PutObjectInput{
				Bucket: aws.String(bucket),
				Key:    aws.String(key),
				Body:   f,
			})
			if err != nil {
				return fmt.Errorf("failed to upload %s to %s: %v", binaryPath, s3uri, err)
			}
			return nil
		})
	}
	return grp.Wait()
}
|
package controller
import (
"context"
"encoding/json"
"errors"
"github.com/ipfs/go-cid"
storeError "github.com/kenlabs/pando-store/pkg/error"
"github.com/kenlabs/pando-store/pkg/types/cbortypes"
v1 "github.com/kenlabs/pando/pkg/api/v1"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/mongo/readpref"
"net/http"
"strconv"
)
// SnapShotList returns the JSON-encoded snapshot list from the snapshot
// store, wrapping failures as API errors (404 when the list is absent).
func (c *Controller) SnapShotList() ([]byte, error) {
	list, err := c.Core.StoreInstance.PandoStore.SnapShotStore().GetSnapShotList(context.Background())
	if err != nil {
		return nil, v1.NewError(err, http.StatusInternalServerError)
	}
	if list == nil {
		return nil, v1.NewError(v1.ResourceNotFound, http.StatusNotFound)
	}

	encoded, err := json.Marshal(*list)
	if err != nil {
		return nil, v1.NewError(err, http.StatusInternalServerError)
	}
	return encoded, nil
}
// MetadataSnapShot returns the JSON-encoded snapshot selected by cid and/or
// height. At least one selector is required; when both are supplied they
// must agree on the snapshot.
func (c *Controller) MetadataSnapShot(ctx context.Context, cidstr string, height string) ([]byte, error) {
	var snapshotFromHeight *cbortypes.SnapShot
	var snapshotFromCid *cbortypes.SnapShot
	if cidstr == "" && height == "" {
		return nil, v1.NewError(errors.New("height or cid is required"), http.StatusBadRequest)
	}
	// Lookup by cid, when given.
	if cidstr != "" {
		snapshotCid, err := cid.Decode(cidstr)
		if err != nil {
			return nil, err
		}
		snapshotFromCid, err = c.Core.StoreInstance.PandoStore.SnapShotStore().GetSnapShotByCid(ctx, snapshotCid)
		if err != nil {
			if err == storeError.InvalidParameters {
				return nil, v1.NewError(v1.InvalidQuery, http.StatusBadRequest)
			}
			return nil, v1.NewError(err, http.StatusInternalServerError)
		}
	}
	// Lookup by height, when given.
	if height != "" {
		snapshotHeight, err := strconv.ParseUint(height, 10, 64)
		if err != nil {
			return nil, v1.NewError(err, http.StatusBadRequest)
		}
		snapshotFromHeight, _, err = c.Core.StoreInstance.PandoStore.SnapShotStore().GetSnapShotByHeight(ctx, snapshotHeight)
		if err != nil {
			if err == storeError.InvalidParameters {
				return nil, v1.NewError(v1.InvalidQuery, http.StatusBadRequest)
			}
			return nil, v1.NewError(err, http.StatusInternalServerError)
		}
	}
	// NOTE(review): this compares the two *pointers*, not snapshot contents;
	// the cid and height lookups allocate separately, so this branch may
	// trigger even when both selectors name the same snapshot — confirm the
	// store returns a shared instance, or compare identities instead.
	if snapshotFromHeight != nil && snapshotFromCid != nil && snapshotFromHeight != snapshotFromCid {
		return nil, v1.NewError(errors.New("dismatched cid and height for snapshot"), http.StatusBadRequest)
	}
	var resSnapshot *cbortypes.SnapShot
	var res []byte
	var err error
	// Prefer the cid-based result when both lookups ran.
	if snapshotFromCid != nil {
		resSnapshot = snapshotFromCid
	} else {
		resSnapshot = snapshotFromHeight
	}
	res, err = json.Marshal(resSnapshot)
	if err != nil {
		return nil, v1.NewError(err, http.StatusInternalServerError)
	}
	return res, nil
}
// MetaInclusion returns the JSON-encoded inclusion information for the
// metadata identified by cidstr.
func (c *Controller) MetaInclusion(ctx context.Context, cidstr string) ([]byte, error) {
	metaCid, err := cid.Decode(cidstr)
	if err != nil {
		logger.Errorf("invalid cid: %s, err:%v", metaCid.String(), err)
		return nil, v1.NewError(errors.New("invalid cid"), http.StatusBadRequest)
	}

	inclusion, err := c.Core.StoreInstance.PandoStore.MetaInclusion(ctx, metaCid)
	if err != nil {
		logger.Errorf("failed to get meta inclusion for cid: %s, err:%v", metaCid.String(), err)
		return nil, v1.NewError(v1.InternalServerError, http.StatusInternalServerError)
	}

	payload, err := json.Marshal(inclusion)
	if err != nil {
		return nil, v1.NewError(err, http.StatusInternalServerError)
	}
	return payload, nil
}
// MetadataQuery runs an extended-JSON Mongo command against the provider's
// metadata cache database and returns all resulting documents.
func (c *Controller) MetadataQuery(ctx context.Context, providerID string, queryStr string) (queryResult interface{}, err error) {
	var command bson.D
	if err = bson.UnmarshalExtJSON([]byte(queryStr), true, &command); err != nil {
		return nil, err
	}

	cursor, err := c.Core.StoreInstance.MetadataCache.Database(providerID).RunCommandCursor(
		ctx,
		command,
		options.RunCmd().SetReadPreference(readpref.Primary()),
	)
	if err != nil {
		return nil, err
	}

	var docs []bson.M
	if err = cursor.All(ctx, &docs); err != nil {
		return nil, err
	}
	return docs, nil
}
|
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information
package sync2_test
import (
"sync/atomic"
"testing"
"github.com/stretchr/testify/require"
"storj.io/common/sync2"
)
// TestGo verifies that sync2.Go runs every supplied function and that the
// returned wait function blocks until all complete and is safe to call more
// than once without side effects.
func TestGo(t *testing.T) {
	var a int32
	wait := sync2.Go(
		func() { atomic.AddInt32(&a, 1) },
		func() { atomic.AddInt32(&a, 1) },
	)
	wait()
	require.Equal(t, int32(2), a)
	// A second wait must be a no-op: the counter is unchanged.
	wait()
	require.Equal(t, int32(2), a)
}
|
package prompt
import (
"github.com/google/wire"
)
// WireSet provides this package's bindings for google/wire: the terminal
// prompt constructor and the TTY-backed input opener.
var WireSet = wire.NewSet(
	NewTerminalPrompt,
	wire.Value(OpenInput(TTYOpen)))
|
package main
import "fmt"
// soma returns the sum of all of its integer arguments; with no arguments
// the result is 0.
func soma(numeros ...int) int {
	total := 0
	for i := range numeros {
		total += numeros[i]
	}
	return total
}
// main demonstrates the variadic soma helper by summing 1..10.
func main() {
	fmt.Println(soma(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
}
|
package raftkv
import "github.com/6.824/labrpc"
import "crypto/rand"
import "math/big"
// RetryState describes what a Clerk should do after an RPC attempt.
type RetryState int

const (
	// DontRetry: the call completed; no further attempts are needed.
	DontRetry RetryState = iota
	// RetryWithNewIndex: pick a different server and retry.
	RetryWithNewIndex
	// RetryWithOldIndex: retry against the same server.
	RetryWithOldIndex
)
// Clerk issues Get/Put/Append RPCs against a RaftKV service, retrying across
// servers until a leader answers.
type Clerk struct {
	servers []*labrpc.ClientEnd
	// You will have to modify this struct.
	// serverCount caches len(servers) for random server selection.
	serverCount int
	// maxRetryTime is currently unused. NOTE(review): confirm whether a
	// retry cap was intended here.
	maxRetryTime int32
}
// nrand returns a cryptographically random non-negative int64 below 2^62,
// used as a request nonce.
func nrand() int64 {
	limit := big.NewInt(int64(1) << 62)
	n, _ := rand.Int(rand.Reader, limit)
	return n.Int64()
}
// MakeClerk constructs a Clerk over the given server endpoints, caching the
// server count for random selection.
func MakeClerk(servers []*labrpc.ClientEnd) *Clerk {
	ck := new(Clerk)
	ck.servers = servers
	// You'll have to add code here.
	ck.serverCount = len(servers)
	return ck
}
// getServer picks a replacement server index uniformly at random. The
// previous index is accepted for interface compatibility but does not
// influence the choice (the original assigned it to a local and immediately
// overwrote it — that dead store is removed).
func (ck *Clerk) getServer(server int) int {
	return int(nrand() % int64(ck.serverCount))
}
//
// Get fetches the current value for a key; returns "" if the key does not
// exist, and keeps trying forever in the face of all other errors.
//
// RPCs are sent as ok := ck.servers[i].Call("RaftKV.Get", &args, &reply);
// the arg/reply types (including pointer-ness) must match the handler's
// declared types, and reply must be passed as a pointer.
//
func (ck *Clerk) Get(key string) string {
	// You will have to modify this function.
	args := GetArgs{}
	args.Key = key
	args.Nonce = nrand()

	value := ""
	state := RetryWithNewIndex
	idx := -1
	for state != DontRetry {
		// Only re-pick a server when the last attempt asked for one.
		if state == RetryWithNewIndex {
			idx = ck.getServer(idx)
		}
		value, state = ck.GetInternal(&args, idx)
	}
	return value
}
// GetInternal performs one Get RPC against the given server and reports the
// value (if any) plus what the caller should do next.
func (ck *Clerk) GetInternal(getRequest *GetArgs, serverIndex int) (string, RetryState) {
	reply := GetReply{}
	if !ck.servers[serverIndex].Call("RaftKV.Get", getRequest, &reply) {
		// Transport failure: try another server.
		return "", RetryWithNewIndex
	}
	if reply.WrongLeader {
		return "", RetryWithNewIndex
	}
	if reply.Err == OK || reply.Err == ErrNoKey {
		// Success; a missing key yields the zero value "".
		return reply.Value, DontRetry
	}
	// Any other error: retry the same server.
	return "", RetryWithOldIndex
}
//
// PutAppend is shared by Put and Append.
//
// RPCs are sent as ok := ck.servers[i].Call("RaftKV.PutAppend", &args, &reply);
// the arg/reply types (including pointer-ness) must match the handler's
// declared types, and reply must be passed as a pointer.
//
func (ck *Clerk) PutAppend(key string, value string, op string) {
	// You will have to modify this function.
	args := PutAppendArgs{}
	args.Key = key
	args.Value = value
	args.Op = op
	args.Nonce = nrand()

	state := RetryWithNewIndex
	idx := -1
	for state != DontRetry {
		// Back off between retries once a first attempt has been made.
		if idx != -1 {
			Sleep(1000)
		}
		// do real put.
		if state == RetryWithNewIndex {
			idx = ck.getServer(idx)
		}
		state = ck.PutInternal(&args, idx)
	}
}
// PutInternal performs one PutAppend RPC against the given server and
// reports what the caller should do next.
func (ck *Clerk) PutInternal(putAppendArgs *PutAppendArgs, serverIndex int) RetryState {
	reply := PutAppendReply{}
	ok := ck.servers[serverIndex].Call("RaftKV.PutAppend", putAppendArgs, &reply)
	DPrintf("retry?%v, reply=%v", ok, &reply)
	if !ok || reply.WrongLeader {
		// Transport failure or stale leader: try another server.
		return RetryWithNewIndex
	}
	if reply.Err != OK {
		// Recoverable error: retry the same server.
		return RetryWithOldIndex
	}
	return DontRetry
}
// Put stores value under key, replacing any previous value.
func (ck *Clerk) Put(key string, value string) {
	ck.PutAppend(key, value, "Put")
}
// Append appends value to the existing value stored under key.
func (ck *Clerk) Append(key string, value string) {
	ck.PutAppend(key, value, "Append")
}
|
package main
import (
"github.com/gorilla/websocket"
"log"
"net/http"
)
// Websocket buffer sizes passed to the upgrader, in bytes.
const (
	ReadBufferSize  = 1024
	WriteBufferSize = 1024
)
// Server is a websocket server with an optional concurrent-connection cap
// implemented as a token channel (see LimitConnections).
type Server struct {
	Address   string
	// connLimit is 0 when unlimited; otherwise the token-channel capacity.
	connLimit int
	// conns holds one token per available connection slot.
	conns chan struct{}
	Http  *http.Server
}
// NewServer constructs a Server bound to address and wires the server in as
// its own HTTP handler.
func NewServer(address string) *Server {
	srv := &Server{Address: address}
	srv.Http = &http.Server{Addr: address}
	srv.Http.Handler = srv
	return srv
}
// LimitConnections caps concurrent websocket connections at count by filling
// a token channel; ServeHTTP takes a token per connection.
func (s *Server) LimitConnections(count int) {
	tokens := make(chan struct{}, count)
	for n := 0; n < count; n++ {
		tokens <- struct{}{}
	}
	s.connLimit = count
	s.conns = tokens
}
// ServeHTTP upgrades incoming requests to websocket connections, enforcing
// the optional connection limit, then drains messages until the peer
// disconnects.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if s.connLimit != 0 {
		select {
		case <-s.conns:
			// Return the token when this connection ends.
			defer func() {
				s.conns <- struct{}{}
			}()
		default:
			log.Printf("Connection from %s refused. Limit reached.", r.RemoteAddr)
			http.Error(w, "Connection limit reached", http.StatusServiceUnavailable)
			return
		}
	}

	conn, err := websocket.Upgrade(w, r, nil, ReadBufferSize, WriteBufferSize)
	if _, handshake := err.(websocket.HandshakeError); handshake {
		http.Error(w, "Websocket handshake expected", 400)
		return
	}
	if err != nil {
		log.Print(err)
		return
	}
	log.Printf("Websocket connection from %s established.", r.RemoteAddr)

	// Serve the websocket connection until the reader fails (peer closed).
	for {
		msgType, _, err := conn.NextReader()
		if err != nil {
			log.Printf("Websocket connection from %s closed.", r.RemoteAddr)
			return
		}
		// Both message kinds are currently accepted and discarded.
		switch msgType {
		case websocket.BinaryMessage:
		case websocket.TextMessage:
		}
	}
}
|
/*
* Neblio REST API Suite
*
* APIs for Interacting with NTP1 Tokens & The Neblio Blockchain
*
* API version: 1.3.0
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package neblioapi
// IssueTokenResponse is the API reply for an NTP1 token-issuance request.
type IssueTokenResponse struct {
	// TokenId of the to be issued token
	TokenId string `json:"tokenId,omitempty"`
	// Unsigned, raw transaction hex of the transaction to issue the token
	TxHex string `json:"txHex,omitempty"`
}
|
package apiControllers
import "github.com/gin-gonic/gin"
// broadvidadsFlushAdCache is a gin handler stub; flushing the ad cache is
// not implemented yet (no response is written).
func broadvidadsFlushAdCache(c *gin.Context) {}
|
package main
import (
"fmt"
"math"
)
/*
Given an array which consists of non-negative integers and an integer m,
you can split the array into m non-empty continuous subarrays.
Write an algorithm to minimize the largest sum among these m subarrays.
Note:
If n is the length of array, assume the following constraints are satisfied:
1 ≤ n ≤ 1000
1 ≤ m ≤ min(50, n)
Examples:
Input:
nums = [7,2,5,10,8]
m = 2
Output:
18
Explanation:
There are four ways to split nums into two subarrays.
The best way is to split it into [7,2,5] and [10,8],
where the largest sum among the two subarrays is only 18.
*/
// splitArray returns the minimized largest subarray sum when nums is split
// into m non-empty continuous subarrays.
//
// Recurrence: dp[i][j] = min over k in [i-1, j) of max(dp[i-1][k],
// sum(nums[k:j])). k starts at i-1 so each of the first i-1 groups keeps at
// least one element; sum is a prefix-sum array giving O(1) range sums.
// (The leftover debug dump of the dp table, printed on every outer
// iteration, has been removed.)
func splitArray(nums []int, m int) int {
	n := len(nums)

	// Prefix sums: sum[i] = nums[0] + ... + nums[i-1].
	sum := make([]int, n+1)
	for i := 1; i <= n; i++ {
		sum[i] = sum[i-1] + nums[i-1]
	}

	// dp initialised to "impossible" everywhere except the empty split.
	dp := make([][]int, m+1)
	for i := range dp {
		dp[i] = make([]int, n+1)
		for j := range dp[i] {
			dp[i][j] = math.MaxInt64
		}
	}
	dp[0][0] = 0

	for i := 1; i <= m; i++ {
		for j := 1; j <= n; j++ {
			for k := i - 1; k < j; k++ {
				// Cost of making nums[k:j] the i-th group.
				max := dp[i-1][k]
				if max < sum[j]-sum[k] {
					max = sum[j] - sum[k]
				}
				if dp[i][j] > max {
					dp[i][j] = max
				}
			}
		}
	}
	return dp[m][n]
}
// main runs splitArray on a sample input and prints the result.
func main() {
	//fmt.Println(splitArray([]int{7, 2, 5, 10, 8}, 2))
	fmt.Println(splitArray([]int{1, 2,2147483646}, 2))
}
|
package protectionruns
import(
"errors"
"fmt"
"encoding/json"
"github.com/cohesity/management-sdk-go/models"
"github.com/cohesity/management-sdk-go/unirest-go"
"github.com/cohesity/management-sdk-go/apihelper"
"github.com/cohesity/management-sdk-go/configuration"
)
/*
 * Client structure as interface implementation; carries the SDK
 * configuration (base URI, access token, SSL settings) used by every call.
 */
type PROTECTIONRUNS_IMPL struct {
	config configuration.CONFIGURATION
}
/**
 * Cancel a Protection Job run via POST /public/protectionRuns/cancel/{id}.
 * Requires a prior client.Authorize() so an access token is available.
 * @param int64 id parameter: Required
 * @param *models.CancelAProtectionJobRun body parameter: Optional
 * @return nil on HTTP 200-206, otherwise an error
 */
func (me *PROTECTIONRUNS_IMPL) CreateCancelProtectionJobRun (
	id int64,
	body *models.CancelAProtectionJobRun) (error) {
	//the endpoint path uri
	_pathUrl := "/public/protectionRuns/cancel/{id}"

	//variable to hold errors
	var err error = nil
	//process optional template parameters
	_pathUrl, err = apihelper.AppendUrlWithTemplateParameters(_pathUrl, map[string]interface{} {
		"id" : id,
	})
	if err != nil {
		//error in template param handling
		return err
	}

	//the base uri for api requests
	_queryBuilder := configuration.GetBaseURI(configuration.DEFAULT_HOST,me.config);

	//prepare query string for API call
	_queryBuilder = _queryBuilder + _pathUrl

	//validate and preprocess url
	_queryBuilder, err = apihelper.CleanUrl(_queryBuilder)
	if err != nil {
		//error in url validation or cleaning
		return err
	}
	if me.config.AccessToken() == nil {
		return errors.New("Access Token not set. Please authorize the client using client.Authorize()");
	}
	//prepare headers for the outgoing request
	headers := map[string]interface{} {
		"user-agent" : "cohesity-Go-sdk-6.2.0",
		"content-type" : "application/json; charset=utf-8",
		"Authorization" : fmt.Sprintf("%s %s",*me.config.AccessToken().TokenType, *me.config.AccessToken().AccessToken),
	}

	//prepare API request
	_request := unirest.Post(_queryBuilder, headers, body)
	//and invoke the API call request to fetch the response
	_response, err := unirest.AsString(_request,me.config.SkipSSL());
	if err != nil {
		//error in API invocation
		return err
	}

	//error handling using HTTP status codes
	if (_response.Code == 0) {
		err = apihelper.NewAPIError("Error", _response.Code, _response.RawBody)
	} else if (_response.Code < 200) || (_response.Code > 206) { //[200,206] = HTTP OK
		err = apihelper.NewAPIError("HTTP Response Not OK", _response.Code, _response.RawBody)
	}
	if(err != nil) {
		//error detected in status code validation
		return err
	}

	//returning the response
	return nil
}
/**
 * GetProtectionRuns returns Job Runs on the Cohesity Cluster, both running
 * and completed. If no parameters are specified, all current Job Runs are
 * returned; specifying parameters filters the results.
 * @param *int64 startTimeUsecs parameter: Optional
 * @param *bool excludeTasks parameter: Optional
 * @param *int64 sourceId parameter: Optional
 * @param *int64 jobId parameter: Optional
 * @param *int64 endTimeUsecs parameter: Optional
 * @param *int64 numRuns parameter: Optional
 * @param []string runTypes parameter: Optional
 * @param *bool excludeErrorRuns parameter: Optional
 * @param *bool excludeNonRestoreableRuns parameter: Optional
 * @param *int64 startedTimeUsecs parameter: Optional
 * @return Returns the []*models.ProtectionJobRunInstance response from the API call
 */
func (me *PROTECTIONRUNS_IMPL) GetProtectionRuns(
	startTimeUsecs *int64,
	excludeTasks *bool,
	sourceId *int64,
	jobId *int64,
	endTimeUsecs *int64,
	numRuns *int64,
	runTypes []string,
	excludeErrorRuns *bool,
	excludeNonRestoreableRuns *bool,
	startedTimeUsecs *int64) ([]*models.ProtectionJobRunInstance, error) {
	// The endpoint path uri.
	_pathUrl := "/public/protectionRuns"
	// Holds the first error encountered; `var err error` is the idiomatic
	// form (the zero value of error is already nil).
	var err error
	// The base uri for api requests, plus the endpoint path.
	_queryBuilder := configuration.GetBaseURI(configuration.DEFAULT_HOST, me.config)
	_queryBuilder = _queryBuilder + _pathUrl
	// Process optional query parameters.
	_queryBuilder, err = apihelper.AppendUrlWithQueryParameters(_queryBuilder, map[string]interface{}{
		"startTimeUsecs":            startTimeUsecs,
		"excludeTasks":              excludeTasks,
		"sourceId":                  sourceId,
		"jobId":                     jobId,
		"endTimeUsecs":              endTimeUsecs,
		"numRuns":                   numRuns,
		"runTypes":                  runTypes,
		"excludeErrorRuns":          excludeErrorRuns,
		"excludeNonRestoreableRuns": excludeNonRestoreableRuns,
		"startedTimeUsecs":          startedTimeUsecs,
	})
	if err != nil {
		// Error in query param handling.
		return nil, err
	}
	// Validate and preprocess url.
	_queryBuilder, err = apihelper.CleanUrl(_queryBuilder)
	if err != nil {
		// Error in url validation or cleaning.
		return nil, err
	}
	if me.config.AccessToken() == nil {
		return nil, errors.New("Access Token not set. Please authorize the client using client.Authorize()")
	}
	// Prepare headers for the outgoing request.
	headers := map[string]interface{}{
		"user-agent":    "cohesity-Go-sdk-6.2.0",
		"accept":        "application/json",
		"Authorization": fmt.Sprintf("%s %s", *me.config.AccessToken().TokenType, *me.config.AccessToken().AccessToken),
	}
	// Prepare the API request and invoke it to fetch the response.
	_request := unirest.Get(_queryBuilder, headers)
	_response, err := unirest.AsString(_request, me.config.SkipSSL())
	if err != nil {
		// Error in API invocation.
		return nil, err
	}
	// Error handling using HTTP status codes.
	if _response.Code == 0 {
		err = apihelper.NewAPIError("Error", _response.Code, _response.RawBody)
	} else if (_response.Code < 200) || (_response.Code > 206) { // [200,206] = HTTP OK
		err = apihelper.NewAPIError("HTTP Response Not OK", _response.Code, _response.RawBody)
	}
	if err != nil {
		// Error detected in status code validation.
		return nil, err
	}
	// Decode the JSON response body.
	var retVal []*models.ProtectionJobRunInstance
	err = json.Unmarshal(_response.RawBody, &retVal)
	if err != nil {
		// Error in parsing.
		return nil, err
	}
	return retVal, nil
}
/**
 * UpdateProtectionRuns updates the expiration date (retention period) for the
 * specified Protection Job Runs and their snapshots.
 * After an expiration time is reached, the Job Run and its snapshots are deleted.
 * If an expiration time of 0 is specified, a Job Run and its snapshots
 * are immediately deleted.
 * @param *models.UpdateProtectionJobRunsParameters body parameter: Required
 * @return Returns the response from the API call
 */
func (me *PROTECTIONRUNS_IMPL) UpdateProtectionRuns(
	body *models.UpdateProtectionJobRunsParameters) error {
	// Validating required parameters.
	if body == nil {
		return errors.New("The parameter 'body' is a required parameter and cannot be nil.")
	}
	// The endpoint path uri.
	_pathUrl := "/public/protectionRuns"
	// Holds the first error encountered; `var err error` is the idiomatic
	// form (the zero value of error is already nil).
	var err error
	// The base uri for api requests, plus the endpoint path.
	_queryBuilder := configuration.GetBaseURI(configuration.DEFAULT_HOST, me.config)
	_queryBuilder = _queryBuilder + _pathUrl
	// Validate and preprocess url.
	_queryBuilder, err = apihelper.CleanUrl(_queryBuilder)
	if err != nil {
		// Error in url validation or cleaning.
		return err
	}
	if me.config.AccessToken() == nil {
		return errors.New("Access Token not set. Please authorize the client using client.Authorize()")
	}
	// Prepare headers for the outgoing request.
	headers := map[string]interface{}{
		"user-agent":    "cohesity-Go-sdk-6.2.0",
		"content-type":  "application/json; charset=utf-8",
		"Authorization": fmt.Sprintf("%s %s", *me.config.AccessToken().TokenType, *me.config.AccessToken().AccessToken),
	}
	// Prepare the API request and invoke it to fetch the response.
	_request := unirest.Put(_queryBuilder, headers, body)
	_response, err := unirest.AsString(_request, me.config.SkipSSL())
	if err != nil {
		// Error in API invocation.
		return err
	}
	// Error handling using HTTP status codes.
	if _response.Code == 0 {
		err = apihelper.NewAPIError("Error", _response.Code, _response.RawBody)
	} else if (_response.Code < 200) || (_response.Code > 206) { // [200,206] = HTTP OK
		err = apihelper.NewAPIError("HTTP Response Not OK", _response.Code, _response.RawBody)
	}
	if err != nil {
		// Error detected in status code validation.
		return err
	}
	return nil
}
|
package distro_test
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/osbuild/osbuild-composer/internal/distro"
"github.com/osbuild/osbuild-composer/internal/distro/distro_test_common"
"github.com/osbuild/osbuild-composer/internal/distro/fedora31"
"github.com/osbuild/osbuild-composer/internal/distro/fedora32"
"github.com/osbuild/osbuild-composer/internal/distro/fedora33"
"github.com/osbuild/osbuild-composer/internal/distro/rhel8"
)
// TestDistro_Manifest runs the shared manifest test cases found under
// test/data/cases against every supported distribution.
func TestDistro_Manifest(t *testing.T) {
	distro_test_common.TestDistro_Manifest(
		t,
		"../../test/data/cases/",
		"*", // run every case file in the directory
		fedora31.New(), fedora32.New(), fedora33.New(), rhel8.New(),
	)
}
// Test that all distros are registered properly and that Registry.List() works.
func TestDistro_RegistryList(t *testing.T) {
	// Expected names of the four registered distros.
	expected := []string{
		"fedora-31",
		"fedora-32",
		"fedora-33",
		"rhel-8",
	}
	distros, err := distro.NewRegistry(fedora31.New(), fedora32.New(), fedora33.New(), rhel8.New())
	require.NoError(t, err)
	require.Equalf(t, expected, distros.List(), "unexpected list of distros")
}
|
package timingwheel_test
import (
"fmt"
"time"
"github.com/EagleChen/timingwheel"
)
// ExampleTimingWheel schedules callbacks on a timing wheel three ways:
// once via an absolute deadline (Add), then twice via relative delays
// (After). A final callback is scheduled just before Stop and never fires.
func ExampleTimingWheel() {
	tw := timingwheel.NewWheelTimer()
	tw.Start()
	// done lets the example wait for each callback before scheduling the next.
	done := make(chan struct{})
	// Absolute deadline: UnixNano/1e6 — presumably milliseconds — plus one.
	// TODO confirm the expected unit against the timingwheel API.
	if err := tw.Add(time.Now().UnixNano()/1000000+1, func() {
		fmt.Println("hello world")
		done <- struct{}{}
	}); err != nil {
		// On scheduling failure, print the error and still signal done so
		// the receive below does not block forever.
		fmt.Println(err)
		done <- struct{}{}
	}
	<-done
	if err := tw.After(5, func() {
		fmt.Println("hello world again")
		done <- struct{}{}
	}); err != nil {
		fmt.Println(err)
		done <- struct{}{}
	}
	<-done
	// create another timewheel internally
	if err := tw.After(30, func() {
		fmt.Println("hello world again and again")
		done <- struct{}{}
	}); err != nil {
		done <- struct{}{}
	}
	<-done
	// Intentionally never observed: the wheel is stopped before the delay
	// elapses, so this callback must not run (see expected Output below).
	tw.After(10, func() {
		fmt.Println("won't run")
	})
	tw.Stop()
	// Output:
	// hello world
	// hello world again
	// hello world again and again
}
|
package helpers
import (
"encoding/json"
"fmt"
"reflect"
"strings"
)
// ToLiteralForJS renders input as a JavaScript literal: booleans and numbers
// verbatim, strings double-quoted, slices as "[e1, e2, ...]" with each
// element rendered recursively, and anything else via JSON encoding.
// A nil input yields "null".
//
// NOTE(review): strings are not escaped beyond wrapping in quotes, so a
// value containing `"` produces an invalid literal — confirm inputs.
func ToLiteralForJS(input interface{}) string {
	if input == nil {
		// reflect.TypeOf(nil) returns nil and Kind() would panic; guard.
		return "null"
	}
	switch t := input.(type) {
	case bool, int, int8, int16, int32, int64, float32, float64:
		return fmt.Sprintf("%v", t)
	case string:
		return fmt.Sprintf("\"%s\"", t)
	}
	switch reflect.TypeOf(input).Kind() {
	case reflect.Slice:
		v := reflect.ValueOf(input)
		l := v.Len()
		es := make([]string, l)
		for i := 0; i < l; i++ {
			// Recurse with ToLiteralForJS — the original mistakenly called
			// ToLiteralForGo here, so elements got Go formatting.
			es[i] = ToLiteralForJS(v.Index(i).Interface())
		}
		return fmt.Sprintf("[%s]", strings.Join(es, ", "))
	}
	// Fallback: best-effort JSON encoding; on failure log and return "".
	b, err := json.Marshal(input)
	if err != nil {
		fmt.Println(err)
	}
	return string(b)
}
// ConvertTypeForJS maps a schema type name to its JavaScript equivalent.
// Currently only "integer" is remapped; unknown names pass through unchanged.
func ConvertTypeForJS(s string) string {
	switch s {
	case "integer":
		return "number"
	default:
		return s
	}
}
|
package login_psql
import (
"context"
"database/sql"
"errors"
"github.com/lib/pq"
"github.com/overmesgit/awesomeSql/login"
"github.com/overmesgit/awesomeSql/login_psql/models"
"github.com/volatiletech/null/v8"
"github.com/volatiletech/sqlboiler/v4/boil"
)
// PSQLStorage is a PostgreSQL-backed implementation of the login storage.
type PSQLStorage struct {
	db *sql.DB
}
// NewPSQLStorage opens a PostgreSQL handle for the given connection string.
// Note: sql.Open validates its arguments only and does not dial the server,
// so a bad address surfaces on first use — NOTE(review): consider a Ping
// here if fail-fast behavior is wanted.
func NewPSQLStorage(conn string) (*PSQLStorage, error) {
	db, err := sql.Open("postgres", conn)
	if err != nil {
		return nil, err
	}
	return &PSQLStorage{db}, nil
}
// psqlUser maps a domain user plus its pre-hashed password to the sqlboiler
// row model. Mood is stored as a nullable string.
func psqlUser(user *login.User, passwordHash string) *models.User {
	return &models.User{
		Username: user.Username,
		Password: passwordHash,
		Email: user.Email,
		Mood: null.StringFrom(user.Mood),
		Type: string(user.Type),
	}
}
// userFromPsqlUser maps a sqlboiler row back to the domain user.
// The password hash is intentionally not copied back.
func userFromPsqlUser(userObj *models.User) *login.User {
	return &login.User{
		UserID: int32(userObj.UserID),
		Username: userObj.Username,
		Email: userObj.Email,
		Mood: userObj.Mood.String,
		Type: login.UserType(userObj.Type),
	}
}
// GetDB exposes the underlying *sql.DB, e.g. for migrations or tests.
func (s *PSQLStorage) GetDB() *sql.DB {
	return s.db
}
// Create inserts a new user row with the given pre-hashed password and
// returns the generated user ID.
// A unique-constraint violation maps to UserAlreadyExistError; any other
// insert failure maps to InternalError. (The original returned a nil error
// and a bogus zero ID when the pq error was not a unique violation,
// silently dropping the failure.)
func (s *PSQLStorage) Create(user *login.User, passwordHash string) (int32, *login.Error) {
	userObj := psqlUser(user, passwordHash)
	if err := userObj.Insert(context.TODO(), s.db, boil.Infer()); err != nil {
		var e *pq.Error
		if errors.As(err, &e) && e.Code.Name() == "unique_violation" {
			return 0, login.WrapError(err, "user already exists", login.UserAlreadyExistError)
		}
		return 0, login.WrapError(err, "internal error", login.InternalError)
	}
	return int32(userObj.UserID), nil
}
// GetUser fetches the user with the given ID. A missing row maps to
// UserNotFoundError; any other query failure maps to InternalError.
func (s *PSQLStorage) GetUser(userId int32) (*login.User, *login.Error) {
	userObj, err := models.Users(models.UserWhere.UserID.EQ(int(userId))).One(context.TODO(), s.db)
	if err == nil {
		return userFromPsqlUser(userObj), nil
	}
	if errors.Is(err, sql.ErrNoRows) {
		return nil, login.WrapError(err, "not found", login.UserNotFoundError)
	}
	return nil, login.WrapError(err, "internal error", login.InternalError)
}
// CheckPassword fetches the user matching both email and password hash.
// A miss maps to UserNotFoundError; other failures map to InternalError.
func (s *PSQLStorage) CheckPassword(email string, passwordHash string) (*login.User, *login.Error) {
	query := models.Users(models.UserWhere.Email.EQ(email), models.UserWhere.Password.EQ(passwordHash))
	userObj, err := query.One(context.TODO(), s.db)
	if err == nil {
		return userFromPsqlUser(userObj), nil
	}
	if errors.Is(err, sql.ErrNoRows) {
		return nil, login.WrapError(err, "not found", login.UserNotFoundError)
	}
	return nil, login.WrapError(err, "internal error", login.InternalError)
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package fingerprint
import "testing"
// TestEncryptionStatusEctoolUnmarshalerTPMSeed verifies parsing of ectool
// output where the TPM-seed flag is set in both current and valid fields.
func TestEncryptionStatusEctoolUnmarshalerTPMSeed(t *testing.T) {
	// It should not matter that there are tabs to the left of the example output.
	var out = `
	FPMCU encryption status: 0x00000001 FPTPM_seed_set
	Valid flags: 0x00000001 FPTPM_seed_set
	`
	var expect = EncryptionStatus{
		Current: EncryptionStatusTPMSeedSet,
		Valid: EncryptionStatusTPMSeedSet,
	}
	actual, err := unmarshalEctoolEncryptionStatus(out)
	if err != nil {
		t.Fatal("Failed to unmarshal encryption status: ", err)
	}
	if actual != expect {
		t.Fatalf("Unmarshaled encryption status block %+v doesn't match expected block %+v.", actual, expect)
	}
}
// TestEncryptionStatusEctoolUnmarshalerNoTPMSeed verifies parsing when the
// current-status field carries no flags.
func TestEncryptionStatusEctoolUnmarshalerNoTPMSeed(t *testing.T) {
	// It should not matter that there are tabs to the left of the example output.
	var out = `
	FPMCU encryption status: 0x00000000
	Valid flags: 0x00000001 FPTPM_seed_set
	`
	var expect = EncryptionStatus{
		Current: 0,
		Valid: EncryptionStatusTPMSeedSet,
	}
	actual, err := unmarshalEctoolEncryptionStatus(out)
	if err != nil {
		t.Fatal("Failed to unmarshal encryption status: ", err)
	}
	if actual != expect {
		t.Fatalf("Unmarshaled encryption status block %+v doesn't match expected block %+v.", actual, expect)
	}
}
// TestEncryptionStatusFlagsIsSet verifies IsSet on both the zero value and
// a value with the TPM-seed bit set.
func TestEncryptionStatusFlagsIsSet(t *testing.T) {
	var flags EncryptionStatusFlags
	if flags.IsSet(EncryptionStatusTPMSeedSet) {
		t.Fatal("Flag EncryptionStatusTPMSeedSet was reported as set.")
	}
	flags = EncryptionStatusTPMSeedSet
	if !flags.IsSet(EncryptionStatusTPMSeedSet) {
		t.Fatal("Flag EncryptionStatusTPMSeedSet was reported as not set.")
	}
}
|
package util
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"errors"
"github.com/yuwe1/pgim/pkg/logger"
)
// TokenInfo is the token payload: it identifies the app, user and device,
// plus an expiry timestamp. It is JSON-encoded, RSA-encrypted and
// base64-encoded to form the client token (see GetToken).
type TokenInfo struct {
	AppId int64 `json:"app_id"`
	UserId int64 `json:"user_id"`
	DeviceId int64 `json:"device_id"`
	Expire int64 `json:"expire"`
}
// GetToken builds a client token: the TokenInfo payload is JSON-encoded,
// encrypted with the given PEM-encoded RSA public key, and finally
// base64 (standard alphabet) encoded.
func GetToken(appId, userId, deviceId int64, expire int64, publicKey string) (string, error) {
	info := TokenInfo{appId, userId, deviceId, expire}
	// Serialize the payload to JSON.
	bytes, err := json.Marshal(info)
	if err != nil {
		logger.Sugar.Error(err)
		return "", err
	}
	// Encrypt the payload with the RSA public key.
	token, err := RsaEncrypt(bytes, []byte(publicKey))
	if err != nil {
		return "", err
	}
	// Base64-encode the ciphertext for transport.
	return base64.StdEncoding.EncodeToString(token), nil
}
// 加密
func RsaEncrypt(origData []byte, publicKey []byte) ([]byte, error) {
// 解密pem格式的公钥
block, _ := pem.Decode(publicKey)
if block == nil {
return nil, errors.New("public key error")
}
// 解析公钥
pubInterface, err := x509.ParsePKIXPublicKey(block.Bytes)
if err != nil {
return nil, err
}
// 类型断言
pub := pubInterface.(*rsa.PublicKey)
// 加密
return rsa.EncryptPKCS1v15(rand.Reader, pub, origData)
}
// RsaDecrypt decrypts ciphertext with the given PEM-encoded PKCS#1 private
// key using RSA PKCS#1 v1.5.
func RsaDecrypt(ciphertext []byte, privateKey []byte) ([]byte, error) {
	// Decode the PEM-formatted private key.
	block, _ := pem.Decode(privateKey)
	if block == nil {
		// Error string normalized to Go convention (lowercase, no "!").
		return nil, errors.New("private key error")
	}
	// Parse the PKCS#1-format private key.
	priv, err := x509.ParsePKCS1PrivateKey(block.Bytes)
	if err != nil {
		return nil, err
	}
	return rsa.DecryptPKCS1v15(rand.Reader, priv, ciphertext)
}
// PrivateKey is a hard-coded RSA private key in PKCS#1 PEM format.
// (The original comment mislabeled it as the public key.)
// NOTE(review): committing a private key to source control means anything
// encrypted with the matching public key is readable by anyone with repo
// access — confirm this is test-only material.
var PrivateKey = `
-----BEGIN RSA PRIVATE KEY-----
MIICWwIBAAKBgQDcGsUIIAINHfRTdMmgGwLrjzfMNSrtgIf4EGsNaYwmC1GjF/bM
h0Mcm10oLhNrKNYCTTQVGGIxuc5heKd1gOzb7bdTnCDPPZ7oV7p1B9Pud+6zPaco
qDz2M24vHFWYY2FbIIJh8fHhKcfXNXOLovdVBE7Zy682X1+R1lRK8D+vmQIDAQAB
AoGAeWAZvz1HZExca5k/hpbeqV+0+VtobMgwMs96+U53BpO/VRzl8Cu3CpNyb7HY
64L9YQ+J5QgpPhqkgIO0dMu/0RIXsmhvr2gcxmKObcqT3JQ6S4rjHTln49I2sYTz
7JEH4TcplKjSjHyq5MhHfA+CV2/AB2BO6G8limu7SheXuvECQQDwOpZrZDeTOOBk
z1vercawd+J9ll/FZYttnrWYTI1sSF1sNfZ7dUXPyYPQFZ0LQ1bhZGmWBZ6a6wd9
R+PKlmJvAkEA6o32c/WEXxW2zeh18sOO4wqUiBYq3L3hFObhcsUAY8jfykQefW8q
yPuuL02jLIajFWd0itjvIrzWnVmoUuXydwJAXGLrvllIVkIlah+lATprkypH3Gyc
YFnxCTNkOzIVoXMjGp6WMFylgIfLPZdSUiaPnxby1FNM7987fh7Lp/m12QJAK9iL
2JNtwkSR3p305oOuAz0oFORn8MnB+KFMRaMT9pNHWk0vke0lB1sc7ZTKyvkEJW0o
eQgic9DvIYzwDUcU8wJAIkKROzuzLi9AvLnLUrSdI6998lmeYO9x7pwZPukz3era
zncjRK3pbVkv0KrKfczuJiRlZ7dUzVO0b6QJr8TRAA==
-----END RSA PRIVATE KEY-----
`
// PublicKey is the RSA public key derived from PrivateKey. Regenerate with:
//openssl rsa -in rsa_private_key.pem -pubout -out rsa_public_key.pem
var PublicKey = `
-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDcGsUIIAINHfRTdMmgGwLrjzfM
NSrtgIf4EGsNaYwmC1GjF/bMh0Mcm10oLhNrKNYCTTQVGGIxuc5heKd1gOzb7bdT
nCDPPZ7oV7p1B9Pud+6zPacoqDz2M24vHFWYY2FbIIJh8fHhKcfXNXOLovdVBE7Z
y682X1+R1lRK8D+vmQIDAQAB
-----END PUBLIC KEY-----
`
|
package types
// NodeConnectOptions are options for configuring a connection to a remote node.
type NodeConnectOptions struct {
	// The user to attempt to SSH into the remote node as.
	SSHUser string
	// A password to use for SSH authentication.
	SSHPassword string
	// The path to the key to use for SSH authentication.
	SSHKeyFile string
	// The port to use for the SSH connection.
	SSHPort int
	// The address of the new node.
	Address string
}
// AddNodeOptions represents options passed to the AddNode operation.
type AddNodeOptions struct {
	// Options for remote connections.
	*NodeConnectOptions
	// The role to assign the new node.
	NodeRole K3sRole
}
// RemoveNodeOptions are options passed to a RemoveNode operation (not implemented).
type RemoveNodeOptions struct {
	// Options for remote connections.
	*NodeConnectOptions
	// Attempt to remote into the system and uninstall k3s.
	Uninstall bool
	// The name of the node to remove.
	Name string
	// The IP address of the node to remove.
	IPAddress string
}
// ClusterManager is an interface for managing the nodes in a k3s cluster.
type ClusterManager interface {
	// AddNode should add a new node to the k3s cluster.
	AddNode(Node, *AddNodeOptions) error
	// RemoveNode should drain and remove the given node from the k3s cluster.
	// If NodeConnectOptions are not nil and Uninstall is true, then k3s and
	// all of its assets should be completely removed from the system. (not implemented)
	RemoveNode(*RemoveNodeOptions) error
}
|
// Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package graphics
import (
"context"
"os"
"path/filepath"
"time"
"chromiumos/tast/common/testexec"
"chromiumos/tast/local/graphics/expectations"
"chromiumos/tast/local/gtest"
"chromiumos/tast/testing"
)
// clvkTest is used to describe the config used to run each test.
type clvkTest struct {
	exe string // The test executable name.
}
// init registers the Clvk test and its two parameterized variants with the
// Tast framework.
func init() {
	testing.AddTest(&testing.Test{
		Func: Clvk,
		Desc: "Run OpenCL implementation on top of Vulkan using clvk",
		Contacts: []string{
			"rjodin@chromium.org",
			"chromeos-gfx@google.com",
		},
		// Top-level Attr is empty; attributes are supplied per-param below.
		Attr: []string{},
		SoftwareDeps: []string{"vulkan"},
		Fixture: "graphicsNoChrome",
		Params: []testing.Param{{
			Name: "api_tests",
			Val: clvkTest{
				exe: "api_tests",
			},
			Timeout: 1 * time.Minute,
			ExtraAttr: []string{"group:graphics", "graphics_opencl", "graphics_perbuild"},
		}, {
			Name: "simple_test",
			Val: clvkTest{
				exe: "simple_test",
			},
			Timeout: 1 * time.Minute,
			ExtraAttr: []string{"group:mainline"},
		}},
	})
}
// Clvk runs one clvk (OpenCL-on-Vulkan) test binary on the DUT and reports
// results, honoring per-test expectations (e.g. expected failures).
func Clvk(ctx context.Context, s *testing.State) {
	test := s.Param().(clvkTest)
	// Allow to see clvk error and warn messages directly in test logFile.
	os.Setenv("CLVK_LOG", "2")
	expectation, err := expectations.GetTestExpectation(ctx, s.TestName())
	if err != nil {
		s.Fatal("Failed to load test expectation: ", err)
	}
	// Schedules a post-test expectations handling. If the test is expected to
	// fail, but did not, then this generates an error.
	defer func() {
		if err := expectation.HandleFinalExpectation(); err != nil {
			s.Error("Unmet expectation: ", err)
		}
	}()
	const testPath = "/usr/local/opencl"
	testExec := filepath.Join(testPath, test.exe)
	logFile := filepath.Join(s.OutDir(), filepath.Base(test.exe)+".txt")
	if test.exe == "api_tests" {
		// gtest binary: run through the gtest wrapper to obtain a parsed report.
		if report, err := gtest.New(testExec,
			gtest.Logfile(logFile),
		).Run(ctx); err != nil && report != nil {
			passedTests := report.PassedTestNames()
			failedTests := report.FailedTestNames()
			if expErr := expectation.ReportErrorf("Passed %d tests, failed %d tests (%s) - %v", len(passedTests), len(failedTests), failedTests, err); expErr != nil {
				s.Error("Unexpected error: ", expErr)
			}
		} else if err != nil && report == nil {
			// The binary failed before producing any report.
			if expErr := expectation.ReportError(err); expErr != nil {
				s.Error("Unexpected error: ", expErr)
			}
		}
	} else {
		// Plain executable: capture combined stdout/stderr into the log file.
		f, err := os.Create(logFile)
		if err != nil {
			s.Fatal("Failed to create a log file: ", err)
		}
		defer f.Close()
		cmd := testexec.CommandContext(ctx, testExec)
		cmd.Stdout = f
		cmd.Stderr = f
		if err = cmd.Run(testexec.DumpLogOnError); err != nil {
			if expErr := expectation.ReportError(err); expErr != nil {
				s.Error("Unexpected error: ", expErr)
			}
		}
	}
}
|
package _7_throughTheFog
import "fmt"
// main demonstrates depositProfit with a sample deposit, growth rate and
// target threshold, printing the number of years required.
func main() {
	const (
		initialDeposit = 100
		yearlyRate     = 20
		goal           = 170
	)
	fmt.Println(depositProfit(initialDeposit, yearlyRate, goal))
}
// depositProfit returns how many whole years a deposit, compounding at
// rate percent per year, needs to reach at least threshold. A deposit
// already at or above the threshold yields 0.
func depositProfit(deposit int, rate int, threshold int) int {
	balance, target := float64(deposit), float64(threshold)
	elapsed := 0
	for balance < target {
		elapsed++
		// Same arithmetic form as compounding interest added each year.
		balance += balance * float64(rate) / 100
	}
	return elapsed
}
|
package models
import(
"encoding/json"
)
/**
 * Type30Enum is a generated enum for recover-task types. Valid members
 * start at 1; the zero value is not a named member and serializes as the
 * default "kRecoverVMs" (see Type30EnumToValue).
 */
type Type30Enum int
/**
 * Value collection for Type30Enum enum
 */
const (
	Type30_KRECOVERVMS Type30Enum = 1 + iota // serialized as "kRecoverVMs"
	Type30_KMOUNTVOLUMES // serialized as "kMountVolumes"
)
// MarshalJSON encodes the enum as its JSON string representation.
func (r Type30Enum) MarshalJSON() ([]byte, error) {
	s := Type30EnumToValue(r)
	return json.Marshal(s)
}
// UnmarshalJSON decodes a JSON string into the enum. The original discarded
// the json.Unmarshal error, silently mapping malformed input to the default
// member; the error is now propagated to the caller.
func (r *Type30Enum) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	*r = Type30EnumFromValue(s)
	return nil
}
/**
 * Type30EnumToValue converts Type30Enum to its string representation.
 * Any value other than Type30_KMOUNTVOLUMES — including unknown ones —
 * maps to the default "kRecoverVMs".
 */
func Type30EnumToValue(type30Enum Type30Enum) string {
	if type30Enum == Type30_KMOUNTVOLUMES {
		return "kMountVolumes"
	}
	return "kRecoverVMs"
}
/**
 * Type30EnumArrayToValue converts a Type30Enum slice to the corresponding
 * slice of string representations, element by element.
 */
func Type30EnumArrayToValue(type30Enum []Type30Enum) []string {
	convArray := make([]string, 0, len(type30Enum))
	for _, e := range type30Enum {
		convArray = append(convArray, Type30EnumToValue(e))
	}
	return convArray
}
/**
 * Type30EnumFromValue converts the given string to its enum representation.
 * Unknown strings fall back to the default Type30_KRECOVERVMS.
 */
func Type30EnumFromValue(value string) Type30Enum {
	if value == "kMountVolumes" {
		return Type30_KMOUNTVOLUMES
	}
	return Type30_KRECOVERVMS
}
|
package main
import (
"fmt"
)
// main currently only demonstrates ranging over a string slice; the earlier
// experiments with arrays, slice capacity growth, string/byte slices and
// closures are kept below as commented-out notes.
func main() {
	// var intArr = [...]int8 {10, 20, 30, 21}
	// fmt.Printf("%p\n", &intArr[1])
	// slice := intArr[1:3]
	// fmt.Printf("%p\n", &slice[0])
	// //fmt.Printf("%p\n", slice)
	// fmt.Println(cap(slice))
	// slice = append(slice, 22)
	// slice = append(slice, 23)
	// fmt.Println(cap(slice))
	// // var ptr *int8
	// // ptr = 0xc00004e059
	// // var slice1 []int = make([]int, 4, 5)
	// // fmt.Println(slice1)
	// // fmt.Println(len(slice1))
	// // fmt.Println(cap(slice1))
	// // slice1 = append(slice1, 10)
	// // fmt.Println(len(slice1))
	// // fmt.Println(cap(slice1))
	// var slice1 []int
	// fmt.Printf("%p", slice1)
	// slice1 = append(slice1, 10)
	// fmt.Println(slice1)
	// funcC := funcB(10, 20)
	// sum := funcC(30, 40)
	// fmt.Println(sum)
	// var str = "hello"
	// slice2 := str[0:]
	// fmt.Printf("%T,%v,%p\n", slice2, len(slice2), &str)
	// var slice3 []byte = []byte(str)
	// slice3[0] = 'e'
	// fmt.Printf("%T,%v,%p\n", slice3, len(slice3), &str)
	// var slice4 []uint = make([]uint, 0)
	// fmt.Printf("slice4==%p\n", slice4)
	// fbn := fbn(10, slice4)
	// fmt.Println("slice4==", slice4)
	// fmt.Printf("%v", fbn)
	// Print each element value followed by its index.
	var strSlice = []string {"a", "b", "c"}
	for k, v := range strSlice {
		fmt.Println(v, k)
	}
}
// funcA is a package-level closure that adds two integers.
var funcA = func(x, y int) (sum int) {
	return x + y
}
// funcB returns a closure that prints its own two arguments and then
// returns the sum of funcB's captured a and b; the closure's arguments do
// not affect the returned value.
func funcB(a, b int) func(int, int) int {
	captured := a + b
	return func(c int, d int) int {
		fmt.Println(c, d)
		return captured
	}
}
// fbn returns the n-th Fibonacci number (fbn(1) == fbn(2) == 1), printing
// the slice header address on entry to demonstrate slice passing. The
// append only mutates the local slice header; callers never observe the
// appended values.
//
// The original computed the recursive sum twice — once for the append and
// once for the return — quadrupling the recursion fan-out; the sum is now
// computed exactly once per call.
func fbn(n int, slice []uint) uint {
	fmt.Printf("slice4==%p\n", slice)
	if n == 1 || n == 2 {
		slice = append(slice, 1)
		return 1
	}
	v := fbn(n-1, slice) + fbn(n-2, slice)
	slice = append(slice, v) // local-only effect, kept to mirror the demo's intent
	return v
}
package user
import (
"go.mongodb.org/mongo-driver/bson/primitive"
"yes-blog/graph/model"
"yes-blog/internal/model/post"
)
// User is the persisted blog user. ID maps to MongoDB's _id field.
type User struct {
	ID primitive.ObjectID `bson:"_id" json:"id,omitempty"`
	Name string
	// Password holds the salted hash produced by hashAndSalt, never plaintext.
	Password string
	Email string
	Admin bool
	Posts []*post.Post
}
// NewUser creates a regular (non-admin) user with a hashed password.
func NewUser(name, password, email string) (*User, error) {
	return newUser(name, password, email, false)
}
// NewAdmin creates an admin user with a hashed password.
func NewAdmin(name, password, email string) (*User, error) {
	return newUser(name, password, email, true)
}
// newUser hashes the password and assembles a User with an empty post list.
// It returns an InternalServerException when hashing fails.
func newUser(name, password, email string, admin bool) (*User, error) {
	// hashing password
	hashedPass, err := hashAndSalt([]byte(password))
	if err != nil {
		return nil, model.InternalServerException{Message: "internal server error: couldn't hash password"}
	}
	return &User{
		Name: name,
		Password: hashedPass,
		Email: email,
		Admin: admin,
		Posts: []*post.Post{},
	}, nil
}
// AddPost appends p to the user's posts and returns the user for chaining.
func (u *User) AddPost(p *post.Post) *User {
	u.Posts = append(u.Posts, p)
	return u
}
// DeletePost removes the first post whose hex ID equals id, if any, and
// returns the user for chaining. Unknown IDs are silently ignored.
func (u *User) DeletePost(id string) *User {
	for i, p := range u.Posts {
		if p.ID.Hex() == id {
			// Splice the element out; order of the remaining posts is kept.
			u.Posts = append(u.Posts[:i], u.Posts[i+1:]...)
			return u
		}
	}
	return u
}
// UpdatePassword replaces the stored password with a fresh hash of the
// given plaintext, returning an InternalServerException when hashing fails.
func (u *User) UpdatePassword(password string) error {
	// hashing password
	hashedPass, err := hashAndSalt([]byte(password))
	if err != nil {
		return model.InternalServerException{Message: "internal server error: couldn't hash password"}
	}
	u.Password = hashedPass
	return nil
}
// UpdateName sets the user's display name.
func (u *User) UpdateName(name string) {
	u.Name = name
}

// Promote grants admin rights.
func (u *User) Promote() {
	u.SetAdmin(true)
}

// Demote revokes admin rights.
func (u *User) Demote() {
	u.SetAdmin(false)
}

// SetAdmin sets the admin flag directly. (Reformatted to gofmt style; the
// original lacked spaces around the receiver, brace and assignment.)
func (u *User) SetAdmin(state bool) {
	u.Admin = state
}

// Verify reports whether password matches the stored hash.
func (u *User) Verify(password string) bool {
	return CheckPasswordHash(password, u.Password)
}

// IsAdmin reports whether the user has admin rights.
func (u *User) IsAdmin() bool {
	return u.Admin
}
|
/*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file contains an object which encapsulates k8s clients which are useful for e2e tests.
package test
import (
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
// Allow E2E to run against a cluster using OpenID.
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
"sigs.k8s.io/gateway-api/pkg/client/clientset/versioned"
gatewayapi "sigs.k8s.io/gateway-api/pkg/client/clientset/versioned/typed/apis/v1beta1"
)
// Clients holds instances of interfaces for making requests to Knative Serving.
type Clients struct {
	KubeClient kubernetes.Interface
	GatewayAPIClient *GatewayAPIClients
	Dynamic dynamic.Interface
}
// GatewayAPIClients holds instances of interfaces for making requests to Knative
// networking clients.
type GatewayAPIClients struct {
	// HTTPRoutes operates on gateway-api HTTPRoute resources in the
	// namespace chosen at construction time (see newGatewayAPIClients).
	HTTPRoutes gatewayapi.HTTPRouteInterface
}
// NewClientsFromConfig instantiates and returns several clientsets required
// for making requests to the Knative Serving cluster described by cfg.
// Clients can make requests within namespace.
func NewClientsFromConfig(cfg *rest.Config, namespace string) (*Clients, error) {
	// We poll, so set our limits high.
	cfg.QPS = 100
	cfg.Burst = 200
	kube, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	dyn, err := dynamic.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	gw, err := newGatewayAPIClients(cfg, namespace)
	if err != nil {
		return nil, err
	}
	return &Clients{
		KubeClient:       kube,
		Dynamic:          dyn,
		GatewayAPIClient: gw,
	}, nil
}
// newGatewayAPIClients instantiates and returns the gateway-api clientset required
// to make requests to gateway API resources on the Knative service cluster,
// scoped to the given namespace.
func newGatewayAPIClients(cfg *rest.Config, namespace string) (*GatewayAPIClients, error) {
	cs, err := versioned.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	return &GatewayAPIClients{
		HTTPRoutes: cs.GatewayV1beta1().HTTPRoutes(namespace),
	}, nil
}
|
package main
import (
"bufio"
"io/ioutil"
"log"
"os"
"strings"
)
// readBytesFromFile reads the entire file named by the first CLI argument.
// It exits the process via log.Fatal on read failure, and panics with an
// index error when no argument was supplied — callers must check
// len(os.Args) first (readLines does).
func readBytesFromFile() []byte {
	targetSrcFile := os.Args[1]
	// os.ReadFile supersedes the deprecated ioutil.ReadFile (Go 1.16+).
	content, err := os.ReadFile(targetSrcFile)
	if err != nil {
		log.Fatal(err)
	}
	return content
}
// readBytesFromPipe consumes all of stdin and returns it as bytes.
// Read failures terminate the process via log.Fatal.
func readBytesFromPipe() []byte {
	content, err := ioutil.ReadAll(bufio.NewReader(os.Stdin))
	if err != nil {
		log.Fatal(err)
	}
	return content
}
// readLines loads input from the file named in os.Args[1] when present,
// otherwise from stdin, and returns it split into newline-separated lines.
func readLines() []string {
	var content []byte
	if len(os.Args) > 1 {
		content = readBytesFromFile()
	} else {
		content = readBytesFromPipe()
	}
	return strings.Split(string(content), "\n")
}
|
// Package msgpack provides a codec for encoding and decoding data using msgpack.
package msgpack
import (
"github.com/achilleasa/usrv/encoding"
impl "gopkg.in/vmihailenco/msgpack.v2"
)
// msgpackCodec implements encoding.Codec on top of the msgpack wire format.
type msgpackCodec struct{}
// Marshaler returns a function that msgpack-encodes an arbitrary value.
func (c *msgpackCodec) Marshaler() encoding.Marshaler {
	return func(v interface{}) ([]byte, error) {
		return impl.Marshal(v)
	}
}
// Unmarshaler returns a function that msgpack-decodes data into target.
func (c *msgpackCodec) Unmarshaler() encoding.Unmarshaler {
	return func(data []byte, target interface{}) error {
		return impl.Unmarshal(data, target)
	}
}
// Codec returns a codec that implements encoding and decoding of data using msgpack.
func Codec() encoding.Codec {
	return &msgpackCodec{}
}
|
package aws
import (
"github.com/cyberark/secretless-broker/pkg/secretless/plugin/connector"
"github.com/cyberark/secretless-broker/pkg/secretless/plugin/connector/http"
)
// PluginInfo is required as part of the Secretless plugin spec. It provides
// important metadata about the plugin: API version, connector type, plugin
// id, and a human-readable description.
func PluginInfo() map[string]string {
	info := make(map[string]string, 4)
	info["pluginAPIVersion"] = "0.1.0"
	info["type"] = "connector.http"
	info["id"] = "aws"
	info["description"] = "injects an HTTP request with AWS authorization headers"
	return info
}
// NewConnector returns an http.Connector that decorates each incoming http
// request with authorization data. The Connector type itself is defined
// elsewhere in this package.
func NewConnector(conRes connector.Resources) http.Connector {
	return &Connector{
		logger: conRes.Logger(),
	}
}
// GetHTTPPlugin is required as part of the Secretless plugin spec for HTTP
// connector plugins. It returns the HTTP plugin, wrapping NewConnector as
// the connector factory.
func GetHTTPPlugin() http.Plugin {
	return http.NewConnectorFunc(NewConnector)
}
|
/*
@File: model.go
@Contact: lucien@lucien.ink
@Licence: (C)Copyright 2019 Lucien Shui
@Modify Time @Author @Version @Description
------------ ------- -------- -----------
2019-07-25 01:39 Lucien 1.0 Init
*/
package model
import (
"github.com/jinzhu/gorm"
_ "github.com/mattn/go-sqlite3"
)
var db *gorm.DB
// init opens (creating if missing) the sqlite database backing the webhook
// history and ensures the History table exists. Any failure here panics,
// aborting program start.
func init() {
	var err error
	// Assign to the package-level db with `=` (not `:=`) to avoid shadowing.
	db, err = gorm.Open("sqlite3", "webhook.db")
	if err != nil {
		panic(err)
	}
	if !db.HasTable(&History{}) {
		if err := db.CreateTable(&History{}).Error; err != nil {
			panic(err)
		}
	}
}
|
package main
import (
"fmt"
"os"
"os/exec"
"strings"
)
// This is a meta-linter to help code quality high.
// It runs several linters and shows the result.
// It applies filters over the results because otherwise
// there's too much noise.
// List of possible linters: https://github.com/alecthomas/gometalinter#supported-linters
// To run: go run scripts\lint.go
// runCommandAndPrintResult runs cmd and prints its combined output prefixed
// with the command line. When the executable is missing and installCmd is
// non-nil, installCmd is invoked once and the command is re-run.
// The (possibly nil) command error is returned for the caller to filter.
func runCommandAndPrintResult(cmd *exec.Cmd, installCmd func() error) error {
	d, err := cmd.CombinedOutput()
	if err != nil && installCmd != nil && strings.Contains(err.Error(), "executable file not found") {
		err2 := installCmd()
		if err2 != nil {
			tool := strings.Join(cmd.Args, " ")
			fmt.Printf("Running install command for tool '%s' failed with %s\n", tool, err2)
			return err
		}
		// re-run command after installing it; an exec.Cmd cannot be reused,
		// so build a fresh one from the same arguments.
		cmd = exec.Command(cmd.Args[0], cmd.Args[1:]...)
		d, err = cmd.CombinedOutput()
	}
	if len(d) > 0 {
		tool := strings.Join(cmd.Args, " ")
		fmt.Printf("%s:\n%s\n", tool, d)
	}
	return err
}
// ignoreExitStatusError reports whether err is nil or one of the benign
// "exit status 1"/"exit status 2" errors that many lint tools use to
// signal that they found issues.
func ignoreExitStatusError(err error) bool {
	if err == nil {
		return true
	}
	msg := err.Error()
	return strings.Contains(msg, "exit status 1") || strings.Contains(msg, "exit status 2")
}
// ignoreError reports whether err can be ignored, which is only the case
// when it is nil.
func ignoreError(err error) bool {
	return err == nil
}
// goVet runs `go vet` over the current package, printing its findings.
// Exit status 1/2 is filtered out because vet uses it to signal findings.
func goVet() {
	cmd := exec.Command("go", "vet")
	err := runCommandAndPrintResult(cmd, nil)
	if !ignoreExitStatusError(err) {
		tool := strings.Join(cmd.Args, " ")
		fmt.Printf("Running %s failed with %s\n", tool, err)
	}
}
// goVetShadow runs vet's -shadow check. Unlike goVet, any non-nil error is
// reported here (ignoreError only passes nil).
func goVetShadow() {
	cmd := exec.Command("go", "tool", "vet", "-shadow", ".")
	err := runCommandAndPrintResult(cmd, nil)
	if !ignoreError(err) {
		tool := strings.Join(cmd.Args, " ")
		fmt.Printf("Running %s failed with %s\n", tool, err)
	}
}
func deadcode() {
install := func() error {
cmd := exec.Command("go", "get", "-u", "github.com/tsenart/deadcode")
return cmd.Run()
}
cmd := exec.Command("deadcode")
err := runCommandAndPrintResult(cmd, install)
if !ignoreExitStatusError(err) {
tool := strings.Join(cmd.Args, " ")
fmt.Printf("Running %s failed with %s\n", tool, err)
}
}
func varcheck() {
install := func() error {
cmd := exec.Command("go", "get", "-u", "gitlab.com/opennota/check/cmd/varcheck")
return cmd.Run()
}
cmd := exec.Command("varcheck")
err := runCommandAndPrintResult(cmd, install)
if !ignoreExitStatusError(err) {
tool := strings.Join(cmd.Args, " ")
fmt.Printf("Running %s failed with %s\n", tool, err)
}
}
func structcheck() {
install := func() error {
cmd := exec.Command("go", "get", "-u", "gitlab.com/opennota/check/cmd/structcheck")
return cmd.Run()
}
cmd := exec.Command("structcheck")
err := runCommandAndPrintResult(cmd, install)
if !ignoreExitStatusError(err) {
tool := strings.Join(cmd.Args, " ")
fmt.Printf("Running %s failed with %s\n", tool, err)
}
}
func aligncheck() {
install := func() error {
cmd := exec.Command("go", "get", "-u", "gitlab.com/opennota/check/cmd/aligncheck")
return cmd.Run()
}
cmd := exec.Command("aligncheck")
err := runCommandAndPrintResult(cmd, install)
if !ignoreExitStatusError(err) {
tool := strings.Join(cmd.Args, " ")
fmt.Printf("Running %s failed with %s\n", tool, err)
}
}
func megacheck() {
install := func() error {
cmd := exec.Command("go", "get", "-u", "honnef.co/go/tools/cmd/megacheck")
return cmd.Run()
}
cmd := exec.Command("megacheck")
err := runCommandAndPrintResult(cmd, install)
if !ignoreExitStatusError(err) {
tool := strings.Join(cmd.Args, " ")
fmt.Printf("Running %s failed with %s\n", tool, err)
}
}
// maligned runs the maligned linter, installing it on demand.
func maligned() {
	install := func() error {
		return exec.Command("go", "get", "-u", "github.com/mdempsky/maligned").Run()
	}
	cmd := exec.Command("maligned")
	if err := runCommandAndPrintResult(cmd, install); !ignoreExitStatusError(err) {
		fmt.Printf("Running %s failed with %s\n", strings.Join(cmd.Args, " "), err)
	}
}

// errcheck runs the errcheck linter, installing it on demand.
func errcheck() {
	install := func() error {
		return exec.Command("go", "get", "-u", "github.com/kisielk/errcheck").Run()
	}
	cmd := exec.Command("errcheck")
	if err := runCommandAndPrintResult(cmd, install); !ignoreExitStatusError(err) {
		fmt.Printf("Running %s failed with %s\n", strings.Join(cmd.Args, " "), err)
	}
}

// dupl runs the dupl duplicate-code detector, installing it on demand.
func dupl() {
	install := func() error {
		return exec.Command("go", "get", "-u", "github.com/mibk/dupl").Run()
	}
	cmd := exec.Command("dupl")
	if err := runCommandAndPrintResult(cmd, install); !ignoreExitStatusError(err) {
		fmt.Printf("Running %s failed with %s\n", strings.Join(cmd.Args, " "), err)
	}
}

// ineffassign runs the ineffassign linter on the current directory,
// installing it on demand.
func ineffassign() {
	install := func() error {
		return exec.Command("go", "get", "-u", "github.com/gordonklaus/ineffassign").Run()
	}
	cmd := exec.Command("ineffassign", ".")
	if err := runCommandAndPrintResult(cmd, install); !ignoreExitStatusError(err) {
		fmt.Printf("Running %s failed with %s\n", strings.Join(cmd.Args, " "), err)
	}
}
// unconvert runs the unconvert linter, installing it on demand.
func unconvert() {
	install := func() error {
		return exec.Command("go", "get", "-u", "github.com/mdempsky/unconvert").Run()
	}
	cmd := exec.Command("unconvert")
	if err := runCommandAndPrintResult(cmd, install); !ignoreExitStatusError(err) {
		fmt.Printf("Running %s failed with %s\n", strings.Join(cmd.Args, " "), err)
	}
}

// goconst runs the goconst linter over the whole module, installing it on
// demand.
func goconst() {
	install := func() error {
		return exec.Command("go", "get", "-u", "github.com/jgautheron/goconst/cmd/goconst").Run()
	}
	cmd := exec.Command("goconst", "./...")
	if err := runCommandAndPrintResult(cmd, install); !ignoreExitStatusError(err) {
		fmt.Printf("Running %s failed with %s\n", strings.Join(cmd.Args, " "), err)
	}
}

// misspell runs the misspell checker on the current directory, installing
// it on demand.
func misspell() {
	install := func() error {
		return exec.Command("go", "get", "-u", "github.com/client9/misspell/cmd/misspell").Run()
	}
	cmd := exec.Command("misspell", ".")
	if err := runCommandAndPrintResult(cmd, install); !ignoreExitStatusError(err) {
		fmt.Printf("Running %s failed with %s\n", strings.Join(cmd.Args, " "), err)
	}
}

// nakedret runs the nakedret linter on the current directory, installing it
// on demand.
func nakedret() {
	install := func() error {
		return exec.Command("go", "get", "-u", "github.com/alexkohler/nakedret").Run()
	}
	cmd := exec.Command("nakedret", ".")
	if err := runCommandAndPrintResult(cmd, install); !ignoreExitStatusError(err) {
		fmt.Printf("Running %s failed with %s\n", strings.Join(cmd.Args, " "), err)
	}
}
// runToolByName dispatches a single linter run by its full or abbreviated
// name. Unknown names terminate the process with exit status 1.
func runToolByName(tool string) {
	switch tool {
	case "vet", "govet":
		goVet()
	case "vetshadow", "shadow":
		goVetShadow()
	case "dead", "deadcode", "unused":
		deadcode()
	case "var", "varcheck":
		varcheck()
	case "align", "aligncheck":
		aligncheck()
	case "struct", "structcheck":
		structcheck()
	case "mega", "megacheck":
		megacheck()
	case "maligned":
		// Fix: maligned() existed but had no dispatch entry, so it was
		// unreachable from the command line.
		maligned()
	case "err", "errcheck":
		errcheck()
	case "dupl", "duplicate":
		dupl()
	case "assign", "ineffassign", "ineffasign":
		// Fix: the tool's real name "ineffassign" previously fell through
		// to the error path; the misspelled "ineffasign" is kept for
		// backward compatibility.
		ineffassign()
	case "unconvert":
		unconvert()
	case "const", "goconst":
		goconst()
	case "spell", "misspell":
		misspell()
	case "ret", "nakedret":
		nakedret()
	default:
		fmt.Printf("Unknown tool '%s'\n", tool)
		os.Exit(1)
	}
}
// main runs the linters named on the command line, or the curated default
// suite when no arguments are supplied.
func main() {
	if args := os.Args[1:]; len(args) > 0 {
		for _, name := range args {
			runToolByName(name)
		}
		return
	}
	goVet()
	goVetShadow()
	deadcode()
	varcheck()
	// struct check and maligned return the same info and for sake of
	// easier porting we wouldn't act on their advice
	// structcheck()
	// maligned()
	aligncheck()
	megacheck()
	errcheck()
	// dupl() // too many results and mostly false positive
	ineffassign()
	unconvert()
	// goconst() // not especially interesting
	misspell()
	nakedret()
	// TODO: https://github.com/securego/gosec doesn't yet support
	// code outside GOPATH
}
|
package main
import "fmt"
/*
Given an integer array, you need to find one continuous subarray that if you only sort this subarray in ascending order, then the whole array will be sorted in ascending order, too.
You need to find the shortest such subarray and output its length.
Example 1:
Input: [2, 6, 4, 8, 10, 9, 15]
Output: 5
Explanation: You need to sort [6, 4, 8, 10, 9] in ascending order to make the whole array sorted in ascending order.
Note:
The length of the input array is in range [1, 10,000].
The input array may contain duplicates, so ascending order here means <=.
*/
// findUnsortedSubarray returns the length of the shortest contiguous
// subarray that, if sorted ascending, makes the whole array sorted.
//
// Algorithm: scan left-to-right tracking the running max — the last index
// whose value drops below that max is the window's right edge. Scan
// right-to-left tracking the running min for the left edge. Duplicates are
// handled by using <= / >= so runs of equal values count as ordered.
func findUnsortedSubarray(nums []int) int {
	if len(nums) == 0 {
		return 0
	}
	runningMax := nums[0] - 1
	rightEdge := len(nums)
	alreadySorted := true
	for i, v := range nums {
		if runningMax <= v { // must be <=, or equal neighbors look unsorted
			runningMax = v
		} else {
			rightEdge = i
			alreadySorted = false
		}
	}
	if alreadySorted {
		return 0
	}
	runningMin := nums[len(nums)-1]
	leftEdge := -1
	for i := len(nums) - 1; i >= 0; i-- {
		if runningMin >= nums[i] {
			runningMin = nums[i]
		} else {
			leftEdge = i
		}
	}
	return rightEdge - leftEdge + 1
}
// main exercises findUnsortedSubarray on the problem example plus several
// edge cases (sorted, reversed pair, single element, trailing duplicates).
func main() {
	cases := [][]int{
		{2, 6, 4, 8, 10, 9, 15},
		{2, 5, 3, 6},
		{1, 3, 2, 4, 5},
		{2, 3},
		{2, 1},
		{2},
		{1, 2, 3, 3, 3, 2},
	}
	for _, c := range cases {
		fmt.Println(findUnsortedSubarray(c))
	}
}
|
package main
import (
"os"
"github.com/typical-go/typical-go/pkg/typgo"
)
// descriptor declares the project's typical-go pipeline: build/run tasks,
// simple custom actions (ping, info, go-help), task composition
// (multi-task), a sub-task group (database), and a user-defined Tasker
// (greetTask).
var descriptor = typgo.Descriptor{
	ProjectName:    "custom-task",
	ProjectVersion: "1.0.0",
	Environment: typgo.Environment{
		"key1": "value1",
		"key2": "value2",
	},
	Tasks: []typgo.Tasker{
		// compile
		&typgo.GoBuild{},
		// run
		&typgo.RunBinary{
			Before: typgo.TaskNames{"build"},
		},
		// ping
		&typgo.Task{
			Name:  "ping",
			Usage: "print pong",
			Action: typgo.NewAction(func(c *typgo.Context) error {
				c.Info("pong") // new action with golang implementation
				return nil
			}),
		},
		// info
		&typgo.Task{
			Name:  "info",
			Usage: "print info",
			Action: typgo.NewAction(func(c *typgo.Context) error {
				c.Info("print the info:")
				c.ExecuteCommandLine("go version")
				c.ExecuteCommandLine("git version")
				c.Infof("ENV: key1=%s\n", os.Getenv("key1"))
				return nil
			}),
		},
		// help
		&typgo.Task{
			Name:  "go-help",
			Usage: "print go help",
			Action: &typgo.Command{
				Name:   "go",
				Args:   []string{"help"},
				Stdout: os.Stdout,
			},
		},
		// multi-task
		&typgo.Task{
			Name:   "multi-task",
			Usage:  "run multi-task",
			Action: typgo.TaskNames{"ping", "info"},
		},
		// database
		&typgo.Task{
			Name:    "database",
			Aliases: []string{"db"},
			Usage:   "database tool",
			SubTasks: []*typgo.Task{
				{
					Name:  "create",
					Usage: "create database",
					Action: typgo.NewAction(func(c *typgo.Context) error {
						c.Info("create database")
						return nil
					}),
				},
				{
					Name:  "drop",
					Usage: "drop database",
					Action: typgo.NewAction(func(c *typgo.Context) error {
						c.Info("drop database") // fixed message typo "databse"
						return nil
					}),
				},
				{
					Name:  "migrate",
					Usage: "migrate database",
					Action: typgo.NewAction(func(c *typgo.Context) error {
						c.Info("migrate database") // fixed message typo "databse"
						return nil
					}),
				},
				{
					Name:  "seed",
					Usage: "seed database",
					Action: typgo.NewAction(func(c *typgo.Context) error {
						c.Info("seed database") // fixed message typo "databse"
						return nil
					}),
				},
			},
		},
		// greet
		&greetTask{person: "john doe"},
	},
}
// greetTask is a user-defined task demonstrating how to plug a custom type
// into the descriptor: it implements both typgo.Tasker (to describe the
// CLI task) and typgo.Action (to run it).
type greetTask struct {
	person string // name printed by Execute
}

// Compile-time checks that greetTask satisfies both interfaces.
var _ typgo.Tasker = (*greetTask)(nil)
var _ typgo.Action = (*greetTask)(nil)

// Task exposes greetTask as the "greet" CLI task, with the receiver itself
// acting as the task's action.
func (g *greetTask) Task() *typgo.Task {
	return &typgo.Task{
		Name:   "greet",
		Usage:  "greet person",
		Action: g,
	}
}

// Execute implements typgo.Action by printing the greeting.
func (g *greetTask) Execute(c *typgo.Context) error {
	c.Infof("Hello %s\n", g.person)
	return nil
}

// main hands the descriptor to the typical-go runtime.
func main() {
	typgo.Start(&descriptor)
}
|
package main
import "fmt"
/*
Given a singly linked list L: L0→L1→…→Ln-1→Ln,
reorder it to: L0→Ln→L1→Ln-1→L2→Ln-2→…
You may not modify the values in the list's nodes, only nodes itself may be changed.
Example 1:
Given 1->2->3->4, reorder it to 1->4->2->3.
Example 2:
Given 1->2->3->4->5, reorder it to 1->5->2->4->3.
*/
// main builds the list 1->2->3->4->5, prints it, reorders it in place, and
// prints the result.
func main() {
	// Build the list back to front so each node can link to the previous one.
	var l *ListNode
	for v := 5; v >= 1; v-- {
		l = &ListNode{v, l}
	}
	printlist(l)
	reorderList(l)
	printlist(l)
}

// printlist writes the list values as a comma-terminated line on stdout.
func printlist(h *ListNode) {
	out := ""
	for node := h; node != nil; node = node.Next {
		out += fmt.Sprintf("%d,", node.Val)
	}
	fmt.Println(out)
}
/**
 * Definition for singly-linked list.
 */
type ListNode struct {
	Val  int       // payload value
	Next *ListNode // nil terminates the list
}
// reorderList rewires head in place from L0→L1→…→Ln to
// L0→Ln→L1→Ln-1→…. Lists with fewer than three nodes are already in
// reordered form and are left untouched.
func reorderList(head *ListNode) {
	if n := listLen(head); n >= 3 {
		reorder(head, 1, n)
	}
}

// listLen returns the number of nodes reachable from h.
func listLen(h *ListNode) int {
	n := 0
	for node := h; node != nil; node = node.Next {
		n++
	}
	return n
}
// reorder recursively reorders the sublist starting at h, where idx is h's
// 1-based position and n the total list length. It returns h itself and
// the "tail" node the caller should weave in after its own head. The list
// is rewired on the way back up the recursion (post-order traversal).
// Recursion bottoms out when one or two nodes remain past the midpoint.
func reorder(h *ListNode, idx, n int) (*ListNode, *ListNode) {
	if n%2 == 0 && idx == n/2 {
		// Even length: exactly two nodes remain at the midpoint.
		tl := h.Next.Next // remember the successor to hand back
		h.Next.Next = nil // detach the fully-processed pair
		return h, tl
	}
	if n%2 == 1 && idx == (n/2+1) {
		// Odd length: a single middle node remains.
		tl := h.Next
		h.Next = nil
		return h, tl
	}
	nl, tl := reorder(h.Next, idx+1, n)
	// Post-order rewiring: splice the returned tail between h and the rest.
	tt := tl.Next    // save the node after the tail for the caller
	h.Next = tl      // h now points at the tail node
	h.Next.Next = nl // tail points back at the recursion's head
	return h, tt     // hand h and the saved successor up the stack
}
|
package watcher
import (
"time"
"github.com/MagalixTechnologies/uuid-go"
)
const (
	// DefaultEventsOrigin default origin when not specified
	DefaultEventsOrigin = "watcher"
)

// Event structure describing something observed in a cluster. Optional
// scoping IDs are pointers so that json omitempty can drop them.
type Event struct {
	ID            interface{}   `json:"id,omitempty" bson:"id,omitempty"`
	Timestamp     time.Time     `json:"timestamp,omitempty"`
	Entity        string        `json:"entity" bson:"entity"`
	EntityID      string        `json:"entity_id,omitempty" bson:"entity_id,omitempty"`
	AccountID     uuid.UUID     `json:"account_id" bson:"account_id"`
	ClusterID     uuid.UUID     `json:"cluster_id" bson:"cluster_id"`
	NodeID        *uuid.UUID    `json:"node_id" bson:"node_id"`
	ApplicationID *uuid.UUID    `json:"application_id,omitempty" bson:"application_id,omitempty"`
	ServiceID     *uuid.UUID    `json:"service_id,omitempty" bson:"service_id,omitempty"`
	ContainerID   *uuid.UUID    `json:"container_id,omitempty" bson:"container_id,omitempty"`
	Kind          string        `json:"kind" bson:"kind"`
	Value         interface{}   `json:"value,omitempty" bson:"value,omitempty"`
	Origin        string        `json:"origin,omitempty" bson:"origin,omitempty"`
	Source        interface{}   `json:"source,omitempty" bson:"source,omitempty"`
	Meta          interface{}   `json:"meta,omitempty" bson:"meta,omitempty"`
}
// NewEvent creates a new event with no source and no meta.
//
// Deprecated: use NewEventWithSource instead.
func NewEvent(
	timestamp time.Time,
	identity Identity,
	entity string,
	entityID string,
	kind string,
	value interface{},
	origin string,
) Event {
	return NewEventWithSource(
		timestamp, identity, entity, entityID, kind, value, origin, nil, nil,
	)
}
// NewEventWithSource creates a new event with a fresh random ID, optionally
// carrying a container-status source and a free-form meta string.
// NOTE(review): identity has no cluster/node/container fields, so ClusterID,
// NodeID and ContainerID are left at their zero values — confirm upstream.
func NewEventWithSource(
	timestamp time.Time,
	identity Identity,
	entity string,
	entityID string,
	kind string,
	value interface{},
	origin string,
	source *ContainerStatusSource,
	meta *string,
) Event {
	// Box the *string only when non-nil: a typed nil inside an interface{}
	// is not "empty" and would defeat json omitempty.
	var metaInterface interface{}
	if meta != nil {
		metaInterface = meta
	}
	// Fix: the origin argument was previously dropped on the floor. Apply
	// it, falling back to the package default when unspecified.
	if origin == "" {
		origin = DefaultEventsOrigin
	}
	event := Event{
		ID:            uuid.NewV4(),
		Timestamp:     timestamp,
		Entity:        entity,
		EntityID:      entityID,
		AccountID:     identity.AccountID,
		ApplicationID: &identity.ApplicationID,
		ServiceID:     &identity.ServiceID,
		Kind:          kind,
		Value:         value,
		Origin:        origin,
		Meta:          metaInterface,
	}
	// json omitempty will not work if source is typed nil
	if source != nil {
		event.Source = source
	}
	return event
}
// Identity a struct to identify an entity by its owning account,
// application and service.
type Identity struct {
	AccountID     uuid.UUID
	ApplicationID uuid.UUID
	ServiceID     uuid.UUID
}
|
package httpexpect
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestStringFailed verifies every String assertion is callable without
// side effects once the underlying chain has already failed.
func TestStringFailed(t *testing.T) {
	chain := makeChain(newMockReporter(t))
	chain.fail("fail")
	value := &String{chain, ""}
	value.Path("$").chain.assertFailed(t)
	value.Schema("")
	value.DateTime()
	value.Empty()
	value.NotEmpty()
	value.Equal("")
	value.NotEqual("")
	value.EqualFold("")
	value.NotEqualFold("")
	value.Contains("")
	value.NotContains("")
	value.ContainsFold("")
	value.NotContainsFold("")
}

// TestStringGetters covers Raw, JSONPath access and JSON Schema validation.
func TestStringGetters(t *testing.T) {
	reporter := newMockReporter(t)
	value := NewString(reporter, "foo")
	assert.Equal(t, "foo", value.Raw())
	value.chain.assertOK(t)
	value.chain.reset()
	assert.Equal(t, "foo", value.Path("$").Raw())
	value.chain.assertOK(t)
	value.chain.reset()
	// A matching schema passes the chain...
	value.Schema(`{"type": "string"}`)
	value.chain.assertOK(t)
	value.chain.reset()
	// ...and a non-matching one fails it.
	value.Schema(`{"type": "object"}`)
	value.chain.assertFailed(t)
	value.chain.reset()
}
// TestStringEmpty checks Empty/NotEmpty against "" and a non-empty string.
func TestStringEmpty(t *testing.T) {
	reporter := newMockReporter(t)
	value1 := NewString(reporter, "")
	value1.Empty()
	value1.chain.assertOK(t)
	value1.chain.reset()
	value1.NotEmpty()
	value1.chain.assertFailed(t)
	value1.chain.reset()
	value2 := NewString(reporter, "a")
	value2.Empty()
	value2.chain.assertFailed(t)
	value2.chain.reset()
	value2.NotEmpty()
	value2.chain.assertOK(t)
	value2.chain.reset()
}

// TestStringEqual checks case-sensitive Equal/NotEqual.
func TestStringEqual(t *testing.T) {
	reporter := newMockReporter(t)
	value := NewString(reporter, "foo")
	assert.Equal(t, "foo", value.Raw())
	value.Equal("foo")
	value.chain.assertOK(t)
	value.chain.reset()
	value.Equal("FOO")
	value.chain.assertFailed(t)
	value.chain.reset()
	value.NotEqual("FOO")
	value.chain.assertOK(t)
	value.chain.reset()
	value.NotEqual("foo")
	value.chain.assertFailed(t)
	value.chain.reset()
}

// TestStringEqualFold checks case-insensitive EqualFold/NotEqualFold.
func TestStringEqualFold(t *testing.T) {
	reporter := newMockReporter(t)
	value := NewString(reporter, "foo")
	value.EqualFold("foo")
	value.chain.assertOK(t)
	value.chain.reset()
	value.EqualFold("FOO")
	value.chain.assertOK(t)
	value.chain.reset()
	value.EqualFold("foo2")
	value.chain.assertFailed(t)
	value.chain.reset()
	value.NotEqualFold("foo")
	value.chain.assertFailed(t)
	value.chain.reset()
	value.NotEqualFold("FOO")
	value.chain.assertFailed(t)
	value.chain.reset()
	value.NotEqualFold("foo2")
	value.chain.assertOK(t)
	value.chain.reset()
}
// TestStringContains checks case-sensitive Contains/NotContains.
func TestStringContains(t *testing.T) {
	reporter := newMockReporter(t)
	value := NewString(reporter, "11-foo-22")
	value.Contains("foo")
	value.chain.assertOK(t)
	value.chain.reset()
	value.Contains("FOO")
	value.chain.assertFailed(t)
	value.chain.reset()
	value.NotContains("FOO")
	value.chain.assertOK(t)
	value.chain.reset()
	value.NotContains("foo")
	value.chain.assertFailed(t)
	value.chain.reset()
}

// TestStringContainsFold checks case-insensitive ContainsFold/NotContainsFold.
func TestStringContainsFold(t *testing.T) {
	reporter := newMockReporter(t)
	value := NewString(reporter, "11-foo-22")
	value.ContainsFold("foo")
	value.chain.assertOK(t)
	value.chain.reset()
	value.ContainsFold("FOO")
	value.chain.assertOK(t)
	value.chain.reset()
	value.ContainsFold("foo3")
	value.chain.assertFailed(t)
	value.chain.reset()
	value.NotContainsFold("foo")
	value.chain.assertFailed(t)
	value.chain.reset()
	value.NotContainsFold("FOO")
	value.chain.assertFailed(t)
	value.chain.reset()
	value.NotContainsFold("foo3")
	value.chain.assertOK(t)
	value.chain.reset()
}

// TestStringLength checks that Length reports the byte length as a Number.
func TestStringLength(t *testing.T) {
	reporter := newMockReporter(t)
	value := NewString(reporter, "1234567")
	num := value.Length()
	value.chain.assertOK(t)
	num.chain.assertOK(t)
	assert.Equal(t, 7.0, num.Raw())
}
// TestStringDateTime parses the default layout (RFC1123), an explicit
// RFC822 layout, and a malformed value (which fails and yields Unix(0,0)).
func TestStringDateTime(t *testing.T) {
	reporter := newMockReporter(t)
	value1 := NewString(reporter, "Tue, 15 Nov 1994 08:12:31 GMT")
	dt1 := value1.DateTime()
	value1.chain.assertOK(t)
	dt1.chain.assertOK(t)
	assert.True(t, time.Date(1994, 11, 15, 8, 12, 31, 0, time.UTC).Equal(dt1.Raw()))
	value2 := NewString(reporter, "15 Nov 94 08:12 GMT")
	dt2 := value2.DateTime(time.RFC822)
	value2.chain.assertOK(t)
	dt2.chain.assertOK(t)
	assert.True(t, time.Date(1994, 11, 15, 8, 12, 0, 0, time.UTC).Equal(dt2.Raw()))
	value3 := NewString(reporter, "bad")
	dt3 := value3.DateTime()
	value3.chain.assertFailed(t)
	dt3.chain.assertFailed(t)
	assert.True(t, time.Unix(0, 0).Equal(dt3.Raw()))
}

// TestStringMatchOne checks single-match submatch extraction, with named
// and unnamed capture groups.
func TestStringMatchOne(t *testing.T) {
	reporter := newMockReporter(t)
	value := NewString(reporter, "http://example.com/users/john")
	m1 := value.Match(`http://(?P<host>.+)/users/(?P<user>.+)`)
	m1.chain.assertOK(t)
	assert.Equal(t,
		[]string{"http://example.com/users/john", "example.com", "john"},
		m1.submatches)
	m2 := value.Match(`http://(.+)/users/(.+)`)
	m2.chain.assertOK(t)
	assert.Equal(t,
		[]string{"http://example.com/users/john", "example.com", "john"},
		m2.submatches)
}

// TestStringMatchAll checks extraction of every match in the subject.
func TestStringMatchAll(t *testing.T) {
	reporter := newMockReporter(t)
	value := NewString(reporter,
		"http://example.com/users/john http://example.com/users/bob")
	m := value.MatchAll(`http://(\S+)/users/(\S+)`)
	assert.Equal(t, 2, len(m))
	m[0].chain.assertOK(t)
	m[1].chain.assertOK(t)
	assert.Equal(t,
		[]string{"http://example.com/users/john", "example.com", "john"},
		m[0].submatches)
	assert.Equal(t,
		[]string{"http://example.com/users/bob", "example.com", "bob"},
		m[1].submatches)
}

// TestStringMatchStatus checks chain OK/failure for matching and
// non-matching patterns, and the empty results returned on failure.
func TestStringMatchStatus(t *testing.T) {
	reporter := newMockReporter(t)
	value := NewString(reporter, "a")
	value.Match(`a`)
	value.chain.assertOK(t)
	value.chain.reset()
	value.MatchAll(`a`)
	value.chain.assertOK(t)
	value.chain.reset()
	value.NotMatch(`a`)
	value.chain.assertFailed(t)
	value.chain.reset()
	value.Match(`[^a]`)
	value.chain.assertFailed(t)
	value.chain.reset()
	value.MatchAll(`[^a]`)
	value.chain.assertFailed(t)
	value.chain.reset()
	value.NotMatch(`[^a]`)
	value.chain.assertOK(t)
	value.chain.reset()
	assert.Equal(t, []string{}, value.Match(`[^a]`).submatches)
	assert.Equal(t, []Match{}, value.MatchAll(`[^a]`))
}

// TestStringMatchInvalid checks that a malformed regexp fails the chain
// for Match, MatchAll and NotMatch alike.
func TestStringMatchInvalid(t *testing.T) {
	reporter := newMockReporter(t)
	value := NewString(reporter, "a")
	value.Match(`[`)
	value.chain.assertFailed(t)
	value.chain.reset()
	value.MatchAll(`[`)
	value.chain.assertFailed(t)
	value.chain.reset()
	value.NotMatch(`[`)
	value.chain.assertFailed(t)
	value.chain.reset()
}
|
/*
Copyright 2019 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package stats
import (
"context"
"fmt"
"io"
"math"
"reflect"
"sort"
"sync"
"time"
"github.com/GoogleCloudPlatform/cloud-ingest/agent/common"
"github.com/GoogleCloudPlatform/cloud-ingest/agent/stats/throughput"
"github.com/golang/glog"
taskpb "github.com/GoogleCloudPlatform/cloud-ingest/proto/task_go_proto"
)
const (
	statsDisplayFreq = 1 * time.Second // The frequency of displaying stats to stdout.
	accumulatorFreq  = 1 * time.Second // The frequency of accumulating bytes copied.
)

var (
	// Ticker factories; NewTracker uses them for its display and
	// accumulator loops (the Tracker fields they feed are test hooks).
	displayTickerMaker = func() common.Ticker {
		return common.NewClockTicker(statsDisplayFreq)
	}
	accumulatorTickerMaker = func() common.Ticker {
		return common.NewClockTicker(accumulatorFreq)
	}
)

// taskDur pairs a task name with a duration.
// NOTE(review): not referenced anywhere in this file — possibly dead code.
type taskDur struct {
	task string
	dur  time.Duration
}

// lifetimeStats accumulates stats over the whole life of the process.
type lifetimeStats struct {
	PulseStats // Embedded struct.
	taskDone    map[string]uint64 // completed-task counts keyed by task kind
	ctrlMsgTime time.Time         // arrival time of the most recent control message
	bwLimit     int64             // current bandwidth limit
}

// PulseStats contains stats which are sent with each Agent pulse message.
// Every field must remain int64: add/sub walk the struct with reflection
// using Int()/SetInt().
type PulseStats struct {
	CopyBytes             int64
	ListBytes             int64
	CopyOpenMs            int64
	CopyStatMs            int64
	CopySeekMs            int64
	CopyReadMs            int64
	CopyWriteMs           int64
	CopyInternalRetries   int64
	DeleteInternalRetries int64
	ListDirOpenMs         int64
	ListDirReadMs         int64
	ListFileWriteMs       int64
	ListDirWriteMs        int64
}
// add accumulates every field of ps2 into ps1. It iterates the fields via
// reflection, so it stays correct as fields are added — provided they are
// all int64.
func (ps1 *PulseStats) add(ps2 *PulseStats) {
	ps1v := reflect.ValueOf(ps1).Elem()
	ps2v := reflect.ValueOf(ps2).Elem()
	for i := 0; i < ps1v.NumField(); i++ {
		ps1v.Field(i).SetInt(ps1v.Field(i).Int() + ps2v.Field(i).Int())
	}
}

// sub subtracts every field of ps2 from ps1; the reflective mirror of add.
func (ps1 *PulseStats) sub(ps2 *PulseStats) {
	ps1v := reflect.ValueOf(ps1).Elem()
	ps2v := reflect.ValueOf(ps2).Elem()
	for i := 0; i < ps1v.NumField(); i++ {
		ps1v.Field(i).SetInt(ps1v.Field(i).Int() - ps2v.Field(i).Int())
	}
}
// Tracker collects stats about the Agent and provides a display to STDOUT.
// Stats are collected by calling the various Record* functions as appropriate.
// All mutable state is owned by the track goroutine; producers communicate
// over channels only.
type Tracker struct {
	taskDoneChan chan string    // Channel to record task completions.
	bwLimitChan  chan int64     // Channel to record the bandwidth limit.
	ctrlMsgChan  chan time.Time // Channel to record control message timing.

	lifetime lifetimeStats // Cumulative for the lifetime of this process.

	tpTracker *throughput.Tracker // Measures outgoing copy throughput.

	spinnerIdx int // For displaying the mighty spinner.

	// For managing accumulated pulse stats.
	pulseStatsMu   sync.Mutex
	pulseStatsChan chan *PulseStats
	currPulseStats PulseStats
	prevPulseStats PulseStats

	// Testing hooks.
	selectDone        func()
	displayTicker     common.Ticker
	accumulatorTicker common.Ticker
}

// NewTracker returns a new Tracker, which can then be used to record stats.
// It spawns the track goroutine, which runs until ctx is cancelled.
func NewTracker(ctx context.Context) *Tracker {
	t := &Tracker{
		// Large buffers to avoid blocking.
		taskDoneChan: make(chan string, 100),
		bwLimitChan:  make(chan int64, 10),
		ctrlMsgChan:  make(chan time.Time, 10),
		lifetime: lifetimeStats{
			taskDone:    map[string]uint64{"copy": 0, "list": 0},
			ctrlMsgTime: time.Now(),
			bwLimit:     math.MaxInt32,
		},
		pulseStatsChan:    make(chan *PulseStats, 100),
		tpTracker:         throughput.NewTracker(ctx),
		selectDone:        func() {},
		displayTicker:     displayTickerMaker(),
		accumulatorTicker: accumulatorTickerMaker(),
	}
	go t.track(ctx)
	return t
}
// AccumulatedPulseStats returns the PulseStats since the last time this function was called.
// This function is *NOT* idempotent, as calling it resets the underlying PulseStats.
// A nil receiver returns zeroed stats.
func (t *Tracker) AccumulatedPulseStats() *PulseStats {
	if t == nil {
		return &PulseStats{}
	}
	t.pulseStatsMu.Lock()
	defer t.pulseStatsMu.Unlock()
	// Copy out the accumulated stats and reset the accumulator.
	d := t.currPulseStats
	t.currPulseStats = PulseStats{}
	return &d
}

// RecordTaskResp tracks the count of completed tasks. Takes no action for a nil receiver.
// Copy-bundle responses are counted as "copy"; unrecognized spec types are
// logged and dropped.
func (t *Tracker) RecordTaskResp(resp *taskpb.TaskRespMsg) {
	if t == nil {
		return
	}
	task := ""
	if resp.ReqSpec.GetCopySpec() != nil {
		task = "copy"
	} else if resp.ReqSpec.GetListSpec() != nil {
		task = "list"
	} else if resp.ReqSpec.GetCopyBundleSpec() != nil {
		task = "copy"
	} else {
		glog.Errorf("resp.ReqSpec doesn't match any known spec type: %v", resp.ReqSpec)
	}
	if task != "" {
		t.taskDoneChan <- task // Record the task completion.
	}
}
// CopyByteTrackingReader is an io.Reader that wraps another io.Reader and
// performs byte tracking during the Read function.
type CopyByteTrackingReader struct {
	reader  io.Reader
	tracker *Tracker
}

// NewCopyByteTrackingReader returns a CopyByteTrackingReader.
// Returns the passed in reader for a nil receiver.
func (t *Tracker) NewCopyByteTrackingReader(r io.Reader) io.Reader {
	if t == nil {
		return r
	}
	return &CopyByteTrackingReader{reader: r, tracker: t}
}

// Read implements the io.Reader interface. Each call reports the bytes
// read and elapsed time to the pulse stats and the throughput tracker.
func (cbtr *CopyByteTrackingReader) Read(buf []byte) (n int, err error) {
	start := time.Now()
	n, err = cbtr.reader.Read(buf)
	cbtr.tracker.pulseStatsChan <- &PulseStats{
		CopyReadMs: DurMs(start),
		CopyBytes:  int64(n),
	}
	cbtr.tracker.tpTracker.RecordBytesSent(int64(n))
	return n, err
}
// TimingReader is an io.Reader that wraps another io.Reader and
// tracks the total duration of the Read calls.
type TimingReader struct {
reader io.Reader
readDur time.Duration
}
// NewTimingReader returns a TimingReader.
func NewTimingReader(r io.Reader) *TimingReader {
return &TimingReader{reader: r}
}
// Read implements the io.Reader interface.
func (tr *TimingReader) Read(buf []byte) (n int, err error) {
start := time.Now()
n, err = tr.reader.Read(buf)
tr.readDur += time.Now().Sub(start)
return n, err
}
// ReadDur returns the total duration of this reader's Read calls.
func (tr *TimingReader) ReadDur() time.Duration {
return tr.readDur
}
// ListByteTrackingWriter is an io.Writer that wraps another io.Writer and
// performs byte tracking during the Write function.
type ListByteTrackingWriter struct {
	writer  io.Writer
	tracker *Tracker
	file    bool
}

// NewListByteTrackingWriter returns a ListByteTrackingWriter. If 'file' is true, timing stats
// will be written for ListFileWriteMs. If false, timing stats will be written for ListDirWriteMs.
// Returns the passed in writer for a nil receiver.
func (t *Tracker) NewListByteTrackingWriter(w io.Writer, file bool) io.Writer {
	if t == nil {
		return w
	}
	return &ListByteTrackingWriter{writer: w, tracker: t, file: file}
}

// Write implements the io.Writer interface, reporting bytes written and
// elapsed time under the file or dir stat depending on construction.
func (lbtw *ListByteTrackingWriter) Write(p []byte) (n int, err error) {
	start := time.Now()
	n, err = lbtw.writer.Write(p)
	ps := &PulseStats{ListBytes: int64(n)}
	if lbtw.file {
		ps.ListFileWriteMs = DurMs(start)
	} else {
		ps.ListDirWriteMs = DurMs(start)
	}
	lbtw.tracker.pulseStatsChan <- ps
	return n, err
}

// RecordPulseStats tracks stats contained within 'ps'.
// Takes no action for a nil receiver.
func (t *Tracker) RecordPulseStats(ps *PulseStats) {
	if t == nil {
		return
	}
	t.pulseStatsChan <- ps
}
// DurMs returns the duration in millis between time.Now() and 'start'.
func DurMs(start time.Time) int64 {
return time.Now().Sub(start).Nanoseconds() / 1000000
}
// RecordBWLimit tracks the current bandwidth limit.
// Takes no action for a nil receiver.
func (t *Tracker) RecordBWLimit(agentBW int64) {
	if t == nil {
		return
	}
	t.bwLimitChan <- agentBW
}
// RecordCtrlMsg tracks received control messages.
// Takes no action for a nil receiver.
func (t *Tracker) RecordCtrlMsg(ts time.Time) {
	// Parameter renamed from 'time', which shadowed the time package
	// (callers are unaffected: Go parameter names are not part of the
	// call contract).
	if t == nil {
		return
	}
	t.ctrlMsgChan <- ts
}
// track is the single goroutine that owns all mutable lifetime state.
// Every producer communicates over a channel, so no locks are needed here.
// It exits when ctx is cancelled.
func (t *Tracker) track(ctx context.Context) {
	for {
		select {
		case <-ctx.Done():
			if err := ctx.Err(); err != nil {
				glog.Infof("stats.Tracker track ctx ended with err: %v", err)
			}
			return
		case task := <-t.taskDoneChan:
			t.lifetime.taskDone[task]++
		case ps := <-t.pulseStatsChan:
			t.lifetime.PulseStats.add(ps)
		case agentBW := <-t.bwLimitChan:
			t.lifetime.bwLimit = agentBW
		case ctrlTime := <-t.ctrlMsgChan:
			// Variable renamed from 'time', which shadowed the time package.
			t.lifetime.ctrlMsgTime = ctrlTime
		case <-t.displayTicker.GetChannel():
			t.displayStats()
		case <-t.accumulatorTicker.GetChannel():
			t.accumulatePulseStats()
		}
		t.selectDone() // Testing hook.
	}
}
// accumulatePulseStats folds the delta of lifetime stats since the last
// tick into currPulseStats: curr += lifetime - prev, then prev = lifetime.
func (t *Tracker) accumulatePulseStats() {
	t.pulseStatsMu.Lock()
	defer t.pulseStatsMu.Unlock()
	t.currPulseStats.add(&t.lifetime.PulseStats)
	t.currPulseStats.sub(&t.prevPulseStats)
	t.prevPulseStats = t.lifetime.PulseStats
}
// displayStats renders a one-line status (throughput, totals, task counts,
// control-message age, spinner) over the previous line on stdout, and
// returns the rendered line for tests.
func (t *Tracker) displayStats() string {
	// Generate the transmission rate and sum.
	txRate := fmt.Sprintf("txRate:%v/s", byteCountBinary(t.tpTracker.Throughput(), 7))
	if txLim := t.lifetime.bwLimit; txLim > 0 && txLim < math.MaxInt32 {
		txRate += fmt.Sprintf(" (capped at %v/s)", byteCountBinary(t.lifetime.bwLimit, 7))
	}
	txSum := fmt.Sprintf("txSum:%v", byteCountBinary(t.lifetime.CopyBytes, 7))
	// Generate the task response counts, in sorted key order for stable output.
	taskResps := "taskResps["
	var keys []string
	for k := range t.lifetime.taskDone {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for i, k := range keys {
		if i > 0 {
			taskResps += " "
		}
		taskResps += fmt.Sprintf("%v:%v", k, t.lifetime.taskDone[k])
	}
	taskResps += "]"
	// Generate the control message age and status; over 30s is flagged.
	ctrlMsgAge := "-"
	ctrlMsgHealth := "-"
	if !t.lifetime.ctrlMsgTime.IsZero() {
		age := time.Now().Sub(t.lifetime.ctrlMsgTime).Truncate(time.Second)
		ctrlMsgAge = fmt.Sprintf("%v", age)
		ctrlMsgHealth = "ok"
		if age > 30*time.Second {
			ctrlMsgHealth = "??"
		}
	}
	ctrlMsg := fmt.Sprintf("ctrlMsgAge:%v (%v)", ctrlMsgAge, ctrlMsgHealth)
	// Generate the spinner.
	spinnerChars := `-\|/`
	t.spinnerIdx = (t.spinnerIdx + 1) % len(spinnerChars)
	spinner := spinnerChars[t.spinnerIdx]
	// Display the generated stats.
	// TODO(b/123023481): Ensure the Agent display works on Windows.
	fmt.Printf("\r%120s\r", "") // Overwrite the previous line and reset the cursor.
	displayLine := fmt.Sprintf("%v %v %v %v %c", txRate, txSum, taskResps, ctrlMsg, spinner)
	fmt.Print(displayLine)
	return displayLine // For testing.
}
// byteCountBinary formats b as a human-readable binary-unit size (B, KiB,
// MiB, ...) right-padded to 'pad' characters.
func byteCountBinary(b int64, pad int) string {
	const unit = 1024
	if b < unit {
		return fmt.Sprintf("%*dB", pad, b)
	}
	const suffixes = "KMGTPE"
	value := float64(b) / unit
	idx := 0
	for value >= unit && idx < len(suffixes)-1 {
		value /= unit
		idx++
	}
	return fmt.Sprintf("%*.1f%ciB", pad-2, value, suffixes[idx])
}
|
package models
import (
"encoding/json"
// "github.com/elgs/gosqljson"
"log"
// "strings"
)
// MerchantCategory maps one row of the per-service
// "<service>_merchant_category" table; form tags bind request fields.
type MerchantCategory struct {
	Id                     int64  `form:"id"`
	Merchant_category_name string `form:"merchant_category_name"`
	Slug                   string `form:"slug"`
	Create_at              int64  `form:"create_at"` // creation timestamp (numeric)
	Update_at              int64  `form:"update_at"` // last-update timestamp (numeric)
}
// ListAllCategoriesMerchant returns all merchant categories of the given
// service as a JSON array string, plus a status string ("success"/"err")
// and any error encountered.
func ListAllCategoriesMerchant(service_name string) (string, string, error) {
	merchantCat_table := service_name + "_merchant_category"
	ConnectDb()
	SQL_SELECT_merchant_category := "SELECT * FROM " + merchantCat_table
	rows, err := DB.Query(SQL_SELECT_merchant_category)
	if err != nil {
		return "", "err", err
	}
	defer rows.Close() // fix: rows were previously never closed (leak)
	var merchantCat MerchantCategory
	merchants_cat := make([]*MerchantCategory, 0, 17)
	for rows.Next() {
		err := rows.Scan(&merchantCat.Id, &merchantCat.Merchant_category_name, &merchantCat.Slug, &merchantCat.Create_at, &merchantCat.Update_at)
		if err != nil {
			return "", "err", err
		}
		merchants_cat = append(merchants_cat, &MerchantCategory{merchantCat.Id, merchantCat.Merchant_category_name, merchantCat.Slug, merchantCat.Create_at, merchantCat.Update_at})
	}
	// fix: surface iteration errors that rows.Next() swallows.
	if err := rows.Err(); err != nil {
		return "", "err", err
	}
	log.Println(merchants_cat)
	s, err := json.Marshal(merchants_cat) // fix: marshal error was discarded
	if err != nil {
		return "", "err", err
	}
	return string(s), "success", nil
}
// Save inserts the receiver as a new row in the per-service merchant
// category table, inside a transaction. Returns ("success", nil) or
// ("err", err).
func (catmerchant *MerchantCategory) Save(service_name string) (string, error) {
	merchantCat_table := service_name + "_merchant_category"
	ConnectDb()
	defer CloseDb() // fix: close on every exit path, not only after success
	tx, err := DB.Begin()
	if err != nil {
		return "err", err
	}
	SQL_INSERT_merchantCat_table := `INSERT INTO ` + merchantCat_table + `
	(merchant_category_name, slug, create_at, update_at)
	VALUES (?, ?, ?, ?)
	`
	if _, err := tx.Exec(SQL_INSERT_merchantCat_table, catmerchant.Merchant_category_name, catmerchant.Slug, catmerchant.Create_at, catmerchant.Update_at); err != nil {
		tx.Rollback()
		return "err", err
	}
	// fix: commit failure was silently ignored.
	if err := tx.Commit(); err != nil {
		return "err", err
	}
	return "success", nil
}
// Update rewrites name, slug and update_at of the row matching the
// receiver's Id, inside a transaction. Returns ("success", nil) or
// ("err", err).
func (catmerchant *MerchantCategory) Update(service_name string) (string, error) {
	merchantCat_table := service_name + "_merchant_category"
	ConnectDb()
	defer CloseDb() // fix: close on every exit path, not only after success
	tx, err := DB.Begin()
	if err != nil {
		return "err", err
	}
	UPDATE_OFFER_CAT := `UPDATE ` + merchantCat_table + ` SET
	merchant_category_name=? , slug=?, update_at=?
	WHERE id=?`
	if _, err := tx.Exec(UPDATE_OFFER_CAT, catmerchant.Merchant_category_name, catmerchant.Slug, catmerchant.Update_at, catmerchant.Id); err != nil {
		tx.Rollback()
		return "err", err
	}
	// fix: commit failure was silently ignored.
	if err := tx.Commit(); err != nil {
		return "err", err
	}
	return "success", nil
}
// Delete removes the row matching the receiver's Id, inside a transaction.
// Returns ("success", nil) or ("err", err).
func (catmerchant *MerchantCategory) Delete(service_name string) (string, error) {
	merchantCat_table := service_name + "_merchant_category"
	ConnectDb()
	defer CloseDb() // fix: close on every exit path, not only after success
	tx, err := DB.Begin()
	if err != nil {
		return "err", err
	}
	SQL_DELETE_OFFERCAT := "DELETE FROM " + merchantCat_table + " WHERE id=?"
	_, err = tx.Exec(SQL_DELETE_OFFERCAT, catmerchant.Id)
	if err != nil {
		log.Println(err) // fix: previously logged unconditionally, even nil
		tx.Rollback()
		return "err", err
	}
	// fix: commit failure was silently ignored.
	if err := tx.Commit(); err != nil {
		return "err", err
	}
	return "success", nil
}
|
/**
Array-backed (sequential) stacks implementing a browser's forward/back
navigation.
*/
package stack
import "fmt"
// MAXNUM caps how many pages either stack can hold.
const MAXNUM = 2

// main demonstrates visiting pages, going back twice, forward once, and
// visiting again (which clears the forward stack).
func main() {
	sb := newStackBrower()
	sb.visit("a")
	sb.visit("b")
	// sb.visit("c") // a third visit would exceed MAXNUM and panic in push
	sb.backward()
	sb.backward()
	sb.foreward()
	sb.visit("d")
}
// newStackBrower constructs a browser with empty forward and back stacks.
// (The misspelled name is kept: callers depend on it.)
func newStackBrower() *stackBrowser {
	sb := &stackBrowser{}
	sb.sfore = newStackA("fore", MAXNUM)
	sb.sback = newStackA("back", MAXNUM)
	return sb
}

// newStackA constructs an empty fixed-capacity stack with the given label.
func newStackA(name string, num int) *stackA {
	s := &stackA{name: name, num: num}
	s.arr = make([]string, num)
	return s
}
// stackBrowser models browser history with two stacks: sfore holds the
// pages behind the current one, sback holds pages available via "forward".
type stackBrowser struct {
	sfore *stackA
	sback *stackA
}

// visit records a new page; any pending forward history is discarded,
// matching real browser behavior.
func (sb *stackBrowser) visit(x string) {
	fmt.Printf("visit page[%s]\n", x)
	sb.sback.clear()
	sb.sfore.push(x)
	fmt.Printf("%s\n", sb.sfore)
	fmt.Printf("%s\n", sb.sback)
	fmt.Println()
}

// backward moves the current page onto the forward stack.
// Panics (via pop) when there is no page to go back from.
func (sb *stackBrowser) backward() {
	fmt.Println("backward page")
	x := sb.sfore.pop()
	sb.sback.push(x)
	fmt.Printf("%s\n", sb.sfore)
	fmt.Printf("%s\n", sb.sback)
	fmt.Println()
}

// foreward re-activates the most recently backed-out page.
// Panics (via pop) when there is no forward history.
func (sb *stackBrowser) foreward() {
	fmt.Println("foreward page")
	x := sb.sback.pop()
	sb.sfore.push(x)
	fmt.Printf("%s\n", sb.sfore)
	fmt.Printf("%s\n", sb.sback)
	fmt.Println()
}

// stackA is a fixed-capacity array-backed stack of page names.
type stackA struct {
	name string   // label used in String output
	num  int      // capacity
	cnt  int      // current number of elements
	arr  []string // backing array; arr[0] is the bottom
}
// String renders the stack bottom-to-top for the demo output.
func (sa *stackA) String() string {
	s := fmt.Sprintf("[%sstack bottom] ", sa.name)
	for _, page := range sa.arr[:sa.cnt] {
		s += fmt.Sprintf("[%s] ", page)
	}
	return s + " top"
}
// clear empties the stack; the backing array is reused.
func (sa *stackA) clear() {
	sa.cnt = 0
}

// push places x on top; panics when the stack is already at capacity.
func (sa *stackA) push(x string) {
	if sa.cnt == sa.num {
		panic("browser max page") // fix: message misspelled "brower"
	}
	sa.arr[sa.cnt] = x
	sa.cnt += 1
}

// pop removes and returns the top element; panics when the stack is empty.
func (sa *stackA) pop() string {
	if sa.cnt == 0 {
		panic("browser no page") // fix: message misspelled "brower"
	}
	x := sa.arr[sa.cnt-1]
	sa.cnt -= 1
	return x
}
|
package main
import "fmt"
func main() {
s := []int{9, 2, 3, 1, 4, 7, 5, 6, 0, 8}
res := make([]int, len(s))
mergeSortRecursive(s, res, 0, len(s)-1)
}
// mergeSortRecursive sorts s[left..right] (inclusive) ascending,
// using result as shared scratch space (len(result) must cover the
// range). Standard top-down merge sort.
func mergeSortRecursive(s, result []int, left, right int) {
	// >= (was ==) also terminates on an empty range (right < left),
	// which previously recursed without bound.
	if left >= right {
		return
	}
	middle := left + (right-left)/2 // overflow-safe midpoint
	mergeSortRecursive(s, result, left, middle)
	mergeSortRecursive(s, result, middle+1, right)
	// BUG fix: merge was called with no arguments (did not compile);
	// pass the two sorted halves s[left..middle] and s[middle+1..right].
	merge(s, result, left, middle+1, right)
}
// merge merges the adjacent sorted runs s[leftBegin..rightBegin-1]
// and s[rightBegin..rightEnd] (inclusive bounds) into result, then
// copies the merged run back into s.
//
// BUG fix: the original was a syntactically invalid stub
// ("func merge(...):" with untyped parameters) that only printed
// "MERGE NOT IMPLEMENTED".
func merge(s, result []int, leftBegin, rightBegin, rightEnd int) {
	i, j, k := leftBegin, rightBegin, leftBegin
	for i < rightBegin && j <= rightEnd {
		if s[i] <= s[j] { // <= keeps equal elements stable
			result[k] = s[i]
			i++
		} else {
			result[k] = s[j]
			j++
		}
		k++
	}
	for ; i < rightBegin; i, k = i+1, k+1 { // drain left run
		result[k] = s[i]
	}
	for ; j <= rightEnd; j, k = j+1, k+1 { // drain right run
		result[k] = s[j]
	}
	copy(s[leftBegin:rightEnd+1], result[leftBegin:rightEnd+1])
}
|
package main
// Todo is a single todo-list item as serialized to/from JSON.
type Todo struct {
	Id int `json:"id"`       // item identifier
	Title string `json:"title"` // item text
	Position int `json:"position"` // NOTE(review): presumably ordering within the list — confirm against callers
	Complete bool `json:"complete"` // whether the item is done
}
|
package abkzeromq
import (
"fmt"
"strings"
zmq "github.com/alecthomas/gozmq"
)
// READ is the callback ZmqRep invokes for one-word messages; the word
// is passed as key. Its string result is currently discarded by ZmqRep.
type READ func(key string) string
// PUSH is the callback ZmqRep invokes for two-word messages
// (key then value). Its bool result is currently discarded by ZmqRep.
type PUSH func(key string, val string) bool
// ZmqRep runs a ZeroMQ REP loop bound to both given localhost ports.
// A one-word request is dispatched to read, a two-word request to
// push; the raw request bytes are echoed back as the reply in every
// case. The loop runs until the process exits or a socket error occurs.
//
// Fix: context/socket/bind/recv/send errors were all silently ignored
// (a failed bind would previously spin on a dead socket); sockets are
// now closed on exit.
func ZmqRep(req_port int, rep_port int, read READ, push PUSH) {
	context, err := zmq.NewContext()
	if err != nil {
		fmt.Println("zmq context error:", err)
		return
	}
	defer context.Close()
	socket, err := context.NewSocket(zmq.REP)
	if err != nil {
		fmt.Println("zmq socket error:", err)
		return
	}
	defer socket.Close()
	if err := socket.Bind(fmt.Sprintf("tcp://127.0.0.1:%d", req_port)); err != nil {
		fmt.Println("zmq bind error:", err)
		return
	}
	if err := socket.Bind(fmt.Sprintf("tcp://127.0.0.1:%d", rep_port)); err != nil {
		fmt.Println("zmq bind error:", err)
		return
	}
	fmt.Printf("ZMQ REQ/REP Daemon at port %d and %d\n", req_port, rep_port)
	for {
		msg, err := socket.Recv(0)
		if err != nil {
			fmt.Println("zmq recv error:", err)
			return
		}
		fmt.Println("Got:", string(msg))
		msg_arr := strings.Fields(string(msg))
		if len(msg_arr) == 1 {
			// NOTE(review): read's result is discarded and the reply
			// echoes the request — confirm this is intended.
			read(msg_arr[0])
		} else if len(msg_arr) == 2 {
			push(msg_arr[0], msg_arr[1])
		}
		if err := socket.Send(msg, 0); err != nil {
			fmt.Println("zmq send error:", err)
			return
		}
	}
}
// ZmqReq sends dat (joined with single spaces) as one REQ message to
// a ZmqRep daemon reachable on either localhost port, then waits for
// the echoed reply.
//
// Fix: context/socket/connect/send/recv errors were all silently
// ignored; the socket and context are now closed before returning.
func ZmqReq(req_port int, rep_port int, dat ...string) {
	fmt.Printf("ZMQ REQ/REP Client at port %d and %d\n", req_port, rep_port)
	context, err := zmq.NewContext()
	if err != nil {
		fmt.Println("zmq context error:", err)
		return
	}
	defer context.Close()
	socket, err := context.NewSocket(zmq.REQ)
	if err != nil {
		fmt.Println("zmq socket error:", err)
		return
	}
	defer socket.Close()
	if err := socket.Connect(fmt.Sprintf("tcp://127.0.0.1:%d", req_port)); err != nil {
		fmt.Println("zmq connect error:", err)
		return
	}
	if err := socket.Connect(fmt.Sprintf("tcp://127.0.0.1:%d", rep_port)); err != nil {
		fmt.Println("zmq connect error:", err)
		return
	}
	msg := strings.Join(dat, " ")
	if err := socket.Send([]byte(msg), 0); err != nil {
		fmt.Println("zmq send error:", err)
		return
	}
	fmt.Printf("msg: %s\n", msg)
	if _, err := socket.Recv(0); err != nil {
		fmt.Println("zmq recv error:", err)
	}
}
|
package coinzilla
import (
"encoding/json"
"fmt"
"net/http"
"github.com/prebid/openrtb/v19/openrtb2"
"github.com/prebid/prebid-server/config"
"github.com/prebid/prebid-server/openrtb_ext"
"github.com/prebid/prebid-server/adapters"
"github.com/prebid/prebid-server/errortypes"
)
// Builder constructs the coinzilla bidder with the endpoint taken
// from the adapter configuration. It matches the prebid-server
// builder signature; bidderName and server are unused here.
func Builder(bidderName openrtb_ext.BidderName, config config.Adapter, server config.Server) (adapters.Bidder, error) {
	return &adapter{endpoint: config.Endpoint}, nil
}
// adapter is the coinzilla bidder implementation.
type adapter struct {
	endpoint string // bid endpoint URL from the adapter config
}
// MakeRequests serializes the whole OpenRTB bid request into a single
// POST to the configured endpoint. A request with no impressions is
// rejected as bad input.
func (a *adapter) MakeRequests(openRTBRequest *openrtb2.BidRequest, reqInfo *adapters.ExtraRequestInfo) (requestsToBidder []*adapters.RequestData, errs []error) {
	if len(openRTBRequest.Imp) == 0 {
		return nil, []error{&errortypes.BadInput{
			Message: "No impression in the bid request",
		}}
	}
	body, err := json.Marshal(openRTBRequest)
	if err != nil {
		return nil, append(errs, err)
	}
	headers := http.Header{}
	headers.Add("Content-Type", "application/json;charset=utf-8")
	headers.Add("Accept", "application/json")
	headers.Add("x-openrtb-version", "2.5")
	return []*adapters.RequestData{{
		Method:  "POST",
		Uri:     a.endpoint,
		Body:    body,
		Headers: headers,
	}}, nil
}
// MakeBids parses the bidder's HTTP response into prebid typed bids.
// 204 means "no bid" (nil, nil); any status other than 200 is a
// server error. Every returned bid is labeled as banner.
func (a *adapter) MakeBids(openRTBRequest *openrtb2.BidRequest, requestToBidder *adapters.RequestData, bidderRawResponse *adapters.ResponseData) (bidderResponse *adapters.BidderResponse, errs []error) {
	switch bidderRawResponse.StatusCode {
	case http.StatusOK:
		break
	case http.StatusNoContent:
		return nil, nil
	default:
		err := &errortypes.BadServerResponse{
			Message: fmt.Sprintf("Unexpected code: %d. Run with request.debug = 1", bidderRawResponse.StatusCode),
		}
		return nil, []error{err}
	}
	var openRTBBidderResponse openrtb2.BidResponse
	if err := json.Unmarshal(bidderRawResponse.Body, &openRTBBidderResponse); err != nil {
		return nil, []error{err}
	}
	// BUG fix: indexing SeatBid[0] panicked on a 200 response whose
	// seatbid array was empty; treat that as "no bids".
	if len(openRTBBidderResponse.SeatBid) == 0 {
		return nil, nil
	}
	bidsCapacity := len(openRTBBidderResponse.SeatBid[0].Bid)
	bidderResponse = adapters.NewBidderResponseWithBidsCapacity(bidsCapacity)
	bidderResponse.Currency = openRTBBidderResponse.Cur
	for _, seatBid := range openRTBBidderResponse.SeatBid {
		for i := range seatBid.Bid {
			// Take the address of the slice element (stable backing
			// array), not of the loop variable.
			bidderResponse.Bids = append(bidderResponse.Bids, &adapters.TypedBid{
				Bid:     &seatBid.Bid[i],
				BidType: "banner",
			})
		}
	}
	return bidderResponse, nil
}
|
package main
import (
"bufio"
"encoding/json"
"fmt"
"log"
"net/http"
"net/url"
"regexp"
"runtime"
"strings"
"time"
"github.com/iand/feedparser"
"github.com/koding/cache"
"html/template"
"sevki.org/lib/prettyprint"
)
var (
	// httpCache memoizes fetched feed/JSON results by URL so repeated
	// template renders do not re-hit the network; populated in init.
	httpCache *cache.MemoryTTL
)
// init creates the HTTP response cache with a one-minute TTL.
func init() {
	httpCache = cache.NewMemoryWithTTL(time.Minute)
}
// funcMap exposes the helper functions below to the HTML templates
// under the given names. Note "jsonxsfr" maps to getJsonXSFR;
// getJsonWithXSFR is not registered here.
var funcMap = template.FuncMap{
	"gover": gover,
	"now": now,
	"uptime": uptime,
	"atom": getAtom,
	"json": getJson,
	"jsonxsfr": getJsonXSFR,
	"contains": contains,
	"isrepeated": isRepeated,
	"lessthan": lessthan,
	"multiply": multiply,
	"add": add,
	"jsondate": jsondate,
	"linkify": linkify,
	"ppJson": prettyprint.AsJSON,
	"regexMatch": regexMatch,
}
// regexMatch compiles pattern a and returns the first match in b
// ("" when nothing matches). An invalid pattern yields the compile
// error text instead of a match.
func regexMatch(a, b string) string {
	re, err := regexp.Compile(a)
	if err != nil {
		return err.Error()
	}
	return re.FindString(b)
}
// jsondate parses an RFC 3339 timestamp; on parse failure it falls
// back to the current time rather than a zero time.
func jsondate(a string) time.Time {
	parsed, err := time.Parse(time.RFC3339, a)
	if err != nil {
		return time.Now()
	}
	return parsed
}
// lessthan reports whether a is strictly less than b (template helper).
func lessthan(a, b float64) bool {
	return a < b
}

// add returns a + b (template helper).
func add(a, b float64) float64 {
	return a + b
}

// multiply returns a * b (template helper).
func multiply(a, b float64) float64 {
	return a * b
}
// uptime reports how long the process has been running, measured
// from the package-level start time.
func uptime() string {
	elapsed := time.Since(start)
	return elapsed.String()
}
// renderTime is a placeholder helper; it currently always reports an
// empty render-time string.
func renderTime() string {
	const placeholder = ""
	return placeholder
}
// gover reports the Go runtime version string (template helper).
func gover() string {
	version := runtime.Version()
	return version
}
// now returns the current wall-clock time (template helper).
func now() time.Time {
	current := time.Now()
	return current
}
// contains reports whether b compares equal (==) to any element of a.
func contains(a []interface{}, b interface{}) bool {
	found := false
	for _, item := range a {
		if item == b {
			found = true
			break
		}
	}
	return found
}
// isRepeated reports whether the first argument appears again among
// the remaining arguments. With zero or one argument it is false.
//
// BUG fix: calling with no arguments previously panicked on a[0]
// (index out of range).
func isRepeated(a ...string) bool {
	if len(a) == 0 {
		return false
	}
	first := a[0]
	for _, b := range a[1:] {
		if first == b {
			return true
		}
	}
	return false
}
// getAtom fetches and parses the feed at url, caching the feed items
// by URL. Returns nil when the fetch or parse fails.
//
// Fixes: log.Fatal killed the whole server on one transient fetch
// failure (and made the following return unreachable); the
// feedparser.NewFeed error was ignored, risking a nil deref on
// feed.Items.
func getAtom(url string) interface{} {
	if val, _ := httpCache.Get(url); val != nil {
		return val
	}
	resp, err := http.Get(url)
	if err != nil {
		log.Println(err)
		return nil
	}
	defer resp.Body.Close()
	feed, err := feedparser.NewFeed(resp.Body)
	if err != nil || feed == nil {
		log.Println("feed parse:", err)
		return nil
	}
	httpCache.Set(url, feed.Items)
	return feed.Items
}
// getJsonWithXSFR fetches url, skips the single XSRF-defeating header
// line (e.g. ")]}\n"), decodes the remaining JSON, and caches the
// result by URL. Returns nil on any failure.
//
// Fixes: the ReadSlice failure path returned the error value itself
// as template data (callers expect data or nil); log.Fatal terminated
// the process on a bad payload and made the nil return unreachable.
// NOTE(review): near-duplicate of getJsonXSFR, which is the one
// registered in funcMap — consider consolidating.
func getJsonWithXSFR(url string) interface{} {
	if val, _ := httpCache.Get(url); val != nil {
		return val
	}
	resp, err := http.Get(url)
	if err != nil {
		log.Println(err)
		return nil
	}
	defer resp.Body.Close()
	// The JSON response begins with an XSRF-defeating header
	// like ")]}\n". Read that and skip it.
	br := bufio.NewReader(resp.Body)
	if _, err := br.ReadSlice('\n'); err != nil {
		log.Println(err)
		return nil
	}
	var result interface{}
	if err := json.NewDecoder(br).Decode(&result); err != nil {
		log.Println(err)
		return nil
	}
	httpCache.Set(url, result)
	return result
}
// getJson fetches url, decodes the body as JSON, and caches the
// decoded value by URL. Returns nil on any failure.
//
// Fixes: resp.Body.Close was deferred BEFORE the error check, so a
// failed request dereferenced a nil resp; log.Fatal terminated the
// process on a decode error and made the nil return unreachable.
func getJson(url string) interface{} {
	if val, _ := httpCache.Get(url); val != nil {
		return val
	}
	resp, err := http.Get(url)
	if err != nil {
		log.Println(err)
		return nil
	}
	defer resp.Body.Close()
	var result interface{}
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		log.Println(err)
		return nil
	}
	httpCache.Set(url, result)
	return result
}
// getJsonXSFR fetches url, skips the XSRF-defeating prefix line
// (")]}'\n" — documented upstream as exactly one line ending in
// '\n'), decodes the remaining JSON, and caches the result by URL.
// Returns nil on any failure.
//
// Fixes: resp.Body.Close was deferred BEFORE the error check, so a
// failed request dereferenced a nil resp; log.Fatal terminated the
// process on a decode error and made the nil return unreachable.
func getJsonXSFR(url string) interface{} {
	if val, _ := httpCache.Get(url); val != nil {
		return val
	}
	resp, err := http.Get(url)
	if err != nil {
		log.Println(err)
		return nil
	}
	defer resp.Body.Close()
	br := bufio.NewReader(resp.Body)
	// For security reasons or something, this URL starts with ")]}'\n" before
	// the JSON object. So ignore that.
	// Shawn Pearce says it's guaranteed to always be just one line, ending in '\n'.
	for {
		b, err := br.ReadByte()
		if err != nil {
			return nil
		}
		if b == '\n' {
			break
		}
	}
	var result interface{}
	if err := json.NewDecoder(br).Decode(&result); err != nil {
		log.Println(err)
		return nil
	}
	httpCache.Set(url, result)
	return result
}
// linkify splits s on single spaces and wraps every absolute URL in
// an HTML anchor whose visible text has the "scheme://" prefix
// removed; all other words pass through unchanged.
//
// BUG fix: strings.TrimLeft treats its second argument as a SET of
// characters, so it also stripped leading host letters that happened
// to occur in the scheme (e.g. "https://stackoverflow.com" rendered
// as "ackoverflow.com"). TrimPrefix removes exactly the one prefix.
func linkify(s string) template.HTML {
	ss := strings.Split(s, " ")
	var sn []string
	for _, x := range ss {
		u, err := url.Parse(x)
		if err == nil && u.IsAbs() {
			label := strings.TrimPrefix(x, u.Scheme+"://")
			sn = append(sn,
				fmt.Sprintf("<a href=\"%s\">%s</a>", x, label))
		} else {
			sn = append(sn, x)
		}
	}
	return template.HTML(strings.Join(sn, " "))
}
|
package main
import "fmt"
// x demonstrates a package-level variable; it holds the int zero
// value (0) until main assigns to it.
var x int
// main assigns 10 to the package-level x and prints its value and
// dynamic type ("10, int").
func main() {
	x = 10
	fmt.Printf("%v, %T", x, x)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.