text stringlengths 11 4.05M |
|---|
//+build wireinject
package os
import (
"github.com/google/wire"
"github.com/raba-jp/primus/pkg/backend"
"github.com/raba-jp/primus/pkg/operations/os/starlarkfn"
"github.com/raba-jp/primus/pkg/starlark"
)
// IsDarwin assembles (via google/wire) the starlark function that reports
// whether the host OS is Darwin. The wire.Build call is a compile-time
// directive; wire's code generation replaces this body, so the nil return
// is never executed.
func IsDarwin() starlark.Fn {
	wire.Build(
		backend.NewExecInterface,
		starlarkfn.IsDarwin,
	)
	return nil
}
// IsArchLinux assembles (via google/wire) the starlark function that reports
// whether the host OS is Arch Linux. Like IsDarwin, this is a wire injector
// template; the generated injector replaces this body.
func IsArchLinux() starlark.Fn {
	wire.Build(
		backend.NewFs,
		starlarkfn.IsArchLinux,
	)
	return nil
}
|
package gobucket
import (
"fmt"
"net/http"
"reflect"
"testing"
)
// TestPullRequestsService_GetAll verifies that GetAll follows the "next"
// pagination link and aggregates pull requests from every page.
func TestPullRequestsService_GetAll(t *testing.T) {
	setUp()
	defer tearDown()
	mux.HandleFunc("/2.0/repositories/batman/batcave/pullrequests/", func(w http.ResponseWriter, r *http.Request) {
		if m := "GET"; m != r.Method {
			t.Errorf("Request method = %v, expected %v", r.Method, m)
		}
		// Page 1 advertises a "next" URL; any other page returns the tail.
		if r.URL.Query().Get("page") == "1" {
			fmt.Fprint(w, `{
"pagelen": 10,
"next": "https://bitbucket.org/api/2.0/repositories/batman/?page=2",
"values": [
{
"title": "Recognition / GenericBundle"
},
{
"title": "Recognition / ResellerBundle"
},
{
"title": "Recognition / CatalogueBundle"
},
{
"title": "Recognition / Recognition Service"
},
{
"title": "Recognition / BatchBundle"
},
{
"title": "Recognition / Image Service"
},
{
"title": "Recognition / Super User"
},
{
"title": "Utility / Phingistrano"
},
{
"title": "Internal / GIT Hooks"
},
{
"title": "Internal / Bare Project Template"
}
],
"page": 1,
"size": 11
}`)
		} else {
			fmt.Fprint(w, `{
"pagelen": 1,
"values": [
{
"title": "Recognition / ClientBundle"
}
],
"page": 2,
"size": 11
}`)
		}
	})
	resp, err := client.PullRequests.GetAll("batman", "batcave")
	if err != nil {
		t.Errorf("Expected no err, got %v", err)
	}
	respLen := len(resp)
	if respLen != 11 {
		t.Errorf("Response length = %v, expected %v", respLen, 11)
	}
	// gofmt -s: the &PullRequest element type may be elided inside the
	// slice literal.
	expected := []*PullRequest{
		{Title: "Recognition / GenericBundle"},
		{Title: "Recognition / ResellerBundle"},
		{Title: "Recognition / CatalogueBundle"},
		{Title: "Recognition / Recognition Service"},
		{Title: "Recognition / BatchBundle"},
		{Title: "Recognition / Image Service"},
		{Title: "Recognition / Super User"},
		{Title: "Utility / Phingistrano"},
		{Title: "Internal / GIT Hooks"},
		{Title: "Internal / Bare Project Template"},
		{Title: "Recognition / ClientBundle"},
	}
	if !reflect.DeepEqual(resp, expected) {
		t.Errorf("Response body = %v, expected %v", resp, expected)
	}
}
// TestPullRequestsService_GetAll_ToLower is identical to GetAll's test but
// passes a mixed-case owner ("Batman"), checking that the service lowercases
// it to match the registered (lowercase) endpoint.
func TestPullRequestsService_GetAll_ToLower(t *testing.T) {
	setUp()
	defer tearDown()
	mux.HandleFunc("/2.0/repositories/batman/batcave/pullrequests/", func(w http.ResponseWriter, r *http.Request) {
		if m := "GET"; m != r.Method {
			t.Errorf("Request method = %v, expected %v", r.Method, m)
		}
		// Page 1 advertises a "next" URL; any other page returns the tail.
		if r.URL.Query().Get("page") == "1" {
			fmt.Fprint(w, `{
"pagelen": 10,
"next": "https://bitbucket.org/api/2.0/repositories/batman/?page=2",
"values": [
{
"title": "Recognition / GenericBundle"
},
{
"title": "Recognition / ResellerBundle"
},
{
"title": "Recognition / CatalogueBundle"
},
{
"title": "Recognition / Recognition Service"
},
{
"title": "Recognition / BatchBundle"
},
{
"title": "Recognition / Image Service"
},
{
"title": "Recognition / Super User"
},
{
"title": "Utility / Phingistrano"
},
{
"title": "Internal / GIT Hooks"
},
{
"title": "Internal / Bare Project Template"
}
],
"page": 1,
"size": 11
}`)
		} else {
			fmt.Fprint(w, `{
"pagelen": 1,
"values": [
{
"title": "Recognition / ClientBundle"
}
],
"page": 2,
"size": 11
}`)
		}
	})
	resp, err := client.PullRequests.GetAll("Batman", "batcave")
	if err != nil {
		t.Errorf("Expected no err, got %v", err)
	}
	respLen := len(resp)
	if respLen != 11 {
		t.Errorf("Response length = %v, expected %v", respLen, 11)
	}
	// gofmt -s: the &PullRequest element type may be elided inside the
	// slice literal.
	expected := []*PullRequest{
		{Title: "Recognition / GenericBundle"},
		{Title: "Recognition / ResellerBundle"},
		{Title: "Recognition / CatalogueBundle"},
		{Title: "Recognition / Recognition Service"},
		{Title: "Recognition / BatchBundle"},
		{Title: "Recognition / Image Service"},
		{Title: "Recognition / Super User"},
		{Title: "Utility / Phingistrano"},
		{Title: "Internal / GIT Hooks"},
		{Title: "Internal / Bare Project Template"},
		{Title: "Recognition / ClientBundle"},
	}
	if !reflect.DeepEqual(resp, expected) {
		t.Errorf("Response body = %v, expected %v", resp, expected)
	}
}
// TestPullRequestsService_GetBranch verifies that GetBranch paginates
// through all pull requests and returns the one whose source branch name
// matches. The fixture deliberately mixes decoys: top-level "branch" keys
// (wrong location) and "Name"/"Source" casing variants — Go's JSON decoding
// is case-insensitive, so entry 5 ("Source") is the expected match.
func TestPullRequestsService_GetBranch(t *testing.T) {
	setUp()
	defer tearDown()
	mux.HandleFunc("/2.0/repositories/batman/batcave/pullrequests/", func(w http.ResponseWriter, r *http.Request) {
		if m := "GET"; m != r.Method {
			t.Errorf("Request method = %v, expected %v", r.Method, m)
		}
		// Page 1 advertises a "next" URL; any other page returns the tail.
		if r.URL.Query().Get("page") == "1" {
			fmt.Fprint(w, `{
"pagelen": 10,
"next": "https://bitbucket.org/api/2.0/repositories/batman/?page=2",
"values": [
{
"title": "Recognition / GenericBundle",
"branch": {"Name":"test"}
},
{
"title": "Recognition / ResellerBundle",
"branch": {"Name":"test"}
},
{
"title": "Recognition / CatalogueBundle",
"branch": {"Name":"test"}
},
{
"title": "Recognition / Recognition Service",
"branch": {"Name":"test"}
},
{
"title": "Recognition / BatchBundle",
"Source": {"branch": {"name":"thisone"}}
},
{
"title": "Recognition / Image Service",
"branch": {"name":"test"}
},
{
"title": "Recognition / Super User",
"branch": {"name":"test"}
},
{
"title": "Utility / Phingistrano",
"source": {"branch": {"name":"test"}}
},
{
"title": "Internal / GIT Hooks",
"source": {"branch": {"name":"test"}}
},
{
"title": "Internal / Bare Project Template",
"source": {"branch": {"name":"test"}}
}
],
"page": 1,
"size": 11
}`)
		} else {
			fmt.Fprint(w, `{
"pagelen": 1,
"values": [
{
"title": "Recognition / ClientBundle",
"source": {"branch": {"name":"test"}}
}
],
"page": 2,
"size": 11
}`)
		}
	})
	resp, err := client.PullRequests.GetBranch("batman", "batcave", "thisone")
	if err != nil {
		t.Errorf("Expected no err, got %v", err)
	}
	// Only the BatchBundle PR carries source.branch.name == "thisone".
	expected := &PullRequest{
		Title: "Recognition / BatchBundle",
		Source: PlaceInfo{
			Branch: BranchName{
				Name: "thisone",
			},
		},
	}
	if !reflect.DeepEqual(resp, expected) {
		t.Errorf("Response body = %v, expected %v", resp, expected)
	}
}
// TestPullRequestsService_Approve verifies that Approve issues a POST to the
// pull request's approve endpoint.
func TestPullRequestsService_Approve(t *testing.T) {
	setUp()
	defer tearDown()
	hit := false
	mux.HandleFunc("/2.0/repositories/batman/batcave/pullrequests/123/approve", func(w http.ResponseWriter, r *http.Request) {
		if want := "POST"; r.Method != want {
			t.Errorf("Request method = %v, expected %v", r.Method, want)
		}
		hit = true
		fmt.Fprint(w, `{"status": "success"}`)
	})
	if err := client.PullRequests.Approve("batman", "batcave", 123); err != nil {
		t.Errorf("Expected no err, got %v", err)
	}
	if !hit {
		t.Errorf("Expected to hit api but didn't")
	}
}
// TestPullRequestsService_Approve_LowerCase verifies that mixed-case
// owner/repo arguments still reach the lowercase approve endpoint.
func TestPullRequestsService_Approve_LowerCase(t *testing.T) {
	setUp()
	defer tearDown()
	hit := false
	mux.HandleFunc("/2.0/repositories/batman/batcave/pullrequests/123/approve", func(w http.ResponseWriter, r *http.Request) {
		if want := "POST"; r.Method != want {
			t.Errorf("Request method = %v, expected %v", r.Method, want)
		}
		hit = true
		fmt.Fprint(w, `{"status": "success"}`)
	})
	if err := client.PullRequests.Approve("Batman", "batCave", 123); err != nil {
		t.Errorf("Expected no err, got %v", err)
	}
	if !hit {
		t.Errorf("Expected to hit api but didn't")
	}
}
// TestPullRequestsService_Merge verifies that Merge issues a POST to the
// pull request's merge endpoint, lowercasing mixed-case owner/repo input.
func TestPullRequestsService_Merge(t *testing.T) {
	setUp()
	defer tearDown()
	hit := false
	mux.HandleFunc("/2.0/repositories/batman/batcave/pullrequests/123/merge", func(w http.ResponseWriter, r *http.Request) {
		if want := "POST"; r.Method != want {
			t.Errorf("Request method = %v, expected %v", r.Method, want)
		}
		hit = true
		fmt.Fprint(w, `{"status": "success"}`)
	})
	if err := client.PullRequests.Merge("Batman", "batCave", 123, "Merge baby"); err != nil {
		t.Errorf("Expected no err, got %v", err)
	}
	if !hit {
		t.Errorf("Expected to hit api but didn't")
	}
}
// TestPullRequestsService_Unapprove verifies that Unapprove issues a DELETE
// to the pull request's approve endpoint.
func TestPullRequestsService_Unapprove(t *testing.T) {
	setUp()
	defer tearDown()
	hit := false
	mux.HandleFunc("/2.0/repositories/batman/batcave/pullrequests/123/approve", func(w http.ResponseWriter, r *http.Request) {
		if want := "DELETE"; r.Method != want {
			t.Errorf("Request method = %v, expected %v", r.Method, want)
		}
		hit = true
		fmt.Fprint(w, `{"status": "success"}`)
	})
	if err := client.PullRequests.Unapprove("batman", "batcave", 123); err != nil {
		t.Errorf("Expected no err, got %v", err)
	}
	if !hit {
		t.Errorf("Expected to hit api but didn't")
	}
}
// TestPullRequest_GetById verifies that GetById fetches a single pull
// request by its numeric id.
func TestPullRequest_GetById(t *testing.T) {
	setUp()
	defer tearDown()
	apiHit := false
	mux.HandleFunc("/2.0/repositories/batman/batcave/pullrequests/123", func(w http.ResponseWriter, r *http.Request) {
		if m := "GET"; m != r.Method {
			t.Errorf("Request method = %v, expected %v", r.Method, m)
		}
		apiHit = true
		fmt.Fprint(w, `{"title": "Recognition / GenericBundle"}`)
	})
	resp, err := client.PullRequests.GetById("batman", "batcave", 123)
	if err != nil {
		t.Errorf("Expected no err, got %v", err)
	}
	expected := &PullRequest{
		Title: "Recognition / GenericBundle",
	}
	if !reflect.DeepEqual(resp, expected) {
		t.Errorf("Response body = %v, expected %v", resp, expected)
	}
	// FIX: apiHit was set but never asserted, unlike the sibling tests.
	if apiHit != true {
		t.Errorf("Expected to hit api but didn't")
	}
}
// TestPullRequest_GetApprovals verifies that only participants who actually
// approved are returned.
func TestPullRequest_GetApprovals(t *testing.T) {
	setUp()
	defer tearDown()
	expectedUser := User{DisplayName: "Iain Cambridge"}
	pr := PullRequest{}
	// gofmt -s: the Participant element type may be elided in the literal.
	pr.Participants = []Participant{
		{Role: "REVIEWER", User: expectedUser, Approved: true},
		{Role: "REVIEWER", User: User{DisplayName: "Johnny"}, Approved: false},
	}
	found := pr.GetApprovals()
	expected := []User{expectedUser}
	if !reflect.DeepEqual(found, expected) {
		t.Errorf("Approvals = %v, expected %v", found, expected)
	}
}
// TestPullRequest_GetOwner checks that the owner is parsed from the
// destination repository's full name ("owner/repo").
func TestPullRequest_GetOwner(t *testing.T) {
	setUp()
	defer tearDown()
	var pr PullRequest
	pr.Destination.Repository.FullName = "batman/cave-system"
	if got, want := pr.GetOwner(), "batman"; got != want {
		t.Errorf("Expected %v, got %v", want, got)
	}
}
// TestPullRequest_GetOwner_Unknown checks the fallback value when the
// destination repository name is unset.
func TestPullRequest_GetOwner_Unknown(t *testing.T) {
	setUp()
	defer tearDown()
	var pr PullRequest
	if got, want := pr.GetOwner(), "unknown owner"; got != want {
		t.Errorf("Expected %v, got %v", want, got)
	}
}
// TestPullRequest_GetRepo checks that the repository name is parsed from
// the destination repository's full name ("owner/repo").
func TestPullRequest_GetRepo(t *testing.T) {
	setUp()
	defer tearDown()
	var pr PullRequest
	pr.Destination.Repository.FullName = "batman/cave-system"
	if got, want := pr.GetRepoName(), "cave-system"; got != want {
		t.Errorf("Expected %v, got %v", want, got)
	}
}
// TestPullRequest_GetRepo_Unknown checks the fallback value when the
// destination repository name is unset.
func TestPullRequest_GetRepo_Unknown(t *testing.T) {
	setUp()
	defer tearDown()
	var pr PullRequest
	if got, want := pr.GetRepoName(), "unknown repo"; got != want {
		t.Errorf("Expected %v, got %v", want, got)
	}
}
|
package system
import "github.com/fanda-org/postmasters/database/models"
// Contact is a person attached to a business record: name, communication
// details and organisational role. Pointer fields map to nullable columns.
type Contact struct {
	models.Base
	Salutation  *string `gorm:"size:5"`
	FirstName   *string `gorm:"size:50;index:ix_contact_firstname"`
	LastName    *string `gorm:"size:50"`
	Email       *string `gorm:"size:100;index:ix_contact_email"`
	WorkPhone   *string `gorm:"size:25"`
	Mobile      *string `gorm:"size:25;index:ix_contact_mobile"`
	Designation *string `gorm:"size:25"`
	Department  *string `gorm:"size:25"`
	// IsPrimary marks the primary contact of the parent record.
	IsPrimary bool `gorm:"not null"`
}
|
package main
import (
"io"
"mime"
"net/http"
"path/filepath"
)
// main registers the proxy handler on every path and serves plain HTTP on
// port 80, panicking if the listener cannot be started.
func main() {
	http.HandleFunc(`/`, serveFile)
	err := http.ListenAndServe(`:80`, nil)
	if err != nil {
		panic(err)
	}
}
// serveFile proxies the requested path from raw.githubusercontent.com,
// streaming the body back with a Content-Type derived from the file
// extension.
func serveFile(w http.ResponseWriter, r *http.Request) {
	fileData, err := http.Get(`https://raw.githubusercontent.com` + r.URL.Path)
	if err != nil {
		w.WriteHeader(404)
		return
	}
	defer fileData.Body.Close()
	// FIX: previously any upstream response (including GitHub's 404 page)
	// was relayed as a success; propagate non-200s as a miss instead.
	if fileData.StatusCode != http.StatusOK {
		w.WriteHeader(404)
		return
	}
	// Header is set only once we know we will serve real content.
	w.Header().Set(`Content-Type`, mime.TypeByExtension(filepath.Ext(r.URL.Path)))
	_, err = io.Copy(w, fileData.Body)
	if err != nil {
		// Best effort: if the copy already started, this status is a no-op.
		w.WriteHeader(500)
		return
	}
}
|
/*
Package file implements an output module for logging to a file using rlog.
*/
package file
import (
"fmt"
"github.com/rightscale/rlog/common"
"os"
"path/filepath"
)
// fileLogger is the configuration/state of the file logging module.
type fileLogger struct {
	removeNewlines bool     // replace newlines/tabs in messages, syslog-style
	fileHandle     *os.File // nil while the file is (re)opening or lost
	loggedError    bool
}
// NewFileLogger enables logging to a file. The path (path/filename) can be
// given relative to the application directory or as a full path (example:
// "myLog.txt"). When removeNewlines is set, newlines and tabs are replaced
// with ASCII characters as in syslog. If overwrite is set, the log file is
// overwritten on each application restart; otherwise logs are appended.
func NewFileLogger(path string, removeNewlines bool, overwrite bool) (*fileLogger, error) {
	logger := &fileLogger{removeNewlines: removeNewlines}
	if err := logger.openFile(path, overwrite); err != nil {
		return nil, err
	}
	return logger, nil
}
// openFile opens the log file for writing, creating parent directories as
// needed. When overwrite is true the file is truncated; otherwise writes
// are appended, creating the file if it does not exist.
func (conf *fileLogger) openFile(path string, overwrite bool) error {
	parentDir, _ := filepath.Split(path)
	if parentDir != "" {
		var dirMode os.FileMode = 0775 // user/group read/write/traverse, world read/traverse
		if err := os.MkdirAll(parentDir, dirMode); err != nil {
			return err
		}
	}
	// Open write-only (the logger never reads the file back). os.Create is
	// not used because it would be too permissive (world read/write).
	var fileMode os.FileMode = 0664 // user/group read/write, world read
	flags := os.O_WRONLY | os.O_CREATE
	if overwrite {
		// Create or truncate.
		flags |= os.O_TRUNC
	} else {
		// FIX: O_APPEND|O_CREATE atomically covers both the "file exists"
		// and "file missing" cases, removing the previous stat-then-open
		// race (TOCTOU) without changing behavior.
		flags |= os.O_APPEND
	}
	fh, err := os.OpenFile(path, flags, fileMode)
	if err != nil {
		return err
	}
	conf.fileHandle = fh
	return nil
}
// LaunchModule is intended to run in a separate goroutine and is used by
// rlog internally. It writes log messages to the file.
// Arguments: [dataChan] channel to receive log messages; [flushChan]
// channel to receive the flush command.
func (conf *fileLogger) LaunchModule(dataChan <-chan (*common.RlogMsg), flushChan chan (chan (bool))) {
	prefix := common.SyslogHeader()
	// Wait forever on the data and flush channels.
	for {
		select {
		case logMsg := <-dataChan:
			// Received a log message; write it out.
			err := conf.writeMsg(logMsg, prefix)
			if err != nil {
				// We may be able to work around intermittent failures by
				// reopening the file.
				// BUG FIX: the retry must run when reopenFile SUCCEEDS
				// (returns nil); the previous `!= nil` retried only after a
				// failed reopen — against a nil handle — and panicked even
				// when reopening had fixed the problem.
				if conf.reopenFile() == nil {
					err = conf.writeMsg(logMsg, prefix)
				}
			}
			if err != nil {
				// Panic if reopening did not resolve the issue.
				panic(err)
			}
		case ret := <-flushChan:
			// Flush pending messages and report success.
			conf.flush(dataChan, prefix)
			ret <- true
		}
	}
}
// writeMsg formats one message (adding the syslog-style prefix and, if
// configured, stripping newlines) and appends it to the log file.
func (conf *fileLogger) writeMsg(rawRlogMsg *common.RlogMsg, prefix string) error {
	_, err := fmt.Fprintln(conf.fileHandle, common.FormatMessage(rawRlogMsg, prefix, conf.removeNewlines))
	return err
}
// flush writes all pending log messages to the file.
// Arguments: [dataChan] data channel holding the pending messages;
// [prefix] log prefix.
func (conf *fileLogger) flush(dataChan <-chan (*common.RlogMsg), prefix string) {
	// We may already be panicking after losing the file handle.
	if conf.fileHandle == nil {
		return
	}
	// Reopen the file before flushing any messages to support rotation of
	// file logs in response to SIGHUP, etc.
	err := conf.reopenFile()
	if err != nil {
		// Panic if unable to reopen the log file so the service can be
		// restarted by an outer harness with alerts, etc.
		panic(err)
	}
	for {
		// Perform non-blocking reads until the channel is empty.
		select {
		case logMsg := <-dataChan:
			err = conf.writeMsg(logMsg, prefix)
			if err != nil {
				// We reopened before flushing, so a failure here cannot
				// logically be resolved by reopening again.
				panic(err)
			}
		default:
			// BUG FIX: Sync was previously placed after this infinite loop
			// and therefore unreachable; run it before returning. The error
			// is deliberately ignored — there is nothing we can do about it.
			conf.fileHandle.Sync()
			return
		}
	}
}
// reopenFile closes the current log file and reopens (or recreates) it in
// append mode, so that an external rotation that renamed the file results
// in a fresh file at the original path.
func (conf *fileLogger) reopenFile() error {
	// The *os.File remembers the name it was opened under, even if log
	// rotation has since renamed the file on disk.
	handle := conf.fileHandle
	conf.fileHandle = nil
	name := handle.Name()
	if err := handle.Close(); err != nil {
		return err
	}
	return conf.openFile(name, false)
}
|
package filedb
import (
"bufio"
"bytes"
"encoding/json"
"log"
"os"
"github.com/josetom/go-chain/db/types"
)
// FileDbIterator iterates over the JSON-encoded records of a line-oriented
// database file.
type FileDbIterator struct {
	db    *os.File
	start []byte // key to position on first; nil accepts the first record
	limit []byte // NOTE(review): stored but not enforced in the visible methods — confirm intent
	scanner   *bufio.Scanner
	hasSeeked bool
	key       []byte // key of the current record
	value     []byte // value of the current record
}
// newFileDbIterator opens the database file read-only and returns an
// iterator positioned before the first record.
func newFileDbIterator(dbPath string, start []byte, limit []byte) types.Iterator {
	d, err := os.OpenFile(dbPath, os.O_RDONLY, 0600)
	if err != nil {
		// This should not happen since FileDb would not have been created
		// in the first place.
		// FIX: the scanner was previously constructed from the (nil) file
		// before this error check; check first, then build the scanner.
		log.Fatalln("DB File cannot be opened !")
	}
	return &FileDbIterator{
		db:      d,
		scanner: bufio.NewScanner(d),
		start:   start,
		limit:   limit,
	}
}
// Key returns the key of the record the iterator is currently positioned on.
func (f *FileDbIterator) Key() []byte {
	return f.key
}
// Value returns the value of the record the iterator is currently
// positioned on.
func (f *FileDbIterator) Value() []byte {
	return f.value
}
// Next advances the iterator, returning false when there are no more
// records. The first call positions the iterator at the start key via Seek.
func (f *FileDbIterator) Next() bool {
	if !f.hasSeeked {
		return f.Seek(f.start)
	}
	// Advance to the next record (early-return form; no else after return).
	ok, record := f.scan()
	if !ok {
		return false
	}
	f.key = record.Key
	f.value = record.Value
	return true
}
// Seek scans forward until it finds the record whose key equals start (any
// record when start is nil), positioning the iterator there. Returns false
// when no matching record exists.
func (f *FileDbIterator) Seek(start []byte) bool {
	// BUG FIX: the previous loop had no post statement and never re-scanned,
	// so a non-matching first record spun forever on the same value.
	for {
		ok, record := f.scan()
		if !ok {
			return false
		}
		if start == nil || bytes.Equal(record.Key, start) {
			f.key = record.Key
			f.value = record.Value
			f.hasSeeked = true
			return true
		}
	}
}
// scan reads and decodes the next JSON line, reporting false on EOF, read
// error, or malformed JSON.
func (f *FileDbIterator) scan() (bool, types.Record) {
	if !f.scanner.Scan() {
		// EOF or a read error — either way there is no record.
		return false, types.Record{}
	}
	// Removed leftover debug logging of every raw line and decoded record;
	// also removed the scanner.Err() check, which is always nil after a
	// successful Scan.
	var record types.Record
	if err := json.Unmarshal(f.scanner.Bytes(), &record); err != nil {
		return false, types.Record{}
	}
	return true, record
}
|
package main
import (
"bytes"
"encoding/base64"
"fmt"
"io/ioutil"
"log"
)
// Demonstrates base64 encoding and decoding with the standard library.
func main() {
	// The payload to encode.
	var origin = []byte("Hello World!")
	// Output buffer for the streaming encoder.
	var buf bytes.Buffer
	// A custom 64-character alphabet (here identical to the standard one).
	var customEncode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
	// Build a *base64.Encoding from the alphabet; NewEncoding requires a
	// 64-byte string (the original comment incorrectly said 32).
	e := base64.NewEncoding(customEncode)
	// Wrap the buffer in a streaming base64 encoder.
	w := base64.NewEncoder(e, &buf)
	// Write the payload through the encoder.
	if _, err := w.Write(origin); err != nil {
		log.Fatal(err)
	}
	// Close flushes any partially-written final block.
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("base64编码内容: ", string(buf.Bytes()))
	// Streaming decoder over the standard alphabet.
	r := base64.NewDecoder(base64.StdEncoding, &buf)
	// Drain the decoder.
	b, err := ioutil.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("base64解码内容: ", string(b))
	// One-shot encode with the standard alphabet.
	originEncode := base64.StdEncoding.EncodeToString(origin)
	fmt.Println("base64编码内容: ", originEncode)
	// One-shot decode with the standard alphabet.
	originBytes, err := base64.StdEncoding.DecodeString(originEncode)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("base64解码内容: ", string(originBytes))
	// Maximum encoded length of the input.
	var ne = base64.StdEncoding.EncodedLen(len(origin))
	// Destination buffer for Encode.
	var dst = make([]byte, ne)
	// Encode src into dst; writes at most EncodedLen(len(src)) bytes.
	base64.StdEncoding.Encode(dst, origin)
	fmt.Println("base64编码内容: ", string(dst))
	// Maximum decoded length of the encoded data.
	var nd = base64.StdEncoding.DecodedLen(len(dst))
	// Destination buffer for Decode.
	var originText = make([]byte, nd)
	if _, err := base64.StdEncoding.Decode(originText, dst); err != nil {
		log.Fatal(err)
	}
	fmt.Println("base64解码内容: ", string(originText))
	// WithPadding derives a new encoding with the given padding rune (or
	// NoPadding to disable padding). The padding rune may not be '\r' or
	// '\n', may not appear in the alphabet, and must be <= '\xff'.
	// (The result is discarded here; this call is purely illustrative.)
	base64.StdEncoding.WithPadding(base64.StdPadding)
	// base64.StdEncoding    — the standard alphabet
	// base64.URLEncoding    — URL/filename-safe alphabet
	// base64.RawStdEncoding — standard alphabet, no padding
	// base64.RawURLEncoding — URL-safe alphabet, no padding
}
package main
import (
"fmt"
"os"
"github.com/lorenzoranucci/bookmark-search-backend/internal/pkg/infrastructure/cli"
)
// version is overridable at build time (e.g. via -ldflags "-X main.version=...").
var version = "dev"

// app is the CLI application configured with the build version.
var app = cli.GetApp(version)
// main runs the CLI application with the process arguments and prints any
// resulting error.
func main() {
	if err := app.Run(os.Args); err != nil {
		fmt.Println(err.Error())
	}
}
|
package TestPerformance
import (
"fmt"
"reflect"
"testing"
"unsafe"
)
// Test_append demonstrates when append reallocates a slice's backing array:
// slice1 (len 1, cap 4) grows to len 4 <= cap, so it stays inside arr1 and
// overwrites it; slice2 (len 2, cap 4) would grow to len 5 > cap, so append
// moves it to a new array and arr2 is left untouched.
// NOTE(review): reflect.SliceHeader is deprecated in newer Go; it is used
// here only to print the data pointer for illustration.
func Test_append(t *testing.T) {
	arr1 := [5]int{1, 2, 3, 4, 5}
	fmt.Printf("2th of arr1 addr: %x\n", unsafe.Pointer(&arr1[1]))
	slice1 := arr1[1:2]
	fmt.Printf("slice1: len is %d, cap is %d\n", len(slice1), cap(slice1))
	fmt.Printf("slice1 data addr before append: %x\n", ((*reflect.SliceHeader)(unsafe.Pointer(&slice1))).Data)
	// Fits within capacity: same backing array, arr1 is modified.
	slice1 = append(slice1, 6, 7, 8)
	fmt.Printf("slice1 data addr after append: %x\n", ((*reflect.SliceHeader)(unsafe.Pointer(&slice1))).Data)
	fmt.Println("slice1:", slice1)
	fmt.Println("arr1:", arr1)
	println("----------------------------------------")
	arr2 := [5]int{1, 2, 3, 4, 5}
	fmt.Printf("2th of arr2 addr: %x\n", unsafe.Pointer(&arr2[1]))
	slice2 := arr2[1:3]
	fmt.Printf("slice2 data addr before append: %x\n", ((*reflect.SliceHeader)(unsafe.Pointer(&slice2))).Data)
	// Exceeds capacity: append allocates a new array, arr2 is unchanged.
	slice2 = append(slice2, 6, 7, 8)
	fmt.Printf("slice2 data addr after append: %x\n", ((*reflect.SliceHeader)(unsafe.Pointer(&slice2))).Data)
	fmt.Println("slice2:", slice2)
	fmt.Println("arr2:", arr2)
}
|
package public
import (
"github.com/go-on/app"
"net/http"
)
// Public serves public routes by delegating to an embedded app.Dispatcher.
type Public struct {
	*app.Dispatcher
}
// ServeHTTP forwards the request to the embedded dispatcher.
func (p *Public) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	p.Dispatcher.ServeHTTP(rw, r)
}
// MountPath returns the empty mount path, which acts as the fallback for
// every path.
// NOTE(review): value receiver here vs pointer receiver on ServeHTTP —
// confirm this asymmetry is intentional.
func (p Public) MountPath() app.MountPath {
	return app.MountPath("")
}
// Compile-time check that *Public satisfies app.App.
var _ app.App = &Public{}

// New returns a Public handler backed by a fresh dispatcher.
func New() *Public {
	return &Public{app.NewDispatcher()}
}
|
package tracks
import (
"errors"
"fmt"
"math/rand"
"time"
)
// Sentinel errors returned by track operations.
var (
	ErrTrackNotExists = errors.New("Track not exists")
	ErrGenerateFailed = errors.New("Fail to generate UUID")
)
// Tracks maps a UUID to its track.
type Tracks map[string]*Track

// Track currently only stores the file path and owning playlist; more
// fields will be added later.
type Track struct {
	path     string
	playlist string
}
// Playlist returns the name of the playlist this track belongs to.
func (tk Track) Playlist() string {
	return tk.playlist
}
// New returns an empty, ready-to-use track registry.
func New() Tracks {
	return make(Tracks)
}
// Track looks up the track stored under uuid, reporting whether it exists.
func (tks Tracks) Track(uuid string) (*Track, bool) {
	track, ok := tks[uuid]
	return track, ok
}
// Uuidgen generates a random-number-based UUID string (the Linux default
// scheme) from the supplied source.
// NOTE(review): %x is not zero-padded, so groups may be narrower than the
// canonical 8-4-4-4-12 layout — confirm whether fixed width was intended.
func Uuidgen(r *rand.Rand) string {
	return fmt.Sprintf("%x%x-%x-%x-%x-%x%x%x",
		r.Int31(), r.Int31(),
		r.Int31(),
		(r.Int31()&0x0fff)|0x4000, // 32-bit hex of the form 4xxx (4 indicates the UUID version)
		r.Int31()%0x3fff+0x8000, // intended range [0x8000, 0xbfff]; % 0x3fff actually tops out at 0xbffe
		r.Int31(), r.Int31(), r.Int31())
}
// AddTrack registers a new track under a freshly generated UUID and
// returns that UUID.
func (tks Tracks) AddTrack(path, playlist string) (string, error) {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	uuid := Uuidgen(rng)
	if uuid == "" {
		return "", ErrGenerateFailed
	}
	tks[uuid] = &Track{path: path, playlist: playlist}
	return uuid, nil
}
// DelTrack removes the track stored under uuid, returning
// ErrTrackNotExists when no such track is registered.
func (tks Tracks) DelTrack(uuid string) error {
	if _, ok := tks[uuid]; !ok {
		return ErrTrackNotExists
	}
	delete(tks, uuid)
	return nil
}
|
package ch2
//调用独立的c代码,这种情况适用于调用复杂的C代码
//下面的代码前两行告诉了go程序在哪里找callC.h和callC.a
//#cgo CFLAGS: -I${SRCDIR}/clib
//#cgo LDFLAGS: ${SRCDIR}/clib/callC.a
//#include <stdlib.h>
//#include <callC.h>
import "C"
import (
"fmt"
"unsafe"
)
// callSeparateCCode demonstrates calling functions from a separately
// compiled C library (located via the cgo directives above import "C").
func callSeparateCCode() {
	fmt.Println("Going to call a C function!")
	C.cHello()
	fmt.Println("Going to call another C function!")
	// Allocate a C copy of the Go string.
	myMessage := C.CString("This is Mihalis!")
	// Free the C-allocated memory when done.
	defer C.free(unsafe.Pointer(myMessage))
	C.printMessage(myMessage)
	fmt.Println("All perfectly done!")
}
|
package typecheck
import (
"fmt"
"github.com/stephens2424/php/ast"
)
// Walker walks a PHP AST, type-checking as it goes; it embeds
// ast.DefaultWalker for the default traversal behavior.
type Walker struct {
	ast.DefaultWalker
}
// Walk recursively visits node, descending into blocks and reporting any
// if-branch whose condition cannot evaluate to a boolean.
func (w *Walker) Walk(node ast.Node) {
	switch n := node.(type) {
	case ast.Block:
		// Recurse into each statement of the block.
		for _, stmt := range n.Statements {
			w.Walk(stmt)
		}
	case *ast.IfStmt:
		for _, branch := range n.Branches {
			fmt.Println("parsed condition:", branch.Condition)
			// Flag conditions whose possible types exclude boolean.
			if !branch.Condition.EvaluatesTo().Contains(ast.Boolean) {
				w.Errorf("If condition does not evaluate to boolean")
			}
		}
	}
}
|
package mongo
import (
"2C_vehicle_ms/pkg"
"log"
"gopkg.in/mgo.v2"
"gopkg.in/mgo.v2/bson"
"github.com/night-codes/mgo-ai"
//"reflect"
)
// VehicleService provides CRUD access to the vehicles collection.
type VehicleService struct {
	collection *mgo.Collection
	//hash root.Hash
}
// NewVehicleService returns a service bound to the given collection,
// ensuring the vehicle index exists.
func NewVehicleService(session *Session, dbName string, collectionName string) *VehicleService {
	collection := session.GetCollection(dbName, collectionName)
	// FIX: EnsureIndex's error was silently discarded; surface it in the log.
	if err := collection.EnsureIndex(vehicleModelIndex()); err != nil {
		log.Println("vehicle index:", err)
	}
	return &VehicleService{collection}
}
// CreateVehicle assigns the next auto-increment id to the vehicle and
// inserts it into the collection.
func (p *VehicleService) CreateVehicle(v *root.Vehicle) (*root.Vehicle, error) {
	vehicle := newVehicleModel(v)
	// Connect the AutoIncrement counter to collection "vehicle".
	session, err := mgo.Dial("vehicles-db:7005")
	if err != nil {
		// NOTE(review): Fatal aborts the whole process; consider returning
		// the error instead.
		log.Fatal(err)
	}
	// BUG FIX: the dialed session was never closed, leaking one connection
	// per created vehicle.
	defer session.Close()
	ai.Connect(session.DB("2C_vehicle_db").C("vehicle"))
	vehicle.ID = ai.Next("vehicle")
	vehicle.VehicleId = vehicle.ID
	err = p.collection.Insert(&vehicle)
	return vehicle.toRootVehicle(), err
}
// DeleteById removes the vehicle document whose _id equals id.
func (p *VehicleService) DeleteById(id int) error {
	return p.collection.Remove(bson.M{"_id": id})
}
// GetById fetches a single vehicle by its _id.
func (p *VehicleService) GetById(id int) (*root.Vehicle, error) {
	var m vehicleModel
	err := p.collection.Find(bson.M{"_id": id}).One(&m)
	return m.toRootVehicle(), err
}
// GetAll returns every vehicle in the collection.
func (p *VehicleService) GetAll() ([]*root.Vehicle, error) {
	model := []vehicleModel{}
	var salida []*root.Vehicle
	err := p.collection.Find(nil).All(&model)
	// BUG FIX: the loop previously started at index 1 and silently dropped
	// the first vehicle; iterate from the beginning, as GetByPlate does.
	for i := range model {
		salida = append(salida, model[i].toRootVehicle())
	}
	return salida, err
}
// GetByPlate returns all vehicles registered under the given plate.
func (p *VehicleService) GetByPlate(plate string) ([]*root.Vehicle, error) {
	model := []vehicleModel{}
	var salida []*root.Vehicle
	err := p.collection.Find(bson.M{"plate": plate}).All(&model)
	// Removed leftover "------------->" debug logging; the len-guard was
	// redundant (ranging an empty slice is a no-op).
	for i := range model {
		salida = append(salida, model[i].toRootVehicle())
	}
	return salida, err
}
// GetByUserId returns all vehicles owned by the given user id.
func (p *VehicleService) GetByUserId(userid int) ([]*root.Vehicle, error) {
	model := []vehicleModel{}
	var salida []*root.Vehicle
	err := p.collection.Find(bson.M{"user_id": userid}).All(&model)
	// Removed leftover "------------->" debug logging; the len-guard was
	// redundant (ranging an empty slice is a no-op).
	for i := range model {
		salida = append(salida, model[i].toRootVehicle())
	}
	return salida, err
}
// UpdateById replaces the vehicle document with _id == id, forcing both id
// fields of the stored model to match the path id.
func (p *VehicleService) UpdateById(id int, v *root.Vehicle) (*root.Vehicle, error) {
	vehicle := updateVehicleModel(v)
	vehicle.ID = uint64(id)
	vehicle.VehicleId = uint64(id)
	err := p.collection.UpdateId(id, &vehicle)
	return vehicle.toRootVehicle(), err
}
|
package main
import (
"os"
"strconv"
"strings"
"time"
logging "github.com/op/go-logging"
)
// Application logger.
var log = logging.MustGetLogger("libivrt-app")

// Global application state (start time, parsed config).
var app appState
// main configures logging, parses the CLI arguments, initializes libvirt
// and starts the HTTP API.
func main() {
	app.started = time.Now()
	format := logging.MustStringFormatter(
		"%{color}%{time:15:04:05.000} %{shortfunc} ▶ \t%{level:.4s} %{id:03x}%{color:reset} %{message}",
	)
	backend := logging.NewLogBackend(os.Stderr, "", 0)
	backendFormatter := logging.NewBackendFormatter(backend, format)
	// BUG FIX: passing both the raw backend and the formatted one to
	// SetBackend logged every message twice (once unformatted); install
	// only the formatted backend.
	logging.SetBackend(backendFormatter)
	log.Info("libvirt API - James Farrugia 2018")
	conf := doInit(os.Args[1:])
	app.config = &conf
	lvInit()
	err := doStartAPI(conf.host, conf.port)
	if err != nil {
		log.Error(err.Error())
	}
}
// doInit walks the command-line arguments and builds the config, defaulting
// to 127.0.0.1:8080. It panics when -port= is not numeric.
func doInit(args []string) config {
	conf := config{host: "127.0.0.1", port: 8080}
	for _, arg := range args {
		// TrimPrefix returns the argument unchanged when the prefix is
		// absent, so a changed value means the flag matched.
		if v := strings.TrimPrefix(arg, "-host="); v != arg {
			conf.host = v
		}
		if v := strings.TrimPrefix(arg, "-port="); v != arg {
			port, err := strconv.Atoi(v)
			if err != nil {
				panic("Port must be a number")
			}
			conf.port = port
		}
	}
	return conf
}
|
package main
// specialForm is an evaluator for forms that receive their arguments
// unevaluated, together with the current environment.
type specialForm func(args []interface{}, env map[string]interface{}) (interface{}, error)

// variadicProc is a procedure that binds all its arguments to a single
// parameter name.
type variadicProc struct {
	param string
	body  func(env map[string]interface{}) (interface{}, error)
}

// proc is a procedure with a fixed list of named parameters.
type proc struct {
	params []string
	body   func(env map[string]interface{}) (interface{}, error)
}
|
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package encryption
import (
"bytes"
"io"
"testing"
"storj.io/common/testrand"
)
// TestAesGcm round-trips ten blocks of random data through the AES-GCM
// encrypter/decrypter pair (4 KiB encrypted block size) and checks the
// plaintext survives intact.
func TestAesGcm(t *testing.T) {
	key := testrand.Key()
	var firstNonce AESGCMNonce
	testrand.Read(firstNonce[:])
	encrypter, err := NewAESGCMEncrypter(&key, &firstNonce, 4*1024)
	if err != nil {
		t.Fatal(err)
	}
	// Exactly ten input blocks of plaintext.
	data := testrand.BytesInt(encrypter.InBlockSize() * 10)
	encrypted := TransformReader(io.NopCloser(bytes.NewReader(data)), encrypter, 0)
	decrypter, err := NewAESGCMDecrypter(&key, &firstNonce, 4*1024)
	if err != nil {
		t.Fatal(err)
	}
	decrypted := TransformReader(encrypted, decrypter, 0)
	data2, err := io.ReadAll(decrypted)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(data, data2) {
		t.Fatalf("encryption/decryption failed")
	}
}
|
//nolint:unparam // we don't care about these linters in test cases
package backoff
import (
"context"
"errors"
"math"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// errTest is the transient error used to force retries in these tests.
var errTest = errors.New("test")

const (
	intervalDelta = 100 * time.Millisecond // allowed deviation to pass the test
	retryCount    = 5                      // failures injected before success
)
// assertInterval asserts that actual lies in
// [expected, expected+intervalDelta], short-circuiting on the first
// failed assertion just like the && form.
func assertInterval(t assert.TestingT, expected time.Duration, actual time.Duration) bool {
	if !assert.GreaterOrEqual(t, actual.Microseconds(), expected.Microseconds()) {
		return false
	}
	return assert.LessOrEqual(t, actual.Microseconds(), (expected+intervalDelta).Microseconds())
}
// TestNoError checks that Retry returns nil when the operation succeeds on
// the first attempt.
func TestNoError(t *testing.T) {
	err := Retry(ZeroBackOff(), func() error {
		return nil
	})
	assert.NoError(t, err)
}
// TestPermanentError checks that a Permanent-wrapped error stops retrying
// and is surfaced with its original message.
func TestPermanentError(t *testing.T) {
	err := Retry(ZeroBackOff(), func() error {
		return Permanent(errTest)
	})
	assert.EqualError(t, err, errTest.Error())
}
// TestZeroBackOff retries with no delay until the operation succeeds after
// retryCount failures, checking attempts are essentially back-to-back.
func TestZeroBackOff(t *testing.T) {
	var count uint
	last := time.Now()
	err := Retry(ZeroBackOff(), func() error {
		now := time.Now()
		// Zero backoff: expect (nearly) no gap between attempts.
		assertInterval(t, 0, now.Sub(last))
		last = now
		if count >= retryCount {
			return nil
		}
		count++
		return errTest
	})
	assert.NoError(t, err)
	assert.EqualValues(t, retryCount, count)
}
// TestConstantBackOff checks that consecutive attempts are spaced by the
// configured constant interval.
func TestConstantBackOff(t *testing.T) {
	const interval = 2 * intervalDelta
	p := ConstantBackOff(interval)
	var (
		count uint
		last  time.Time
	)
	err := Retry(p, func() error {
		now := time.Now()
		// No interval to verify on the very first attempt.
		if count > 0 {
			assertInterval(t, interval, now.Sub(last))
		}
		last = now
		if count >= retryCount {
			return nil
		}
		count++
		return errTest
	})
	assert.NoError(t, err)
	assert.EqualValues(t, retryCount, count)
}
// TestExponentialBackOff checks that the delay before attempt k grows as
// interval * factor^(k-1).
func TestExponentialBackOff(t *testing.T) {
	const (
		interval = 2 * intervalDelta
		factor   = 1.5
	)
	p := ExponentialBackOff(interval, factor)
	var (
		count uint
		last  time.Time
	)
	err := Retry(p, func() error {
		now := time.Now()
		if count > 0 {
			// Expected gap before the count-th retry.
			expected := time.Duration(float64(interval) * math.Pow(factor, float64(count-1)))
			assertInterval(t, expected, now.Sub(last))
		}
		last = now
		if count >= retryCount {
			return nil
		}
		count++
		return errTest
	})
	assert.NoError(t, err)
	assert.EqualValues(t, retryCount, count)
}
// TestExponentialBackOffParallel exercises a single ExponentialBackOff
// policy value from several goroutines concurrently, each expecting its own
// independent exponential schedule.
func TestExponentialBackOffParallel(t *testing.T) {
	const (
		interval    = 20 * time.Millisecond
		factor      = 1.5
		parallelism = 3
	)
	var wg sync.WaitGroup
	p := ExponentialBackOff(interval, factor)
	test := func() {
		defer wg.Done()
		var (
			count uint
			last  time.Time
		)
		err := Retry(p, func() error {
			now := time.Now()
			if count > 0 {
				// Expected gap before the count-th retry.
				expected := time.Duration(float64(interval) * math.Pow(factor, float64(count-1)))
				assertInterval(t, expected, now.Sub(last))
			}
			last = now
			if count >= retryCount {
				return nil
			}
			count++
			return errTest
		})
		assert.NoError(t, err)
		assert.EqualValues(t, retryCount, count)
	}
	wg.Add(parallelism)
	for i := 0; i < parallelism; i++ {
		go test()
	}
	wg.Wait()
}
// BenchmarkBackOff measures the per-retry overhead of a fully decorated
// policy (retry cap, interval cap, timeout, cancellation and jitter).
func BenchmarkBackOff(b *testing.B) {
	p := ZeroBackOff().With(
		MaxRetries(b.N),
		MaxInterval(time.Microsecond),
		Timeout(time.Now().Add(time.Minute)),
		Cancel(context.Background()),
		Jitter(0.5),
	)
	b.ResetTimer()
	// The operation always fails; the benchmark exercises the backoff
	// machinery itself.
	_ = Retry(p, func() error {
		return errTest
	})
}
|
package main
import (
"bufio"
"fmt"
"os"
"sort"
)
// Buffered stdin/stdout to keep formatted I/O fast.
var writer = bufio.NewWriter(os.Stdout)
var reader = bufio.NewReader(os.Stdin)

// printf writes formatted output to the buffered stdout writer.
func printf(f string, a ...interface{}) { fmt.Fprintf(writer, f, a...) }

// scanf reads formatted input from the buffered stdin reader.
func scanf(f string, a ...interface{}) { fmt.Fscanf(reader, f, a...) }
// Person is one input record: age, original input position, and name.
type Person struct {
	age   int
	order int
	name  string
}
// SortBy orders people by ascending age, breaking ties by input position to
// make the (unstable) sort behave stably.
type SortBy []Person

func (a SortBy) Len() int      { return len(a) }
func (a SortBy) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Less compares by age first, then by original input order on equal ages.
func (a SortBy) Less(i, j int) bool {
	if a[i].age == a[j].age {
		return a[i].order < a[j].order
	}
	return a[i].age < a[j].age
}
// main reads n people (one "age name" pair per line), sorts them by age
// with ties kept in input order, and prints the sorted list.
func main() {
	defer writer.Flush()

	var n int
	scanf("%d\n", &n)

	people := make(SortBy, n)
	for i := range people {
		scanf("%d %s\n", &people[i].age, &people[i].name)
		people[i].order = i
	}

	sort.Sort(people)
	for _, p := range people {
		printf("%d %s\n", p.age, p.name)
	}
}
|
// date: 2019-03-18
package broker
|
/*
Copyright 2018 The Tilt Dev Authors
Copyright 2023 Docker Compose CLI authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package sync
import (
"archive/tar"
"bytes"
"context"
"fmt"
"io"
"io/fs"
"os"
"path"
"path/filepath"
"strings"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/compose-spec/compose-go/types"
moby "github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/archive"
)
// archiveEntry is a single file scheduled for inclusion in a tar archive:
// the local path to read contents from, its stat info, and the pre-built
// tar header (whose Name holds the in-container destination path).
type archiveEntry struct {
	path   string
	info   os.FileInfo
	header *tar.Header
}
// LowLevelClient is the minimal container-engine surface the Tar syncer
// needs: listing a service's containers and exec-ing a command in one,
// optionally feeding it stdin.
type LowLevelClient interface {
	ContainersForService(ctx context.Context, projectName string, serviceName string) ([]moby.Container, error)
	Exec(ctx context.Context, containerID string, cmd []string, in io.Reader) error
}
// Tar is a Syncer that copies local files into a service's containers by
// streaming a tar archive to a `tar -x` process exec'd in each container.
type Tar struct {
	client      LowLevelClient
	projectName string
}

// Compile-time assertion that *Tar satisfies Syncer.
var _ Syncer = &Tar{}

// NewTar creates a Tar syncer for the given compose project.
func NewTar(projectName string, client LowLevelClient) *Tar {
	return &Tar{
		projectName: projectName,
		client:      client,
	}
}
// Sync propagates the given path changes into every running container of
// the service: host paths that no longer exist are deleted in-container
// (rm -rf), and the remainder are streamed as one tar archive fanned out
// to a `tar -x` exec'd in each container.
func (t *Tar) Sync(ctx context.Context, service types.ServiceConfig, paths []PathMapping) error {
	containers, err := t.client.ContainersForService(ctx, t.projectName, service.Name)
	if err != nil {
		return err
	}
	// Partition the mappings: vanished host paths become deletions,
	// everything else is copied.
	var pathsToCopy []PathMapping
	var pathsToDelete []string
	for _, p := range paths {
		if _, err := os.Stat(p.HostPath); err != nil && errors.Is(err, fs.ErrNotExist) {
			pathsToDelete = append(pathsToDelete, p.ContainerPath)
		} else {
			pathsToCopy = append(pathsToCopy, p)
		}
	}
	var deleteCmd []string
	if len(pathsToDelete) != 0 {
		deleteCmd = append([]string{"rm", "-rf"}, pathsToDelete...)
	}
	copyCmd := []string{"tar", "-v", "-C", "/", "-x", "-f", "-"}
	// One pipe per container; the single tar stream is duplicated to all of
	// them below via the lossy multi-writer.
	var eg multierror.Group
	writers := make([]*io.PipeWriter, len(containers))
	for i := range containers {
		containerID := containers[i].ID
		r, w := io.Pipe()
		writers[i] = w
		eg.Go(func() error {
			// Deletions run first so a re-created path is not deleted after
			// being extracted.
			if len(deleteCmd) != 0 {
				if err := t.client.Exec(ctx, containerID, deleteCmd, nil); err != nil {
					return fmt.Errorf("deleting paths in %s: %w", containerID, err)
				}
			}
			if err := t.client.Exec(ctx, containerID, copyCmd, r); err != nil {
				return fmt.Errorf("copying files to %s: %w", containerID, err)
			}
			return nil
		})
	}
	multiWriter := newLossyMultiWriter(writers...)
	tarReader := tarArchive(pathsToCopy)
	defer func() {
		_ = tarReader.Close()
		multiWriter.Close()
	}()
	_, err = io.Copy(multiWriter, tarReader)
	if err != nil {
		return err
	}
	// Close the pipe writers so the in-container tar processes see EOF
	// before we wait on them. NOTE(review): Close is also deferred above —
	// this assumes calling it twice is safe; confirm in newLossyMultiWriter.
	multiWriter.Close()
	return eg.Wait().ErrorOrNil()
}
// ArchiveBuilder incrementally writes tar entries for local files to an
// underlying writer.
type ArchiveBuilder struct {
	tw *tar.Writer
	// A shared I/O buffer to help with file copying.
	copyBuf *bytes.Buffer
}
// NewArchiveBuilder returns an ArchiveBuilder that emits tar entries to the
// given writer.
func NewArchiveBuilder(writer io.Writer) *ArchiveBuilder {
	return &ArchiveBuilder{
		tw:      tar.NewWriter(writer),
		copyBuf: &bytes.Buffer{},
	}
}
// Close finishes the archive by flushing the tar footer to the underlying
// writer; it does not close the underlying writer itself.
func (a *ArchiveBuilder) Close() error {
	return a.tw.Close()
}
// ArchivePathsIfExist creates a tar archive of all local files in `paths`,
// quietly skipping any paths that don't exist on the host.
func (a *ArchiveBuilder) ArchivePathsIfExist(paths []PathMapping) error {
	// Overlapping syncs are handled by (1) collecting every candidate entry,
	// (2) de-duplicating with last-one-wins semantics, and (3) only then
	// writing the survivors.
	//
	// It's not obvious that this is the correct behavior. A better approach
	// (that's more in-line with how syncs work) might ignore files in earlier
	// path mappings when we know they're going to be "synced" over.
	// There's a bunch of subtle product decisions about how overlapping path
	// mappings work that we're not sure about.
	var entries []archiveEntry
	for _, mapping := range paths {
		found, err := a.entriesForPath(mapping.HostPath, mapping.ContainerPath)
		if err != nil {
			return fmt.Errorf("inspecting %q: %w", mapping.HostPath, err)
		}
		entries = append(entries, found...)
	}
	for _, entry := range dedupeEntries(entries) {
		if err := a.writeEntry(entry); err != nil {
			return fmt.Errorf("archiving %q: %w", entry.path, err)
		}
	}
	return nil
}
// writeEntry writes one archiveEntry to the tar stream: header only for
// non-regular files, header plus file contents for regular files. Files
// deleted between stat and open are silently skipped.
func (a *ArchiveBuilder) writeEntry(entry archiveEntry) error {
	pathInTar := entry.path
	header := entry.header
	if header.Typeflag != tar.TypeReg {
		// anything other than a regular file (e.g. dir, symlink) just needs the header
		if err := a.tw.WriteHeader(header); err != nil {
			return fmt.Errorf("writing %q header: %w", pathInTar, err)
		}
		return nil
	}
	file, err := os.Open(pathInTar)
	if err != nil {
		// In case the file has been deleted since we last looked at it.
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}
	defer func() {
		_ = file.Close()
	}()
	// The size header must match the number of contents bytes.
	//
	// There is room for a race condition here if something writes to the file
	// after we've read the file size.
	//
	// For small files, we avoid this by first copying the file into a buffer,
	// and using the size of the buffer to populate the header.
	//
	// For larger files, we don't want to copy the whole thing into a buffer,
	// because that would blow up heap size. There is some danger that this
	// will lead to a spurious error when the tar writer validates the sizes.
	// That error will be disruptive but will be handled as best as we
	// can downstream.
	useBuf := header.Size < 5000000 // buffer files under ~5 MB
	if useBuf {
		a.copyBuf.Reset()
		_, err = io.Copy(a.copyBuf, file)
		if err != nil && err != io.EOF {
			return fmt.Errorf("copying %q: %w", pathInTar, err)
		}
		header.Size = int64(len(a.copyBuf.Bytes()))
	}
	// wait to write the header until _after_ the file is successfully opened
	// to avoid generating an invalid tar entry that has a header but no contents
	// in the case the file has been deleted
	err = a.tw.WriteHeader(header)
	if err != nil {
		return fmt.Errorf("writing %q header: %w", pathInTar, err)
	}
	if useBuf {
		_, err = io.Copy(a.tw, a.copyBuf)
	} else {
		_, err = io.Copy(a.tw, file)
	}
	if err != nil && err != io.EOF {
		return fmt.Errorf("copying %q: %w", pathInTar, err)
	}
	// explicitly flush so that if the entry is invalid we will detect it now and
	// provide a more meaningful error
	if err := a.tw.Flush(); err != nil {
		return fmt.Errorf("finalizing %q: %w", pathInTar, err)
	}
	return nil
}
// entriesForPath collects the archive entries for localPath, mapping each
// file to its destination under containerPath (recursively for directories).
// e.g. tarring my_dir --> dest d: d/file_a, d/file_b
// If source path does not exist, quietly skips it and returns no err.
func (a *ArchiveBuilder) entriesForPath(localPath, containerPath string) ([]archiveEntry, error) {
	localInfo, err := os.Stat(localPath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil
		}
		return nil, err
	}
	localPathIsDir := localInfo.IsDir()
	if localPathIsDir {
		// Make sure we can trim this off filenames to get valid relative filepaths
		if !strings.HasSuffix(localPath, string(filepath.Separator)) {
			localPath += string(filepath.Separator)
		}
	}
	// Tar entry names are container paths without the leading slash.
	containerPath = strings.TrimPrefix(containerPath, "/")
	result := make([]archiveEntry, 0)
	err = filepath.Walk(localPath, func(curLocalPath string, info os.FileInfo, err error) error {
		if err != nil {
			return fmt.Errorf("walking %q: %w", curLocalPath, err)
		}
		// Preserve the symlink target so the header records it.
		linkname := ""
		if info.Mode()&os.ModeSymlink != 0 {
			var err error
			linkname, err = os.Readlink(curLocalPath)
			if err != nil {
				return err
			}
		}
		var name string
		//nolint:gocritic
		if localPathIsDir {
			// Name of file in tar should be relative to source directory...
			tmp, err := filepath.Rel(localPath, curLocalPath)
			if err != nil {
				return fmt.Errorf("making %q relative to %q: %w", curLocalPath, localPath, err)
			}
			// ...and live inside `dest`
			name = path.Join(containerPath, filepath.ToSlash(tmp))
		} else if strings.HasSuffix(containerPath, "/") {
			// Single file into a directory destination: keep its base name.
			name = containerPath + filepath.Base(curLocalPath)
		} else {
			name = containerPath
		}
		header, err := archive.FileInfoHeader(name, info, linkname)
		if err != nil {
			// Not all types of files are allowed in a tarball. That's OK.
			// Mimic the Docker behavior and just skip the file.
			return nil
		}
		result = append(result, archiveEntry{
			path:   curLocalPath,
			info:   info,
			header: header,
		})
		return nil
	})
	if err != nil {
		return nil, err
	}
	return result, nil
}
// tarArchive returns a reader producing a tar archive of the given path
// mappings. The archive is built in a background goroutine; any build or
// finalization error is surfaced to the reader through the pipe.
func tarArchive(ops []PathMapping) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		builder := NewArchiveBuilder(pw)
		if err := builder.ArchivePathsIfExist(ops); err != nil {
			_ = pw.CloseWithError(fmt.Errorf("adding files to tar: %w", err))
			return
		}
		// Propagate errors from TarWriter::Close() because it performs a
		// final Flush() and any error means the tar is invalid.
		if err := builder.Close(); err != nil {
			_ = pw.CloseWithError(fmt.Errorf("closing tar: %w", err))
			return
		}
		_ = pw.Close()
	}()
	return pr
}
// dedupeEntries drops duplicate tar names, keeping the LAST occurrence of
// each name while preserving the overall relative order of the survivors.
func dedupeEntries(entries []archiveEntry) []archiveEntry {
	// First pass: remember the index of the final occurrence of each name.
	lastIndex := make(map[string]int, len(entries))
	for i, e := range entries {
		lastIndex[e.header.Name] = i
	}
	// Second pass: keep only the winning occurrences, in order.
	deduped := make([]archiveEntry, 0, len(entries))
	for i, e := range entries {
		if lastIndex[e.header.Name] == i {
			deduped = append(deduped, e)
		}
	}
	return deduped
}
|
package ch07
// Stack
// trap computes the total trapped rain water over the bar heights using a
// monotonically decreasing stack of bar indices.
func trap(height []int) int {
	total := 0
	var stack []int
	for right, h := range height {
		// Pop every bar shorter than the incoming one; each popped bar is
		// the floor of a basin bounded by the new stack top and `right`.
		for len(stack) > 0 && height[stack[len(stack)-1]] < h {
			floor := stack[len(stack)-1]
			stack = stack[:len(stack)-1]
			if len(stack) == 0 {
				break
			}
			left := stack[len(stack)-1]
			// Water level is capped by the shorter of the two walls.
			bounded := h
			if height[left] < h {
				bounded = height[left]
			}
			total += (right - left - 1) * (bounded - height[floor])
		}
		stack = append(stack, right)
	}
	return total
}
// Two Pointer
// trap2 computes the total trapped rain water with converging left/right
// pointers, tracking the running maximum wall seen from each side; the
// side with the lower maximum is the one whose water level is certain.
func trap2(height []int) int {
	water := 0
	if len(height) <= 2 {
		return 0
	}
	l, r := 0, len(height)-1
	maxL, maxR := height[l], height[r]
	for l < r {
		if height[l] > maxL {
			maxL = height[l]
		}
		if height[r] > maxR {
			maxR = height[r]
		}
		if maxL < maxR {
			water += maxL - height[l]
			l++
		} else {
			water += maxR - height[r]
			r--
		}
	}
	return water
}
|
package iafon
import (
"net/http"
)
// Context bundles the per-request state handed to handlers: the response
// writer, the incoming request, extracted path parameters, and a free-form
// map for user data.
type Context struct {
	Rsp   http.ResponseWriter
	Req   *http.Request
	Param map[string]string
	Udata map[string]interface{}
}
|
// Copyright 2020 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package rangefeed
import (
"context"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/kv/kvclient/kvcoord"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/errors"
)
// dbAdapter is an implementation of the kvDB interface using a real *kv.DB.
type dbAdapter struct {
	db              *kv.DB
	distSender      *kvcoord.DistSender
	targetScanBytes int64 // upper bound on bytes returned per scan batch
}

var _ kvDB = (*dbAdapter)(nil)

// TODO(ajwerner): Hook up a memory monitor. Fortunately most users of the
// initial scan are reading scant amounts of data.

// defaultTargetScanBytes was pulled out of thin air. The main reason is that
// this thing is not hooked up to a memory monitor.
const defaultTargetScanBytes = 1 << 19 // 512 KiB
// newDBAdapter constructs a kvDB using a *kv.DB, digging the DistSender out
// of the DB's non-transactional sender chain.
func newDBAdapter(db *kv.DB) (*dbAdapter, error) {
	txnWrapperSender, ok := db.NonTransactionalSender().(*kv.CrossRangeTxnWrapperSender)
	if !ok {
		return nil, errors.Errorf("failed to extract a %T from %T",
			(*kv.CrossRangeTxnWrapperSender)(nil), db.NonTransactionalSender())
	}
	distSender, ok := txnWrapperSender.Wrapped().(*kvcoord.DistSender)
	if !ok {
		return nil, errors.Errorf("failed to extract a %T from %T",
			(*kvcoord.DistSender)(nil), txnWrapperSender.Wrapped())
	}
	return &dbAdapter{
		db:              db,
		distSender:      distSender,
		targetScanBytes: defaultTargetScanBytes,
	}, nil
}
// RangeFeed is part of the kvDB interface. It delegates directly to the
// DistSender's rangefeed, streaming events for span into eventC starting
// at startFrom.
func (dbc *dbAdapter) RangeFeed(
	ctx context.Context,
	span roachpb.Span,
	startFrom hlc.Timestamp,
	withDiff bool,
	eventC chan<- *roachpb.RangeFeedEvent,
) error {
	return dbc.distSender.RangeFeed(ctx, span, startFrom, withDiff, eventC)
}
// Scan is part of the kvDB interface. It reads the whole span as of `asOf`
// inside one txn, paginating with TargetBytes/ResumeSpan so that no single
// batch response exceeds dbc.targetScanBytes, and invokes rowFn per row.
func (dbc *dbAdapter) Scan(
	ctx context.Context, span roachpb.Span, asOf hlc.Timestamp, rowFn func(value roachpb.KeyValue),
) error {
	return dbc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error {
		// Pin every page of the scan to the same timestamp.
		txn.SetFixedTimestamp(ctx, asOf)
		sp := span
		var b kv.Batch
		for {
			b.Header.TargetBytes = dbc.targetScanBytes
			b.Scan(sp.Key, sp.EndKey)
			if err := txn.Run(ctx, &b); err != nil {
				return err
			}
			res := b.Results[0]
			for _, row := range res.Rows {
				rowFn(roachpb.KeyValue{Key: row.Key, Value: *row.Value})
			}
			// A nil ResumeSpan means the size-limited scan is complete.
			if res.ResumeSpan == nil {
				return nil
			}
			// Continue from where the previous batch stopped, with a fresh batch.
			sp = res.ResumeSpanAsValue()
			b = kv.Batch{}
		}
	})
}
|
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package driver
import (
"context"
"fmt"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"reflect"
cloudMock "sigs.k8s.io/aws-fsx-csi-driver/pkg/cloud/mocks"
"sigs.k8s.io/aws-fsx-csi-driver/pkg/driver/internal"
"testing"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/golang/mock/gomock"
driverMocks "sigs.k8s.io/aws-fsx-csi-driver/pkg/driver/mocks"
)
// volumeID is the shared volume ID used by the "operation already
// in-flight" test cases.
var (
	volumeID = "voltest"
)
// TestNodePublishVolume exercises nodeService.NodePublishVolume with a
// table of success cases (normal mount, defaulted mountname, read-only,
// extra mount flags) and failure cases (missing/invalid request fields,
// mounter errors, and an in-flight operation on the same volume).
func TestNodePublishVolume(t *testing.T) {
	var (
		dnsname    = "fs-0a2d0632b5ff567e9.fsx.us-west-2.amazonaws.com"
		mountname  = "random"
		targetPath = "/target/path"
		// stdVolCap is a plain multi-writer mount capability reused by most cases.
		stdVolCap = &csi.VolumeCapability{
			AccessType: &csi.VolumeCapability_Mount{
				Mount: &csi.VolumeCapability_MountVolume{},
			},
			AccessMode: &csi.VolumeCapability_AccessMode{
				Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
			},
		}
	)
	testCases := []struct {
		name     string
		testFunc func(t *testing.T)
	}{
		{
			name: "success: normal",
			testFunc: func(t *testing.T) {
				mockCtl := gomock.NewController(t)
				defer mockCtl.Finish()
				mockMetadata := cloudMock.NewMockMetadataService(mockCtl)
				mockMounter := driverMocks.NewMockMounter(mockCtl)
				driver := &nodeService{
					metadata: mockMetadata,
					mounter:  mockMounter,
					inFlight: internal.NewInFlight(),
				}
				// Lustre source is "<dns>@tcp:/<mountname>".
				source := dnsname + "@tcp:/" + mountname
				ctx := context.Background()
				req := &csi.NodePublishVolumeRequest{
					VolumeId: "volumeId",
					VolumeContext: map[string]string{
						volumeContextDnsName:   dnsname,
						volumeContextMountName: mountname,
					},
					VolumeCapability: stdVolCap,
					TargetPath:       targetPath,
				}
				mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil)
				mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil)
				mockMounter.EXPECT().Mount(gomock.Eq(source), gomock.Eq(targetPath), gomock.Eq("lustre"), gomock.Any()).Return(nil)
				_, err := driver.NodePublishVolume(ctx, req)
				if err != nil {
					t.Fatalf("NodePublishVolume is failed: %v", err)
				}
				mockCtl.Finish()
			},
		},
		{
			name: "success: missing mountname for static provisioning, default 'fsx' used",
			testFunc: func(t *testing.T) {
				mockCtl := gomock.NewController(t)
				defer mockCtl.Finish()
				mockMetadata := cloudMock.NewMockMetadataService(mockCtl)
				mockMounter := driverMocks.NewMockMounter(mockCtl)
				driver := &nodeService{
					metadata: mockMetadata,
					mounter:  mockMounter,
					inFlight: internal.NewInFlight(),
				}
				// With no mountname in the volume context, "fsx" is expected.
				source := dnsname + "@tcp:/fsx"
				ctx := context.Background()
				req := &csi.NodePublishVolumeRequest{
					VolumeId: "volumeId",
					VolumeContext: map[string]string{
						volumeContextDnsName: dnsname,
					},
					VolumeCapability: stdVolCap,
					TargetPath:       targetPath,
				}
				mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil)
				mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil)
				mockMounter.EXPECT().Mount(gomock.Eq(source), gomock.Eq(targetPath), gomock.Eq("lustre"), gomock.Any()).Return(nil)
				_, err := driver.NodePublishVolume(ctx, req)
				if err != nil {
					t.Fatalf("NodePublishVolume is failed: %v", err)
				}
				mockCtl.Finish()
			},
		},
		{
			name: "success: normal with read only mount",
			testFunc: func(t *testing.T) {
				mockCtl := gomock.NewController(t)
				defer mockCtl.Finish()
				mockMetadata := cloudMock.NewMockMetadataService(mockCtl)
				mockMounter := driverMocks.NewMockMounter(mockCtl)
				driver := &nodeService{
					metadata: mockMetadata,
					mounter:  mockMounter,
					inFlight: internal.NewInFlight(),
				}
				source := dnsname + "@tcp:/" + mountname
				ctx := context.Background()
				req := &csi.NodePublishVolumeRequest{
					VolumeId: "volumeId",
					VolumeContext: map[string]string{
						volumeContextDnsName:   dnsname,
						volumeContextMountName: mountname,
					},
					VolumeCapability: stdVolCap,
					TargetPath:       targetPath,
					Readonly:         true,
				}
				mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil)
				mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil)
				// Readonly must translate into the "ro" mount option.
				mockMounter.EXPECT().Mount(gomock.Eq(source), gomock.Eq(targetPath), gomock.Eq("lustre"), gomock.Eq([]string{"ro"})).Return(nil)
				_, err := driver.NodePublishVolume(ctx, req)
				if err != nil {
					t.Fatalf("NodePublishVolume is failed: %v", err)
				}
				mockCtl.Finish()
			},
		},
		{
			name: "success: normal with flock mount options",
			testFunc: func(t *testing.T) {
				mockCtl := gomock.NewController(t)
				defer mockCtl.Finish()
				mockMetadata := cloudMock.NewMockMetadataService(mockCtl)
				mockMounter := driverMocks.NewMockMounter(mockCtl)
				driver := &nodeService{
					metadata: mockMetadata,
					mounter:  mockMounter,
					inFlight: internal.NewInFlight(),
				}
				source := dnsname + "@tcp:/" + mountname
				ctx := context.Background()
				req := &csi.NodePublishVolumeRequest{
					VolumeId: "volumeId",
					VolumeContext: map[string]string{
						volumeContextDnsName:   dnsname,
						volumeContextMountName: mountname,
					},
					VolumeCapability: &csi.VolumeCapability{
						AccessType: &csi.VolumeCapability_Mount{
							Mount: &csi.VolumeCapability_MountVolume{
								MountFlags: []string{"flock"},
							},
						},
						AccessMode: &csi.VolumeCapability_AccessMode{
							Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
						},
					},
					TargetPath: targetPath,
				}
				mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil)
				mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil)
				// Capability mount flags must be passed through to the mounter.
				mockMounter.EXPECT().Mount(gomock.Eq(source), gomock.Eq(targetPath), gomock.Eq("lustre"), gomock.Eq([]string{"flock"})).Return(nil)
				_, err := driver.NodePublishVolume(ctx, req)
				if err != nil {
					t.Fatalf("NodePublishVolume is failed: %v", err)
				}
				mockCtl.Finish()
			},
		},
		{
			name: "fail: missing dns name",
			testFunc: func(t *testing.T) {
				mockCtl := gomock.NewController(t)
				defer mockCtl.Finish()
				mockMetadata := cloudMock.NewMockMetadataService(mockCtl)
				mockMounter := driverMocks.NewMockMounter(mockCtl)
				driver := &nodeService{
					metadata: mockMetadata,
					mounter:  mockMounter,
					inFlight: internal.NewInFlight(),
				}
				ctx := context.Background()
				req := &csi.NodePublishVolumeRequest{
					VolumeId: "volumeId",
					VolumeContext: map[string]string{
						volumeContextMountName: mountname,
					},
					VolumeCapability: stdVolCap,
					TargetPath:       targetPath,
				}
				_, err := driver.NodePublishVolume(ctx, req)
				if err == nil {
					t.Fatalf("NodePublishVolume is not failed: %v", err)
				}
				mockCtl.Finish()
			},
		},
		{
			name: "fail: missing target path",
			testFunc: func(t *testing.T) {
				mockCtl := gomock.NewController(t)
				defer mockCtl.Finish()
				mockMetadata := cloudMock.NewMockMetadataService(mockCtl)
				mockMounter := driverMocks.NewMockMounter(mockCtl)
				driver := &nodeService{
					metadata: mockMetadata,
					mounter:  mockMounter,
					inFlight: internal.NewInFlight(),
				}
				ctx := context.Background()
				req := &csi.NodePublishVolumeRequest{
					VolumeId: "volumeId",
					VolumeContext: map[string]string{
						volumeContextDnsName:   dnsname,
						volumeContextMountName: mountname,
					},
					VolumeCapability: stdVolCap,
				}
				_, err := driver.NodePublishVolume(ctx, req)
				if err == nil {
					t.Fatalf("NodePublishVolume is not failed: %v", err)
				}
				mockCtl.Finish()
			},
		},
		{
			name: "fail: missing volume capability",
			testFunc: func(t *testing.T) {
				mockCtl := gomock.NewController(t)
				defer mockCtl.Finish()
				mockMetadata := cloudMock.NewMockMetadataService(mockCtl)
				mockMounter := driverMocks.NewMockMounter(mockCtl)
				driver := &nodeService{
					metadata: mockMetadata,
					mounter:  mockMounter,
					inFlight: internal.NewInFlight(),
				}
				ctx := context.Background()
				req := &csi.NodePublishVolumeRequest{
					VolumeId: "volumeId",
					VolumeContext: map[string]string{
						volumeContextDnsName:   dnsname,
						volumeContextMountName: mountname,
					},
					TargetPath: targetPath,
				}
				_, err := driver.NodePublishVolume(ctx, req)
				if err == nil {
					t.Fatalf("NodePublishVolume is not failed: %v", err)
				}
				mockCtl.Finish()
			},
		},
		{
			name: "fail: unsupported volume capability",
			testFunc: func(t *testing.T) {
				mockCtl := gomock.NewController(t)
				defer mockCtl.Finish()
				mockMetadata := cloudMock.NewMockMetadataService(mockCtl)
				mockMounter := driverMocks.NewMockMounter(mockCtl)
				driver := &nodeService{
					metadata: mockMetadata,
					mounter:  mockMounter,
					inFlight: internal.NewInFlight(),
				}
				ctx := context.Background()
				req := &csi.NodePublishVolumeRequest{
					VolumeId: "volumeId",
					VolumeContext: map[string]string{
						volumeContextDnsName:   dnsname,
						volumeContextMountName: mountname,
					},
					VolumeCapability: &csi.VolumeCapability{
						AccessType: &csi.VolumeCapability_Mount{
							Mount: &csi.VolumeCapability_MountVolume{},
						},
						AccessMode: &csi.VolumeCapability_AccessMode{
							Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY,
						},
					},
					TargetPath: targetPath,
				}
				_, err := driver.NodePublishVolume(ctx, req)
				if err == nil {
					t.Fatalf("NodePublishVolume is not failed: %v", err)
				}
				mockCtl.Finish()
			},
		},
		{
			name: "fail: mounter failed to MakeDir",
			testFunc: func(t *testing.T) {
				mockCtl := gomock.NewController(t)
				defer mockCtl.Finish()
				mockMetadata := cloudMock.NewMockMetadataService(mockCtl)
				mockMounter := driverMocks.NewMockMounter(mockCtl)
				driver := &nodeService{
					metadata: mockMetadata,
					mounter:  mockMounter,
					inFlight: internal.NewInFlight(),
				}
				ctx := context.Background()
				req := &csi.NodePublishVolumeRequest{
					VolumeId: "volumeId",
					VolumeContext: map[string]string{
						volumeContextDnsName:   dnsname,
						volumeContextMountName: mountname,
					},
					VolumeCapability: stdVolCap,
					TargetPath:       targetPath,
				}
				err := fmt.Errorf("failed to MakeDir")
				mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(err)
				_, err = driver.NodePublishVolume(ctx, req)
				if err == nil {
					t.Fatalf("NodePublishVolume is not failed: %v", err)
				}
				mockCtl.Finish()
			},
		},
		{
			name: "fail: mounter failed to Mount",
			testFunc: func(t *testing.T) {
				mockCtl := gomock.NewController(t)
				defer mockCtl.Finish()
				mockMetadata := cloudMock.NewMockMetadataService(mockCtl)
				mockMounter := driverMocks.NewMockMounter(mockCtl)
				driver := &nodeService{
					metadata: mockMetadata,
					mounter:  mockMounter,
					inFlight: internal.NewInFlight(),
				}
				ctx := context.Background()
				req := &csi.NodePublishVolumeRequest{
					VolumeId: "volumeId",
					VolumeContext: map[string]string{
						volumeContextDnsName:   dnsname,
						volumeContextMountName: mountname,
					},
					VolumeCapability: stdVolCap,
					TargetPath:       targetPath,
				}
				source := dnsname + "@tcp:/" + mountname
				err := fmt.Errorf("failed to Mount")
				mockMounter.EXPECT().MakeDir(gomock.Eq(targetPath)).Return(nil)
				mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil)
				mockMounter.EXPECT().Mount(gomock.Eq(source), gomock.Eq(targetPath), gomock.Eq("lustre"), gomock.Any()).Return(err)
				_, err = driver.NodePublishVolume(ctx, req)
				if err == nil {
					t.Fatalf("NodePublishVolume is not failed: %v", err)
				}
				mockCtl.Finish()
			},
		},
		{
			name: "fail another operation in-flight on given volumeId",
			testFunc: func(t *testing.T) {
				mockCtl := gomock.NewController(t)
				defer mockCtl.Finish()
				mockMetadata := cloudMock.NewMockMetadataService(mockCtl)
				mockMounter := driverMocks.NewMockMounter(mockCtl)
				awsDriver := &nodeService{
					metadata: mockMetadata,
					mounter:  mockMounter,
					inFlight: internal.NewInFlight(),
				}
				req := &csi.NodePublishVolumeRequest{
					VolumeId: volumeID,
					VolumeContext: map[string]string{
						volumeContextDnsName:   dnsname,
						volumeContextMountName: mountname,
					},
					VolumeCapability: stdVolCap,
					TargetPath:       targetPath,
				}
				// Pre-mark the volume as busy so the call must abort.
				awsDriver.inFlight.Insert(volumeID)
				_, err := awsDriver.NodePublishVolume(context.TODO(), req)
				expectErr(t, err, codes.Aborted)
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, tc.testFunc)
	}
}
// TestNodeUnpublishVolume exercises nodeService.NodeUnpublishVolume:
// successful unmount, already-unmounted target, missing target path,
// mounter failure, and an in-flight operation on the same volume.
func TestNodeUnpublishVolume(t *testing.T) {
	var (
		targetPath = "/target/path"
	)
	testCases := []struct {
		name     string
		testFunc func(t *testing.T)
	}{
		{
			name: "success: normal",
			testFunc: func(t *testing.T) {
				mockCtl := gomock.NewController(t)
				defer mockCtl.Finish()
				mockMetadata := cloudMock.NewMockMetadataService(mockCtl)
				mockMounter := driverMocks.NewMockMounter(mockCtl)
				driver := &nodeService{
					metadata: mockMetadata,
					mounter:  mockMounter,
					inFlight: internal.NewInFlight(),
				}
				ctx := context.Background()
				req := &csi.NodeUnpublishVolumeRequest{
					VolumeId:   "volumeId",
					TargetPath: targetPath,
				}
				// Target is a mount point, so Unmount must be called.
				mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(false, nil)
				mockMounter.EXPECT().Unmount(gomock.Eq(targetPath)).Return(nil)
				_, err := driver.NodeUnpublishVolume(ctx, req)
				if err != nil {
					t.Fatalf("NodeUnpublishVolume is failed: %v", err)
				}
			},
		},
		{
			name: "success: target already unmounted",
			testFunc: func(t *testing.T) {
				mockCtl := gomock.NewController(t)
				defer mockCtl.Finish()
				mockMetadata := cloudMock.NewMockMetadataService(mockCtl)
				mockMounter := driverMocks.NewMockMounter(mockCtl)
				driver := &nodeService{
					metadata: mockMetadata,
					mounter:  mockMounter,
					inFlight: internal.NewInFlight(),
				}
				ctx := context.Background()
				req := &csi.NodeUnpublishVolumeRequest{
					VolumeId:   "volumeId",
					TargetPath: targetPath,
				}
				// Not a mount point: no Unmount expectation, call still succeeds.
				mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(true, nil)
				_, err := driver.NodeUnpublishVolume(ctx, req)
				if err != nil {
					t.Fatalf("NodeUnpublishVolume is failed: %v", err)
				}
			},
		},
		{
			name: "fail: targetPath is missing",
			testFunc: func(t *testing.T) {
				mockCtl := gomock.NewController(t)
				defer mockCtl.Finish()
				mockMetadata := cloudMock.NewMockMetadataService(mockCtl)
				mockMounter := driverMocks.NewMockMounter(mockCtl)
				driver := &nodeService{
					metadata: mockMetadata,
					mounter:  mockMounter,
					inFlight: internal.NewInFlight(),
				}
				ctx := context.Background()
				req := &csi.NodeUnpublishVolumeRequest{
					VolumeId: "volumeId",
				}
				_, err := driver.NodeUnpublishVolume(ctx, req)
				if err == nil {
					t.Fatalf("NodeUnpublishVolume is not failed: %v", err)
				}
			},
		},
		{
			name: "fail: mounter failed to umount",
			testFunc: func(t *testing.T) {
				mockCtl := gomock.NewController(t)
				defer mockCtl.Finish()
				mockMetadata := cloudMock.NewMockMetadataService(mockCtl)
				mockMounter := driverMocks.NewMockMounter(mockCtl)
				driver := &nodeService{
					metadata: mockMetadata,
					mounter:  mockMounter,
					inFlight: internal.NewInFlight(),
				}
				ctx := context.Background()
				req := &csi.NodeUnpublishVolumeRequest{
					VolumeId:   "volumeId",
					TargetPath: targetPath,
				}
				mockMounter.EXPECT().IsLikelyNotMountPoint(gomock.Eq(targetPath)).Return(false, nil)
				mountErr := fmt.Errorf("Unmount failed")
				mockMounter.EXPECT().Unmount(gomock.Eq(targetPath)).Return(mountErr)
				_, err := driver.NodeUnpublishVolume(ctx, req)
				if err == nil {
					t.Fatalf("NodeUnpublishVolume is not failed: %v", err)
				}
			},
		},
		{
			name: "fail another operation in-flight on given volumeId",
			testFunc: func(t *testing.T) {
				mockCtl := gomock.NewController(t)
				defer mockCtl.Finish()
				mockMetadata := cloudMock.NewMockMetadataService(mockCtl)
				mockMounter := driverMocks.NewMockMounter(mockCtl)
				awsDriver := &nodeService{
					metadata: mockMetadata,
					mounter:  mockMounter,
					inFlight: internal.NewInFlight(),
				}
				req := &csi.NodeUnpublishVolumeRequest{
					VolumeId:   volumeID,
					TargetPath: targetPath,
				}
				// Pre-mark the volume as busy so the call must abort.
				awsDriver.inFlight.Insert(volumeID)
				_, err := awsDriver.NodeUnpublishVolume(context.TODO(), req)
				expectErr(t, err, codes.Aborted)
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, tc.testFunc)
	}
}
// TestRemoveNotReadyTaint drives removeNotReadyTaint through its branches:
// no CSI_NODE_NAME set, k8s client construction failure, node lookup
// failure, no matching taints, patch failure, and a successful removal.
// Each case's setup returns the k8s-client getter handed to the function.
func TestRemoveNotReadyTaint(t *testing.T) {
	nodeName := "test-node-123"
	testCases := []struct {
		name      string
		setup     func(t *testing.T, mockCtl *gomock.Controller) func() (kubernetes.Interface, error)
		expResult error
	}{
		{
			name: "missing CSI_NODE_NAME",
			setup: func(t *testing.T, mockCtl *gomock.Controller) func() (kubernetes.Interface, error) {
				// Without the env var the getter must never be invoked.
				return func() (kubernetes.Interface, error) {
					t.Fatalf("Unexpected call to k8s client getter")
					return nil, nil
				}
			},
			expResult: nil,
		},
		{
			name: "failed to setup k8s client",
			setup: func(t *testing.T, mockCtl *gomock.Controller) func() (kubernetes.Interface, error) {
				t.Setenv("CSI_NODE_NAME", nodeName)
				// A getter error is tolerated (best-effort taint removal).
				return func() (kubernetes.Interface, error) {
					return nil, fmt.Errorf("Failed setup!")
				}
			},
			expResult: nil,
		},
		{
			name: "failed to get node",
			setup: func(t *testing.T, mockCtl *gomock.Controller) func() (kubernetes.Interface, error) {
				t.Setenv("CSI_NODE_NAME", nodeName)
				getNodeMock, _ := getNodeMock(mockCtl, nodeName, nil, fmt.Errorf("Failed to get node!"))
				return func() (kubernetes.Interface, error) {
					return getNodeMock, nil
				}
			},
			expResult: fmt.Errorf("Failed to get node!"),
		},
		{
			name: "no taints to remove",
			setup: func(t *testing.T, mockCtl *gomock.Controller) func() (kubernetes.Interface, error) {
				t.Setenv("CSI_NODE_NAME", nodeName)
				getNodeMock, _ := getNodeMock(mockCtl, nodeName, &corev1.Node{}, nil)
				return func() (kubernetes.Interface, error) {
					return getNodeMock, nil
				}
			},
			expResult: nil,
		},
		{
			name: "failed to patch node",
			setup: func(t *testing.T, mockCtl *gomock.Controller) func() (kubernetes.Interface, error) {
				t.Setenv("CSI_NODE_NAME", nodeName)
				getNodeMock, mockNode := getNodeMock(mockCtl, nodeName, &corev1.Node{
					Spec: corev1.NodeSpec{
						Taints: []corev1.Taint{
							{
								Key:    AgentNotReadyNodeTaintKey,
								Effect: "NoExecute",
							},
						},
					},
				}, nil)
				mockNode.EXPECT().Patch(gomock.Any(), gomock.Eq(nodeName), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, fmt.Errorf("Failed to patch node!"))
				return func() (kubernetes.Interface, error) {
					return getNodeMock, nil
				}
			},
			expResult: fmt.Errorf("Failed to patch node!"),
		},
		{
			name: "success",
			setup: func(t *testing.T, mockCtl *gomock.Controller) func() (kubernetes.Interface, error) {
				t.Setenv("CSI_NODE_NAME", nodeName)
				getNodeMock, mockNode := getNodeMock(mockCtl, nodeName, &corev1.Node{
					Spec: corev1.NodeSpec{
						Taints: []corev1.Taint{
							{
								Key:    AgentNotReadyNodeTaintKey,
								Effect: "NoSchedule",
							},
						},
					},
				}, nil)
				mockNode.EXPECT().Patch(gomock.Any(), gomock.Eq(nodeName), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil)
				return func() (kubernetes.Interface, error) {
					return getNodeMock, nil
				}
			},
			expResult: nil,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			mockCtl := gomock.NewController(t)
			defer mockCtl.Finish()
			k8sClientGetter := tc.setup(t, mockCtl)
			result := removeNotReadyTaint(k8sClientGetter)
			// Errors are compared structurally (message equality) on purpose.
			if !reflect.DeepEqual(result, tc.expResult) {
				t.Fatalf("Expected result `%v`, got result `%v`", tc.expResult, result)
			}
		})
	}
}
// getNodeMock wires up a mocked Kubernetes client whose
// CoreV1().Nodes().Get(nodeName) returns the provided node/error pair.
// The NodeInterface mock is returned too so callers can add Patch
// expectations.
func getNodeMock(mockCtl *gomock.Controller, nodeName string, returnNode *corev1.Node, returnError error) (kubernetes.Interface, *driverMocks.MockNodeInterface) {
	mockClient := driverMocks.NewMockKubernetesClient(mockCtl)
	mockCoreV1 := driverMocks.NewMockCoreV1Interface(mockCtl)
	mockNode := driverMocks.NewMockNodeInterface(mockCtl)
	mockClient.EXPECT().CoreV1().Return(mockCoreV1).MinTimes(1)
	mockCoreV1.EXPECT().Nodes().Return(mockNode).MinTimes(1)
	mockNode.EXPECT().Get(gomock.Any(), gomock.Eq(nodeName), gomock.Any()).Return(returnNode, returnError).MinTimes(1)
	return mockClient, mockNode
}
// expectErr asserts that actualErr is a gRPC status error carrying
// expectedCode; it fails the test otherwise.
func expectErr(t *testing.T, actualErr error, expectedCode codes.Code) {
	if actualErr == nil {
		t.Fatalf("Expect error but got no error")
	}
	// Use `st`, not `status`, so the grpc `status` package is not shadowed.
	st, ok := status.FromError(actualErr)
	if !ok {
		t.Fatalf("Failed to get error status code from error: %v", actualErr)
	}
	if st.Code() != expectedCode {
		t.Fatalf("Expected error code %d, got %d message %s", expectedCode, st.Code(), st.Message())
	}
}
|
package etherscan
import (
"github.com/gin-gonic/gin"
"github.com/trustwallet/blockatlas/pkg/logger"
"net/http"
)
// getBalance writes the coin balance of the address in the URL path as JSON.
//
// NOTE(review): token-specific balances are not implemented — the previous
// code read a "token" query parameter but both branches called the identical
// GetBalance (GetTokenBalance was commented out). The duplicated branch is
// removed; the "token" parameter is ignored until token support lands.
func (p *Platform) getBalance(c *gin.Context) {
	address := c.Param("address")
	balance, err := p.client.GetBalance(address)
	if apiError(c, err) {
		return
	}
	logger.Info("Balance", balance)
	c.JSON(http.StatusOK, &balance)
}
|
package utils
import (
"os"
"strings"
jsoniter "github.com/json-iterator/go"
)
// PrintAsJSON prints the provided value as an indented JSON document to the
// console. (The previous comment said "YAML"; the output is JSON.)
func PrintAsJSON(data any) error {
	j, err := ConvertToJSON(data)
	if err != nil {
		return err
	}
	PrintMessage(j)
	return nil
}
// WriteToFileAsJSON converts the provided value to indented JSON and writes
// it to the specified file with the given permissions. (The previous comment
// said "YAML"; the output is JSON.)
func WriteToFileAsJSON(filePath string, data any, fileMode os.FileMode) error {
	j, err := ConvertToJSON(data)
	if err != nil {
		return err
	}
	// Return the write result directly instead of re-plumbing the error.
	return os.WriteFile(filePath, []byte(j), fileMode)
}
// ConvertToJSON converts the provided value to an indented, HTML-escaped
// JSON string with sorted map keys.
func ConvertToJSON(data any) (string, error) {
	cfg := jsoniter.Config{
		EscapeHTML:                    true,
		ObjectFieldMustBeSimpleString: false,
		SortMapKeys:                   true,
		ValidateJsonRawMessage:        true,
	}
	// Three-space indentation, matching the project's pretty-print style.
	indent := strings.Repeat(" ", 3)
	out, err := cfg.Froze().MarshalIndent(data, "", indent)
	if err != nil {
		return "", err
	}
	return string(out), nil
}
// ConvertToJSONFast converts the provided value to a compact JSON-encoded
// string using a fast configuration (6-digit floats, no indentation).
func ConvertToJSONFast(data any) (string, error) {
	cfg := jsoniter.Config{
		EscapeHTML:                    false,
		MarshalFloatWith6Digits:       true,
		ObjectFieldMustBeSimpleString: true,
		SortMapKeys:                   true,
		ValidateJsonRawMessage:        true,
	}
	out, err := cfg.Froze().MarshalToString(data)
	if err != nil {
		return "", err
	}
	return out, nil
}
// ConvertFromJSON converts the provided JSON-encoded string into generic Go
// data types (maps, slices, strings, numbers).
func ConvertFromJSON(jsonString string) (any, error) {
	var json = jsoniter.Config{
		EscapeHTML:                    false,
		MarshalFloatWith6Digits:       true,
		ObjectFieldMustBeSimpleString: true,
		SortMapKeys:                   true,
		ValidateJsonRawMessage:        true,
	}
	var data any
	err := json.Froze().Unmarshal([]byte(jsonString), &data)
	if err != nil {
		// Return nil on failure; the previous "" made the any result non-nil
		// even when the error was set, which is misleading to callers.
		return nil, err
	}
	return data, nil
}
|
package main
import "fmt"
// Action records a single move in a game: the mark a player placed and the
// board cell (row, col) it was placed in.
type Action struct {
	mark     rune
	row, col int
}

// String renders the action for debugging, e.g. `'X' in (1, 2)`.
func (a Action) String() string {
	cell := fmt.Sprintf("(%d, %d)", a.row, a.col)
	return fmt.Sprintf("%q in %s", a.mark, cell)
}
|
package main
import (
"flag"
"log"
"os"
"github.com/gongo/go-airplay"
)
var opts struct {
position float64
showHelpFlag bool
}
// init registers and parses the command-line flags before main runs, printing
// usage and exiting for -h, and rejecting negative scrub positions.
func init() {
	flag.Float64Var(&opts.position, "p", 0.0, "Number of seconds to move (second)")
	flag.BoolVar(&opts.showHelpFlag, "h", false, "Show this message")
	flag.Parse()
	if opts.showHelpFlag {
		flag.Usage()
		os.Exit(0)
	}
	// Scrubbing to a negative position makes no sense; fail fast.
	if opts.position < 0 {
		log.Fatal("options: position should not negative")
	}
}
// main connects to an AirPlay device and scrubs playback to the position
// given by the -p flag (seconds).
func main() {
	client, err := airplay.NewClient()
	if err != nil {
		// The error was previously discarded with `_`, which would panic on
		// a nil client below; report it instead.
		log.Fatalf("airplay: %v", err)
	}
	client.Scrub(opts.position)
}
|
package main
import (
"fmt"
// "fmt"
"github.com/gorilla/websocket"
"log"
"math/rand"
"net/http"
"regexp"
"strings"
"time"
)
// Time layout strings (Go reference time 2006-01-02 15:04:05).
const (
	f_date        = "2006-01-02"          // long date layout
	f_shortdate   = "06-01-02"            // short date layout
	f_times       = "15:04:05"            // long time layout
	f_shorttime   = "15:04"               // short time layout
	f_datetime    = "2006-01-02 15:04:05" // date-time layout
	f_newdatetime = "2006/01/02 15~04~05" // date-time layout with non-standard separators
	f_newtime     = "15~04~05"            // time layout with non-standard separators
)
const (
	// writeWait is the time allowed to write a message to the peer.
	writeWait = 10 * time.Second
	// pongWait is the time allowed to read the next pong message from the peer.
	pongWait = 60 * time.Second
	// pingPeriod is how often pings are sent to the peer; must be less than pongWait.
	pingPeriod = (pongWait * 9) / 10
	// maxMessageSize is the maximum message size allowed from the peer.
	maxMessageSize = 512
	// authToken is the shared secret a client presents to authenticate.
	authToken = "123456"
)
// upgrader holds the websocket handshake configuration (I/O buffer sizes).
var upgrader = websocket.Upgrader{
	ReadBufferSize:  1024,
	WriteBufferSize: 1024,
}
// connection is a middleman between the websocket connection and the hub.
type connection struct {
	// ws is the underlying websocket connection.
	ws *websocket.Conn
	// send buffers outbound messages destined for this client.
	send chan []byte
	// auth records whether this peer has been authenticated.
	auth bool
	// username is the peer's display name (may be auto-generated by readPump).
	username []byte
	// createip is the remote IP recorded when a name was auto-generated.
	createip []byte
}
// readPump pumps messages from the websocket connection to the hub.
//
// It runs one read loop per client: each frame is parsed, the client is
// (lazily) authenticated or given a random username, and the message is
// broadcast through the hub. The connection is unregistered and closed when
// the loop exits.
func (c *connection) readPump() {
	defer func() {
		h.unregister <- c
		c.ws.Close()
	}()
	c.ws.SetReadLimit(maxMessageSize)
	c.ws.SetReadDeadline(time.Now().Add(pongWait))
	c.ws.SetPongHandler(func(string) error { c.ws.SetReadDeadline(time.Now().Add(pongWait)); return nil })
	for {
		_, message, err := c.ws.ReadMessage()
		if err != nil {
			break
		}
		mtype := 2 // user message
		text := string(message)
		// Extract "=value" fragments; exactly two are treated as a login
		// attempt of the form ...=<user>&...=<token>.
		reg := regexp.MustCompile(`=[^&]+`)
		s := reg.FindAllString(text, -1)
		if len(s) == 2 {
			fromuser := strings.Replace(s[0], "=", "", 1)
			token := strings.Replace(s[1], "=", "", 1)
			if token == authToken {
				c.username = []byte(fromuser)
				c.auth = true
				message = []byte(fromuser + " join")
				mtype = 1 // system message
			}
		}
		// Anonymous client: assign a random 5-char username, remember the
		// remote IP, and pass the text through as a normal user message.
		if c.username == nil {
			remoteIp := strings.Split(c.ws.RemoteAddr().String(), ":")[0]
			c.username = c.GetRandomString(5)
			c.createip = []byte(remoteIp)
			c.auth = true
			message = []byte(text)
			mtype = 2
		}
		// Messages default to everyone; a leading "@name " addresses one user.
		touser := []byte("all")
		reg2 := regexp.MustCompile(`^@.*? `)
		s2 := reg2.FindAllString(text, -1)
		if len(s2) == 1 {
			s2[0] = strings.Replace(s2[0], "@", "", 1)
			s2[0] = strings.Replace(s2[0], " ", "", 1)
			touser = []byte(s2[0])
			fmt.Println("touser=" + string(touser))
		}
		if c.auth == true {
			// t := time.Now().Unix()
			t := time.Now().Format(f_times)
			h.broadcast <- &tmessage{content: message, fromuser: c.username, touser: touser, mtype: mtype, createtime: t}
		}
	}
}
// write sends a single frame of the given message type and payload, applying
// the standard write deadline first.
func (c *connection) write(messageType int, payload []byte) error {
	deadline := time.Now().Add(writeWait)
	c.ws.SetWriteDeadline(deadline)
	return c.ws.WriteMessage(messageType, payload)
}
// writePump pumps messages from the hub to the websocket connection.
//
// It drains the client's send channel, emitting a close frame when the
// channel is closed, and pings the peer on every tick to keep the
// connection alive. The ticker and socket are released on exit.
func (c *connection) writePump() {
	pinger := time.NewTicker(pingPeriod)
	defer func() {
		pinger.Stop()
		c.ws.Close()
	}()
	for {
		select {
		case msg, open := <-c.send:
			if !open {
				// Hub closed the channel: tell the peer we are done.
				c.write(websocket.CloseMessage, []byte{})
				return
			}
			if err := c.write(websocket.TextMessage, msg); err != nil {
				return
			}
		case <-pinger.C:
			if err := c.write(websocket.PingMessage, []byte{}); err != nil {
				return
			}
		}
	}
}
// GetRandomString returns num random alphanumeric characters, used to name
// anonymous clients.
//
// NOTE(review): the source is seeded from UnixNano on every call, so two
// calls in the same nanosecond produce identical strings; consider a shared
// package-level source if collisions matter.
func (c *connection) GetRandomString(num int) []byte {
	const alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	// Preallocate instead of growing via repeated append.
	result := make([]byte, num)
	for i := range result {
		result[i] = alphabet[r.Intn(len(alphabet))]
	}
	return result
}
// serveWs handles websocket requests from the peer: it upgrades the HTTP
// connection, registers the new client with the hub, and starts the
// write/read pumps.
func serveWs(w http.ResponseWriter, r *http.Request) {
	ws, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		log.Println(err)
		return
	}
	// New clients start unauthenticated with a buffered outbound queue.
	c := &connection{send: make(chan []byte, 256), ws: ws, auth: false}
	// Every connected client is registered with the hub.
	h.register <- c
	go c.writePump() // hub -> client
	c.readPump()     // client -> hub; blocks until the connection drops
}
package main
import (
"fmt"
"math"
)
// min is the smallest candidate (and divisor) considered by the generator.
const min = 2

func main() {
	primeNum := []int{}
	// The channel is how main() and the goroutines communicate: results are
	// delivered through it instead of being returned.
	channel := make(chan int)
	// Each goroutine counts primes asynchronously; main() does not wait for
	// the goroutines themselves, only for the channel receives below.
	// NOTE(review): primeNum is passed by value — the appends inside
	// generatePrimeNumber grow private copies, so this slice stays empty
	// here; only the counts sent on the channel are observed.
	go generatePrimeNumber(primeNum, min, 10000000, channel)
	go generatePrimeNumber(primeNum, min, 10000000, channel)
	result := <-channel // blocks until the first goroutine sends its count
	fmt.Println(result)
	fmt.Println(<-channel) // and again for the second goroutine
	// time.Sleep(time.Second * 25)
}
// generatePrimeNumber appends every prime in [start, start+endPoint) to its
// private copy of primeNum and sends the resulting count on channel.
//
// Bug fixed: the trial-division bound used to be j < int(math.Sqrt(end)),
// which misses the divisor for perfect squares of primes near sqrt(end)
// (e.g. with end=10 the old code reported 9 as prime). Each candidate is now
// tested up to its own square root, inclusive.
func generatePrimeNumber(primeNum []int, start, endPoint int, channel chan int) {
	end := start + endPoint
	for i := start; i < end; i++ {
		// 0 and 1 are not prime; everything else is prime until a divisor
		// is found.
		isPrime := i > 1
		limit := int(math.Sqrt(float64(i)))
		for j := 2; j <= limit; j++ {
			if i%j == 0 {
				isPrime = false
				break
			}
		}
		if isPrime {
			primeNum = append(primeNum, i)
		}
	}
	channel <- len(primeNum)
}
|
package main
// Notes on map capacity:
//  1. make(map[K]V, n) accepts a size hint as its second argument; it only
//     pre-sizes the map and is otherwise ignored.
//  2. cap() applies to arrays, array pointers, slices and channels — not to
//     maps. Calling cap() on a map does not compile; use len() to get the
//     number of stored elements.
func main() {
	m := make(map[string]int, 2)
	// cap(m) is a compile error ("invalid argument m for cap");
	// len reports the element count instead.
	_ = len(m)
}
|
package main
import (
"fmt"
)
// Point is a geographic position expressed as latitude/longitude degrees.
type Point struct {
	Lat  float64
	Long float64
}
// Coordinate is a plain 2-D point in X/Y form, mirroring Point.
type Coordinate struct {
	X float64
	Y float64
}
func main() {
	// Two nearby geographic points.
	first := Point{Lat: 35.677568, Long: 139.717064}
	second := Point{Lat: 35.677542, Long: 139.716965}
	fmt.Println(direction(first, second))
	fmt.Println(direction(second, first))
	fmt.Println(vectorProduct(first, second))
	fmt.Println(vectorProduct(second, first))
	fmt.Println()
	// The same values expressed as plain X/Y coordinates.
	p := Coordinate{X: 35.677568, Y: 139.717064}
	q := Coordinate{X: 35.677542, Y: 139.716965}
	fmt.Println(_direction(p, q))
	fmt.Println(_direction(q, p))
	fmt.Println(_vectorProduct(p, q))
	fmt.Println(_vectorProduct(q, p))
	fmt.Println()
}
// direction returns the slope ΔLong/ΔLat from before to after.
// The result is ±Inf or NaN when the latitudes are equal.
func direction(before Point, after Point) float64 {
	dLat := after.Lat - before.Lat
	dLong := after.Long - before.Long
	return dLong / dLat
}

// _direction is the Coordinate counterpart of direction (ΔY/ΔX).
func _direction(before Coordinate, after Coordinate) float64 {
	dX := after.X - before.X
	dY := after.Y - before.Y
	return dY / dX
}

// vectorProduct returns the 2-D cross product of the two position vectors.
func vectorProduct(start Point, end Point) float64 {
	return start.Lat*end.Long - start.Long*end.Lat
}

// _vectorProduct is the Coordinate counterpart of vectorProduct.
func _vectorProduct(start Coordinate, end Coordinate) float64 {
	return start.X*end.Y - start.Y*end.X
}
|
package controllers
import (
"html/template"
"net/http"
"appengine"
"appengine/datastore"
"time"
"fmt"
"github.com/gorilla/mux"
"github.com/gorilla/securecookie"
"models"
"tools"
)
// wikiSecret is the HMAC key used to sign secure cookies; it is regenerated
// on every process start, so a restart invalidates existing cookies.
var wikiSecret []byte = securecookie.GenerateRandomKey(32)

// wikiUserIdCookie is the signed-cookie codec, lazily created on signup/login.
var wikiUserIdCookie *securecookie.SecureCookie

// currentUser caches the most recently logged-in user.
// NOTE(review): package-level mutable state shared by all requests — with
// concurrent users the handlers below would clobber each other's session;
// verify this is only intended for single-user use.
var currentUser *models.User
// NavItem is one entry in the navigation bar rendered by the templates.
type NavItem struct {
	URL  string
	Name string
}
// wikiFrontPage serves the wiki front page.
func wikiFrontPage(w http.ResponseWriter, r *http.Request) {
	renderWikiFrontPage(w)
}
// wikiSignup renders the signup form on GET and, on POST, validates the
// submission, stores the new user, sets the signed session cookie, and
// redirects to the wiki front page.
//
// NOTE(review): the password is stored in the datastore as-is (no hashing),
// and the anonymous form struct is repeated three times because the field
// names must match the template; both are worth revisiting.
func wikiSignup(w http.ResponseWriter, r *http.Request) {
	if r.Method == "GET" {
		// Fresh form: empty values, no error messages.
		form := struct {
			Username string
			Password string
			Verify string
			Email string
			ErrorUsername string
			ErrorPassword string
			ErrorVerify string
			ErrorEmail string
		}{
			"", "", "", "", "", "", "", "",
		}
		writeForm(w, form)
	}
	if r.Method == "POST" {
		errorUsername := ""
		errorPassword := ""
		errorVerify := ""
		errorEmail := ""
		// Get form field values
		username := r.FormValue("username")
		password := r.FormValue("password")
		verify := r.FormValue("verify")
		email := r.FormValue("email")
		// Validate form fields
		if ! (validUsername(username) && validPassword(password) && (password == verify) && validEmail(email)) {
			// At least one field is invalid: collect per-field messages and
			// re-render the form with the passwords blanked out.
			if !validUsername(username) {
				errorUsername = "That's not a valid username"
			}
			if !validPassword(password) {
				errorPassword = "That's not a valid password"
			}
			if(password != verify) {
				errorVerify = "Your passwords didn't match"
			}
			if !validEmail(email) {
				errorEmail = "That's not a valid email"
			}
			password = ""
			verify = ""
			form := struct {
				Username string
				Password string
				Verify string
				Email string
				ErrorUsername string
				ErrorPassword string
				ErrorVerify string
				ErrorEmail string
			}{
				username,
				password,
				verify,
				email,
				errorUsername,
				errorPassword,
				errorVerify,
				errorEmail,
			}
			writeForm(w, form)
		} else {
			// Valid input: reject duplicate usernames before creating.
			user := models.UserByUsername(r, username)
			if(len(user.Username) > 0) {
				errorUsername = "That user already exists"
				form := struct {
					Username string
					Password string
					Verify string
					Email string
					ErrorUsername string
					ErrorPassword string
					ErrorVerify string
					ErrorEmail string
				}{
					username,
					password,
					verify,
					email,
					errorUsername,
					errorPassword,
					errorVerify,
					errorEmail,
				}
				writeForm(w, form)
			} else {
				// Allocate a datastore ID and persist the new user.
				c := appengine.NewContext(r)
				userID, _, _ := datastore.AllocateIDs(c, "User", nil, 1)
				key := datastore.NewKey(c, "User", "", userID, nil)
				u := models.User{ userID, username, password, verify, email, time.Now() }
				_, err := datastore.Put(c, key, &u)
				if err != nil {
					http.Error(w, err.Error(), http.StatusInternalServerError)
					return
				}
				// Sign the user id into the session cookie.
				wikiUserIdCookie = securecookie.New(wikiSecret, nil)
				stringID := fmt.Sprintf("%d", u.Id)
				tools.StoreCookie(w, r, wikiUserIdCookie, "user_id", stringID)
				// set the current user
				currentUser = &u
				// redirect to the wiki front page
				http.Redirect(w, r, "/wiki", http.StatusFound)
				return
			}
		}
	}
}
// wikiLogin renders the login form on GET and, on POST, checks the submitted
// credentials, sets the session cookie, and redirects to the front page.
// Any failure falls through to re-rendering the form with "Invalid Login".
//
// NOTE(review): the password is compared in plaintext, and the inner
// username/password check is redundant — UserByUsernameAndPassword already
// filtered on both fields.
func wikiLogin(w http.ResponseWriter, r *http.Request) {
	if r.Method == "GET" {
		form := struct {
			Username string
			Password string
			ErrorLogin string
		}{
			"", "", "",
		}
		writeLoginForm(w, form)
	}
	if r.Method == "POST" {
		// Get form field values
		username := r.FormValue("username")
		password := r.FormValue("password")
		// Validate form fields
		user := models.UserByUsernameAndPassword(r, username, password)
		if(len(user.Username) > 0) {
			if(username == user.Username && password == user.Password) {
				// Lazily create the cookie codec if signup never ran.
				if(wikiUserIdCookie == nil){
					wikiUserIdCookie = securecookie.New(wikiSecret, nil)
				}
				stringID := fmt.Sprintf("%d", user.Id)
				tools.StoreCookie(w, r, wikiUserIdCookie, "user_id", stringID)
				// set the current user
				currentUser = user
				// redirect to the wiki front page
				http.Redirect(w, r, "/wiki", http.StatusFound)
				return
			}
		}
		// Credentials did not match: show the form again with an error.
		form := struct {
			Username string
			Password string
			ErrorLogin string
		}{
			username,
			password,
			"Invalid Login",
		}
		writeLoginForm(w, form)
	}
}
// wikiLogout clears the session cookie and the cached current user, then
// sends the visitor back to the wiki front page.
func wikiLogout(w http.ResponseWriter, r *http.Request) {
	tools.ClearCookie(w, "user_id")
	// clear the current user
	currentUser = nil
	// redirect to the wiki front page
	http.Redirect(w, r, "/wiki", http.StatusFound)
	// (redundant trailing `return` removed — staticcheck S1023)
}
// wikiEdit serves the edit form for an existing page (GET, login required)
// and applies an update to it (POST).
func wikiEdit(w http.ResponseWriter, r *http.Request) {
	// get the page name in the URL
	vars := mux.Vars(r)
	pageName := vars["page"]
	if r.Method == "GET" {
		// fetch the page only if you are already looged in
		if(currentUser != nil) {
			// Missing page: bounce to the page URL itself (wikiPage then
			// shows the new-page form for a nonexistent page).
			if page, err := models.GetPage(r, pageName); err != nil {
				// redirect to the wiki page
				http.Redirect(w, r, "/wiki/" + pageName, http.StatusFound)
				return
			} else {
				renderNewPageForm(w, page.Content)
			}
		} else {
			// redirect to the login page
			http.Redirect(w, r, "/wiki/login", http.StatusFound)
			return
		}
	}
	if r.Method == "POST" {
		content := r.FormValue("content")
		// if the page does not exist redirect to the new page form
		if page, err := models.GetPage(r, pageName); err != nil {
			renderNewPageForm(w, nil)
		} else {
			// update page
			models.UpdatePage(r, *page, pageName, content)
			// redirect to the wiki page
			http.Redirect(w, r, "/wiki/" + pageName, http.StatusFound)
			return
		}
	}
}
// wikiPage shows an existing wiki page (GET) — or the new-page form when the
// page does not exist — and creates a page from the submitted content (POST).
func wikiPage(w http.ResponseWriter, r *http.Request) {
	// get the page name in the URL
	vars := mux.Vars(r)
	pageName := vars["page"]
	if r.Method == "GET" {
		// fetch the page
		// if the page does not exist redirect to the new page form
		if page, err := models.GetPage(r, pageName); err != nil {
			renderNewPageForm(w, nil)
		} else {
			renderPageView(w, *page)
		}
	}
	if r.Method == "POST" {
		content := r.FormValue("content")
		err := models.AddPage(r, pageName, content)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// redirect to the wiki front page
		http.Redirect(w, r, "/wiki", http.StatusFound)
		return
	}
}
// authenticationItems returns the auth-related navigation entries: signup and
// login for anonymous visitors, a logout entry for a logged-in user.
func authenticationItems() []NavItem {
	if currentUser == nil {
		return []NavItem{
			{URL: "/wiki/signup", Name: "signup"},
			{URL: "/wiki/login", Name: "login"},
		}
	}
	return []NavItem{
		{URL: "/wiki/logout", Name: "logout(" + currentUser.Username + ")"},
	}
}
// navigationItems returns the per-page navigation entries: logout plus an
// edit link for the current page when logged in, otherwise login and signup.
func navigationItems(pageURL string) []NavItem {
	if currentUser == nil {
		return []NavItem{
			{URL: "/wiki/login", Name: "login"},
			{URL: "/wiki/signup", Name: "signup"},
		}
	}
	return []NavItem{
		{URL: "/wiki/logout", Name: "logout(" + currentUser.Username + ")"},
		{URL: "/wiki/_edit/" + pageURL, Name: "edit"},
	}
}
// renderWikiFrontPage renders the front page template with the auth
// navigation items.
func renderWikiFrontPage(w http.ResponseWriter) {
	t, err := template.ParseFiles("templates/navigation.html", "templates/wiki.html")
	if err != nil {
		// Previously ignored: a parse failure left t nil and the call below
		// would panic. Report it as a 500 instead.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if err := t.ExecuteTemplate(w, "tmpl_wiki", authenticationItems()); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}
// renderNewPageForm renders the new-page form, pre-filled with data (may be
// nil for an empty form).
func renderNewPageForm(w http.ResponseWriter, data interface{}) {
	t, err := template.ParseFiles("templates/newpage.html")
	if err != nil {
		// Previously ignored: a parse failure left t nil and Execute would
		// panic. Report it as a 500 instead.
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	if err := t.Execute(w, data); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}
func renderPageView(w http.ResponseWriter, page models.Page) {
t, _ := template.ParseFiles("templates/navigation.html", "templates/page.html")
article := struct {
Navigation []NavItem
Page models.Page
}{
navigationItems(page.Name),
page,
}
if err := t.ExecuteTemplate(w, "tmpl_page", article); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
} |
package util
import (
"fmt"
"os"
"time"
)
var (
	// pid is captured once at startup; every trace ID embeds it.
	pid = os.Getpid()
)

// NewTraceID builds a trace identifier from the process ID and a
// microsecond-resolution timestamp.
func NewTraceID() string {
	stamp := time.Now().Format("2006.01.02.15.04.05.999999")
	return fmt.Sprintf("trace-id-%d-%s", pid, stamp)
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Binary syscall_test_runner runs the syscall test suites in gVisor
// containers and on the host platform.
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"os/signal"
"path/filepath"
"strings"
"syscall"
"testing"
"time"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/syndtr/gocapability/capability"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/seccheck"
"gvisor.dev/gvisor/pkg/test/testutil"
"gvisor.dev/gvisor/runsc/specutils"
"gvisor.dev/gvisor/test/runner/gtest"
"gvisor.dev/gvisor/test/trace/config"
"gvisor.dev/gvisor/test/uds"
)
// Command-line flags controlling how the syscall test suites are executed:
// platform selection, filesystem layout, networking, tracing and debugging.
var (
	debug              = flag.Bool("debug", false, "enable debug logs")
	oneSandbox         = flag.Bool("one-sandbox", false, "run all test cases in one sandbox")
	strace             = flag.Bool("strace", false, "enable strace logs")
	platform           = flag.String("platform", "ptrace", "platform to run on")
	platformSupport    = flag.String("platform-support", "", "String passed to the test as GVISOR_PLATFORM_SUPPORT environment variable. Used to determine which syscall tests are expected to work with the current platform.")
	network            = flag.String("network", "none", "network stack to run on (sandbox, host, none)")
	useTmpfs           = flag.Bool("use-tmpfs", false, "mounts tmpfs for /tmp")
	fusefs             = flag.Bool("fusefs", false, "mounts a fusefs for /tmp")
	fileAccess         = flag.String("file-access", "exclusive", "mounts root in exclusive or shared mode")
	overlay            = flag.Bool("overlay", false, "wrap filesystem mounts with writable tmpfs overlay")
	container          = flag.Bool("container", false, "run tests in their own namespaces (user ns, network ns, etc), pretending to be root. Implicitly enabled if network=host, or if using network namespaces")
	setupContainerPath = flag.String("setup-container", "", "path to setup_container binary (for use with --container)")
	trace              = flag.Bool("trace", false, "enables all trace points")
	directfs           = flag.Bool("directfs", false, "enables directfs (for all gofer mounts)")
	addHostUDS         = flag.Bool("add-host-uds", false, "expose a tree of UDS to test communication with the host")
	addHostConnector   = flag.Bool("add-host-connector", false, "create goroutines that connect to bound UDS that will be created by sandbox")
	addHostFIFO        = flag.Bool("add-host-fifo", false, "expose a tree of FIFO to test communication with the host")
	ioUring            = flag.Bool("iouring", false, "Enables IO_URING API for asynchronous I/O")
	// TODO(gvisor.dev/issue/4572): properly support leak checking for runsc, and
	// set to true as the default for the test runner.
	leakCheck  = flag.Bool("leak-check", false, "check for reference leaks")
	waitForPid = flag.Duration("delay-for-debugger", 0, "Print out the sandbox PID and wait for the specified duration to start the test. This is useful for attaching a debugger to the runsc-sandbox process.")
)
const (
	// platformSupportEnvVar is the environment variable read by
	// platform_util.cc to determine platform capabilities.
	platformSupportEnvVar = "GVISOR_PLATFORM_SUPPORT"
)
// getSetupContainerPath returns the path to the setup_container binary,
// preferring the -setup-container flag and falling back to the checked-in
// test binary location.
func getSetupContainerPath() string {
	if path := *setupContainerPath; path != "" {
		return path
	}
	path, err := testutil.FindFile("test/runner/setup_container/setup_container")
	if err != nil {
		fatalf("cannot find setup_container: %v", err)
	}
	return path
}
// runTestCaseNative runs the test case directly on the host machine.
//
// It isolates the test in a fresh TEST_TMPDIR, strips sharding variables,
// optionally exposes UDS/FIFO trees, and — when capabilities allow or
// -container is set — re-execs through setup_container inside new
// user/net/ipc/uts namespaces mapped to root.
func runTestCaseNative(testBin string, tc *gtest.TestCase, args []string, t *testing.T) {
	// These tests might be running in parallel, so make sure they have a
	// unique test temp dir.
	tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "")
	if err != nil {
		t.Fatalf("could not create temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	// Replace TEST_TMPDIR in the current environment with something
	// unique.
	env := os.Environ()
	newEnvVar := "TEST_TMPDIR=" + tmpDir
	var found bool
	for i, kv := range env {
		if strings.HasPrefix(kv, "TEST_TMPDIR=") {
			env[i] = newEnvVar
			found = true
			break
		}
	}
	if !found {
		env = append(env, newEnvVar)
	}
	// Remove shard env variables so that the gunit binary does not try to
	// interpret them.
	env = filterEnv(env, []string{"TEST_SHARD_INDEX", "TEST_TOTAL_SHARDS", "GTEST_SHARD_INDEX", "GTEST_TOTAL_SHARDS"})
	if *addHostUDS {
		// Expose a tree of bound host sockets to the test.
		socketDir, cleanup, err := uds.CreateBoundUDSTree("/tmp")
		if err != nil {
			t.Fatalf("failed to create socket tree: %v", err)
		}
		defer cleanup()
		env = append(env, "TEST_UDS_TREE="+socketDir)
		// On Linux, the concept of "attach" location doesn't exist.
		// Just pass the same path to make these tests identical.
		env = append(env, "TEST_UDS_ATTACH_TREE="+socketDir)
	}
	if *addHostConnector {
		// Host-side goroutines that will connect to sandbox-created sockets.
		connectorDir, cleanup, err := uds.CreateSocketConnectors("/tmp")
		if err != nil {
			t.Fatalf("failed to create socket connectors: %v", err)
		}
		defer cleanup()
		env = append(env, "TEST_CONNECTOR_TREE="+connectorDir)
	}
	if *addHostFIFO {
		// Expose a tree of host FIFOs to the test.
		pipeDir, cleanup, err := uds.CreateFifoTree("/tmp")
		if err != nil {
			t.Fatalf("failed to create pipe tree: %v", err)
		}
		defer cleanup()
		env = append(env, "TEST_FIFO_TREE="+pipeDir)
		// On Linux, the concept of "attach" location doesn't exist.
		// Just pass the same path to make these tests identical.
		env = append(env, "TEST_FIFO_ATTACH_TREE="+pipeDir)
	}
	if *platformSupport != "" {
		env = append(env, fmt.Sprintf("%s=%s", platformSupportEnvVar, *platformSupport))
	}
	if args == nil {
		args = tc.Args()
	}
	cmd := exec.Command(testBin, args...)
	cmd.Env = env
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	cmd.SysProcAttr = &unix.SysProcAttr{}
	// New UTS/NET namespaces when the needed capabilities are available.
	if specutils.HasCapabilities(capability.CAP_SYS_ADMIN) {
		cmd.SysProcAttr.Cloneflags |= unix.CLONE_NEWUTS
	}
	if specutils.HasCapabilities(capability.CAP_NET_ADMIN) {
		cmd.SysProcAttr.Cloneflags |= unix.CLONE_NEWNET
	}
	if *container || (cmd.SysProcAttr.Cloneflags&unix.CLONE_NEWNET != 0) {
		// setup_container takes in its target argv as positional arguments.
		cmd.Path = getSetupContainerPath()
		cmd.Args = append([]string{cmd.Path}, cmd.Args...)
		cmd.SysProcAttr.Cloneflags |= unix.CLONE_NEWUSER | unix.CLONE_NEWNET | unix.CLONE_NEWIPC | unix.CLONE_NEWUTS
		// Set current user/group as root inside the namespace.
		cmd.SysProcAttr.UidMappings = []syscall.SysProcIDMap{
			{ContainerID: 0, HostID: os.Getuid(), Size: 1},
		}
		cmd.SysProcAttr.GidMappings = []syscall.SysProcIDMap{
			{ContainerID: 0, HostID: os.Getgid(), Size: 1},
		}
		cmd.SysProcAttr.GidMappingsEnableSetgroups = false
		cmd.SysProcAttr.Credential = &syscall.Credential{
			Uid: 0,
			Gid: 0,
		}
	}
	if err := cmd.Run(); err != nil {
		ws := err.(*exec.ExitError).Sys().(syscall.WaitStatus)
		t.Errorf("test %q exited with status %d, want 0", tc.FullName(), ws.ExitStatus())
	}
}
// runRunsc runs spec in runsc in a standard test configuration.
//
// runsc logs will be saved to a path in TEST_UNDECLARED_OUTPUTS_DIR.
//
// Returns an error if the sandboxed application exits non-zero.
//
// NOTE(review): several argv slices below are built with append(args, ...)
// from the same base slice (createArgs, cmdArgs, dArgs, waitArgs); if args
// ever has spare capacity these appends share a backing array and can stomp
// each other — worth auditing (see "Go Slices: usage and internals").
func runRunsc(tc *gtest.TestCase, spec *specs.Spec) error {
	bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
	if err != nil {
		return fmt.Errorf("SetupBundleDir failed: %v", err)
	}
	defer cleanup()
	rootDir, cleanup, err := testutil.SetupRootDir()
	if err != nil {
		return fmt.Errorf("SetupRootDir failed: %v", err)
	}
	defer cleanup()
	name := tc.FullName()
	id := testutil.RandomContainerID()
	log.Infof("Running test %q in container %q", name, id)
	specutils.LogSpecDebug(spec, false)
	// Base runsc argument list; the flag-dependent options are appended below.
	args := []string{
		"-root", rootDir,
		"-network", *network,
		"-log-format=text",
		"-TESTONLY-unsafe-nonroot=true",
		"-TESTONLY-allow-packet-endpoint-write=true",
		fmt.Sprintf("-panic-signal=%d", unix.SIGTERM),
		fmt.Sprintf("-iouring=%t", *ioUring),
		"-watchdog-action=panic",
		"-platform", *platform,
		"-file-access", *fileAccess,
		"-gvisor-gro=200000ns",
	}
	if *network == "host" && !testutil.TestEnvSupportsNetAdmin {
		log.Warningf("Testing with network=host but test environment does not support net admin or raw sockets. Raw sockets will not be enabled.")
	} else {
		args = append(args, "-net-raw")
	}
	if *overlay {
		args = append(args, "-overlay2=all:dir=/tmp")
	} else {
		args = append(args, "-overlay2=none")
	}
	if *debug {
		args = append(args, "-debug", "-log-packets=true")
	}
	if *strace {
		args = append(args, "-strace")
	}
	if *addHostUDS {
		args = append(args, "-host-uds=open")
	}
	if *addHostConnector {
		args = append(args, "-host-uds=create")
	}
	if *addHostFIFO {
		args = append(args, "-host-fifo=open")
	}
	if *leakCheck {
		args = append(args, "-ref-leak-mode=log-names")
	}
	if *trace {
		flag, err := enableAllTraces(rootDir)
		if err != nil {
			return fmt.Errorf("enabling all traces: %w", err)
		}
		log.Infof("Enabling all trace points: %s", flag)
		args = append(args, flag)
	}
	if *directfs {
		args = append(args, "-directfs")
	} else {
		args = append(args, "-directfs=false")
	}
	testLogDir := ""
	if undeclaredOutputsDir, ok := unix.Getenv("TEST_UNDECLARED_OUTPUTS_DIR"); ok {
		// Create log directory dedicated for this test.
		testLogDir = filepath.Join(undeclaredOutputsDir, strings.Replace(name, "/", "_", -1))
		if err := os.MkdirAll(testLogDir, 0755); err != nil {
			return fmt.Errorf("could not create test dir: %v", err)
		}
		debugLogDir, err := ioutil.TempDir(testLogDir, "runsc")
		if err != nil {
			return fmt.Errorf("could not create temp dir: %v", err)
		}
		debugLogDir += "/"
		log.Infof("runsc logs: %s", debugLogDir)
		args = append(args, "-debug-log", debugLogDir)
		args = append(args, "-coverage-report", debugLogDir)
		// Default -log sends messages to stderr which makes reading the test log
		// difficult. Instead, drop them when debug log is enabled given it's a
		// better place for these messages.
		args = append(args, "-log=/dev/null")
	}
	// Current process doesn't have CAP_SYS_ADMIN, create user namespace and run
	// as root inside that namespace to get it.
	sysProcAttr := &unix.SysProcAttr{
		Cloneflags: unix.CLONE_NEWUSER | unix.CLONE_NEWNS,
		// Set current user/group as root inside the namespace.
		UidMappings: []syscall.SysProcIDMap{
			{ContainerID: 0, HostID: os.Getuid(), Size: 1},
		},
		GidMappings: []syscall.SysProcIDMap{
			{ContainerID: 0, HostID: os.Getgid(), Size: 1},
		},
		GidMappingsEnableSetgroups: false,
		Credential: &syscall.Credential{
			Uid: 0,
			Gid: 0,
		},
	}
	var cmdArgs []string
	if *waitForPid != 0 {
		// Debugger mode: "create" the sandbox first, print its PID, wait for
		// either SIGUSR1 or the configured delay, then "start" it.
		createArgs := append(args, "create", "-pid-file", filepath.Join(testLogDir, "pid"), "--bundle", bundleDir, id)
		defer os.Remove(filepath.Join(testLogDir, "pid"))
		createCmd := exec.Command(specutils.ExePath, createArgs...)
		createCmd.SysProcAttr = sysProcAttr
		createCmd.Stdout = os.Stdout
		createCmd.Stderr = os.Stderr
		if err := createCmd.Run(); err != nil {
			return fmt.Errorf("could not create sandbox: %v", err)
		}
		sandboxPidBytes, err := os.ReadFile(filepath.Join(testLogDir, "pid"))
		if err != nil {
			return fmt.Errorf("could not read pid file: %v", err)
		}
		log.Infof("Sandbox process ID is %s. You can attach to it from a debugger of your choice.", sandboxPidBytes)
		log.Infof("For example, with Delve you can call: $ dlv attach %s", sandboxPidBytes)
		log.Infof("The test will automatically start after %s.", *waitForPid)
		log.Infof("You may also signal the test process to start the test immediately: $ kill -SIGUSR1 %d", os.Getpid())
		sigCh := make(chan os.Signal, 1)
		signal.Notify(sigCh, unix.SIGUSR1)
		select {
		case <-sigCh:
		case <-time.After(*waitForPid):
		}
		signal.Reset(unix.SIGUSR1)
		cmdArgs = append(args, "start", id)
	} else {
		cmdArgs = append(args, "run", "--bundle", bundleDir, id)
	}
	cmd := exec.Command(specutils.ExePath, cmdArgs...)
	cmd.SysProcAttr = sysProcAttr
	if *container || *network == "host" || (cmd.SysProcAttr.Cloneflags&unix.CLONE_NEWNET != 0) {
		cmd.SysProcAttr.Cloneflags |= unix.CLONE_NEWNET
		cmd.Path = getSetupContainerPath()
		cmd.Args = append([]string{cmd.Path}, cmd.Args...)
	}
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	// On SIGTERM, dump sandbox stacks (with a 3s budget) and then forward
	// SIGTERM into the sandbox so it dies cleanly.
	sig := make(chan os.Signal, 1)
	defer close(sig)
	signal.Notify(sig, unix.SIGTERM)
	defer signal.Stop(sig)
	go func() {
		s, ok := <-sig
		if !ok {
			return
		}
		log.Warningf("%s: Got signal: %v", name, s)
		done := make(chan bool, 1)
		dArgs := append([]string{}, args...)
		dArgs = append(dArgs, "debug", "--stacks", id)
		go func(dArgs []string) {
			debug := exec.Command(specutils.ExePath, dArgs...)
			debug.Stdout = os.Stdout
			debug.Stderr = os.Stderr
			debug.Run()
			done <- true
		}(dArgs)
		timeout := time.After(3 * time.Second)
		select {
		case <-timeout:
			log.Infof("runsc debug --stacks is timeouted")
		case <-done:
		}
		log.Warningf("Send SIGTERM to the sandbox process")
		dArgs = append(args, "debug",
			fmt.Sprintf("--signal=%d", unix.SIGTERM),
			id)
		signal := exec.Command(specutils.ExePath, dArgs...)
		signal.Stdout = os.Stdout
		signal.Stderr = os.Stderr
		signal.Run()
	}()
	err = cmd.Run()
	if *waitForPid != 0 {
		// In debugger mode "start" returns immediately; wait for the
		// container and decode its JSON exit status.
		if err != nil {
			return fmt.Errorf("could not start container: %v", err)
		}
		waitArgs := append(args, "wait", id)
		waitCmd := exec.Command(specutils.ExePath, waitArgs...)
		waitCmd.SysProcAttr = sysProcAttr
		waitCmd.Stderr = os.Stderr
		buf := bytes.NewBuffer(nil)
		waitCmd.Stdout = buf
		err = waitCmd.Run()
		wres := struct {
			ID         string `json:"id"`
			ExitStatus int    `json:"exitStatus"`
		}{}
		if err := json.NewDecoder(buf).Decode(&wres); err != nil {
			return fmt.Errorf("could not decode wait result: %v", err)
		}
		if wres.ExitStatus != 0 {
			return fmt.Errorf("test failed with status: %d", wres.ExitStatus)
		}
	}
	if err == nil && len(testLogDir) > 0 {
		// If the test passed, then we erase the log directory. This speeds up
		// uploading logs in continuous integration & saves on disk space.
		os.RemoveAll(testLogDir)
	}
	return err
}
// setupHostUDSTree updates the spec to expose a UDS files tree for testing
// communication with the host.
func setupHostUDSTree(spec *specs.Spec) (cleanup func(), err error) {
	socketDir, cleanup, err := uds.CreateBoundUDSTree("/tmp")
	if err != nil {
		return nil, fmt.Errorf("failed to create socket tree: %v", err)
	}
	// Standard access to entire tree.
	spec.Mounts = append(spec.Mounts, specs.Mount{
		Destination: "/tmp/sockets",
		Source:      socketDir,
		Type:        "bind",
	})
	// Individual attach points for each socket to test mounts that attach
	// directly to the sockets.
	for _, protocol := range []string{"stream", "seqpacket"} {
		for _, name := range []string{"echo", "nonlistening"} {
			spec.Mounts = append(spec.Mounts, specs.Mount{
				Destination: filepath.Join("/tmp/sockets-attach", protocol, name),
				Source:      filepath.Join(socketDir, protocol, name),
				Type:        "bind",
			})
		}
	}
	// Datagram sockets only have the single "null" endpoint.
	spec.Mounts = append(spec.Mounts, specs.Mount{
		Destination: "/tmp/sockets-attach/dgram/null",
		Source:      filepath.Join(socketDir, "dgram/null"),
		Type:        "bind",
	})
	spec.Process.Env = append(spec.Process.Env, "TEST_UDS_TREE=/tmp/sockets")
	spec.Process.Env = append(spec.Process.Env, "TEST_UDS_ATTACH_TREE=/tmp/sockets-attach")
	return cleanup, nil
}
// setupHostConnectorTree starts goroutines that will attempt to connect to
// sockets in a directory that will be bind mounted into the container.
func setupHostConnectorTree(spec *specs.Spec) (cleanup func(), err error) {
	connectorDir, cleanup, err := uds.CreateSocketConnectors("/tmp")
	if err != nil {
		return nil, fmt.Errorf("failed to create connector tree: %v", err)
	}

	// Expose the whole tree. Individual attach points are not possible here:
	// the sockets have not been created yet.
	mount := specs.Mount{
		Destination: "/tmp/connectors",
		Source:      connectorDir,
		Type:        "bind",
	}
	spec.Mounts = append(spec.Mounts, mount)
	spec.Process.Env = append(spec.Process.Env, "TEST_CONNECTOR_TREE=/tmp/connectors")
	return cleanup, nil
}
// setupHostFifoTree updates the spec to expose a FIFO file tree for testing
// communication with the host.
func setupHostFifoTree(spec *specs.Spec) (cleanup func(), err error) {
	fifoDir, cleanup, err := uds.CreateFifoTree("/tmp")
	if err != nil {
		return nil, fmt.Errorf("failed to create FIFO tree: %v", err)
	}

	// Whole-tree mount first, then one attach-point mount per pipe so tests
	// can exercise mounts that target the FIFO files directly. Order matches
	// the original: /tmp/pipes, then "in", then "out".
	mounts := []specs.Mount{
		{Destination: "/tmp/pipes", Source: fifoDir, Type: "bind"},
		{Destination: filepath.Join("/tmp/pipes-attach", "in"), Source: filepath.Join(fifoDir, "in"), Type: "bind"},
		{Destination: filepath.Join("/tmp/pipes-attach", "out"), Source: filepath.Join(fifoDir, "out"), Type: "bind"},
	}
	spec.Mounts = append(spec.Mounts, mounts...)

	// Tell the test binary where both trees are mounted.
	spec.Process.Env = append(spec.Process.Env,
		"TEST_FIFO_TREE=/tmp/pipes",
		"TEST_FIFO_ATTACH_TREE=/tmp/pipes-attach")
	return cleanup, nil
}
// runTestCaseRunsc runs the test case in runsc.
//
// It builds an OCI spec that executes testBin (optionally wrapped by the fuse
// server binary), configures /tmp, capabilities and environment variables for
// the gVisor configuration under test, optionally sets up host UDS/connector/
// FIFO trees, and finally launches the container via runRunsc, failing t on
// error.
func runTestCaseRunsc(testBin string, tc *gtest.TestCase, args []string, t *testing.T) {
	// Run a new container with the test executable and filter for the
	// given test suite and name.
	if args == nil {
		args = tc.Args()
	}
	var spec *specs.Spec
	if *fusefs {
		fuseServer, err := testutil.FindFile("test/runner/fuse/fuse")
		if err != nil {
			fatalf("cannot find fuse: %v", err)
		}
		// The fuse server becomes the container entry point and receives the
		// real test command via --cmd.
		cmdArgs := append([]string{testBin}, args...)
		cmd := strings.Join(cmdArgs, " ")
		spec = testutil.NewSpecWithArgs([]string{fuseServer, fmt.Sprintf("--debug=%t", *debug), fmt.Sprintf("--cmd=\"%s\"", cmd)}...)
	} else {
		spec = testutil.NewSpecWithArgs(append([]string{testBin}, args...)...)
	}

	// Mark the root as writeable, as some tests attempt to
	// write to the rootfs, and expect EACCES, not EROFS.
	spec.Root.Readonly = false

	// Test spec comes with pre-defined mounts that we don't want. Reset it.
	spec.Mounts = nil
	testTmpDir := "/tmp"
	if *useTmpfs {
		// Forces '/tmp' to be mounted as tmpfs, otherwise test that rely on
		// features only available in gVisor's internal tmpfs may fail.
		spec.Mounts = append(spec.Mounts, specs.Mount{
			Destination: "/tmp",
			Type:        "tmpfs",
		})
	} else {
		// Use a gofer-backed directory for $TEST_TMPDIR.
		//
		// Tests might be running in parallel, so make sure each has a
		// unique test temp dir.
		//
		// Some tests (e.g., sticky) access this mount from other
		// users, so make sure it is world-accessible.
		tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "")
		if err != nil {
			t.Fatalf("could not create temp dir: %v", err)
		}
		defer os.RemoveAll(tmpDir)

		if err := os.Chmod(tmpDir, 0777); err != nil {
			t.Fatalf("could not chmod temp dir: %v", err)
		}
		testTmpDir = tmpDir
		// Note that tmpDir exists in container rootfs mount, whose cacheability is
		// set by fileAccess flag appropriately.
	}
	if *fusefs {
		// In fuse tests, the fuse server forwards all filesystem ops from /tmp
		// to /fuse.
		spec.Mounts = append(spec.Mounts, specs.Mount{
			Destination: "/fuse",
			Type:        "tmpfs",
		})
	}
	if *network == "host" && !testutil.TestEnvSupportsNetAdmin {
		log.Warningf("Testing with network=host but test environment does not support net admin or raw sockets. Dropping CAP_NET_ADMIN and CAP_NET_RAW.")
		specutils.DropCapability(spec.Process.Capabilities, "CAP_NET_ADMIN")
		specutils.DropCapability(spec.Process.Capabilities, "CAP_NET_RAW")
	}

	// Set environment variables that indicate we are running in gVisor with
	// the given platform, network, and filesystem stack.
	const (
		platformVar = "TEST_ON_GVISOR"
		networkVar  = "GVISOR_NETWORK"
		ioUringVar  = "IOURING_ENABLED"
		fuseVar     = "GVISOR_FUSE_TEST"
	)
	env := append(os.Environ(), platformVar+"="+*platform, networkVar+"="+*network)
	if *platformSupport != "" {
		env = append(env, fmt.Sprintf("%s=%s", platformSupportEnvVar, *platformSupport))
	}
	if *ioUring {
		env = append(env, ioUringVar+"=TRUE")
	} else {
		env = append(env, ioUringVar+"=FALSE")
	}
	if *fusefs {
		env = append(env, fuseVar+"=TRUE")
	} else {
		env = append(env, fuseVar+"=FALSE")
	}

	// Remove shard env variables so that the gunit binary does not try to
	// interpret them.
	env = filterEnv(env, []string{"TEST_SHARD_INDEX", "TEST_TOTAL_SHARDS", "GTEST_SHARD_INDEX", "GTEST_TOTAL_SHARDS"})

	// Set TEST_TMPDIR to testTmpDir, which has been appropriately configured.
	env = filterEnv(env, []string{"TEST_TMPDIR"})
	env = append(env, fmt.Sprintf("TEST_TMPDIR=%s", testTmpDir))

	spec.Process.Env = env

	// Optional host-communication fixtures; each registers its own cleanup.
	if *addHostUDS {
		cleanup, err := setupHostUDSTree(spec)
		if err != nil {
			t.Fatalf("error creating UDS tree: %v", err)
		}
		defer cleanup()
	}
	if *addHostConnector {
		cleanup, err := setupHostConnectorTree(spec)
		if err != nil {
			t.Fatalf("error creating connector tree: %v", err)
		}
		defer cleanup()
	}
	if *addHostFIFO {
		cleanup, err := setupHostFifoTree(spec)
		if err != nil {
			t.Fatalf("error creating FIFO tree: %v", err)
		}
		defer cleanup()
	}

	if err := runRunsc(tc, spec); err != nil {
		t.Errorf("test %q failed with error %v, want nil", tc.FullName(), err)
	}
}
// filterEnv returns an environment with the excluded variables removed.
// Entries of env are "KEY=value" pairs; entries of exclude are bare variable
// names.
func filterEnv(env, exclude []string) []string {
	var kept []string
	for _, entry := range env {
		excluded := false
		for _, name := range exclude {
			if strings.HasPrefix(entry, name+"=") {
				excluded = true
				break
			}
		}
		if !excluded {
			kept = append(kept, entry)
		}
	}
	return kept
}
// fatalf writes the formatted message plus a trailing newline to stderr and
// terminates the process with exit status 1.
func fatalf(format string, args ...any) {
	msg := fmt.Sprintf(format, args...)
	os.Stderr.WriteString(msg + "\n")
	os.Exit(1)
}
// matchString reports whether a equals b. It satisfies the matcher signature
// expected by testing.Main, using exact equality rather than regexp matching.
func matchString(a, b string) (bool, error) {
	if a != b {
		return false, nil
	}
	return true, nil
}
// main parses flags, prepares logging and stdio, discovers the test cases in
// the given gtest binary, selects this shard's subset, and runs them either
// natively or inside runsc through testing.Main.
func main() {
	flag.Parse()
	if flag.NArg() != 1 {
		fatalf("test must be provided")
	}
	testBin := flag.Args()[0] // Only argument.

	log.SetLevel(log.Info)
	if *debug {
		log.SetLevel(log.Debug)
	}

	if *platform != "native" {
		// Non-native runs need the runsc binary; locate it once up front.
		if err := testutil.ConfigureExePath(); err != nil {
			panic(err.Error())
		}
	}

	// Make sure stdout and stderr are opened with O_APPEND, otherwise logs
	// from outside the sandbox can (and will) stomp on logs from inside
	// the sandbox.
	for _, f := range []*os.File{os.Stdout, os.Stderr} {
		flags, err := unix.FcntlInt(f.Fd(), unix.F_GETFL, 0)
		if err != nil {
			fatalf("error getting file flags for %v: %v", f, err)
		}
		if flags&unix.O_APPEND == 0 {
			flags |= unix.O_APPEND
			if _, err := unix.FcntlInt(f.Fd(), unix.F_SETFL, flags); err != nil {
				fatalf("error setting file flags for %v: %v", f, err)
			}
		}
	}

	// Get all test cases in each binary.
	testCases, err := gtest.ParseTestCases(testBin, true)
	if err != nil {
		fatalf("ParseTestCases(%q) failed: %v", testBin, err)
	}

	// Get subset of tests corresponding to shard.
	indices, err := testutil.TestIndicesForShard(len(testCases))
	if err != nil {
		fatalf("TestsForShard() failed: %v", err)
	}

	// Resolve the absolute path for the binary.
	testBin, err = filepath.Abs(testBin)
	if err != nil {
		fatalf("Abs() failed: %v", err)
	}

	var tests []testing.InternalTest
	if *oneSandbox {
		// Run every selected case inside a single sandbox, exposed as one
		// synthetic "main_test" entry.
		tc := gtest.TestCase{
			Suite: "main",
			Name:  "test",
		}
		tests = append(tests, testing.InternalTest{
			Name: fmt.Sprintf("%s_%s", tc.Suite, tc.Name),
			F: func(t *testing.T) {
				args := gtest.BuildTestArgs(indices, testCases)
				if *platform == "native" {
					// Run the test case on host.
					runTestCaseNative(testBin, &tc, args, t)
				} else {
					// Run the test case in runsc.
					runTestCaseRunsc(testBin, &tc, args, t)
				}
			},
		})
	} else {
		// Run the tests.
		for _, tci := range indices {
			// Capture tc.
			tc := testCases[tci]
			tests = append(tests, testing.InternalTest{
				Name: fmt.Sprintf("%s_%s", tc.Suite, tc.Name),
				F: func(t *testing.T) {
					if *platform == "native" {
						// Run the test case on host.
						runTestCaseNative(testBin, &tc, nil, t)
					} else {
						// Run the test case in runsc.
						runTestCaseRunsc(testBin, &tc, nil, t)
					}
				},
			})
		}
	}
	testing.Main(matchString, tests, nil, nil)
}
// enableAllTraces writes a pod_init.json trace configuration into dir that
// enables every trace point declared by the runsc binary, routed to a sink
// named "null", and returns the corresponding --pod-init-config flag value.
func enableAllTraces(dir string) (string, error) {
	builder := config.Builder{}
	// Enumerate all trace points compiled into the runsc binary itself.
	if err := builder.LoadAllPoints(specutils.ExePath); err != nil {
		return "", err
	}
	// Sink named "null" — presumably a discard-only sink used just to
	// exercise the trace plumbing; confirm against the seccheck package.
	builder.AddSink(seccheck.SinkConfig{
		Name: "null",
	})
	path := filepath.Join(dir, "pod_init.json")
	cfgFile, err := os.Create(path)
	if err != nil {
		return "", err
	}
	defer cfgFile.Close()
	if err := builder.WriteInitConfig(cfgFile); err != nil {
		return "", fmt.Errorf("writing config file: %w", err)
	}
	return "--pod-init-config=" + path, nil
}
|
package content
import (
"bytes"
"compress/gzip"
"encoding/base64"
"io"
"strings"
)
// GUnzip decompresses gzip-compressed content and returns the raw bytes.
// It returns an error if the data does not start with a valid gzip header or
// is corrupted mid-stream.
func GUnzip(content []byte) ([]byte, error) {
	r, err := gzip.NewReader(bytes.NewReader(content))
	if err != nil {
		return nil, err
	}
	// Close the reader to release its internal state; previously it was
	// leaked. For an in-memory source this cannot fail in a way we care
	// about after a successful ReadAll.
	defer r.Close()
	return io.ReadAll(r)
}
// Base64GZ gzip-compresses data and returns the result encoded as a standard
// base64 string.
func Base64GZ(data []byte) (string, error) {
	compressed, err := Gzip(data)
	if err != nil {
		return "", err
	}
	encoded := base64.StdEncoding.EncodeToString(compressed)
	return encoded, nil
}
// Decode reverses the named encoding applied to content. Supported values
// are "base64", "gz", and layered forms such as "base64+gz" (base64 is always
// the outermost layer). Any other encoding returns the content unchanged.
func Decode(content, encoding string) ([]byte, error) {
	data := []byte(content)
	if encoding == "base64" || strings.HasPrefix(encoding, "base64+") {
		decoded, err := base64.StdEncoding.DecodeString(content)
		if err != nil {
			return nil, err
		}
		data = decoded
		// Strip the consumed outer layer, leaving e.g. "gz" or "".
		encoding = strings.TrimPrefix(encoding, "base64")
		encoding = strings.TrimPrefix(encoding, "+")
	}
	switch encoding {
	case "gz":
		return GUnzip(data)
	default:
		return data, nil
	}
}
// Gzip compresses data with gzip at the default compression level and
// returns the complete compressed stream.
func Gzip(data []byte) ([]byte, error) {
	var out bytes.Buffer
	zw := gzip.NewWriter(&out)
	if _, err := zw.Write(data); err != nil {
		return nil, err
	}
	// Close flushes remaining compressed bytes and the gzip trailer.
	if err := zw.Close(); err != nil {
		return nil, err
	}
	return out.Bytes(), nil
}
|
package telegram
import (
"fmt"
"log"
"os"
"syscall"
"github.com/c0re100/RadioBot/config"
"github.com/c0re100/go-tdlib"
"golang.org/x/crypto/ssh/terminal"
)
var (
bot *tdlib.Client
botID int32
userBot *tdlib.Client
userBotID int32
)
// New create telegram session.
//
// It verifies the music player is running, silences tdlib logging, ensures
// the local "instance" state directory exists, logs in the bot account and —
// unless the web UI is enabled — the user account as well, validates the
// configured group for each logged-in client, saves the playlist state, and
// starts the update receivers. It returns the bot and user clients; when the
// web UI is enabled userLogin is never called, so the second client remains
// its zero value.
func New() (*tdlib.Client, *tdlib.Client) {
	checkPlayerIsActive() // Check music player is running
	tdlib.SetLogVerbosityLevel(0)
	// Presumably routes remaining tdlib output to ./errors.txt — confirm
	// against the tdlib binding docs.
	tdlib.SetFilePath("./errors.txt")
	// Create the state directory before any client needs it.
	if _, err := os.Stat("instance"); os.IsNotExist(err) {
		if err := os.Mkdir("instance", 0755); err != nil {
			log.Fatal("Failed to create instance dir...")
		}
	}
	err := botLogin()
	if err != nil {
		log.Fatal("bot login failed:", err)
	}
	checkGroupIsExist(bot)
	// The user account is only required when not running through the web UI.
	if !config.IsWebEnabled() {
		err = userLogin()
		if err != nil {
			log.Fatal("userbot login failed:", err)
		}
		checkGroupIsExist(userBot)
	}
	// Playlist bookkeeping failures are non-fatal; log and continue.
	if listErr := savePlaylistIndexAndName(); listErr != nil {
		log.Println(listErr)
	}
	createReceiver()
	return bot, userBot
}
// newClient builds a tdlib client whose database and file storage live under
// ./instance/<name>-db and ./instance/<name>-files respectively.
func newClient(name string) *tdlib.Client {
	base := "./instance/" + name
	cfg := tdlib.Config{
		APIID:               config.GetAPIID(),
		APIHash:             config.GetAPIHash(),
		SystemLanguageCode:  "en",
		DeviceModel:         "Radio Controller",
		SystemVersion:       "1.0",
		ApplicationVersion:  "1.0",
		UseMessageDatabase:  true,
		UseFileDatabase:     true,
		UseChatInfoDatabase: true,
		UseTestDataCenter:   false,
		DatabaseDirectory:   base + "-db",
		FileDirectory:       base + "-files",
		IgnoreFileNames:     false,
	}
	return tdlib.NewClient(cfg)
}
// botLogin authenticates the bot account with its configured token and
// records the bot's user ID. It loops on the authorization state machine
// until tdlib reports the ready state.
func botLogin() error {
	bot = newClient("bot")
	for {
		state, _ := bot.Authorize()
		switch state.GetAuthorizationStateEnum() {
		case tdlib.AuthorizationStateWaitPhoneNumberType:
			if _, err := bot.CheckAuthenticationBotToken(config.GetBotToken()); err != nil {
				log.Fatal(err)
			}
		case tdlib.AuthorizationStateReadyType:
			me, err := bot.GetMe()
			if err != nil {
				return err
			}
			botID = me.Id
			fmt.Println(me.Username + " connected.")
			return nil
		}
	}
}
// userLogin interactively authenticates the user account — phone number,
// login code, and (if enabled) the 2FA password read without echo from the
// terminal — then records the user's ID. It loops on the authorization state
// machine until tdlib reports the ready state.
func userLogin() error {
	userBot = newClient("user")
	for {
		state, _ := userBot.Authorize()
		switch state.GetAuthorizationStateEnum() {
		case tdlib.AuthorizationStateWaitPhoneNumberType:
			fmt.Print("Enter phone: ")
			var number string
			fmt.Scanln(&number)
			if _, err := userBot.SendPhoneNumber(number); err != nil {
				fmt.Printf("Error sending phone number: %v", err)
			}
		case tdlib.AuthorizationStateWaitCodeType:
			fmt.Print("Enter code: ")
			var code string
			fmt.Scanln(&code)
			if _, err := userBot.SendAuthCode(code); err != nil {
				fmt.Printf("Error sending auth code : %v", err)
			}
		case tdlib.AuthorizationStateWaitPasswordType:
			fmt.Print("Enter Password: ")
			bytePassword, err := terminal.ReadPassword(int(syscall.Stdin))
			if err != nil {
				fmt.Println(err)
			}
			if _, err = userBot.SendAuthPassword(string(bytePassword)); err != nil {
				fmt.Printf("Error sending auth password: %v", err)
			}
		case tdlib.AuthorizationStateReadyType:
			me, err := userBot.GetMe()
			if err != nil {
				return err
			}
			userBotID = me.Id
			fmt.Println("\nHello!", me.FirstName, me.LastName, "("+me.Username+")")
			return nil
		}
	}
}
// createReceiver starts the background goroutines that consume Telegram
// updates: new messages and callback queries always; group-call updates and
// the group-call join only when the web UI is disabled.
func createReceiver() {
	go newMessages()
	go callbackQuery()
	if !config.IsWebEnabled() {
		go newGroupCallUpdate()
		go newGroupCallPtcpUpdate()
		// Note: unlike the others, joinGroupCall runs synchronously on the
		// caller's goroutine.
		joinGroupCall()
	}
}
// checkGroupIsExist verifies that the configured chat is reachable by cl.
// If no numeric chat ID is configured, it resolves the configured public
// username and persists the resolved ID. Any failure terminates the process.
func checkGroupIsExist(cl *tdlib.Client) {
	if config.GetChatID() != 0 {
		// An ID is already configured; just confirm the chat is reachable.
		if _, err := cl.GetChat(config.GetChatID()); err != nil {
			log.Fatal("GetChat error:", err)
		}
		return
	}
	uName := config.GetChatUsername()
	if uName == "" {
		log.Fatal("Username should not empty.")
	}
	s, err := cl.SearchPublicChat(uName)
	if err != nil {
		log.Fatal("SearchPublicChat error:", err)
	}
	if _, err = cl.GetChat(s.Id); err != nil {
		log.Fatal("GetChat error:", err)
	}
	// Cache the resolved ID so future runs skip the lookup.
	config.SetChatID(s.Id)
	config.SaveConfig()
}
|
package lastfm
import (
"bufio"
"bytes"
"crypto/md5"
"encoding/hex"
"encoding/json"
"fmt"
"log"
"net/url"
"os"
"sort"
"time"
"github.com/spf13/viper"
)
var (
token, sk string
tokenExpired int64
)
// Authentication Guide:
// https://www.last.fm/api/desktopauth
// Step 1. GetToken
// Step 2. Authentication by user, direct to user.
// Step 3. GetSession, with signature

// Auth runs the last.fm desktop authentication flow. It is a no-op when a
// session key is already stored; otherwise it obtains a request token (if the
// cached one is missing or expired), asks the user to grant access in a
// browser, and exchanges the authorized token for a session key.
func Auth() {
	tokenOk, skOk := checkAuth()
	if skOk {
		// A session key never expires; nothing more to do.
		return
	}
	if !tokenOk {
		fmt.Println("Fetching last.fm token...")
		if err := getToken(); err != nil {
			log.Fatal("last.fm: token fetch failed, err: ", err)
		}
	}
	fmt.Println("Please open the link below, and grant the permission.")
	fmt.Println(authPage())
	fmt.Println("Press 'Enter' to continue...")
	bufio.NewReader(os.Stdin).ReadBytes('\n')
	// Exchange the authorized token for a session key and persist it.
	if err := session(); err != nil {
		fmt.Println("last.fm session fetch failed, Please contact the author.")
		log.Fatal("last.fm: session err: ", err)
	}
}
// checkAuth loads the last.fm settings from the config into package state and
// reports whether a still-valid request token (tokenOk) and a session key
// (skOk) are available.
func checkAuth() (bool, bool) {
	domain = viper.GetString("lastfm.domain")
	apiUrl = domain + "/2.0"
	token = viper.GetString("lastfm.auth.token")
	apiKey = viper.GetString("lastfm.auth.api_key")
	tokenExpired = viper.GetInt64("lastfm.auth.token_expired")
	sharedSecret = viper.GetString("lastfm.auth.shared_secret")
	sk = viper.GetString("lastfm.auth.sk")

	tokenOk := token != "" && tokenExpired > time.Now().Unix()
	if tokenOk {
		log.Println("last.fm token found.")
	}
	skOk := sk != ""
	if skOk {
		log.Println("last.fm session key found")
	}
	return tokenOk, skOk
}
// https://www.last.fm/api/show/auth.getToken
// token valid for 60 minutes

// getToken fetches a fresh request token from the last.fm API and persists it
// to the config, together with an expiry timestamp 60 minutes from now.
func getToken() error {
	requestUrl := fmt.Sprintf("%s/?method=auth.gettoken&api_key=%s&format=json", apiUrl, apiKey)
	resp, err := getRequest(requestUrl)
	if err != nil {
		return err
	}
	var ok bool
	if token, ok = toMap(resp)["token"]; !ok {
		return fmt.Errorf("parseToken failed")
	}
	viper.Set("lastfm.auth.token", token)
	viper.Set("lastfm.auth.token_expired", time.Now().Add(60*time.Minute).Unix())
	viper.WriteConfig()
	return nil
}
// generate signature
//
// signature computes the last.fm API method signature: the MD5 hex digest of
// the params of v concatenated in sorted <key><value> order, followed by the
// shared secret. See https://www.last.fm/api/authspec (call signing).
func signature(v *url.Values) (sig string) {
	ordered := prepareSigText(*v)
	text := ordered + sharedSecret
	// SECURITY: the previous version logged `text`, which contains the API
	// shared secret — that writes the credential into the logs. Do not log
	// the signing input.
	hashed := md5.Sum([]byte(text))
	return hex.EncodeToString(hashed[:])
}
// sort query first, then return string with format of <key><value>
//
// prepareSigText flattens v into the canonical last.fm signing string: keys
// in ascending order, each emitted as <key><value> for every value held under
// the key. A nil map yields the empty string.
func prepareSigText(v url.Values) string {
	if v == nil {
		return ""
	}
	keys := make([]string, 0, len(v))
	for key := range v {
		keys = append(keys, key)
	}
	sort.Strings(keys)

	var out bytes.Buffer
	for _, key := range keys {
		for _, value := range v[key] {
			out.WriteString(key)
			out.WriteString(value)
		}
	}
	return out.String()
}
func authPage() string {
return fmt.Sprintf("http://www.last.fm/api/auth/?api_key=%s&token=%s", apiKey, token)
}
// https://www.last.fm/api/show/auth.getSession
// Session keys have an infinite lifetime by default
//
// session exchanges the user-authorized request token for a session key,
// persists the key to the config, and clears the consumed token.
func session() error {
	v := url.Values{}
	// NOTE(review): the documented API method is "auth.getSession"; this
	// sends "auth.session" — confirm against the server/mock this talks to.
	v.Set("method", "auth.session")
	v.Set("api_key", apiKey)
	v.Set("token", token)
	sig := signature(&v)
	v.Set("api_sig", sig)
	// "format" is added only after signing, so it is not part of the signed
	// parameter set.
	v.Set("format", "json")
	requestUrl, _ := url.Parse(apiUrl)
	requestUrl.RawQuery = v.Encode()
	resp, err := getRequest(requestUrl.String())
	if err != nil {
		return err
	}
	key, ok := parseKey(resp)
	if !ok {
		return fmt.Errorf("parse session key failed")
	}
	log.Println("last.fm session Key:", key)
	// The request token is single-use: blank it out and store the key.
	viper.Set("lastfm.auth.token", "")
	viper.Set("lastfm.auth.token_expired", 0)
	viper.Set("lastfm.auth.sk", key)
	viper.WriteConfig()
	return nil
}
// parseKey extracts the session key from an auth session JSON response.
// The boolean result is false when the payload is not valid JSON or does not
// carry a non-empty key.
func parseKey(b []byte) (string, bool) {
	type sessionResponse struct {
		Session struct {
			Name string `json:"name"`
			Key  string `json:"key"`
		} `json:"session"`
	}
	var s sessionResponse
	// The unmarshal error used to be silently ignored; report malformed JSON
	// explicitly as a failed parse.
	if err := json.Unmarshal(b, &s); err != nil {
		return "", false
	}
	if s.Session.Key == "" {
		return "", false
	}
	return s.Session.Key, true
}
// resetAuth clears all stored last.fm credentials — the request token, its
// expiry timestamp, and the session key — and writes the config back to disk,
// forcing a fresh authentication flow on the next Auth call.
func resetAuth() {
	viper.Set("lastfm.auth.token", "")
	viper.Set("lastfm.auth.token_expired", 0)
	viper.Set("lastfm.auth.sk", "")
	// Best-effort persist; the WriteConfig error is ignored here.
	viper.WriteConfig()
}
|
func swapPairs(head *ListNode) *ListNode {
if head == nil || head.Next == nil {
return head
}
// h(1)
// l r n
// 1->2->3->4->
// l(1) r(2) h(3)
// l(1)->(3); r(2)->(1); p(1)
// l(3) r(4) h(5)
// l(3)->(5); r(4)->(3); p(1)->(4); p(3)
newHead := head.Next
var prev *ListNode
for {
if head == nil || head.Next == nil {
break
}
lH := head
rH := head.Next
rHn := rH.Next
head = rHn
lH.Next = rHn
rH.Next = lH
if prev != nil {
prev.Next = rH
}
prev = lH
}
return newHead
} |
package main
import (
"bufio"
"fmt"
"net"
"os"
"runtime"
"sync/atomic"
"time"
)
var connCount int64
// connect opens one TCP connection to the local server at 127.0.0.1:8080 and
// repeatedly sends "hello", reading one newline-terminated reply per send.
// It keeps the global connCount gauge up to date and returns when any write
// or read fails (e.g. the server closes the connection).
func connect() {
	defer func() {
		atomic.AddInt64(&connCount, -1)
	}()
	atomic.AddInt64(&connCount, 1)
	conn, err := net.Dial("tcp", "127.0.0.1:8080")
	if err != nil {
		fmt.Printf("err:%s\n", err)
		return
	}
	// The connection was previously never closed, leaking one fd per call.
	defer conn.Close()

	// Build the buffered reader once: constructing a new bufio.Reader every
	// iteration (as before) can silently discard bytes the old reader had
	// already buffered.
	reader := bufio.NewReader(conn)
	text := "hello"
	for {
		if _, err := conn.Write([]byte(text)); err != nil {
			return
		}
		if _, err := reader.ReadString('\n'); err != nil {
			return
		}
	}
}
// main drives a TCP connection stress test: every second it spawns a new
// connect() client, every three seconds it prints the goroutine and
// connection counts, and after three hours it stops the tickers and exits.
func main() {
	fmt.Println("process id", os.Getpid())
	// Total run time: 3 hours.
	timer := time.NewTimer(time.Second * 60 * 60 * 3)
	tick1 := time.NewTicker(time.Second * 1)
	tick2 := time.NewTicker(time.Second * 3)
loop:
	for {
		select {
		case <-tick1.C:
			// One more concurrent client per second.
			go connect()
		case <-tick2.C:
			fmt.Println("number of goroutines", runtime.NumGoroutine())
			fmt.Println("number of connect", atomic.LoadInt64(&connCount))
		case <-timer.C:
			tick1.Stop()
			tick2.Stop()
			timer.Stop()
			break loop
		}
	}
}
// runtime.GOMAXPROCS()
// runtime.NumGoroutine()
|
package router
import (
"belajar/reservation/pkg/v1/header"
"belajar/reservation/pkg/v1/utils/errors"
"context"
"fmt"
"github.com/kenshaw/envcfg"
"github.com/gorilla/handlers"
"github.com/sirupsen/logrus"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
"google.golang.org/grpc"
"log"
"net/http"
"net/http/httptest"
"net/http/httputil"
mnpb "belajar/reservation/proto/v1/menu"
)
// NewHTTPServer creates the http server serve mux: it dials the backend gRPC
// server, registers the grpc-gateway handlers on a mux (plus swagger docs in
// development), wraps everything in CORS middleware, and serves REST traffic.
// It blocks until the listener fails, returning the error to the caller.
//
// Fix: the original called log.Fatalf/log.Fatal before each `return err`,
// which terminated the whole process from library code and made every return
// unreachable; errors are now returned to the caller instead.
func NewHTTPServer(config *envcfg.Envcfg, logger *logrus.Logger) error {
	gwruntime.HTTPError = errors.CustomHTTPError

	ctx := context.Background()
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Connect to the GRPC server.
	addr := "0.0.0.0:" + config.GetKey("grpc.port")
	conn, err := grpc.Dial(addr, grpc.WithInsecure())
	if err != nil {
		return fmt.Errorf("fail to dial: %w", err)
	}
	defer conn.Close()

	// Create new grpc-gateway.
	rmux := gwruntime.NewServeMux(gwruntime.WithForwardResponseOption(header.HttpResponseModifier))

	// Register gateway endpoints.
	for _, f := range []func(ctx context.Context, mux *gwruntime.ServeMux, conn *grpc.ClientConn) error{
		// register grpc service handler
		//hlpb.RegisterHealthServiceHandler,
		// register grpc service handler
		mnpb.RegisterMenuHandler,
	} {
		if err = f(ctx, rmux, conn); err != nil {
			return fmt.Errorf("registering gateway handler: %w", err)
		}
	}

	// Create http server mux.
	mux := http.NewServeMux()
	mux.Handle("/", rmux)

	// Run swagger server in development only.
	if config.GetKey("runtime.environment") == "development" {
		CreateSwagger(mux)
	}

	// Where ORIGIN_ALLOWED is like `scheme://dns[:port]`, or `*` (insecure).
	headersOk := handlers.AllowedHeaders([]string{"X-Requested-With", "Content-Type", "X-CSRF-Token", "Authorization", "Timezone-Offset"})
	originsOk := handlers.AllowedOrigins([]string{"*"})
	methodsOk := handlers.AllowedMethods([]string{"GET", "HEAD", "POST", "PUT", "OPTIONS"})

	// Running rest http server; ListenAndServe only returns on failure.
	log.Println("[SERVER] REST HTTP server is ready")
	return http.ListenAndServe("0.0.0.0:"+config.PortString(), handlers.CORS(headersOk, originsOk, methodsOk)(mux))
}
// CreateSwagger creates the swagger server serve mux: it exposes the
// generated API description at /corp/rest/v1/banks/docs.json and serves the
// static swagger-ui assets under /corp/rest/v1/banks/docs/.
func CreateSwagger(gwmux *http.ServeMux) {
	// Generated API description.
	gwmux.HandleFunc("/corp/rest/v1/banks/docs.json", func(w http.ResponseWriter, r *http.Request) {
		http.ServeFile(w, r, "swagger/docs.json")
	})
	// Static swagger-ui files, with the docs prefix stripped before lookup.
	static := http.FileServer(http.Dir("swagger/swagger-ui"))
	gwmux.Handle("/corp/rest/v1/banks/docs/", http.StripPrefix("/corp/rest/v1/banks/docs", static))
}
// logHandler is log middleware: it dumps the full incoming request, invokes
// fn against a recorder, logs the recorded status/headers/body, and then
// replays the recorded response to the real ResponseWriter.
func logHandler(fn http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		x, err := httputil.DumpRequest(r, true)
		if err != nil {
			http.Error(w, fmt.Sprint(err), http.StatusInternalServerError)
			return
		}
		log.Printf("request:\n %q", x)

		rec := httptest.NewRecorder()
		fn(rec, r)

		log.Println("response: ")
		log.Printf("Response Code: %v", rec.Code)
		log.Printf("Headers: {%q}", rec.Header())
		log.Printf("Response: %q", rec.Body)

		// Copy the recorded response to the real response writer. Use
		// Header() rather than the deprecated HeaderMap field.
		for k, v := range rec.Header() {
			w.Header()[k] = v
		}
		w.WriteHeader(rec.Code)
		rec.Body.WriteTo(w)
	}
}
|
package main_test
import (
"testing"
"github.com/stretchr/testify/assert"
)
// Test_Something is a placeholder test: it only asserts that identical
// string literals compare equal.
func Test_Something(t *testing.T) {
	assert.Equal(t, "string", "string")
}
// Test_SomethingElse is a placeholder test: it only asserts that identical
// string literals compare equal.
func Test_SomethingElse(t *testing.T) {
	assert.Equal(t, "string", "string")
}
// Test_EvenMore is a placeholder test: it only asserts that identical string
// literals compare equal.
func Test_EvenMore(t *testing.T) {
	assert.Equal(t, "string", "string")
}
|
//line sql.y:6
package sqlparser
import __yyfmt__ "fmt"
//line sql.y:6
import "strings"
// setParseTree records the finished statement on the tokenizer so the caller
// of the generated parser can retrieve it after yyParse returns.
func setParseTree(yylex interface{}, stmt Statement) {
	tkn := yylex.(*Tokenizer)
	tkn.ParseTree = stmt
}
// setAllowComments toggles whether the tokenizer emits comment tokens.
func setAllowComments(yylex interface{}, allow bool) {
	tkn := yylex.(*Tokenizer)
	tkn.AllowComments = allow
}
// incNesting bumps the tokenizer's expression nesting depth and reports
// whether the limit of 200 has been reached.
func incNesting(yylex interface{}) bool {
	tkn := yylex.(*Tokenizer)
	tkn.nesting++
	return tkn.nesting == 200
}
// decNesting unwinds one level of expression nesting.
func decNesting(yylex interface{}) {
	tkn := yylex.(*Tokenizer)
	tkn.nesting--
}
// forceEOF makes the tokenizer report end-of-input on the next read, ending
// the parse early.
func forceEOF(yylex interface{}) {
	tkn := yylex.(*Tokenizer)
	tkn.ForceEOF = true
}
//line sql.y:36

// yySymType is the semantic-value union used by the goyacc-generated parser:
// one field per distinct value type produced by grammar actions. This type is
// generated from sql.y — do not hand-edit.
type yySymType struct {
	yys         int // parser-internal state, managed by goyacc
	empty       struct{}
	statement   Statement
	selStmt     SelectStatement
	byt         byte
	bytes       []byte
	bytes2      [][]byte
	str         string
	selectExprs SelectExprs
	selectExpr  SelectExpr
	columns     Columns
	colName     *ColName
	tableExprs  TableExprs
	tableExpr   TableExpr
	smTableExpr SimpleTableExpr
	tableName   *TableName
	indexHints  *IndexHints
	expr        Expr
	boolExpr    BoolExpr
	valExpr     ValExpr
	colTuple    ColTuple
	valExprs    ValExprs
	values      Values
	rowTuple    RowTuple
	subquery    *Subquery
	caseExpr    *CaseExpr
	whens       []*When
	when        *When
	orderBy     OrderBy
	order       *Order
	limit       *Limit
	insRows     InsertRows
	updateExprs UpdateExprs
	updateExpr  *UpdateExpr
	sqlID       SQLName
	sqlIDs      []SQLName
}
const LEX_ERROR = 57346
const UNION = 57347
const MINUS = 57348
const EXCEPT = 57349
const INTERSECT = 57350
const SELECT = 57351
const INSERT = 57352
const UPDATE = 57353
const DELETE = 57354
const FROM = 57355
const WHERE = 57356
const GROUP = 57357
const HAVING = 57358
const ORDER = 57359
const BY = 57360
const LIMIT = 57361
const FOR = 57362
const ALL = 57363
const DISTINCT = 57364
const AS = 57365
const EXISTS = 57366
const ASC = 57367
const DESC = 57368
const INTO = 57369
const DUPLICATE = 57370
const KEY = 57371
const DEFAULT = 57372
const SET = 57373
const LOCK = 57374
const KEYRANGE = 57375
const VALUES = 57376
const LAST_INSERT_ID = 57377
const NEXT = 57378
const VALUE = 57379
const JOIN = 57380
const STRAIGHT_JOIN = 57381
const LEFT = 57382
const RIGHT = 57383
const INNER = 57384
const OUTER = 57385
const CROSS = 57386
const NATURAL = 57387
const USE = 57388
const FORCE = 57389
const ON = 57390
const ID = 57391
const STRING = 57392
const NUMBER = 57393
const VALUE_ARG = 57394
const LIST_ARG = 57395
const COMMENT = 57396
const NULL = 57397
const TRUE = 57398
const FALSE = 57399
const OR = 57400
const AND = 57401
const NOT = 57402
const BETWEEN = 57403
const CASE = 57404
const WHEN = 57405
const THEN = 57406
const ELSE = 57407
const LE = 57408
const GE = 57409
const NE = 57410
const NULL_SAFE_EQUAL = 57411
const IS = 57412
const LIKE = 57413
const REGEXP = 57414
const IN = 57415
const SHIFT_LEFT = 57416
const SHIFT_RIGHT = 57417
const UNARY = 57418
const END = 57419
const CREATE = 57420
const ALTER = 57421
const DROP = 57422
const RENAME = 57423
const ANALYZE = 57424
const TABLE = 57425
const INDEX = 57426
const VIEW = 57427
const TO = 57428
const IGNORE = 57429
const IF = 57430
const UNIQUE = 57431
const USING = 57432
const SHOW = 57433
const DESCRIBE = 57434
const EXPLAIN = 57435
var yyToknames = [...]string{
"$end",
"error",
"$unk",
"LEX_ERROR",
"UNION",
"MINUS",
"EXCEPT",
"INTERSECT",
"SELECT",
"INSERT",
"UPDATE",
"DELETE",
"FROM",
"WHERE",
"GROUP",
"HAVING",
"ORDER",
"BY",
"LIMIT",
"FOR",
"ALL",
"DISTINCT",
"AS",
"EXISTS",
"ASC",
"DESC",
"INTO",
"DUPLICATE",
"KEY",
"DEFAULT",
"SET",
"LOCK",
"KEYRANGE",
"VALUES",
"LAST_INSERT_ID",
"NEXT",
"VALUE",
"JOIN",
"STRAIGHT_JOIN",
"LEFT",
"RIGHT",
"INNER",
"OUTER",
"CROSS",
"NATURAL",
"USE",
"FORCE",
"ON",
"'('",
"','",
"')'",
"ID",
"STRING",
"NUMBER",
"VALUE_ARG",
"LIST_ARG",
"COMMENT",
"NULL",
"TRUE",
"FALSE",
"OR",
"AND",
"NOT",
"BETWEEN",
"CASE",
"WHEN",
"THEN",
"ELSE",
"'='",
"'<'",
"'>'",
"LE",
"GE",
"NE",
"NULL_SAFE_EQUAL",
"IS",
"LIKE",
"REGEXP",
"IN",
"'|'",
"'&'",
"SHIFT_LEFT",
"SHIFT_RIGHT",
"'+'",
"'-'",
"'*'",
"'/'",
"'%'",
"'^'",
"'~'",
"UNARY",
"'.'",
"END",
"CREATE",
"ALTER",
"DROP",
"RENAME",
"ANALYZE",
"TABLE",
"INDEX",
"VIEW",
"TO",
"IGNORE",
"IF",
"UNIQUE",
"USING",
"SHOW",
"DESCRIBE",
"EXPLAIN",
}
var yyStatenames = [...]string{}
const yyEofCode = 1
const yyErrCode = 2
const yyInitialStackSize = 16
//line yacctab:1
var yyExca = [...]int{
-1, 1,
1, -1,
-2, 0,
-1, 69,
92, 222,
-2, 221,
}
const yyNprod = 226
const yyPrivate = 57344
var yyTokenNames []string
var yyStates []string
const yyLast = 747
var yyAct = [...]int{
99, 95, 168, 403, 94, 171, 356, 280, 64, 269,
93, 321, 366, 211, 209, 170, 3, 228, 210, 250,
262, 190, 213, 222, 83, 60, 65, 84, 38, 198,
40, 79, 14, 44, 41, 341, 343, 71, 67, 276,
50, 73, 66, 53, 76, 43, 378, 44, 46, 47,
48, 204, 129, 377, 376, 72, 75, 114, 89, 49,
45, 353, 295, 139, 202, 122, 51, 52, 155, 156,
157, 152, 106, 88, 152, 69, 107, 108, 109, 118,
417, 110, 133, 235, 142, 74, 205, 137, 113, 119,
140, 263, 342, 312, 124, 172, 233, 234, 232, 173,
175, 176, 263, 106, 142, 141, 140, 96, 97, 121,
231, 355, 174, 98, 126, 115, 183, 128, 67, 219,
142, 67, 66, 194, 193, 66, 188, 112, 74, 201,
203, 200, 153, 154, 155, 156, 157, 152, 89, 218,
194, 69, 187, 223, 225, 226, 227, 192, 224, 236,
237, 238, 251, 240, 241, 242, 243, 244, 245, 246,
247, 248, 249, 217, 195, 167, 169, 106, 92, 291,
62, 111, 138, 239, 208, 412, 251, 255, 141, 140,
89, 89, 158, 159, 153, 154, 155, 156, 157, 152,
252, 254, 260, 142, 296, 297, 298, 256, 62, 273,
62, 74, 257, 259, 92, 92, 125, 264, 80, 251,
268, 120, 177, 178, 220, 221, 253, 180, 181, 141,
140, 362, 251, 14, 15, 16, 17, 135, 251, 134,
294, 277, 255, 191, 142, 299, 301, 302, 303, 386,
274, 107, 108, 109, 191, 18, 110, 215, 92, 14,
300, 253, 251, 92, 92, 383, 305, 229, 278, 251,
357, 89, 120, 309, 106, 271, 135, 67, 67, 278,
266, 66, 319, 372, 357, 317, 179, 306, 320, 308,
120, 316, 311, 307, 28, 29, 30, 31, 106, 106,
92, 92, 62, 329, 272, 331, 339, 328, 132, 330,
267, 375, 78, 374, 92, 333, 214, 350, 19, 20,
22, 21, 23, 336, 346, 354, 230, 359, 337, 348,
352, 24, 25, 26, 360, 364, 367, 351, 215, 334,
251, 332, 363, 338, 335, 286, 287, 313, 361, 14,
282, 285, 286, 287, 283, 229, 284, 288, 57, 42,
373, 81, 379, 282, 285, 286, 287, 283, 381, 284,
288, 409, 56, 67, 315, 368, 117, 384, 398, 196,
382, 92, 380, 410, 255, 116, 92, 392, 68, 390,
131, 293, 54, 186, 59, 399, 322, 214, 400, 367,
185, 215, 215, 215, 215, 371, 404, 404, 404, 401,
405, 406, 402, 323, 230, 270, 370, 327, 67, 191,
63, 416, 66, 418, 61, 407, 415, 14, 419, 33,
420, 1, 292, 411, 77, 413, 414, 289, 82, 28,
29, 30, 31, 136, 87, 197, 39, 258, 275, 104,
199, 70, 61, 32, 391, 184, 393, 394, 105, 123,
214, 214, 214, 214, 127, 318, 265, 130, 408, 34,
35, 36, 37, 387, 106, 365, 251, 69, 107, 108,
109, 369, 326, 110, 102, 103, 388, 389, 91, 310,
113, 182, 261, 92, 101, 92, 92, 104, 100, 395,
396, 397, 358, 314, 143, 61, 105, 189, 90, 96,
97, 85, 340, 281, 279, 98, 212, 86, 206, 55,
27, 207, 106, 216, 87, 69, 107, 108, 109, 112,
58, 110, 102, 103, 13, 12, 91, 11, 113, 10,
14, 151, 150, 158, 159, 153, 154, 155, 156, 157,
152, 9, 8, 7, 6, 104, 5, 96, 97, 85,
4, 2, 0, 98, 105, 0, 87, 87, 0, 0,
0, 0, 0, 216, 0, 0, 0, 112, 104, 0,
106, 0, 0, 69, 107, 108, 109, 105, 0, 110,
102, 103, 0, 0, 91, 0, 113, 0, 0, 0,
0, 0, 290, 106, 216, 0, 69, 107, 108, 109,
0, 0, 110, 102, 103, 96, 97, 91, 385, 113,
0, 98, 0, 0, 106, 0, 0, 69, 107, 108,
109, 0, 0, 110, 0, 112, 0, 0, 96, 97,
113, 0, 0, 0, 98, 0, 0, 87, 151, 150,
158, 159, 153, 154, 155, 156, 157, 152, 112, 96,
97, 324, 0, 0, 325, 98, 0, 216, 216, 216,
216, 0, 0, 0, 0, 0, 0, 0, 0, 112,
344, 345, 145, 148, 347, 0, 0, 0, 160, 161,
162, 163, 164, 165, 166, 149, 146, 147, 144, 151,
150, 158, 159, 153, 154, 155, 156, 157, 152, 349,
150, 158, 159, 153, 154, 155, 156, 157, 152, 304,
0, 0, 0, 0, 0, 0, 0, 151, 150, 158,
159, 153, 154, 155, 156, 157, 152, 151, 150, 158,
159, 153, 154, 155, 156, 157, 152, 151, 150, 158,
159, 153, 154, 155, 156, 157, 152,
}
// yyPact is the machine-generated action base table, indexed by parser state:
// yyPact[state] + current token selects an entry in yyAct (see yynewstate in
// Parse). Entries <= yyFlag (-1000) mark "simple" states with no shift
// actions, which fall straight through to the default action. Do not edit.
var yyPact = [...]int{
	214, -1000, -1000, 424, -1000, -1000, -1000, -1000, -1000, -1000,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -71,
	-56, -39, -51, -40, -1000, -1000, -1000, 408, 361, -1000,
	-1000, -1000, 326, -1000, -70, 148, 397, 89, -67, -45,
	76, -1000, -43, 76, -1000, 148, -73, 156, -73, 148,
	-1000, -1000, -1000, -1000, -1000, 463, 76, -1000, 58, 348,
	335, -13, -1000, 148, 161, -1000, 40, -1000, -27, -1000,
	148, 31, 154, -1000, -1000, 148, -1000, -50, 148, 356,
	250, 76, -1000, 216, -1000, -1000, 149, -29, 117, 609,
	-1000, 544, 521, -1000, -1000, -1000, 565, 565, 565, 215,
	215, -1000, -1000, -1000, 215, 215, -1000, -1000, -1000, -1000,
	-1000, -1000, -1000, 565, 370, -1000, 148, 89, 148, 395,
	89, 565, 76, -1000, 345, -77, -1000, 34, -1000, 148,
	-1000, -1000, 148, -1000, 118, 463, -1000, -1000, 76, 33,
	544, 544, 85, 565, 54, 19, 565, 565, 565, 85,
	565, 565, 565, 565, 565, 565, 565, 565, 565, 565,
	-1000, -1000, -1000, -1000, -1000, -1000, -1000, 8, 609, 158,
	279, 201, 609, -1000, 23, -1000, -1000, 415, 463, -1000,
	408, 188, 36, 657, 148, -1000, -1000, 239, 230, -1000,
	388, 544, -1000, 657, -1000, -1000, -1000, 246, 76, -1000,
	-63, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 219,
	315, -1000, -1000, 146, 358, 240, -30, -1000, -1000, -1000,
	8, 28, -1000, -1000, 136, -1000, -1000, 657, -1000, 23,
	-1000, -1000, 54, 565, 565, 565, 657, 657, 647, -1000,
	100, 619, -1000, -18, -18, -15, -15, -15, 48, 48,
	-1000, -1000, -1000, 565, -1000, 657, -1000, 177, 463, 177,
	213, 25, -1000, 544, -1000, 330, 89, 89, 388, 367,
	385, 117, 148, -1000, -1000, 148, -1000, 392, 118, 118,
	118, 118, -1000, 293, 267, -1000, 291, 275, 295, -11,
	-1000, 148, 148, -1000, 208, 148, -1000, -1000, -1000, 201,
	-1000, 657, 657, 637, 565, 657, -1000, 177, -1000, 188,
	-32, -1000, 565, 44, 226, 215, 424, 212, 171, -1000,
	367, -1000, 565, 565, -1000, -1000, 390, 377, 315, 225,
	302, -1000, -1000, -1000, -1000, 265, -1000, 263, -1000, -1000,
	-1000, -46, -47, -54, -1000, -1000, -1000, -1000, -1000, 565,
	657, -1000, 101, -1000, 657, 565, -1000, 342, 205, -1000,
	-1000, -1000, 89, -1000, 558, 189, -1000, 451, -1000, 388,
	544, 565, 544, 544, -1000, -1000, 215, 215, 215, 657,
	-1000, 657, 339, 215, -1000, 565, 565, -1000, -1000, -1000,
	367, 117, 166, 117, 117, 76, 76, 76, 404, -1000,
	657, -1000, 341, 125, -1000, 125, 125, 89, -1000, 400,
	1, -1000, 76, -1000, -1000, 161, -1000, 76, -1000, 76,
	-1000,
}
// yyPgo is the machine-generated goto base table, indexed by nonterminal:
// after a reduction, yyPgo[yyR1[production]] plus the uncovered state selects
// the next state through yyAct (see the goto consultation in Parse).
// Do not edit.
var yyPgo = [...]int{
	0, 551, 15, 550, 546, 544, 543, 542, 541, 529,
	527, 525, 524, 443, 520, 510, 509, 24, 27, 507,
	14, 18, 13, 506, 504, 7, 503, 22, 25, 502,
	3, 21, 73, 498, 494, 493, 10, 2, 23, 17,
	5, 492, 1, 488, 171, 4, 484, 482, 20, 481,
	479, 472, 471, 9, 465, 12, 463, 11, 458, 456,
	455, 6, 8, 26, 445, 349, 302, 441, 440, 438,
	436, 435, 0, 433, 378, 427, 422, 40, 421, 419,
	112, 19,
}
// yyR1 maps each production number to the nonterminal on its left-hand side;
// Parse uses it after a reduction (yyn = yyR1[yyn]) to index the goto tables.
// Machine-generated; do not edit.
var yyR1 = [...]int{
	0, 78, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 2, 2, 2, 3, 3, 4, 5,
	6, 7, 7, 7, 8, 8, 8, 9, 10, 10,
	10, 11, 12, 12, 12, 79, 13, 14, 14, 15,
	15, 15, 15, 15, 16, 16, 17, 17, 18, 18,
	18, 19, 19, 73, 73, 73, 20, 20, 21, 21,
	22, 22, 22, 23, 23, 23, 23, 76, 76, 75,
	75, 75, 24, 24, 24, 24, 25, 25, 25, 25,
	26, 26, 27, 27, 28, 28, 29, 29, 29, 29,
	30, 30, 31, 31, 32, 32, 32, 32, 32, 32,
	33, 33, 33, 33, 33, 33, 33, 33, 33, 33,
	33, 33, 33, 33, 38, 38, 38, 38, 38, 38,
	34, 34, 34, 34, 34, 34, 34, 39, 39, 39,
	44, 40, 40, 37, 37, 37, 37, 37, 37, 37,
	37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
	37, 37, 37, 37, 43, 46, 49, 49, 47, 47,
	48, 50, 50, 45, 45, 36, 36, 36, 36, 51,
	51, 52, 52, 53, 53, 54, 54, 55, 56, 56,
	56, 57, 57, 57, 58, 58, 58, 59, 59, 60,
	60, 61, 61, 35, 35, 41, 41, 42, 42, 62,
	62, 63, 64, 64, 66, 66, 67, 67, 65, 65,
	68, 68, 68, 68, 68, 69, 69, 70, 70, 71,
	71, 72, 74, 80, 81, 77,
}
// yyR2 gives the number of right-hand-side symbols of each production;
// Parse pops the value stack by this amount on reduction (yyp -= yyR2[yyn]).
// Machine-generated; do not edit.
var yyR2 = [...]int{
	0, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 12, 6, 3, 8, 8, 8, 7,
	3, 5, 8, 4, 6, 7, 4, 5, 4, 5,
	5, 3, 2, 2, 2, 0, 2, 0, 2, 1,
	2, 1, 1, 1, 0, 1, 1, 3, 1, 2,
	3, 1, 1, 0, 1, 2, 1, 3, 1, 1,
	3, 3, 3, 3, 5, 5, 3, 0, 1, 0,
	1, 2, 1, 2, 2, 1, 2, 3, 2, 3,
	2, 2, 1, 3, 1, 3, 0, 5, 5, 5,
	1, 3, 0, 2, 1, 3, 3, 2, 3, 3,
	1, 1, 3, 3, 4, 3, 4, 3, 4, 5,
	6, 3, 2, 6, 1, 2, 1, 2, 1, 2,
	1, 1, 1, 1, 1, 1, 1, 3, 1, 1,
	3, 1, 3, 1, 1, 1, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 2, 2, 2, 3,
	4, 5, 4, 1, 1, 5, 0, 1, 1, 2,
	4, 0, 2, 1, 3, 1, 1, 1, 1, 0,
	3, 0, 2, 0, 3, 1, 3, 2, 0, 1,
	1, 0, 2, 4, 0, 2, 4, 0, 3, 1,
	3, 0, 5, 2, 1, 1, 3, 3, 1, 1,
	3, 3, 1, 1, 0, 2, 0, 3, 0, 1,
	1, 1, 1, 1, 1, 0, 1, 0, 1, 0,
	2, 1, 1, 1, 1, 0,
}
// yyChk validates yyAct entries: a shift into state s is legal only when
// yyChk[s] equals the current token, and negative entries are matched against
// -nonterminal during goto resolution (see yyChk[yystate] != -yyn in Parse).
// Machine-generated; do not edit.
var yyChk = [...]int{
	-1000, -78, -1, -2, -3, -4, -5, -6, -7, -8,
	-9, -10, -11, -12, 9, 10, 11, 12, 31, 94,
	95, 97, 96, 98, 107, 108, 109, -15, 5, 6,
	7, 8, -13, -79, -13, -13, -13, -13, 99, -70,
	101, 105, -65, 101, 103, 99, 99, 100, 101, 99,
	-77, -77, -77, -2, 21, -16, 36, 22, -14, -65,
	-28, -74, 52, 13, -62, -63, -45, -72, -74, 52,
	-67, 104, 100, -72, 52, 99, -72, -74, -66, 104,
	52, -66, -74, -17, -18, 86, -19, -74, -32, -37,
	-33, 63, -80, -36, -45, -42, 84, 85, 90, -72,
	-43, -46, 59, 60, 24, 33, 49, 53, 54, 55,
	58, -44, 104, 65, -72, 57, 27, 31, 92, -28,
	50, 69, 92, -74, 63, 52, -77, -74, -77, 102,
	-74, 24, 48, -72, 13, 50, -73, -72, 23, 92,
	62, 61, 76, -34, 79, 63, 77, 78, 64, 76,
	81, 80, 89, 84, 85, 86, 87, 88, 82, 83,
	69, 70, 71, 72, 73, 74, 75, -32, -37, -32,
	-2, -40, -37, -37, -80, -37, -37, -80, -80, -44,
	-80, -80, -49, -37, -64, 20, 13, -28, -62, -74,
	-31, 14, -63, -37, -72, -77, 24, -71, 106, -68,
	97, 95, 30, 96, 17, 52, -74, -74, -77, -20,
	-21, -22, -23, -27, -44, -80, -74, -18, -72, 86,
	-32, -32, -38, 58, 63, 59, 60, -37, -39, -80,
	-44, 56, 79, 77, 78, 64, -37, -37, -37, -38,
	-37, -37, -37, -37, -37, -37, -37, -37, -37, -37,
	-81, 51, -81, 50, -81, -37, -81, -17, 22, -17,
	-36, -47, -48, 66, -27, -59, 31, -80, -31, -53,
	17, -32, 48, -72, -77, -69, 102, -31, 50, -24,
	-25, -26, 38, 42, 44, 39, 40, 41, 45, -75,
	-74, 23, -76, 23, -20, 92, 58, 59, 60, -40,
	-39, -37, -37, -37, 62, -37, -81, -17, -81, 50,
	-50, -48, 68, -32, -35, 34, -2, -62, -60, -45,
	-53, -57, 19, 18, -74, -74, -51, 15, -21, -22,
	-21, -22, 38, 38, 38, 43, 38, 43, 38, -25,
	-29, 46, 103, 47, -74, -74, -81, -74, -81, 62,
	-37, -81, -36, 93, -37, 67, -61, 48, -41, -42,
	-61, -81, 50, -57, -37, -54, -55, -37, -77, -52,
	16, 18, 48, 48, 38, 38, 100, 100, 100, -37,
	-81, -37, 28, 50, -45, 50, 50, -56, 25, 26,
	-53, -32, -40, -32, -32, -80, -80, -80, 29, -42,
	-37, -55, -57, -30, -72, -30, -30, 11, -58, 20,
	32, -81, 50, -81, -81, -62, 11, 79, -72, -72,
	-72,
}
// yyDef gives the default reduction for each state (0 = error, i.e. no
// default). The sentinel -2 means the action depends on the lookahead and
// must be resolved through the yyExca exception table (see yydefault in
// Parse). Machine-generated; do not edit.
var yyDef = [...]int{
	0, -2, 1, 2, 3, 4, 5, 6, 7, 8,
	9, 10, 11, 12, 35, 35, 35, 35, 35, 217,
	208, 0, 0, 0, 225, 225, 225, 0, 39, 41,
	42, 43, 44, 37, 208, 0, 0, 0, 206, 0,
	0, 218, 0, 0, 209, 0, 204, 0, 204, 0,
	32, 33, 34, 15, 40, 0, 0, 45, 36, 0,
	0, 84, 222, 0, 20, 199, 0, 163, 0, -2,
	0, 0, 0, 225, 221, 0, 225, 0, 0, 0,
	0, 0, 31, 0, 46, 48, 53, 0, 51, 52,
	94, 0, 0, 133, 134, 135, 0, 0, 0, 163,
	0, 153, 100, 101, 0, 0, 223, 165, 166, 167,
	168, 198, 154, 156, 0, 38, 0, 0, 0, 92,
	0, 0, 0, 225, 0, 219, 23, 0, 26, 0,
	28, 205, 0, 225, 0, 0, 49, 54, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	120, 121, 122, 123, 124, 125, 126, 97, 0, 0,
	0, 0, 131, 146, 0, 147, 148, 0, 0, 112,
	0, 0, 0, 157, 0, 202, 203, 187, 92, 85,
	173, 0, 200, 201, 164, 21, 207, 0, 0, 225,
	215, 210, 211, 212, 213, 214, 27, 29, 30, 92,
	56, 58, 59, 69, 67, 0, 82, 47, 55, 50,
	95, 96, 99, 114, 0, 116, 118, 102, 103, 0,
	128, 129, 0, 0, 0, 0, 105, 107, 0, 111,
	136, 137, 138, 139, 140, 141, 142, 143, 144, 145,
	98, 224, 130, 0, 197, 131, 149, 0, 0, 0,
	0, 161, 158, 0, 14, 0, 0, 0, 173, 181,
	0, 93, 0, 220, 24, 0, 216, 169, 0, 0,
	0, 0, 72, 0, 0, 75, 0, 0, 0, 86,
	70, 0, 0, 68, 0, 0, 115, 117, 119, 0,
	104, 106, 108, 0, 0, 132, 150, 0, 152, 0,
	0, 159, 0, 0, 191, 0, 194, 191, 0, 189,
	181, 19, 0, 0, 225, 25, 171, 0, 57, 63,
	0, 66, 73, 74, 76, 0, 78, 0, 80, 81,
	60, 0, 0, 0, 71, 61, 62, 83, 127, 0,
	109, 151, 0, 155, 162, 0, 16, 0, 193, 195,
	17, 188, 0, 18, 182, 174, 175, 178, 22, 173,
	0, 0, 0, 0, 77, 79, 0, 0, 0, 110,
	113, 160, 0, 0, 190, 0, 0, 177, 179, 180,
	181, 172, 170, 64, 65, 0, 0, 0, 0, 196,
	183, 176, 184, 0, 90, 0, 0, 0, 13, 0,
	0, 87, 0, 88, 89, 192, 185, 0, 91, 0,
	186,
}
// yyTok1 maps single-byte input characters (index = char code) to internal
// token numbers; index 0 handles non-positive chars from the lexer (see
// yylex1). Machine-generated; do not edit.
var yyTok1 = [...]int{
	1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 88, 81, 3,
	49, 51, 86, 84, 50, 85, 92, 87, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	70, 69, 71, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 89, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 80, 3, 90,
}
// yyTok2 maps lexer character codes in the private range (offset yyPrivate,
// see yylex1) to internal token numbers; yyTok2[1] is the "unknown char"
// fallback token. Machine-generated; do not edit.
var yyTok2 = [...]int{
	2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
	12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
	22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
	32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
	42, 43, 44, 45, 46, 47, 48, 52, 53, 54,
	55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
	65, 66, 67, 68, 72, 73, 74, 75, 76, 77,
	78, 79, 82, 83, 91, 93, 94, 95, 96, 97,
	98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
	108, 109,
}
// yyTok3 lists (char, token) pairs scanned linearly by yylex1 for characters
// outside the yyTok1/yyTok2 ranges; the trailing 0 terminates the list.
// Empty for this grammar. Machine-generated; do not edit.
var yyTok3 = [...]int{
	0,
}
// yyErrorMessages optionally maps specific (state, token) pairs to custom
// error text; yyErrorMessage consults it before synthesizing a generic
// message. Empty here, so the generated text is always used.
var yyErrorMessages = [...]struct {
	state int
	token int
	msg   string
}{}
//line yaccpar:1
/* parser for yacc output */
// yyDebug controls parser trace verbosity: 0 silent, >=1 error states,
// >=2 shift/reduce and error-recovery traces, >=3 lexed tokens, >=4 stack
// activity. yyErrorVerbose enables detailed "expecting ..." error messages
// in yyErrorMessage.
var (
	yyDebug        = 0
	yyErrorVerbose = false
)
// yyLexer is the interface the generated parser requires of the caller's
// lexer: Lex returns the next token code, writing its semantic value into
// lval, and Error receives syntax-error messages.
type yyLexer interface {
	Lex(lval *yySymType) int
	Error(s string)
}
// yyParser is the generated parser's public interface. Parse consumes the
// lexer's token stream and returns 0 on success, 1 on unrecoverable error;
// Lookahead reports the buffered lookahead character (-1 when none is held).
type yyParser interface {
	Parse(yyLexer) int
	Lookahead() int
}
// yyParserImpl is the concrete parser state: the most recently lexed
// semantic value (lval), the initial value/state stack (grown on demand in
// Parse), and the current lookahead character (char; -1 when empty).
type yyParserImpl struct {
	lval  yySymType
	stack [yyInitialStackSize]yySymType
	char  int
}
// Lookahead reports the parser's current lookahead character; it is -1
// whenever no lookahead token is buffered.
func (yyrcvr *yyParserImpl) Lookahead() int {
	return yyrcvr.char
}
// yyNewParser allocates a fresh, zero-valued parser instance.
func yyNewParser() yyParser {
	parser := new(yyParserImpl)
	return parser
}
// yyFlag marks "simple" states in yyPact: entries <= yyFlag have no shift
// actions and go straight to the default action (see yynewstate in Parse).
const yyFlag = -1000
// yyTokname returns the symbolic name recorded for token c in yyToknames,
// or a synthesized "tok-N" string when no name is available.
func yyTokname(c int) string {
	if c >= 1 && c-1 < len(yyToknames) && yyToknames[c-1] != "" {
		return yyToknames[c-1]
	}
	return __yyfmt__.Sprintf("tok-%v", c)
}
// yyStatname returns the symbolic name recorded for parser state s in
// yyStatenames, or a synthesized "state-N" string when no name is available.
func yyStatname(s int) string {
	if s >= 0 && s < len(yyStatenames) && yyStatenames[s] != "" {
		return yyStatenames[s]
	}
	return __yyfmt__.Sprintf("state-%v", s)
}
// yyErrorMessage builds the syntax-error string reported via yyLexer.Error.
// With yyErrorVerbose disabled it returns a bare "syntax error". Otherwise
// it first consults yyErrorMessages for a grammar-supplied override for this
// (state, lookAhead) pair, then synthesizes "unexpected X, expecting A or B"
// by probing the parse tables for tokens that would have been legal here.
func yyErrorMessage(state, lookAhead int) string {
	const TOKSTART = 4
	if !yyErrorVerbose {
		return "syntax error"
	}
	// Grammar-supplied message for this exact (state, token) pair, if any.
	for _, e := range yyErrorMessages {
		if e.state == state && e.token == lookAhead {
			return "syntax error: " + e.msg
		}
	}
	res := "syntax error: unexpected " + yyTokname(lookAhead)
	// To match Bison, suggest at most four expected tokens.
	expected := make([]int, 0, 4)
	// Look for shiftable tokens.
	base := yyPact[state]
	for tok := TOKSTART; tok-1 < len(yyToknames); tok++ {
		if n := base + tok; n >= 0 && n < yyLast && yyChk[yyAct[n]] == tok {
			// More than four candidates: fall back to the short message.
			if len(expected) == cap(expected) {
				return res
			}
			expected = append(expected, tok)
		}
	}
	// -2 means this state's action depends on the lookahead: scan yyExca.
	if yyDef[state] == -2 {
		i := 0
		for yyExca[i] != -1 || yyExca[i+1] != state {
			i += 2
		}
		// Look for tokens that we accept or reduce.
		for i += 2; yyExca[i] >= 0; i += 2 {
			tok := yyExca[i]
			if tok < TOKSTART || yyExca[i+1] == 0 {
				continue
			}
			if len(expected) == cap(expected) {
				return res
			}
			expected = append(expected, tok)
		}
		// If the default action is to accept or reduce, give up.
		if yyExca[i+1] != 0 {
			return res
		}
	}
	// Join the collected candidates: ", expecting A or B or C".
	for i, tok := range expected {
		if i == 0 {
			res += ", expecting "
		} else {
			res += " or "
		}
		res += yyTokname(tok)
	}
	return res
}
// yylex1 fetches the next token from the lexer and translates the lexer's
// character code into the parser's internal token numbering via the yyTok1
// (byte-range chars), yyTok2 (private range starting at yyPrivate) and
// yyTok3 (explicit char/token pairs) tables. It returns both the raw char
// and the translated token.
func yylex1(lex yyLexer, lval *yySymType) (char, token int) {
	token = 0
	char = lex.Lex(lval)
	if char <= 0 {
		// Non-positive char: lexer has no more input; use the table's
		// entry for end of input.
		token = yyTok1[0]
		goto out
	}
	if char < len(yyTok1) {
		token = yyTok1[char]
		goto out
	}
	if char >= yyPrivate {
		if char < yyPrivate+len(yyTok2) {
			token = yyTok2[char-yyPrivate]
			goto out
		}
	}
	// Fallback: linear scan of the (char, token) pair list.
	for i := 0; i < len(yyTok3); i += 2 {
		token = yyTok3[i+0]
		if token == char {
			token = yyTok3[i+1]
			goto out
		}
	}
out:
	if token == 0 {
		token = yyTok2[1] /* unknown char */
	}
	if yyDebug >= 3 {
		__yyfmt__.Printf("lex %s(%d)\n", yyTokname(token), uint(char))
	}
	return char, token
}
// yyParse runs a throwaway parser instance over the lexer's token stream,
// returning 0 on success and 1 on unrecoverable error.
func yyParse(yylex yyLexer) int {
	parser := yyNewParser()
	return parser.Parse(yylex)
}
func (yyrcvr *yyParserImpl) Parse(yylex yyLexer) int {
var yyn int
var yyVAL yySymType
var yyDollar []yySymType
_ = yyDollar // silence set and not used
yyS := yyrcvr.stack[:]
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
yystate := 0
yyrcvr.char = -1
yytoken := -1 // yyrcvr.char translated into internal numbering
defer func() {
// Make sure we report no lookahead when not parsing.
yystate = -1
yyrcvr.char = -1
yytoken = -1
}()
yyp := -1
goto yystack
ret0:
return 0
ret1:
return 1
yystack:
/* put a state and value onto the stack */
if yyDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", yyTokname(yytoken), yyStatname(yystate))
}
yyp++
if yyp >= len(yyS) {
nyys := make([]yySymType, len(yyS)*2)
copy(nyys, yyS)
yyS = nyys
}
yyS[yyp] = yyVAL
yyS[yyp].yys = yystate
yynewstate:
yyn = yyPact[yystate]
if yyn <= yyFlag {
goto yydefault /* simple state */
}
if yyrcvr.char < 0 {
yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval)
}
yyn += yytoken
if yyn < 0 || yyn >= yyLast {
goto yydefault
}
yyn = yyAct[yyn]
if yyChk[yyn] == yytoken { /* valid shift */
yyrcvr.char = -1
yytoken = -1
yyVAL = yyrcvr.lval
yystate = yyn
if Errflag > 0 {
Errflag--
}
goto yystack
}
yydefault:
/* default state action */
yyn = yyDef[yystate]
if yyn == -2 {
if yyrcvr.char < 0 {
yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval)
}
/* look through exception table */
xi := 0
for {
if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
break
}
xi += 2
}
for xi += 2; ; xi += 2 {
yyn = yyExca[xi+0]
if yyn < 0 || yyn == yytoken {
break
}
}
yyn = yyExca[xi+1]
if yyn < 0 {
goto ret0
}
}
if yyn == 0 {
/* error ... attempt to resume parsing */
switch Errflag {
case 0: /* brand new error */
yylex.Error(yyErrorMessage(yystate, yytoken))
Nerrs++
if yyDebug >= 1 {
__yyfmt__.Printf("%s", yyStatname(yystate))
__yyfmt__.Printf(" saw %s\n", yyTokname(yytoken))
}
fallthrough
case 1, 2: /* incompletely recovered error ... try again */
Errflag = 3
/* find a state where "error" is a legal shift action */
for yyp >= 0 {
yyn = yyPact[yyS[yyp].yys] + yyErrCode
if yyn >= 0 && yyn < yyLast {
yystate = yyAct[yyn] /* simulate a shift of "error" */
if yyChk[yystate] == yyErrCode {
goto yystack
}
}
/* the current p has no shift on "error", pop stack */
if yyDebug >= 2 {
__yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
}
yyp--
}
/* there is no state on the stack with an error shift ... abort */
goto ret1
case 3: /* no shift yet; clobber input char */
if yyDebug >= 2 {
__yyfmt__.Printf("error recovery discards %s\n", yyTokname(yytoken))
}
if yytoken == yyEofCode {
goto ret1
}
yyrcvr.char = -1
yytoken = -1
goto yynewstate /* try again in the same state */
}
}
/* reduction by production yyn */
if yyDebug >= 2 {
__yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate))
}
yynt := yyn
yypt := yyp
_ = yypt // guard against "declared and not used"
yyp -= yyR2[yyn]
// yyp is now the index of $0. Perform the default action. Iff the
// reduced production is ε, $1 is possibly out of range.
if yyp+1 >= len(yyS) {
nyys := make([]yySymType, len(yyS)*2)
copy(nyys, yyS)
yyS = nyys
}
yyVAL = yyS[yyp+1]
/* consult goto table to find next state */
yyn = yyR1[yyn]
yyg := yyPgo[yyn]
yyj := yyg + yyS[yyp].yys + 1
if yyj >= yyLast {
yystate = yyAct[yyg]
} else {
yystate = yyAct[yyj]
if yyChk[yystate] != -yyn {
yystate = yyAct[yyg]
}
}
// dummy call; replaced with literal code
switch yynt {
case 1:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:170
{
setParseTree(yylex, yyDollar[1].statement)
}
case 2:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:176
{
yyVAL.statement = yyDollar[1].selStmt
}
case 13:
yyDollar = yyS[yypt-12 : yypt+1]
//line sql.y:192
{
yyVAL.selStmt = &Select{Comments: Comments(yyDollar[2].bytes2), Distinct: yyDollar[3].str, SelectExprs: yyDollar[4].selectExprs, From: yyDollar[6].tableExprs, Where: NewWhere(WhereStr, yyDollar[7].boolExpr), GroupBy: GroupBy(yyDollar[8].valExprs), Having: NewWhere(HavingStr, yyDollar[9].boolExpr), OrderBy: yyDollar[10].orderBy, Limit: yyDollar[11].limit, Lock: yyDollar[12].str}
}
case 14:
yyDollar = yyS[yypt-6 : yypt+1]
//line sql.y:196
{
if yyDollar[4].sqlID != "value" {
yylex.Error("expecting value after next")
return 1
}
yyVAL.selStmt = &Select{Comments: Comments(yyDollar[2].bytes2), SelectExprs: SelectExprs{Nextval{}}, From: TableExprs{&AliasedTableExpr{Expr: yyDollar[6].smTableExpr}}}
}
case 15:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:204
{
yyVAL.selStmt = &Union{Type: yyDollar[2].str, Left: yyDollar[1].selStmt, Right: yyDollar[3].selStmt}
}
case 16:
yyDollar = yyS[yypt-8 : yypt+1]
//line sql.y:210
{
yyVAL.statement = &Insert{Comments: Comments(yyDollar[2].bytes2), Ignore: yyDollar[3].str, Table: yyDollar[5].tableName, Columns: yyDollar[6].columns, Rows: yyDollar[7].insRows, OnDup: OnDup(yyDollar[8].updateExprs)}
}
case 17:
yyDollar = yyS[yypt-8 : yypt+1]
//line sql.y:214
{
cols := make(Columns, 0, len(yyDollar[7].updateExprs))
vals := make(ValTuple, 0, len(yyDollar[7].updateExprs))
for _, col := range yyDollar[7].updateExprs {
cols = append(cols, &NonStarExpr{Expr: col.Name})
vals = append(vals, col.Expr)
}
yyVAL.statement = &Insert{Comments: Comments(yyDollar[2].bytes2), Ignore: yyDollar[3].str, Table: yyDollar[5].tableName, Columns: cols, Rows: Values{vals}, OnDup: OnDup(yyDollar[8].updateExprs)}
}
case 18:
yyDollar = yyS[yypt-8 : yypt+1]
//line sql.y:226
{
yyVAL.statement = &Update{Comments: Comments(yyDollar[2].bytes2), Table: yyDollar[3].tableName, Exprs: yyDollar[5].updateExprs, Where: NewWhere(WhereStr, yyDollar[6].boolExpr), OrderBy: yyDollar[7].orderBy, Limit: yyDollar[8].limit}
}
case 19:
yyDollar = yyS[yypt-7 : yypt+1]
//line sql.y:232
{
yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), Table: yyDollar[4].tableName, Where: NewWhere(WhereStr, yyDollar[5].boolExpr), OrderBy: yyDollar[6].orderBy, Limit: yyDollar[7].limit}
}
case 20:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:238
{
yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Exprs: yyDollar[3].updateExprs}
}
case 21:
yyDollar = yyS[yypt-5 : yypt+1]
//line sql.y:244
{
yyVAL.statement = &DDL{Action: CreateStr, NewName: yyDollar[4].sqlID}
}
case 22:
yyDollar = yyS[yypt-8 : yypt+1]
//line sql.y:248
{
// Change this to an alter statement
yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[7].sqlID, NewName: yyDollar[7].sqlID}
}
case 23:
yyDollar = yyS[yypt-4 : yypt+1]
//line sql.y:253
{
yyVAL.statement = &DDL{Action: CreateStr, NewName: SQLName(yyDollar[3].sqlID)}
}
case 24:
yyDollar = yyS[yypt-6 : yypt+1]
//line sql.y:259
{
yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].sqlID, NewName: yyDollar[4].sqlID}
}
case 25:
yyDollar = yyS[yypt-7 : yypt+1]
//line sql.y:263
{
// Change this to a rename statement
yyVAL.statement = &DDL{Action: RenameStr, Table: yyDollar[4].sqlID, NewName: yyDollar[7].sqlID}
}
case 26:
yyDollar = yyS[yypt-4 : yypt+1]
//line sql.y:268
{
yyVAL.statement = &DDL{Action: AlterStr, Table: SQLName(yyDollar[3].sqlID), NewName: SQLName(yyDollar[3].sqlID)}
}
case 27:
yyDollar = yyS[yypt-5 : yypt+1]
//line sql.y:274
{
yyVAL.statement = &DDL{Action: RenameStr, Table: yyDollar[3].sqlID, NewName: yyDollar[5].sqlID}
}
case 28:
yyDollar = yyS[yypt-4 : yypt+1]
//line sql.y:280
{
yyVAL.statement = &DDL{Action: DropStr, Table: yyDollar[4].sqlID}
}
case 29:
yyDollar = yyS[yypt-5 : yypt+1]
//line sql.y:284
{
// Change this to an alter statement
yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[5].sqlID, NewName: yyDollar[5].sqlID}
}
case 30:
yyDollar = yyS[yypt-5 : yypt+1]
//line sql.y:289
{
yyVAL.statement = &DDL{Action: DropStr, Table: SQLName(yyDollar[4].sqlID)}
}
case 31:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:295
{
yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[3].sqlID, NewName: yyDollar[3].sqlID}
}
case 32:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:301
{
yyVAL.statement = &Other{}
}
case 33:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:305
{
yyVAL.statement = &Other{}
}
case 34:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:309
{
yyVAL.statement = &Other{}
}
case 35:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:314
{
setAllowComments(yylex, true)
}
case 36:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:318
{
yyVAL.bytes2 = yyDollar[2].bytes2
setAllowComments(yylex, false)
}
case 37:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:324
{
yyVAL.bytes2 = nil
}
case 38:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:328
{
yyVAL.bytes2 = append(yyDollar[1].bytes2, yyDollar[2].bytes)
}
case 39:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:334
{
yyVAL.str = UnionStr
}
case 40:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:338
{
yyVAL.str = UnionAllStr
}
case 41:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:342
{
yyVAL.str = SetMinusStr
}
case 42:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:346
{
yyVAL.str = ExceptStr
}
case 43:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:350
{
yyVAL.str = IntersectStr
}
case 44:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:355
{
yyVAL.str = ""
}
case 45:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:359
{
yyVAL.str = DistinctStr
}
case 46:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:365
{
yyVAL.selectExprs = SelectExprs{yyDollar[1].selectExpr}
}
case 47:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:369
{
yyVAL.selectExprs = append(yyVAL.selectExprs, yyDollar[3].selectExpr)
}
case 48:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:375
{
yyVAL.selectExpr = &StarExpr{}
}
case 49:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:379
{
yyVAL.selectExpr = &NonStarExpr{Expr: yyDollar[1].expr, As: yyDollar[2].sqlID}
}
case 50:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:383
{
yyVAL.selectExpr = &StarExpr{TableName: yyDollar[1].sqlID}
}
case 51:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:389
{
yyVAL.expr = yyDollar[1].boolExpr
}
case 52:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:393
{
yyVAL.expr = yyDollar[1].valExpr
}
case 53:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:398
{
yyVAL.sqlID = ""
}
case 54:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:402
{
yyVAL.sqlID = yyDollar[1].sqlID
}
case 55:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:406
{
yyVAL.sqlID = yyDollar[2].sqlID
}
case 56:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:412
{
yyVAL.tableExprs = TableExprs{yyDollar[1].tableExpr}
}
case 57:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:416
{
yyVAL.tableExprs = append(yyVAL.tableExprs, yyDollar[3].tableExpr)
}
case 60:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:426
{
yyVAL.tableExpr = &AliasedTableExpr{Expr: yyDollar[1].smTableExpr, As: yyDollar[2].sqlID, Hints: yyDollar[3].indexHints}
}
case 61:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:430
{
yyVAL.tableExpr = &AliasedTableExpr{Expr: yyDollar[1].subquery, As: yyDollar[3].sqlID}
}
case 62:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:434
{
yyVAL.tableExpr = &ParenTableExpr{Exprs: yyDollar[2].tableExprs}
}
case 63:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:447
{
yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr}
}
case 64:
yyDollar = yyS[yypt-5 : yypt+1]
//line sql.y:451
{
yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, On: yyDollar[5].boolExpr}
}
case 65:
yyDollar = yyS[yypt-5 : yypt+1]
//line sql.y:455
{
yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, On: yyDollar[5].boolExpr}
}
case 66:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:459
{
yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr}
}
case 67:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:464
{
yyVAL.empty = struct{}{}
}
case 68:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:466
{
yyVAL.empty = struct{}{}
}
case 69:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:469
{
yyVAL.sqlID = ""
}
case 70:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:473
{
yyVAL.sqlID = yyDollar[1].sqlID
}
case 71:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:477
{
yyVAL.sqlID = yyDollar[2].sqlID
}
case 72:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:483
{
yyVAL.str = JoinStr
}
case 73:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:487
{
yyVAL.str = JoinStr
}
case 74:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:491
{
yyVAL.str = JoinStr
}
case 75:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:495
{
yyVAL.str = StraightJoinStr
}
case 76:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:501
{
yyVAL.str = LeftJoinStr
}
case 77:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:505
{
yyVAL.str = LeftJoinStr
}
case 78:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:509
{
yyVAL.str = RightJoinStr
}
case 79:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:513
{
yyVAL.str = RightJoinStr
}
case 80:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:519
{
yyVAL.str = NaturalJoinStr
}
case 81:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:523
{
if yyDollar[2].str == LeftJoinStr {
yyVAL.str = NaturalLeftJoinStr
} else {
yyVAL.str = NaturalRightJoinStr
}
}
case 82:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:533
{
yyVAL.smTableExpr = &TableName{Name: yyDollar[1].sqlID}
}
case 83:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:537
{
yyVAL.smTableExpr = &TableName{Qualifier: yyDollar[1].sqlID, Name: yyDollar[3].sqlID}
}
case 84:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:543
{
yyVAL.tableName = &TableName{Name: yyDollar[1].sqlID}
}
case 85:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:547
{
yyVAL.tableName = &TableName{Qualifier: yyDollar[1].sqlID, Name: yyDollar[3].sqlID}
}
case 86:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:552
{
yyVAL.indexHints = nil
}
case 87:
yyDollar = yyS[yypt-5 : yypt+1]
//line sql.y:556
{
yyVAL.indexHints = &IndexHints{Type: UseStr, Indexes: yyDollar[4].sqlIDs}
}
case 88:
yyDollar = yyS[yypt-5 : yypt+1]
//line sql.y:560
{
yyVAL.indexHints = &IndexHints{Type: IgnoreStr, Indexes: yyDollar[4].sqlIDs}
}
case 89:
yyDollar = yyS[yypt-5 : yypt+1]
//line sql.y:564
{
yyVAL.indexHints = &IndexHints{Type: ForceStr, Indexes: yyDollar[4].sqlIDs}
}
case 90:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:570
{
yyVAL.sqlIDs = []SQLName{yyDollar[1].sqlID}
}
case 91:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:574
{
yyVAL.sqlIDs = append(yyDollar[1].sqlIDs, yyDollar[3].sqlID)
}
case 92:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:579
{
yyVAL.boolExpr = nil
}
case 93:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:583
{
yyVAL.boolExpr = yyDollar[2].boolExpr
}
case 95:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:590
{
yyVAL.boolExpr = &AndExpr{Left: yyDollar[1].boolExpr, Right: yyDollar[3].boolExpr}
}
case 96:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:594
{
yyVAL.boolExpr = &OrExpr{Left: yyDollar[1].boolExpr, Right: yyDollar[3].boolExpr}
}
case 97:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:598
{
yyVAL.boolExpr = &NotExpr{Expr: yyDollar[2].boolExpr}
}
case 98:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:602
{
yyVAL.boolExpr = &ParenBoolExpr{Expr: yyDollar[2].boolExpr}
}
case 99:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:606
{
yyVAL.boolExpr = &IsExpr{Operator: yyDollar[3].str, Expr: yyDollar[1].boolExpr}
}
case 100:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:612
{
yyVAL.boolExpr = BoolVal(true)
}
case 101:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:616
{
yyVAL.boolExpr = BoolVal(false)
}
case 102:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:620
{
yyVAL.boolExpr = &ComparisonExpr{Left: yyDollar[1].valExpr, Operator: yyDollar[2].str, Right: yyDollar[3].valExpr}
}
case 103:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:624
{
yyVAL.boolExpr = &ComparisonExpr{Left: yyDollar[1].valExpr, Operator: InStr, Right: yyDollar[3].colTuple}
}
case 104:
yyDollar = yyS[yypt-4 : yypt+1]
//line sql.y:628
{
yyVAL.boolExpr = &ComparisonExpr{Left: yyDollar[1].valExpr, Operator: NotInStr, Right: yyDollar[4].colTuple}
}
case 105:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:632
{
yyVAL.boolExpr = &ComparisonExpr{Left: yyDollar[1].valExpr, Operator: LikeStr, Right: yyDollar[3].valExpr}
}
case 106:
yyDollar = yyS[yypt-4 : yypt+1]
//line sql.y:636
{
yyVAL.boolExpr = &ComparisonExpr{Left: yyDollar[1].valExpr, Operator: NotLikeStr, Right: yyDollar[4].valExpr}
}
case 107:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:640
{
yyVAL.boolExpr = &ComparisonExpr{Left: yyDollar[1].valExpr, Operator: RegexpStr, Right: yyDollar[3].valExpr}
}
case 108:
yyDollar = yyS[yypt-4 : yypt+1]
//line sql.y:644
{
yyVAL.boolExpr = &ComparisonExpr{Left: yyDollar[1].valExpr, Operator: NotRegexpStr, Right: yyDollar[4].valExpr}
}
case 109:
yyDollar = yyS[yypt-5 : yypt+1]
//line sql.y:648
{
yyVAL.boolExpr = &RangeCond{Left: yyDollar[1].valExpr, Operator: BetweenStr, From: yyDollar[3].valExpr, To: yyDollar[5].valExpr}
}
case 110:
yyDollar = yyS[yypt-6 : yypt+1]
//line sql.y:652
{
yyVAL.boolExpr = &RangeCond{Left: yyDollar[1].valExpr, Operator: NotBetweenStr, From: yyDollar[4].valExpr, To: yyDollar[6].valExpr}
}
case 111:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:656
{
yyVAL.boolExpr = &IsExpr{Operator: yyDollar[3].str, Expr: yyDollar[1].valExpr}
}
case 112:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:660
{
yyVAL.boolExpr = &ExistsExpr{Subquery: yyDollar[2].subquery}
}
case 113:
yyDollar = yyS[yypt-6 : yypt+1]
//line sql.y:664
{
yyVAL.boolExpr = &KeyrangeExpr{Start: yyDollar[3].valExpr, End: yyDollar[5].valExpr}
}
case 114:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:670
{
yyVAL.str = IsNullStr
}
case 115:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:674
{
yyVAL.str = IsNotNullStr
}
case 116:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:678
{
yyVAL.str = IsTrueStr
}
case 117:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:682
{
yyVAL.str = IsNotTrueStr
}
case 118:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:686
{
yyVAL.str = IsFalseStr
}
case 119:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:690
{
yyVAL.str = IsNotFalseStr
}
case 120:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:696
{
yyVAL.str = EqualStr
}
case 121:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:700
{
yyVAL.str = LessThanStr
}
case 122:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:704
{
yyVAL.str = GreaterThanStr
}
case 123:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:708
{
yyVAL.str = LessEqualStr
}
case 124:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:712
{
yyVAL.str = GreaterEqualStr
}
case 125:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:716
{
yyVAL.str = NotEqualStr
}
case 126:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:720
{
yyVAL.str = NullSafeEqualStr
}
case 127:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:726
{
yyVAL.colTuple = ValTuple(yyDollar[2].valExprs)
}
case 128:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:730
{
yyVAL.colTuple = yyDollar[1].subquery
}
case 129:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:734
{
yyVAL.colTuple = ListArg(yyDollar[1].bytes)
}
case 130:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:740
{
yyVAL.subquery = &Subquery{yyDollar[2].selStmt}
}
case 131:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:746
{
yyVAL.valExprs = ValExprs{yyDollar[1].valExpr}
}
case 132:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:750
{
yyVAL.valExprs = append(yyDollar[1].valExprs, yyDollar[3].valExpr)
}
case 133:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:756
{
yyVAL.valExpr = yyDollar[1].valExpr
}
case 134:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:760
{
yyVAL.valExpr = yyDollar[1].colName
}
case 135:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:764
{
yyVAL.valExpr = yyDollar[1].rowTuple
}
case 136:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:768
{
yyVAL.valExpr = &BinaryExpr{Left: yyDollar[1].valExpr, Operator: BitAndStr, Right: yyDollar[3].valExpr}
}
case 137:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:772
{
yyVAL.valExpr = &BinaryExpr{Left: yyDollar[1].valExpr, Operator: BitOrStr, Right: yyDollar[3].valExpr}
}
case 138:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:776
{
yyVAL.valExpr = &BinaryExpr{Left: yyDollar[1].valExpr, Operator: BitXorStr, Right: yyDollar[3].valExpr}
}
case 139:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:780
{
yyVAL.valExpr = &BinaryExpr{Left: yyDollar[1].valExpr, Operator: PlusStr, Right: yyDollar[3].valExpr}
}
case 140:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:784
{
yyVAL.valExpr = &BinaryExpr{Left: yyDollar[1].valExpr, Operator: MinusStr, Right: yyDollar[3].valExpr}
}
case 141:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:788
{
yyVAL.valExpr = &BinaryExpr{Left: yyDollar[1].valExpr, Operator: MultStr, Right: yyDollar[3].valExpr}
}
case 142:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:792
{
yyVAL.valExpr = &BinaryExpr{Left: yyDollar[1].valExpr, Operator: DivStr, Right: yyDollar[3].valExpr}
}
case 143:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:796
{
yyVAL.valExpr = &BinaryExpr{Left: yyDollar[1].valExpr, Operator: ModStr, Right: yyDollar[3].valExpr}
}
case 144:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:800
{
yyVAL.valExpr = &BinaryExpr{Left: yyDollar[1].valExpr, Operator: ShiftLeftStr, Right: yyDollar[3].valExpr}
}
case 145:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:804
{
yyVAL.valExpr = &BinaryExpr{Left: yyDollar[1].valExpr, Operator: ShiftRightStr, Right: yyDollar[3].valExpr}
}
case 146:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:808
{
if num, ok := yyDollar[2].valExpr.(NumVal); ok {
yyVAL.valExpr = num
} else {
yyVAL.valExpr = &UnaryExpr{Operator: UPlusStr, Expr: yyDollar[2].valExpr}
}
}
case 147:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:816
{
if num, ok := yyDollar[2].valExpr.(NumVal); ok {
// Handle double negative
if num[0] == '-' {
yyVAL.valExpr = num[1:]
} else {
yyVAL.valExpr = append(NumVal("-"), num...)
}
} else {
yyVAL.valExpr = &UnaryExpr{Operator: UMinusStr, Expr: yyDollar[2].valExpr}
}
}
case 148:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:829
{
yyVAL.valExpr = &UnaryExpr{Operator: TildaStr, Expr: yyDollar[2].valExpr}
}
case 149:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:833
{
yyVAL.valExpr = &FuncExpr{Name: string(yyDollar[1].sqlID)}
}
case 150:
yyDollar = yyS[yypt-4 : yypt+1]
//line sql.y:837
{
yyVAL.valExpr = &FuncExpr{Name: string(yyDollar[1].sqlID), Exprs: yyDollar[3].selectExprs}
}
case 151:
yyDollar = yyS[yypt-5 : yypt+1]
//line sql.y:841
{
yyVAL.valExpr = &FuncExpr{Name: string(yyDollar[1].sqlID), Distinct: true, Exprs: yyDollar[4].selectExprs}
}
case 152:
yyDollar = yyS[yypt-4 : yypt+1]
//line sql.y:845
{
yyVAL.valExpr = &FuncExpr{Name: yyDollar[1].str, Exprs: yyDollar[3].selectExprs}
}
case 153:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:849
{
yyVAL.valExpr = yyDollar[1].caseExpr
}
case 154:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:855
{
yyVAL.str = "if"
}
case 155:
yyDollar = yyS[yypt-5 : yypt+1]
//line sql.y:861
{
yyVAL.caseExpr = &CaseExpr{Expr: yyDollar[2].valExpr, Whens: yyDollar[3].whens, Else: yyDollar[4].valExpr}
}
case 156:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:866
{
yyVAL.valExpr = nil
}
case 157:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:870
{
yyVAL.valExpr = yyDollar[1].valExpr
}
case 158:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:876
{
yyVAL.whens = []*When{yyDollar[1].when}
}
case 159:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:880
{
yyVAL.whens = append(yyDollar[1].whens, yyDollar[2].when)
}
case 160:
yyDollar = yyS[yypt-4 : yypt+1]
//line sql.y:886
{
yyVAL.when = &When{Cond: yyDollar[2].boolExpr, Val: yyDollar[4].valExpr}
}
case 161:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:891
{
yyVAL.valExpr = nil
}
case 162:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:895
{
yyVAL.valExpr = yyDollar[2].valExpr
}
case 163:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:901
{
yyVAL.colName = &ColName{Name: yyDollar[1].sqlID}
}
case 164:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:905
{
yyVAL.colName = &ColName{Qualifier: yyDollar[1].sqlID, Name: yyDollar[3].sqlID}
}
case 165:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:911
{
yyVAL.valExpr = StrVal(yyDollar[1].bytes)
}
case 166:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:915
{
yyVAL.valExpr = NumVal(yyDollar[1].bytes)
}
case 167:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:919
{
yyVAL.valExpr = ValArg(yyDollar[1].bytes)
}
case 168:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:923
{
yyVAL.valExpr = &NullVal{}
}
case 169:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:928
{
yyVAL.valExprs = nil
}
case 170:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:932
{
yyVAL.valExprs = yyDollar[3].valExprs
}
case 171:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:937
{
yyVAL.boolExpr = nil
}
case 172:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:941
{
yyVAL.boolExpr = yyDollar[2].boolExpr
}
case 173:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:946
{
yyVAL.orderBy = nil
}
case 174:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:950
{
yyVAL.orderBy = yyDollar[3].orderBy
}
case 175:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:956
{
yyVAL.orderBy = OrderBy{yyDollar[1].order}
}
case 176:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:960
{
yyVAL.orderBy = append(yyDollar[1].orderBy, yyDollar[3].order)
}
case 177:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:966
{
yyVAL.order = &Order{Expr: yyDollar[1].valExpr, Direction: yyDollar[2].str}
}
case 178:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:971
{
yyVAL.str = AscScr
}
case 179:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:975
{
yyVAL.str = AscScr
}
case 180:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:979
{
yyVAL.str = DescScr
}
case 181:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:984
{
yyVAL.limit = nil
}
case 182:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:988
{
yyVAL.limit = &Limit{Rowcount: yyDollar[2].valExpr}
}
case 183:
yyDollar = yyS[yypt-4 : yypt+1]
//line sql.y:992
{
yyVAL.limit = &Limit{Offset: yyDollar[2].valExpr, Rowcount: yyDollar[4].valExpr}
}
case 184:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:997
{
yyVAL.str = ""
}
case 185:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:1001
{
yyVAL.str = ForUpdateStr
}
case 186:
yyDollar = yyS[yypt-4 : yypt+1]
//line sql.y:1005
{
if yyDollar[3].sqlID != "share" {
yylex.Error("expecting share")
return 1
}
if yyDollar[4].sqlID != "mode" {
yylex.Error("expecting mode")
return 1
}
yyVAL.str = ShareModeStr
}
case 187:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:1018
{
yyVAL.columns = nil
}
case 188:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:1022
{
yyVAL.columns = yyDollar[2].columns
}
case 189:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1028
{
yyVAL.columns = Columns{&NonStarExpr{Expr: yyDollar[1].colName}}
}
case 190:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:1032
{
yyVAL.columns = append(yyVAL.columns, &NonStarExpr{Expr: yyDollar[3].colName})
}
case 191:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:1037
{
yyVAL.updateExprs = nil
}
case 192:
yyDollar = yyS[yypt-5 : yypt+1]
//line sql.y:1041
{
yyVAL.updateExprs = yyDollar[5].updateExprs
}
case 193:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:1047
{
yyVAL.insRows = yyDollar[2].values
}
case 194:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1051
{
yyVAL.insRows = yyDollar[1].selStmt
}
case 195:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1057
{
yyVAL.values = Values{yyDollar[1].rowTuple}
}
case 196:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:1061
{
yyVAL.values = append(yyDollar[1].values, yyDollar[3].rowTuple)
}
case 197:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:1067
{
yyVAL.rowTuple = ValTuple(yyDollar[2].valExprs)
}
case 198:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1071
{
yyVAL.rowTuple = yyDollar[1].subquery
}
case 199:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1077
{
yyVAL.updateExprs = UpdateExprs{yyDollar[1].updateExpr}
}
case 200:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:1081
{
yyVAL.updateExprs = append(yyDollar[1].updateExprs, yyDollar[3].updateExpr)
}
case 201:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:1087
{
yyVAL.updateExpr = &UpdateExpr{Name: yyDollar[1].colName, Expr: yyDollar[3].valExpr}
}
case 204:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:1096
{
yyVAL.empty = struct{}{}
}
case 205:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:1098
{
yyVAL.empty = struct{}{}
}
case 206:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:1101
{
yyVAL.empty = struct{}{}
}
case 207:
yyDollar = yyS[yypt-3 : yypt+1]
//line sql.y:1103
{
yyVAL.empty = struct{}{}
}
case 208:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:1106
{
yyVAL.str = ""
}
case 209:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1108
{
yyVAL.str = IgnoreStr
}
case 210:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1112
{
yyVAL.empty = struct{}{}
}
case 211:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1114
{
yyVAL.empty = struct{}{}
}
case 212:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1116
{
yyVAL.empty = struct{}{}
}
case 213:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1118
{
yyVAL.empty = struct{}{}
}
case 214:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1120
{
yyVAL.empty = struct{}{}
}
case 215:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:1123
{
yyVAL.empty = struct{}{}
}
case 216:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1125
{
yyVAL.empty = struct{}{}
}
case 217:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:1128
{
yyVAL.empty = struct{}{}
}
case 218:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1130
{
yyVAL.empty = struct{}{}
}
case 219:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:1133
{
yyVAL.empty = struct{}{}
}
case 220:
yyDollar = yyS[yypt-2 : yypt+1]
//line sql.y:1135
{
yyVAL.empty = struct{}{}
}
case 221:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1139
{
yyVAL.sqlID = SQLName(strings.ToLower(string(yyDollar[1].bytes)))
}
case 222:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1145
{
yyVAL.sqlID = SQLName(yyDollar[1].bytes)
}
case 223:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1151
{
if incNesting(yylex) {
yylex.Error("max nesting level reached")
return 1
}
}
case 224:
yyDollar = yyS[yypt-1 : yypt+1]
//line sql.y:1160
{
decNesting(yylex)
}
case 225:
yyDollar = yyS[yypt-0 : yypt+1]
//line sql.y:1165
{
forceEOF(yylex)
}
}
goto yystack /* stack new state and value */
}
|
package main
import (
	"flag"
	"os"
	"time"

	"github.com/michaelvlaar/etcd-endpointer/examples/echoservice/api/protos"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"gopkg.in/inconshreveable/log15.v2"
)
var (
	// endpoint is the host:port the client dials.
	endpoint = flag.String("endpoint", ":9090", "GeneralsView API endpoint. Usage <host>:<port>.")
	// userID is echoed in every request so the server can load-balance per user.
	userID = flag.Uint64("userID", 0, "The userID to simulate for loadbalancing")
	// message is the payload echoed back by the service.
	message = flag.String("message", "Hello etcd-endpointer", "Type any string")
	// log is the package-level structured logger, set up in init.
	log log15.Logger
)
// init creates the package logger with log15's default configuration.
func init() {
	log = log15.New()
}
// main dials the echo service and sends one EchoRequest per second forever,
// logging each response.
func main() {
	flag.Parse()

	// NOTE(review): grpc.Dial with no DialOptions may require transport
	// credentials (or grpc.WithInsecure for plaintext) depending on the
	// grpc-go version in use — confirm against the vendored version.
	conn, err := grpc.Dial(*endpoint)
	if err != nil {
		// log15's Crit only logs; it does not terminate the process.
		// Without an explicit exit we would close and use a nil connection.
		log.Crit("could not connect to gRPC endpoint", "error", err)
		os.Exit(1)
	}
	defer func() {
		if err := conn.Close(); err != nil {
			log.Error("Error closing connection", "error", err)
		}
	}()

	client := echo.NewEchoserviceClient(conn)
	request := &echo.EchoRequest{
		UserID:  *userID,
		Message: *message,
	}
	for {
		response, err := client.EchoMessage(context.Background(), request)
		if err != nil {
			log.Error("error sending EchoMessage", "error", err)
		} else {
			// Only log a success line when we actually got a response.
			log.Info("Received EchoMessage", "request", request, "response", response)
		}
		time.Sleep(time.Second)
	}
}
|
// Copyright 2014 Joseph Hager. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package engi
import (
"math"
"time"
)
// Clock tracks per-frame timing: frame delta, a once-per-second FPS
// figure, and a fixed-timestep counter used to drive Step callbacks.
type Clock struct {
	elapsed float64 // seconds accumulated in the current one-second FPS window
	elapsedStep float64 // unused here — presumably step accumulation; verify against callers
	step float64 // fixed timestep length in seconds
	numStep uint32 // number of steps fired in the current window
	delta float64 // duration of the last frame in seconds
	fps float64 // frames counted during the last completed one-second window
	frames uint64 // frames counted so far in the current window
	start time.Time // wall-clock time the clock was created
	frame time.Time // wall-clock time of the previous Tick
}
// NewClock returns a Clock with the given fixed timestep (in seconds)
// and performs an initial Tick.
func NewClock(step float64) *Clock {
	clock := new(Clock)
	clock.start = time.Now()
	// Seed the previous-frame timestamp so the first Tick measures a
	// near-zero delta instead of the decades elapsed since the zero
	// time.Time, which polluted elapsed/fps on the very first frame.
	clock.frame = clock.start
	clock.step = step
	clock.Tick()
	return clock
}
// Tick records a new frame: it updates the frame delta and elapsed time,
// recomputes the FPS figure once per elapsed second, and fires the
// package-level responder's Step callback whenever a fixed-step boundary
// has been crossed.
func (c *Clock) Tick() {
	now := time.Now()
	c.frames++
	c.delta = now.Sub(c.frame).Seconds()
	c.elapsed += c.delta
	c.frame = now
	// A full second has accumulated: publish the frame count as FPS and
	// start a new measurement window, carrying over the fractional part.
	if c.elapsed >= 1 {
		c.fps = float64(c.frames)
		c.elapsed = math.Mod(c.elapsed, 1)
		c.frames = 0
		c.numStep = 0 // integer literal: numStep is a uint32, not a float
	}
	if (c.elapsed / c.step) >= float64(c.numStep) {
		c.numStep++
		responder.Step(c.step, c.numStep)
	}
}
// Delta returns the duration, in seconds, of the most recently ticked frame.
func (c *Clock) Delta() float32 {
	return float32(c.delta)
}
// Fps returns the frame count measured over the last completed one-second window.
func (c *Clock) Fps() float32 {
	return float32(c.fps)
}
// Time returns the number of seconds elapsed since the clock was created.
func (c *Clock) Time() float32 {
	return float32(time.Now().Sub(c.start).Seconds())
}
|
// Tomato static website generator
// Copyright Quentin Ribac, 2018
// Free software license can be found in the LICENSE file.
package main
import (
"fmt"
"reflect"
"testing"
)
// TestSiteinfo_MainAuthorHelper checks the rendered address tag for the
// first configured author.
func TestSiteinfo_MainAuthorHelper(t *testing.T) {
	cases := []struct {
		siteinfo Siteinfo
		want     string
	}{
		{Siteinfo{Authors: []Author{{"A", "a"}, {"B", "b"}}}, "<address><a href=\"mailto:a\">A</a></address>"},
	}
	for i, c := range cases {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			got := c.siteinfo.MainAuthorHelper()
			if got != c.want {
				t.Errorf("got %s; want %s", got, c.want)
			}
		})
	}
}
// TestSiteinfo_CopyrightHelper checks markdown rendering of the copyright
// notice, including the empty case.
func TestSiteinfo_CopyrightHelper(t *testing.T) {
	cases := []struct {
		siteinfo Siteinfo
		want     string
	}{
		{Siteinfo{Copyright: "test [test](test)"}, "<p>test <a href=\"test\">test</a></p>\n"},
		{Siteinfo{}, ""},
	}
	for i, c := range cases {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			got := c.siteinfo.CopyrightHelper(&Page{})
			if got != c.want {
				t.Errorf("got %s; want %s", got, c.want)
			}
		})
	}
}
// TestSiteinfo_SubtitleHelper checks markdown rendering of the subtitle,
// including the empty case.
func TestSiteinfo_SubtitleHelper(t *testing.T) {
	cases := []struct {
		siteinfo Siteinfo
		want     string
	}{
		{Siteinfo{Subtitle: "sous-titre [test](test)"}, "<p>sous-titre <a href=\"test\">test</a></p>\n"},
		{Siteinfo{}, ""},
	}
	for i, c := range cases {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			got := c.siteinfo.SubtitleHelper(&Page{})
			if got != c.want {
				t.Errorf("got %s; want %s", got, c.want)
			}
		})
	}
}
// TestSiteinfo_DescriptionHelper checks markdown rendering of the site
// description, including the empty case.
func TestSiteinfo_DescriptionHelper(t *testing.T) {
	cases := []struct {
		siteinfo Siteinfo
		want     string
	}{
		{Siteinfo{Description: "description [test](test)"}, "<p>description <a href=\"test\">test</a></p>\n"},
		{Siteinfo{}, ""},
	}
	for i, c := range cases {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			got := c.siteinfo.DescriptionHelper(&Page{})
			if got != c.want {
				t.Errorf("got %s; want %s", got, c.want)
			}
		})
	}
}
// TestSiteinfo_FindAuthor checks both the lookup-miss (error expected) and
// lookup-hit paths of Siteinfo.FindAuthor.
func TestSiteinfo_FindAuthor(t *testing.T) {
	author := Author{Name: "episte"}
	cases := []struct {
		siteinfo   *Siteinfo
		want       *Author
		name       string
		shouldFail bool
	}{
		{&Siteinfo{}, nil, "", true},
		{&Siteinfo{Authors: []Author{author}}, &author, "episte", false},
	}
	for i, c := range cases {
		t.Run(fmt.Sprintf("%d", i), func(t *testing.T) {
			got, err := c.siteinfo.FindAuthor(c.name)
			// failed != shouldFail covers both "unexpected error" and
			// "expected an error but got none".
			failed := err != nil
			if failed != c.shouldFail || !reflect.DeepEqual(got, c.want) {
				if c.shouldFail {
					t.Errorf("got %s, %s; want %s, a non-nil error", got, err, c.want)
				} else {
					t.Errorf("got %s, %s; want %s, nil", got, err, c.want)
				}
			}
		})
	}
}
|
package handlers_test
import (
"bosh-dns/dns/server/handlers"
"bosh-dns/dns/server/handlers/handlersfakes"
"bosh-dns/dns/server/internal/internalfakes"
"bosh-dns/dns/server/monitoring/monitoringfakes"
"github.com/miekg/dns"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Ginkgo spec for MetricsDNSHandler: ServeDNS must both report a metric
// and delegate to the wrapped DNS handler.
var _ bool = Describe("metricsHandler", func() {
	var (
		metricsHandler handlers.MetricsDNSHandler
		fakeMetricsReporter *monitoringfakes.FakeMetricsReporter
		fakeWriter *internalfakes.FakeResponseWriter
		fakeDnsHandler *handlersfakes.FakeDNSHandler
		// response is never assigned, so ServeDNS receives a nil *dns.Msg.
		// NOTE(review): presumably the fakes tolerate nil — confirm.
		response *dns.Msg
	)
	BeforeEach(func() {
		fakeMetricsReporter = &monitoringfakes.FakeMetricsReporter{}
		fakeDnsHandler = &handlersfakes.FakeDNSHandler{}
		fakeWriter = &internalfakes.FakeResponseWriter{}
		metricsHandler = handlers.NewMetricsDNSHandler(fakeMetricsReporter, fakeDnsHandler)
	})
	Describe("ServeDNS", func() {
		It("collects metrics", func() {
			metricsHandler.ServeDNS(fakeWriter, response)
			// Exactly one metric report and one delegation per request.
			Expect(fakeMetricsReporter.ReportCallCount()).To(Equal(1))
			Expect(fakeDnsHandler.ServeDNSCallCount()).To(Equal(1))
		})
	})
})
|
package core
import (
"gonum.org/v1/gonum/floats"
"gonum.org/v1/gonum/stat"
"math"
)
/* Table */
// DataTable is an array of (userId, itemId, rating) triples stored as
// three parallel slices; index i of each slice belongs to the same triple.
type DataTable struct {
	Ratings []float64 // rating values
	Users []int // user IDs, parallel to Ratings
	Items []int // item IDs, parallel to Ratings
}
// NewDataTable assembles a DataTable from parallel user, item and rating
// slices. The slices are retained by reference, not copied.
func NewDataTable(users, items []int, ratings []float64) *DataTable {
	table := new(DataTable)
	table.Users = users
	table.Items = items
	table.Ratings = ratings
	return table
}
// Len returns the number of (userId, itemId, rating) triples in the table.
func (dataSet *DataTable) Len() int {
	return len(dataSet.Ratings)
}
// Get returns the (userId, itemId, rating) triple stored at index i.
func (dataSet *DataTable) Get(i int) (int, int, float64) {
	return dataSet.Users[i], dataSet.Items[i], dataSet.Ratings[i]
}
// ForEach invokes f once for every (userId, itemId, rating) triple,
// in index order.
func (dataSet *DataTable) ForEach(f func(userId, itemId int, rating float64)) {
	for i := range dataSet.Ratings {
		f(dataSet.Users[i], dataSet.Items[i], dataSet.Ratings[i])
	}
}
// Mean returns the arithmetic mean of all ratings (via gonum's stat.Mean).
func (dataSet *DataTable) Mean() float64 {
	return stat.Mean(dataSet.Ratings, nil)
}
func (dataSet *DataTable) StdDev() float64 {
mean := dataSet.Mean()
sum := 0.0
dataSet.ForEach(func(userId, itemId int, rating float64) {
sum += (rating - mean) * (rating - mean)
})
return math.Sqrt(sum / float64(dataSet.Len()))
}
// Min returns the smallest rating. NOTE(review): gonum's floats.Min panics
// on an empty slice, so callers must ensure the table is non-empty.
func (dataSet *DataTable) Min() float64 {
	return floats.Min(dataSet.Ratings)
}
// Max returns the largest rating. NOTE(review): gonum's floats.Max panics
// on an empty slice, so callers must ensure the table is non-empty.
func (dataSet *DataTable) Max() float64 {
	return floats.Max(dataSet.Ratings)
}
// SubSet returns a virtual view of this table restricted to the given
// row indices; no data is copied.
func (dataSet *DataTable) SubSet(indices []int) Table {
	return NewVirtualTable(dataSet, indices)
}
// VirtualTable is a virtual subset of a DataTable: it exposes only the
// rows of data named by index, without copying them.
type VirtualTable struct {
	data *DataTable // backing storage, shared with the parent table
	index []int // positions in data that this view exposes
}
// NewVirtualTable wraps dataSet so that only the rows named by index are
// visible through the Table interface.
func NewVirtualTable(dataSet *DataTable, index []int) *VirtualTable {
	table := new(VirtualTable)
	table.data = dataSet
	table.index = index
	return table
}
// Len returns the number of rows visible through this view.
func (dataSet *VirtualTable) Len() int {
	return len(dataSet.index)
}
// Get resolves view index i through the index mapping and returns the
// corresponding triple from the backing DataTable.
func (dataSet *VirtualTable) Get(i int) (int, int, float64) {
	return dataSet.data.Get(dataSet.index[i])
}
// ForEach invokes f once for every visible (userId, itemId, rating)
// triple, in view order.
func (dataSet *VirtualTable) ForEach(f func(userId, itemId int, rating float64)) {
	for i := range dataSet.index {
		userId, itemId, rating := dataSet.Get(i)
		f(userId, itemId, rating)
	}
}
// Mean returns the arithmetic mean of the visible ratings.
func (dataSet *VirtualTable) Mean() float64 {
	var total float64
	for i := 0; i < dataSet.Len(); i++ {
		_, _, rating := dataSet.Get(i)
		total += rating
	}
	return total / float64(dataSet.Len())
}
func (dataSet *VirtualTable) StdDev() float64 {
mean := dataSet.Mean()
sum := 0.0
dataSet.ForEach(func(userId, itemId int, rating float64) {
sum += (rating - mean) * (rating - mean)
})
return math.Sqrt(sum / float64(dataSet.Len()))
}
// Min returns the smallest visible rating. Like the DataTable variant,
// it requires a non-empty view (Get(0) panics otherwise).
func (dataSet *VirtualTable) Min() float64 {
	_, _, lowest := dataSet.Get(0)
	for i := 1; i < dataSet.Len(); i++ {
		_, _, rating := dataSet.Get(i)
		if rating < lowest {
			lowest = rating
		}
	}
	return lowest
}
// Max returns the largest visible rating. Like the DataTable variant,
// it requires a non-empty view (Get(0) panics otherwise).
func (dataSet *VirtualTable) Max() float64 {
	_, _, highest := dataSet.Get(0)
	for i := 1; i < dataSet.Len(); i++ {
		_, _, rating := dataSet.Get(i)
		if rating > highest {
			highest = rating
		}
	}
	return highest
}
// SubSet returns a view of a view: the given indices (relative to this
// view) are resolved to raw positions in the backing DataTable, so the
// result wraps the original data directly rather than nesting views.
func (dataSet *VirtualTable) SubSet(indices []int) Table {
	resolved := make([]int, len(indices))
	for i, idx := range indices {
		resolved[i] = dataSet.index[idx]
	}
	return NewVirtualTable(dataSet.data, resolved)
}
|
package main
import (
"fmt"
m "math"
"github.com/MaxHalford/gago"
)
// simulate returns f(x) = x^2 sampled at every integer x in [start, end].
func simulate(start, end int) []float64 {
	data := []float64{}
	for x := start; x <= end; x++ {
		data = append(data, m.Pow(float64(x), 2))
	}
	return data
}

// data is the target curve the GA tries to fit: x^2 for x in [1, 20].
var (
	data = simulate(1, 20)
)

// leastSquares scores a candidate coefficient vector X by the sum of
// squared residuals between X[0]*x^2 and the target data. Lower is
// better; the perfect fit X = {1} scores 0.
func leastSquares(X []float64) float64 {
	// sse = sum of squared errors. The original named this variable
	// "error", shadowing the builtin error type.
	sse := 0.0
	for i, target := range data {
		x := i + 1
		// The redundant "1 *" factor from the original is dropped.
		residual := target - X[0]*m.Pow(float64(x), 2)
		sse += residual * residual
	}
	return sse
}
// main configures a gago genetic algorithm and evolves a one-variable
// coefficient that minimizes leastSquares over 20 generations, printing
// the best individual found.
func main() {
	// Instantiate a population
	ga := gago.Default
	// Number of demes
	ga.NbDemes = 4
	// Number of individuals in each deme
	ga.NbIndividuals = 30
	// Initial random boundaries
	ga.Boundary = 10.0
	// Mutation rate
	ga.MutRate = 0.2
	// Fitness function
	function := leastSquares
	// Number of variables the function takes as input
	variables := 1
	// Initialize the genetic algorithm
	ga.Initialize(function, variables)
	// Enhancement
	for i := 0; i < 20; i++ {
		ga.Enhance()
	}
	// NOTE(review): this uses an old gago API (NbDemes/Enhance); confirm
	// against the vendored gago version before refactoring.
	fmt.Println(ga.Best)
}
|
package main
import "fmt"
// person pairs a first/last name with a favorite ice-cream flavor.
type person struct {
	first string
	last string
	flavor string
}
// main builds two people, prints them, then prints for each a map from
// last name to [first name, flavor].
func main() {
	p1 := person{
		first:  "Jonathan",
		last:   "Thompson",
		flavor: "chocolate",
	}
	p2 := person{
		first:  "Nicole",
		last:   "Thompson",
		flavor: "vanilla",
	}
	fmt.Println(p1, p2)
	for _, v := range []person{p1, p2} {
		// gofmt -s: the []string element type is inferred from the map
		// type, so spelling it out in the literal was redundant.
		m := map[string][]string{
			v.last: {v.first, v.flavor},
		}
		fmt.Println(m)
	}
}
|
// Copyright 2020 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tcpip_test
import (
"math"
"sync"
"testing"
"time"
"gvisor.dev/gvisor/pkg/tcpip"
)
// TestMonotonicTimeBefore verifies Before is irreflexive and orders a
// zero MonotonicTime before one advanced by a nanosecond.
func TestMonotonicTimeBefore(t *testing.T) {
	var zero tcpip.MonotonicTime
	later := zero.Add(1)
	if zero.Before(zero) {
		t.Errorf("%#v.Before(%#v)", zero, zero)
	}
	if later.Before(zero) {
		t.Errorf("%#v.Before(%#v)", later, zero)
	}
	if !zero.Before(later) {
		t.Errorf("!%#v.Before(%#v)", zero, later)
	}
}
// TestMonotonicTimeAfter verifies After is irreflexive and orders an
// advanced MonotonicTime after the zero value.
func TestMonotonicTimeAfter(t *testing.T) {
	var zero tcpip.MonotonicTime
	later := zero.Add(1)
	if zero.After(zero) {
		t.Errorf("%#v.After(%#v)", zero, zero)
	}
	if zero.After(later) {
		t.Errorf("%#v.After(%#v)", zero, later)
	}
	if !later.After(zero) {
		t.Errorf("!%#v.After(%#v)", later, zero)
	}
}
// TestMonotonicTimeAddSub checks that Add is associative-by-steps, that
// Add saturates at the int64 extremes instead of wrapping, and that Sub
// between saturated endpoints likewise saturates to MaxInt64/MinInt64.
func TestMonotonicTimeAddSub(t *testing.T) {
	var mt tcpip.MonotonicTime
	if one, two := mt.Add(2), mt.Add(1).Add(1); one != two {
		t.Errorf("mt.Add(2) != mt.Add(1).Add(1) (%#v != %#v)", one, two)
	}
	min := mt.Add(math.MinInt64)
	max := mt.Add(math.MaxInt64)
	// Adding past MaxInt64 must clamp rather than overflow.
	if overflow := mt.Add(1).Add(math.MaxInt64); overflow != max {
		t.Errorf("mt.Add(math.MaxInt64) != mt.Add(1).Add(math.MaxInt64) (%#v != %#v)", max, overflow)
	}
	// Subtracting past MinInt64 must clamp rather than underflow.
	if underflow := mt.Add(-1).Add(math.MinInt64); underflow != min {
		t.Errorf("mt.Add(math.MinInt64) != mt.Add(-1).Add(math.MinInt64) (%#v != %#v)", min, underflow)
	}
	if got, want := min.Sub(min), time.Duration(0); want != got {
		t.Errorf("got min.Sub(min) = %d, want %d", got, want)
	}
	if got, want := max.Sub(max), time.Duration(0); want != got {
		t.Errorf("got max.Sub(max) = %d, want %d", got, want)
	}
	if overflow, want := max.Sub(min), time.Duration(math.MaxInt64); overflow != want {
		t.Errorf("mt.Add(math.MaxInt64).Sub(mt.Add(math.MinInt64) != %s (%#v)", want, overflow)
	}
	if underflow, want := min.Sub(max), time.Duration(math.MinInt64); underflow != want {
		t.Errorf("mt.Add(math.MinInt64).Sub(mt.Add(math.MaxInt64) != %s (%#v)", want, underflow)
	}
}
// TestMonotonicTimeSub exercises saturation of MonotonicTime.Add at both
// int64 extremes. NOTE(review): despite the name, this test never calls
// Sub directly; the Sub-specific checks live in TestMonotonicTimeAddSub.
func TestMonotonicTimeSub(t *testing.T) {
	var mt tcpip.MonotonicTime
	if one, two := mt.Add(2), mt.Add(1).Add(1); one != two {
		t.Errorf("mt.Add(2) != mt.Add(1).Add(1) (%#v != %#v)", one, two)
	}
	if max, overflow := mt.Add(math.MaxInt64), mt.Add(1).Add(math.MaxInt64); max != overflow {
		t.Errorf("mt.Add(math.MaxInt64) != mt.Add(1).Add(math.MaxInt64) (%#v != %#v)", max, overflow)
	}
	// Renamed from the misleading "max": this branch checks the MinInt64
	// (underflow) side of the clamp.
	if min, underflow := mt.Add(math.MinInt64), mt.Add(-1).Add(math.MinInt64); min != underflow {
		t.Errorf("mt.Add(math.MinInt64) != mt.Add(-1).Add(math.MinInt64) (%#v != %#v)", min, underflow)
	}
}
const (
	// shortDuration is short enough that a scheduled job fires effectively
	// immediately.
	shortDuration = 1 * time.Nanosecond
	// middleDuration bounds how long tests wait for a timer to fire (or to
	// confirm it does not).
	middleDuration = 100 * time.Millisecond
)
// TestJobReschedule creates and schedules jobs from two goroutines that
// share one lock; it passes (and terminates) only when both job callbacks
// eventually run, and fails under -race if reassignment races the timer.
func TestJobReschedule(t *testing.T) {
	clock := tcpip.NewStdClock()
	var wg sync.WaitGroup
	var lock sync.Mutex
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			lock.Lock()
			// Assigning a new timer value updates the timer's locker and function.
			// This test makes sure there is no data race when reassigning a timer
			// that has an active timer (even if it has been stopped as a stopped
			// timer may be blocked on a lock before it can check if it has been
			// stopped while another goroutine holds the same lock).
			job := tcpip.NewJob(clock, &lock, func() {
				wg.Done()
			})
			job.Schedule(shortDuration)
			lock.Unlock()
		}()
	}
	wg.Wait()
}
// stdClockWithAfter pairs a real-time tcpip.Clock with the matching
// time.After so tests can wait in wall-clock terms.
func stdClockWithAfter() (tcpip.Clock, func(time.Duration) <-chan time.Time) {
	return tcpip.NewStdClock(), time.After
}
// TestJobExecution schedules a job and verifies its callback fires exactly
// once within the wait window.
func TestJobExecution(t *testing.T) {
	t.Parallel()
	clock, after := stdClockWithAfter()
	var lock sync.Mutex
	ch := make(chan struct{})
	job := tcpip.NewJob(clock, &lock, func() {
		ch <- struct{}{}
	})
	job.Schedule(shortDuration)
	// Wait for timer to fire.
	select {
	case <-ch:
	case <-after(middleDuration):
		t.Fatal("timed out waiting for timer to fire")
	}
	// The timer should have fired only once.
	select {
	case <-ch:
		t.Fatal("no other timers should have fired")
	case <-after(middleDuration):
	}
}
// TestCancellableTimerResetFromLongDuration schedules a job far out,
// cancels it under the lock, reschedules it with a short duration, and
// verifies the callback fires exactly once.
func TestCancellableTimerResetFromLongDuration(t *testing.T) {
	t.Parallel()
	clock, after := stdClockWithAfter()
	var lock sync.Mutex
	ch := make(chan struct{})
	job := tcpip.NewJob(clock, &lock, func() { ch <- struct{}{} })
	job.Schedule(middleDuration)
	// Cancel must happen while holding the job's lock.
	lock.Lock()
	job.Cancel()
	lock.Unlock()
	job.Schedule(shortDuration)
	// Wait for timer to fire.
	select {
	case <-ch:
	case <-after(middleDuration):
		t.Fatal("timed out waiting for timer to fire")
	}
	// The timer should have fired only once.
	select {
	case <-ch:
		t.Fatal("no other timers should have fired")
	case <-after(middleDuration):
	}
}
// TestJobRescheduleFromShortDuration cancels a just-scheduled job while
// holding its lock (so the stopped timer may already be blocked on the
// lock), confirms it never fires, then reschedules and verifies exactly
// one firing.
func TestJobRescheduleFromShortDuration(t *testing.T) {
	t.Parallel()
	clock, after := stdClockWithAfter()
	var lock sync.Mutex
	ch := make(chan struct{})
	lock.Lock()
	job := tcpip.NewJob(clock, &lock, func() { ch <- struct{}{} })
	job.Schedule(shortDuration)
	job.Cancel()
	lock.Unlock()
	// Wait for timer to fire if it wasn't correctly stopped.
	select {
	case <-ch:
		t.Fatal("timer fired after being stopped")
	case <-after(middleDuration):
	}
	job.Schedule(shortDuration)
	// Wait for timer to fire.
	select {
	case <-ch:
	case <-after(middleDuration):
		t.Fatal("timed out waiting for timer to fire")
	}
	// The timer should have fired only once.
	select {
	case <-ch:
		t.Fatal("no other timers should have fired")
	case <-after(middleDuration):
	}
}
// TestJobImmediatelyCancel schedules and immediately cancels 1000 jobs
// under the lock and verifies that none of them ever fires.
func TestJobImmediatelyCancel(t *testing.T) {
	t.Parallel()
	clock, after := stdClockWithAfter()
	var lock sync.Mutex
	ch := make(chan struct{})
	for i := 0; i < 1000; i++ {
		lock.Lock()
		job := tcpip.NewJob(clock, &lock, func() { ch <- struct{}{} })
		job.Schedule(shortDuration)
		job.Cancel()
		lock.Unlock()
	}
	// Wait for timer to fire if it wasn't correctly stopped.
	select {
	case <-ch:
		t.Fatal("timer fired after being stopped")
	case <-after(middleDuration):
	}
}
// stdClockWithAfterAndSleep extends stdClockWithAfter with the matching
// time.Sleep for tests that need to block the current goroutine.
func stdClockWithAfterAndSleep() (tcpip.Clock, func(time.Duration) <-chan time.Time, func(time.Duration)) {
	clock, after := stdClockWithAfter()
	return clock, after, time.Sleep
}
// TestJobCancelledRescheduleWithoutLock repeatedly reschedules a cancelled
// job without the lock, lets the timer block on the lock, cancels it again,
// and verifies the callback never runs.
func TestJobCancelledRescheduleWithoutLock(t *testing.T) {
	t.Parallel()
	clock, after, sleep := stdClockWithAfterAndSleep()
	var lock sync.Mutex
	ch := make(chan struct{})
	lock.Lock()
	job := tcpip.NewJob(clock, &lock, func() { ch <- struct{}{} })
	job.Schedule(shortDuration)
	job.Cancel()
	lock.Unlock()
	for i := 0; i < 10; i++ {
		job.Schedule(middleDuration)
		lock.Lock()
		// Sleep until the timer fires and gets blocked trying to take the lock.
		sleep(middleDuration * 2)
		job.Cancel()
		lock.Unlock()
	}
	// Wait for double the duration so timers that weren't correctly stopped can
	// fire.
	select {
	case <-ch:
		t.Fatal("timer fired after being stopped")
	case <-after(middleDuration * 2):
	}
}
// TestManyCancellableTimerResetAfterBlockedOnLock cancels and reschedules
// a job many times while its fired timer is blocked on the held lock, then
// checks exactly one callback runs after the lock is released.
func TestManyCancellableTimerResetAfterBlockedOnLock(t *testing.T) {
	t.Parallel()
	clock, after, sleep := stdClockWithAfterAndSleep()
	var lock sync.Mutex
	ch := make(chan struct{})
	lock.Lock()
	job := tcpip.NewJob(clock, &lock, func() { ch <- struct{}{} })
	job.Schedule(shortDuration)
	for i := 0; i < 10; i++ {
		// Sleep until the timer fires and gets blocked trying to take the lock.
		sleep(middleDuration)
		job.Cancel()
		job.Schedule(shortDuration)
	}
	lock.Unlock()
	// Wait up to middleDuration for the last scheduled timer to fire.
	select {
	case <-ch:
	case <-after(middleDuration):
		t.Fatal("timed out waiting for timer to fire")
	}
	// The timer should have fired only once.
	select {
	case <-ch:
		t.Fatal("no other timers should have fired")
	case <-after(middleDuration):
	}
}
// TestManyJobReschedulesUnderLock cancels and reschedules a job many times
// while continuously holding its lock, then checks exactly one callback
// runs once the lock is released.
func TestManyJobReschedulesUnderLock(t *testing.T) {
	t.Parallel()
	clock, after := stdClockWithAfter()
	var lock sync.Mutex
	ch := make(chan struct{})
	lock.Lock()
	job := tcpip.NewJob(clock, &lock, func() { ch <- struct{}{} })
	job.Schedule(shortDuration)
	for i := 0; i < 10; i++ {
		job.Cancel()
		job.Schedule(shortDuration)
	}
	lock.Unlock()
	// Wait up to middleDuration for the last scheduled timer to fire.
	select {
	case <-ch:
	case <-after(middleDuration):
		t.Fatal("timed out waiting for timer to fire")
	}
	// The timer should have fired only once.
	select {
	case <-ch:
		t.Fatal("no other timers should have fired")
	case <-after(middleDuration):
	}
}
|
package main
import "github.com/newmizanur/forumapi/api"
// main delegates all setup and serving to the forumapi package.
func main() {
	api.Run()
}
|
package optional
import (
"errors"
"reflect"
)
// Optional is a container that may or may not hold a non-nil value,
// modeled on java.util.Optional.
type Optional interface {
	// Map applies the function to the contained value when present and
	// returns an Optional of the result; an empty Optional is returned
	// unchanged.
	Map(func(o interface{}) interface{}) Optional
	// OrElse returns the contained value when present, otherwise the argument.
	OrElse(interface{}) interface{}
	// IsPresent reports whether a usable (non-nil) value is held.
	IsPresent() bool
	// Get returns the contained value, or an error when the Optional is empty.
	Get() (interface{}, error)
}

// OptionalImpl is the default Optional implementation.
type OptionalImpl struct {
	value interface{}
}

// Map returns a new Optional holding fn(value) when a value is present.
// Fix: the previous implementation overwrote op.value in place, silently
// mutating any shared reference to this Optional; Map must leave the
// receiver untouched and return a fresh container.
func (op *OptionalImpl) Map(fn func(o interface{}) interface{}) Optional {
	if !op.IsPresent() {
		return op
	}
	return &OptionalImpl{value: fn(op.value)}
}

// OrElse returns the contained value, or o when this Optional is empty.
func (op *OptionalImpl) OrElse(o interface{}) interface{} {
	if op.IsPresent() {
		return op.value
	}
	return o
}

// Get returns the contained value, or an error when this Optional is empty.
func (op *OptionalImpl) Get() (interface{}, error) {
	if op.IsPresent() {
		return op.value, nil
	}
	return nil, errors.New("No such element")
}

// IsPresent treats both a nil interface and a typed-nil pointer as absent.
func (op *OptionalImpl) IsPresent() bool {
	if op.value == nil {
		return false
	}
	// A non-nil interface can still wrap a nil pointer; unwrap via reflect.
	r := reflect.ValueOf(op.value)
	if r.Kind() == reflect.Ptr {
		return !r.IsNil()
	}
	return true
}

// OfNullable wraps o (which may be nil) in an Optional.
func OfNullable(o interface{}) Optional {
	return &OptionalImpl{o}
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package phonehub
import (
"context"
"regexp"
"time"
"chromiumos/tast/ctxutil"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/crossdevice"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/chrome/uiauto/nodewith"
"chromiumos/tast/local/chrome/uiauto/quicksettings"
"chromiumos/tast/local/chrome/uiauto/role"
"chromiumos/tast/local/input"
"chromiumos/tast/testing"
)
// init registers the MessageNotification test and its metadata with the
// tast framework; the fixture provides an onboarded phone/Chromebook pair.
func init() {
	testing.AddTest(&testing.Test{
		Func: MessageNotification,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc: "Checks that Android message notifications appear in Phone Hub and that inline reply works",
		Contacts: []string{
			"kyleshima@chromium.org",
			"chromeos-sw-engprod@google.com",
			"chromeos-cross-device-eng@google.com",
		},
		Attr: []string{"group:cross-device", "cross-device_phonehub"},
		SoftwareDeps: []string{"chrome"},
		Fixture: "crossdeviceOnboardedAllFeatures",
		Timeout: 3 * time.Minute,
	})
}
// MessageNotification tests receiving message notifications in Phone Hub and replying to them.
// Flow: clear notifications -> generate a message notification on the phone ->
// wait for it on ChromeOS -> inline-reply via the virtual keyboard -> verify
// the phone received the exact reply text.
func MessageNotification(ctx context.Context, s *testing.State) {
	tconn := s.FixtValue().(*crossdevice.FixtData).TestConn
	androidDevice := s.FixtValue().(*crossdevice.FixtData).AndroidDevice
	// Reserve time for deferred cleanup functions.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 5*time.Second)
	defer cancel()
	// Clear any notifications that are currently displayed.
	// NOTE(review): the failure message below drops err — consider logging it.
	if err := ash.CloseNotifications(ctx, tconn); err != nil {
		s.Fatal("Failed to clear notifications")
	}
	// Generate a message notification on the Android device.
	title := "Hello!"
	text := "Notification test"
	waitForReply, err := androidDevice.GenerateMessageNotification(ctx, 1 /*id*/, title, text)
	if err != nil {
		s.Fatal("Failed to generate Android message notification: ", err)
	}
	// Wait for the notification on ChromeOS.
	n, err := ash.WaitForNotification(ctx, tconn, 10*time.Second, ash.WaitTitle(title))
	if err != nil {
		s.Fatal("Failed waiting for the message notification to appear on CrOS: ", err)
	}
	if n.Message != text {
		s.Fatalf("Notification text does not match: wanted %v, got %v", text, n.Message)
	}
	// Open Quick Settings to make sure the notification is visible.
	if err := quicksettings.Show(ctx, tconn); err != nil {
		s.Fatal("Failed to open Quick Settings to check notifications: ", err)
	}
	defer quicksettings.Hide(cleanupCtx, tconn)
	defer faillog.DumpUITreeOnError(cleanupCtx, s.OutDir(), s.HasError, tconn)
	// Reply using the notification's inline reply field.
	kb, err := input.VirtualKeyboard(ctx)
	if err != nil {
		s.Fatal("Failed to set up virtual keyboard: ", err)
	}
	replyText := "Goodbye!"
	ui := uiauto.New(tconn)
	// Case-insensitive match on the button label ("Reply"/"reply").
	if err := ui.LeftClick(nodewith.Role(role.Button).NameRegex(regexp.MustCompile("(?i)reply")))(ctx); err != nil {
		s.Fatal("Failed to click notification's reply button: ", err)
	}
	// Trailing newline submits the inline reply.
	if err := kb.Type(ctx, replyText+"\n"); err != nil {
		s.Fatal("Failed to type a reply in the notification: ", err)
	}
	// Wait for the Android device to receive the reply and verify the text matches.
	received, err := waitForReply(ctx)
	if err != nil {
		s.Fatal("Failed waiting to receive a reply on the Android device: ", err)
	}
	if received != replyText {
		s.Fatalf("Reply received by the snippet does not match: wanted %v, got %v", replyText, received)
	}
}
|
package slices
import "fmt"
// main demonstrates basic slice operations: creation, indexing, append, copy,
// half-open slicing, and two-dimensional slices.
func main() {
	simpleSlice := make([]string, 3)
	fmt.Println("empty slice: ", simpleSlice)

	// Populate the slice by index.
	for i, v := range []string{"a", "b", "c"} {
		simpleSlice[i] = v
	}
	fmt.Println("set:", simpleSlice)
	fmt.Println("get:", simpleSlice[2])
	fmt.Println("length:", len(simpleSlice))

	// append grows the slice past its original length; multiple values can be
	// appended in one call.
	simpleSlice = append(simpleSlice, "d")
	simpleSlice = append(simpleSlice, "e", "f")

	// Duplicate simpleSlice into a second slice of the same length.
	anotherSlice := make([]string, len(simpleSlice))
	copy(anotherSlice, simpleSlice)
	fmt.Println("anotherSlice: ", anotherSlice)

	// Half-open slicing: indices 2, 3 and 4 (index 5 is excluded).
	slice3 := simpleSlice[2:5]
	fmt.Println("slice3:", slice3)

	// A two-dimensional slice whose rows grow in length.
	twoDimensionalSlice := make([][]int, 3)
	for i := range twoDimensionalSlice {
		row := make([]int, i+1)
		for j := range row {
			row[j] = i + j
		}
		twoDimensionalSlice[i] = row
	}
	fmt.Println("2D slice:", twoDimensionalSlice)
}
package multirun
import (
"errors"
"math/rand"
"sync"
"testing"
"time"
)
// Shared state used by checkReady to detect overlapping "started but not yet
// ready" phases across all runnables in a test.
var globalMutex sync.Mutex
var isNotReady bool

// errorNewRun is returned when a new Run started before the previous runnable
// signalled ready.
var errorNewRun = errors.New("A new run method was started before we were ready")

// SimpleRunnable is a test runnable implementing the RunnableReady contract;
// its Run blocks until Close is called.
type SimpleRunnable struct {
	t *testing.T
	RunnableReady
	sync.Mutex
	closed     bool            // set by Close; guards against double close
	c          chan (struct{}) // closed by Close to unblock Run
	running    bool            // true while Run is executing
	readyDelay bool            // call ready() only after the checkReady window
}

// SimpleRunnableNew builds a SimpleRunnable bound to the test t.
func SimpleRunnableNew(t *testing.T, readyDelay bool) *SimpleRunnable {
	return &SimpleRunnable{
		c:          make(chan (struct{})),
		t:          t,
		readyDelay: readyDelay,
	}
}
// checkReady verifies that no two runnables are inside their pre-ready window
// at the same time. It sets the shared isNotReady flag, holds it through a
// random 0-20 ms sleep, and reports false if the flag was already set on entry
// or was cleared by another goroutine during the window.
func checkReady() bool {
	/* Check ready handling */
	valid := true
	globalMutex.Lock()
	if isNotReady {
		// Another runnable is already in its pre-ready window.
		valid = false
	}
	isNotReady = true
	globalMutex.Unlock()
	if valid {
		// Hold the "not ready" state for a random moment so overlaps surface.
		time.Sleep(time.Duration(20000*rand.Float32()) * time.Microsecond)
		globalMutex.Lock()
		if !isNotReady {
			// Someone else cleared the flag while we held it.
			valid = false
		}
		isNotReady = false
		globalMutex.Unlock()
	}
	return valid
}
// Run marks the runnable as running, calls ready() either before
// (readyDelay=false) or after (readyDelay=true) the checkReady window, and
// then blocks until Close closes s.c. It returns errorNewRun when checkReady
// detects an overlapping start.
func (s *SimpleRunnable) Run(ready func()) error {
	// Clear the running flag whichever way we exit.
	defer func() {
		s.Lock()
		s.running = false
		s.Unlock()
	}()
	s.Lock()
	s.running = true
	s.Unlock()
	if !s.readyDelay {
		// Instant-ready mode: report readiness immediately.
		ready()
	}
	if !checkReady() {
		return errorNewRun
	}
	if s.readyDelay {
		// Delayed-ready mode: report readiness only after the check window.
		ready()
	}
	// Block until Close closes the channel.
	<-s.c
	return nil
}
// Close unblocks Run by closing the channel. A second Close is reported as a
// test failure rather than panicking on a double channel close.
func (s *SimpleRunnable) Close() error {
	s.Lock()
	defer s.Unlock()
	if s.closed {
		s.t.Error("SimpleRunnable closed twice")
	} else {
		s.closed = true
		close(s.c)
	}
	return nil
}
// ErrorRunnable is a stub runnable whose Run and Close immediately return the
// configured errors (nil by default); it exercises MultiRun's error
// propagation.
type ErrorRunnable struct {
	runError   error
	closeError error
}

// Run returns the configured run error without blocking.
func (s *ErrorRunnable) Run() error {
	return s.runError
}

// Close returns the configured close error.
func (s *ErrorRunnable) Close() error {
	return s.closeError
}
// testBasicInternal drives one full MultiRun scenario with ten
// SimpleRunnables, one ErrorRunnable and one registered func.
//
//	runError:   0 = no Run error, 1 = ErrorRunnable.Run fails,
//	            2 = the registered func fails
//	closeError: make ErrorRunnable.Close return an error
//	readyDelay: runnables signal ready only after the checkReady window
func testBasicInternal(t *testing.T, runError int, closeError bool, readyDelay bool) {
	errorRunnable := &ErrorRunnable{}
	runErrorE := errors.New("Test Error, Run")
	if runError == 1 {
		errorRunnable.runError = runErrorE
	}
	if closeError {
		errorRunnable.closeError = errors.New("Test Error, Close")
	}
	m := &MultiRun{}
	funcCalled := false
	items := make([]*SimpleRunnable, 10)
	for i := range items {
		items[i] = SimpleRunnableNew(t, readyDelay)
		m.RegisterRunnableReady(items[i])
		// Interleave the error runnable and the func among the runnables.
		if i == 3 {
			m.RegisterRunnable(errorRunnable)
		}
		if i == 6 {
			m.RegisterFunc(func() error {
				funcCalled = true
				if runError == 2 {
					return runErrorE
				}
				if !checkReady() {
					return errorNewRun
				}
				return nil
			}, nil)
		}
	}
	// The ready callback verifies overall state once everything reported
	// ready, then shuts the MultiRun down.
	err := m.Run(func() {
		if !funcCalled {
			t.Error("The registered function was not called...")
		}
		if !readyDelay {
			/* It is not guaranteed when the running flag will be updated, so wait 100 ms*/
			time.Sleep(100 * time.Millisecond)
		}
		for i := range items {
			if !items[i].running {
				t.Error("Item", i, "is not running")
			}
		}
		err := m.Close()
		if err != errorRunnable.closeError {
			t.Error("Returned error from Close() was not correct", err, errorRunnable.closeError)
		}
	})
	// Pick the error Run is expected to surface for this scenario.
	expectedError := ErrorClosed
	if runError > 0 {
		expectedError = runErrorE
	}
	if !readyDelay {
		// Instant-ready runnables overlap their pre-ready windows, so
		// checkReady is expected to fail in this mode.
		expectedError = errorNewRun
	}
	if err != expectedError {
		t.Error("Returned error from Run() was not correct", err, expectedError)
	}
	/* It is not guaranteed when the running flag will be updated, so wait 50 ms*/
	time.Sleep(50 * time.Millisecond)
	for i := range items {
		if items[i].running {
			t.Error("Item", i, "is still running")
		}
	}
	/* Try to close again. */
	m.Close()
}
// TestBasicFunctionality covers the main MultiRun flows: clean run, a failing
// runnable, a failing registered func, a failing Close, and instant-ready
// runnables.
func TestBasicFunctionality(t *testing.T) {
	/* Try with ready handling */
	testBasicInternal(t, 0, false, true)
	testBasicInternal(t, 1, false, true)
	testBasicInternal(t, 2, false, true)
	testBasicInternal(t, 0, true, true)
	/* Use instant ready */
	testBasicInternal(t, 0, false, false)
}
// SimpleRunnableNoReady is a runnable without the ready callback; its Run
// blocks until Close closes the channel.
type SimpleRunnableNoReady struct {
	sync.Mutex
	t      *testing.T
	closed bool
	c      chan (struct{})
}

// SimpleRunnableNoReadyNew builds a SimpleRunnableNoReady bound to t.
func SimpleRunnableNoReadyNew(t *testing.T) *SimpleRunnableNoReady {
	return &SimpleRunnableNoReady{
		c: make(chan (struct{})),
		t: t,
	}
}
// Run blocks until Close is called, then simulates a slow shutdown.
func (s *SimpleRunnableNoReady) Run() error {
	<-s.c
	/* Take some time to close */
	time.Sleep(50 * time.Millisecond)
	return nil
}

// Close unblocks Run; a second Close is reported as a test failure instead of
// panicking on a double channel close.
func (s *SimpleRunnableNoReady) Close() error {
	s.Lock()
	if s.closed {
		s.t.Error("SimpleRunnableNoReady closed twice")
	} else {
		s.closed = true
		close(s.c)
	}
	s.Unlock()
	return nil
}
// TestClose checks that Close stops Run no matter when it is issued: before
// Run even starts (i == 0) or at a random moment while Run is starting up.
func TestClose(t *testing.T) {
	for i := 0; i < 10; i++ {
		m := &MultiRun{}
		if i == 0 {
			// Close before Run starts.
			m.Close()
		} else {
			// Close at a random point within ~100 ms.
			go func() {
				time.Sleep(time.Duration(100000*rand.Float32()) * time.Microsecond)
				m.Close()
			}()
		}
		for j := 0; j < 10; j++ {
			m.RegisterRunnable(SimpleRunnableNoReadyNew(t))
			// NOTE(review): i < 10 is always true inside this loop, so the
			// func is registered on every iteration — confirm intent.
			if j == 5 && i < 10 {
				m.RegisterFunc(func() error {
					/* Waste some time during starting */
					time.Sleep(50 * time.Millisecond)
					return nil
				}, nil)
			}
		}
		c := make(chan (struct{}), 1)
		go func() {
			err := m.Run(nil)
			if err != ErrorClosed && err != nil {
				t.Error("Run returned error", err)
			}
			close(c)
		}()
		// Run must return within a second once Close was issued.
		select {
		case <-c:
		case <-time.After(time.Second):
			t.Error("Close function did not stop Run")
			return
		}
	}
}
|
// Copyright 2017 Baidu, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
// except in compliance with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions
// and limitations under the License.
package bce_multi_language
// Prompt message ids for the bosprobe tool. These are presumably the
// MultiLangMsgId values handled by the GetMsg implementations — confirm
// against the MultiLangMsgId declaration elsewhere in the package.
const (
	BOSPROBE_NET_STATUS_PROMT  = 1
	BOSPROBE_PING_AUTH_PROMT   = 2
	BOSPROBE_BASIC_INFO_PROMT  = 3
	BOSPROBE_EXIT_REASON_PROMT = 4
	BOSPROBE_ERROR_INFO_PROMT  = 5
)
// BosprobeChinese provides bosprobe prompt messages in Chinese.
type BosprobeChinese struct {
}

// GetMsg returns the Chinese prompt message for the given message id.
//
// BUG FIX: the original function body was empty, which does not compile for a
// function declared with a string result ("missing return"). Unknown ids now
// yield an empty string.
// TODO(review): populate the message table for the BOSPROBE_*_PROMT ids.
func (b *BosprobeChinese) GetMsg(id MultiLangMsgId) string {
	return ""
}
|
package middleware
import (
"github.com/gin-gonic/gin"
"github.com/Charelyz/gin-mgo-demo/db"
"net/http"
"strings"
)
// Connect is the database-connection middleware: it clones a fresh database
// session per request, exposes the request-scoped DB handle under the "db"
// key for every handler, and closes the session once the chain finishes.
func Connect(context *gin.Context) {
	s := db.Session.Clone()
	// BUG FIX: the original deferred s.Clone(), which leaked one session per
	// request; the cloned session must be closed when the request is done.
	defer s.Close()
	context.Set("db", s.DB(db.Mongo.Database))
	context.Next()
}
const (
	// APPLICATION_JSON is the Content-Type value ErrorHandler checks to decide
	// whether an error response is rendered as JSON or HTML.
	APPLICATION_JSON = "application/json"
)
// 错误处理中间件
func ErrorHandler(context *gin.Context){
context.Next()
// TODO
if len(context.Errors) > 0 {
ct := context.Request.Header.Get("Content-Type")
if strings.Contains(ct,APPLICATION_JSON) {
context.JSON(http.StatusBadRequest,gin.H{"error":context.Errors})
}else{
context.HTML(http.StatusBadRequest,"400",gin.H{"error":context.Errors})
}
}
} |
package api
import (
"net/http"
"github.com/thetogi/YReserve2/api/wrapper"
"github.com/thetogi/YReserve2/model"
)
// InitUser registers the /user CRUD routes (create, update, delete, get,
// get-all) together with their swagger operation documentation.
func (a *API) InitUser() {
	// swagger:operation POST /user user users
	// ---
	// summary: Create new user
	// description: Send email with other user details to create new user. Email must be unique.
	// parameters:
	// - name: body
	//   in: body
	//   schema:
	//     $ref: '#/definitions/User'
	// Responses:
	//   201:
	//     description: success return user auth containing user with auth token
	//     schema:
	//       "$ref": '#/definitions/UserAuth'
	//   default:
	//     description: unexpected error
	//     schema:
	//       "$ref": "#/definitions/AppError"
	a.Router.User.Handle("", a.requestHandler(a.createUser)).Methods("POST")
	// swagger:operation PUT /user/{userId} user users
	// ---
	// summary: Update user
	// description: Send user body with userId with other user data which needs to be updated.
	// Security:
	// - AuthKey: []
	// parameters:
	// - name: Authorization
	//   in: header
	//   description: JWT token used to validate user
	//   type: string
	//   required: true
	// - name: userId
	//   in: path
	//   description: unique identifier of user
	//   type: int
	//   required: true
	// - name: body
	//   in: body
	//   description: updated user body, must have userId non zero
	//   schema:
	//     $ref: '#/definitions/User'
	// Responses:
	//   200:
	//     description: success return updated user
	//     schema:
	//       "$ref": '#/definitions/User'
	//   default:
	//     description: unexpected error
	//     schema:
	//       "$ref": "#/definitions/AppError"
	a.Router.User.Handle("/{userId:[0-9]+}", a.requestWithAuthHandler(a.updateUser)).Methods("PUT")
	// swagger:operation DELETE /user/{userId} user users
	// ---
	// summary: Delete user
	// description: Delete user entry from backend.
	// Security:
	// - AuthKey: []
	// parameters:
	// - name: Authorization
	//   in: header
	//   description: JWT token used to validate user
	//   type: string
	//   required: true
	// - name: userId
	//   in: path
	//   description: unique identifier of user
	//   type: int
	//   required: true
	// Responses:
	//   200:
	//     description: deleted successfully return response ok
	//   default:
	//     description: unexpected error
	//     schema:
	//       "$ref": "#/definitions/AppError"
	a.Router.User.Handle("/{userId:[0-9]+}", a.requestWithAuthHandler(a.deleteUser)).Methods("DELETE")
	// swagger:operation GET /user/{userId} user users
	// ---
	// summary: Get user object
	// description: Get user object based on unique user identifier.
	// Security:
	// - AuthKey: []
	// parameters:
	// - name: Authorization
	//   in: header
	//   description: JWT token used to validate user
	//   type: string
	//   required: true
	// - name: userId
	//   in: path
	//   description: unique identifier of user
	//   type: int
	//   required: true
	// Responses:
	//   200:
	//     description: success return user data
	//     schema:
	//       "$ref": '#/definitions/User'
	//   default:
	//     description: unexpected error
	//     schema:
	//       "$ref": "#/definitions/AppError"
	a.Router.User.Handle("/{userId:[0-9]+}", a.requestWithAuthHandler(a.getUser)).Methods("GET")
	// swagger:operation GET /user user users
	// ---
	// summary: Get all user objects
	// description: Get all user objects created, can be fetched only by sudo user
	// Security:
	// - AuthKey: []
	// parameters:
	// - name: Authorization
	//   in: header
	//   description: JWT token used to validate user
	//   type: string
	//   required: true
	// Responses:
	//   200:
	//     description: success return user data
	//     schema:
	//       type: array
	//       items:
	//         "$ref": '#/definitions/User'
	//   default:
	//     description: unexpected error
	//     schema:
	//       "$ref": "#/definitions/AppError"
	a.Router.User.Handle("", a.requestWithSudoHandler(a.getAllUser)).Methods("GET")
}
// createUser handles POST /user: it decodes and validates the incoming user
// payload, persists it, and responds with the created user plus auth token.
func (a *API) createUser(rc *wrapper.RequestContext, w http.ResponseWriter, r *http.Request) {
	user := model.UserFromJson(r.Body)
	if user == nil {
		rc.SetError("Body received for user creation is invalid.", http.StatusBadRequest)
		return
	}
	if err := user.Valid(); err != nil {
		rc.SetError("User object received is not valid.", http.StatusBadRequest)
		return
	}
	userAuth, err := rc.App.CreateUser(user)
	if err != nil {
		rc.SetError("User object creation failed.", http.StatusInternalServerError)
		return
	}
	rc.SetAppResponse(userAuth.ToJson(), http.StatusCreated)
}
// updateUser handles PUT /user/{userId}: it decodes the updated user payload
// (which must carry a non-zero UserID) and persists the update.
//
// BUG FIX: the original doc comment and the first error message both said
// "create"/"user creation" although this is the update handler.
func (a *API) updateUser(rc *wrapper.RequestContext, w http.ResponseWriter, r *http.Request) {
	user := model.UserFromJson(r.Body)
	if user == nil {
		rc.SetError("Body received for user update is invalid.", http.StatusBadRequest)
		return
	}
	if user.UserID == 0 {
		rc.SetError("UserId received to update userID is 0.", http.StatusBadRequest)
		return
	}
	var err *model.AppError
	if user, err = rc.App.UpdateUser(user); err != nil {
		rc.SetError("User object update failed.", http.StatusInternalServerError)
		return
	}
	rc.SetAppResponse(user.ToJson(), http.StatusOK)
}
// getUser handles GET /user/{userId}: it returns the user record for the
// authenticated session's user id.
//
// NOTE(review): the {userId} path parameter declared on the route is ignored;
// the id is taken from rc.App.UserSession.UserID — confirm whether that is
// intentional (users can only ever fetch themselves).
func (a *API) getUser(rc *wrapper.RequestContext, w http.ResponseWriter, r *http.Request) {
	userID := rc.App.UserSession.UserID
	var user *model.User
	var appErr *model.AppError
	if user, appErr = rc.App.GetUser(userID); appErr != nil {
		rc.SetError("User object get failed.", http.StatusInternalServerError)
		return
	}
	rc.SetAppResponse(user.ToJson(), http.StatusOK)
}
// deleteUser handles DELETE /user/{userId}: it removes the authenticated
// session user's record and responds with a small OK payload.
func (a *API) deleteUser(rc *wrapper.RequestContext, w http.ResponseWriter, r *http.Request) {
	userID := rc.App.UserSession.UserID
	if _, err := rc.App.DeleteUser(userID); err != nil {
		// BUG FIX: the original message said "get failed" in the delete handler.
		rc.SetError("User object delete failed.", http.StatusInternalServerError)
		return
	}
	// BUG FIX: the original body used single quotes ({'response': 'OK'}),
	// which is not valid JSON.
	rc.SetAppResponse(`{"response": "OK"}`, http.StatusOK)
}
// getAllUser handles GET /user (sudo only): it returns every user record.
func (a *API) getAllUser(rc *wrapper.RequestContext, w http.ResponseWriter, r *http.Request) {
	users, err := rc.App.GetAllUser()
	if err != nil {
		rc.SetError("All users object get failed.", http.StatusInternalServerError)
		return
	}
	rc.SetAppResponse(model.UsersToJson(users), http.StatusOK)
}
|
package order
import (
"context"
"time"
"github.com/benkim0414/bundle/bundle"
"github.com/go-kit/kit/endpoint"
"github.com/go-kit/kit/log"
)
// Endpoints collects the go-kit endpoints that compose the order service.
type Endpoints struct {
	OrderEndpoint endpoint.Endpoint
}
// MakeServerEndpoints returns an Endpoints struct where each endpoint invokes
// the corresponding method on the provided service.
func MakeServerEndpoints(s Service) Endpoints {
	var endpoints Endpoints
	endpoints.OrderEndpoint = MakeOrderEndpoint(s)
	return endpoints
}
// Order implements Service by routing the call through OrderEndpoint, so the
// Endpoints struct itself can be used wherever a Service is expected.
func (e Endpoints) Order(ctx context.Context, quantity int, c bundle.FlowerCode) (*Invoice, error) {
	req := orderRequest{OrderItem{Quantity: quantity, Code: c}}
	res, err := e.OrderEndpoint(ctx, req)
	if err != nil {
		return nil, err
	}
	orderRes := res.(orderResponse)
	return orderRes.Invoice, orderRes.Err
}
// MakeOrderEndpoint returns an endpoint that invokes Order on the service.
func MakeOrderEndpoint(s Service) endpoint.Endpoint {
	return func(ctx context.Context, request interface{}) (interface{}, error) {
		req := request.(orderRequest)
		invoice, err := s.Order(ctx, req.Quantity, req.Code)
		// The business error travels inside the response struct; the endpoint
		// itself reports no transport error.
		return orderResponse{Invoice: invoice, Err: err}, nil
	}
}
// EndpointLoggingMiddleware returns an endpoint middleware that logs
// the duration of each invocation, and the resulting error, if any.
func EndpointLoggingMiddleware(logger log.Logger) endpoint.Middleware {
	return func(next endpoint.Endpoint) endpoint.Endpoint {
		return func(ctx context.Context, request interface{}) (response interface{}, err error) {
			begin := time.Now()
			defer func() {
				logger.Log("error", err, "took", time.Since(begin))
			}()
			return next(ctx, request)
		}
	}
}
// orderRequest carries the parameters of an Order call across the endpoint.
type orderRequest struct {
	OrderItem
}

// orderResponse carries the Order result; Err transports the business error
// separately from transport failures.
type orderResponse struct {
	Invoice *Invoice `json:"invoice"`
	Err     error    `json:"error"`
}

// error exposes the transported business error.
func (r orderResponse) error() error { return r.Err }
|
// Copyright 2015 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package resolver
import (
"context"
"net"
"testing"
"github.com/cockroachdb/cockroach/pkg/base"
"github.com/cockroachdb/errors"
"github.com/stretchr/testify/require"
)
// TestParseResolverAddress checks that NewResolver accepts or rejects resolver
// specifications and normalizes the address, filling in the default port.
func TestParseResolverAddress(t *testing.T) {
	def := ensureHostPort(":", base.DefaultPort)
	testCases := []struct {
		input           string
		success         bool
		resolverType    string
		resolverAddress string
	}{
		// Ports are not checked at parsing time. They are at GetAddress time though.
		{"127.0.0.1:26222", true, "tcp", "127.0.0.1:26222"},
		{":" + base.DefaultPort, true, "tcp", def},
		{"127.0.0.1", true, "tcp", "127.0.0.1:" + base.DefaultPort},
		{"", false, "", ""},
		{"", false, "tcp", ""},
		{":", true, "tcp", def},
	}
	for tcNum, tc := range testCases {
		resolver, err := NewResolver(tc.input)
		if (err == nil) != tc.success {
			t.Errorf("#%d: expected success=%t, got err=%v", tcNum, tc.success, err)
		}
		if err != nil {
			// Parsing failed as expected; nothing further to verify.
			continue
		}
		if resolver.Type() != tc.resolverType {
			t.Errorf("#%d: expected resolverType=%s, got %+v", tcNum, tc.resolverType, resolver)
		}
		if resolver.Addr() != tc.resolverAddress {
			t.Errorf("#%d: expected resolverAddress=%s, got %+v", tcNum, tc.resolverAddress, resolver)
		}
	}
}
// TestGetAddress checks that a parsed resolver yields the expected network
// type and normalized address (default port appended when missing).
func TestGetAddress(t *testing.T) {
	testCases := []struct {
		address      string
		success      bool
		addressType  string
		addressValue string
	}{
		{"127.0.0.1:26222", true, "tcp", "127.0.0.1:26222"},
		{"127.0.0.1", true, "tcp", "127.0.0.1:" + base.DefaultPort},
		{"localhost:80", true, "tcp", "localhost:80"},
	}
	for tcNum, tc := range testCases {
		resolver, err := NewResolver(tc.address)
		if err != nil {
			t.Fatal(err)
		}
		address, err := resolver.GetAddress()
		if (err == nil) != tc.success {
			t.Errorf("#%d: expected success=%t, got err=%v", tcNum, tc.success, err)
		}
		if err != nil {
			continue
		}
		if address.Network() != tc.addressType {
			t.Errorf("#%d: expected address type=%s, got %+v", tcNum, tc.addressType, address)
		}
		if address.String() != tc.addressValue {
			t.Errorf("#%d: expected address value=%s, got %+v", tcNum, tc.addressValue, address)
		}
	}
}
// TestSRV exercises SRV-based resolution with a stubbed LookupSRV: lookup
// failures and addresses carrying an explicit port-only/IP form yield no
// resolvers, while a successful lookup yields one address per SRV record
// with a non-zero port (node3 with port 0 is excluded from expectedAddrs).
func TestSRV(t *testing.T) {
	type lookupFunc func(service, proto, name string) (string, []*net.SRV, error)
	// lookupWithErr builds a stub LookupSRV that always fails with err.
	lookupWithErr := func(err error) lookupFunc {
		return func(service, proto, name string) (string, []*net.SRV, error) {
			if service != "" || proto != "" {
				t.Errorf("unexpected params in erroring LookupSRV() call")
			}
			return "", nil, err
		}
	}
	dnsErr := &net.DNSError{Err: "no such host", Name: "", Server: "", IsTimeout: false}
	// lookupSuccess returns three records, one of them with port 0.
	lookupSuccess := func(service, proto, name string) (string, []*net.SRV, error) {
		if service != "" || proto != "" {
			t.Errorf("unexpected params in successful LookupSRV() call")
		}
		srvs := []*net.SRV{
			{Target: "node1", Port: 26222},
			{Target: "node2", Port: 35222},
			{Target: "node3", Port: 0},
		}
		return "cluster", srvs, nil
	}
	expectedAddrs := []string{"node1:26222", "node2:35222"}
	testCases := []struct {
		address  string
		lookuper lookupFunc
		want     []string
	}{
		{":26222", nil, nil},
		{"some.host", lookupWithErr(dnsErr), nil},
		{"some.host", lookupWithErr(errors.New("another error")), nil},
		{"some.host", lookupSuccess, expectedAddrs},
		{"some.host:26222", lookupSuccess, expectedAddrs},
		// "real" `lookupSRV` returns "no such host" when resolving IP addresses
		{"127.0.0.1", lookupWithErr(dnsErr), nil},
		{"127.0.0.1:26222", lookupWithErr(dnsErr), nil},
		{"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]", lookupWithErr(dnsErr), nil},
		{"[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:26222", lookupWithErr(dnsErr), nil},
	}
	for tcNum, tc := range testCases {
		func() {
			// Install the stub lookup and restore the original when this
			// anonymous func (i.e. this test case) ends.
			defer TestingOverrideSRVLookupFn(tc.lookuper)()
			resolvers, err := SRV(context.Background(), tc.address)
			if err != nil {
				t.Errorf("#%d: expected success, got err=%v", tcNum, err)
			}
			require.Equal(t, tc.want, resolvers, "Test #%d failed", tcNum)
		}()
	}
}
|
// Copyright 2019 Red Hat, Inc. and/or its affiliates
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package test
import (
"bytes"
"github.com/kiegroup/kogito-operator/core/test"
"github.com/spf13/cobra"
"strings"
"github.com/kiegroup/kogito-operator/cmd/kogito/command/context"
"github.com/kiegroup/kogito-operator/core/client"
clitest "github.com/kiegroup/kogito-operator/core/client/test"
"k8s.io/apimachinery/pkg/runtime"
)
var (
	// Buffers and root command shared by all CLI tests; reassigned by
	// SetupCliTestWithKubeClient before each test.
	testErr     *bytes.Buffer
	testOut     *bytes.Buffer
	rootCommand *cobra.Command
)

// CLITestContext holds the text context for the CLI Unit Tests.
// Use SetupCliTest or SetupCliTestWithKubeClient to get a reference for your test cases
type CLITestContext interface {
	// ExecuteCli runs the CLI with the arguments given at setup time and
	// returns stdout, stderr and the execution error.
	ExecuteCli() (string, string, error)
	// ExecuteCliCmd runs the given command line within the same context.
	ExecuteCliCmd(cmd string) (string, string, error)
	// GetClient returns the (mocked) Kubernetes client used by the test.
	GetClient() *client.Client
}

// cliTestContext is the default CLITestContext implementation.
type cliTestContext struct {
	*context.CommandContext
	client *client.Client
}
// SetupCliTest creates the CLI default test environment. The mocked Kubernetes client does not support OpenShift.
func SetupCliTest(cli string, factory context.CommandFactory, kubeObjects ...runtime.Object) CLITestContext {
	return SetupCliTestWithKubeClient(cli, factory, test.NewFakeClientBuilder().AddK8sObjects(kubeObjects...).Build())
}

// SetupCliTestWithKubeClient Setup a CLI test environment with the given Kubernetes client.
//
// Note: it reassigns the package-level testOut/testErr buffers and
// rootCommand, so tests built on this helper must not run in parallel.
func SetupCliTestWithKubeClient(cmd string, factory context.CommandFactory, kubeCli *client.Client) CLITestContext {
	testErr = new(bytes.Buffer)
	testOut = new(bytes.Buffer)
	ctx := &context.CommandContext{Client: kubeCli}
	kogitoRootCmd := context.NewRootCommand(ctx, testOut)
	// Feed the command line and wire both output streams into the buffers.
	kogitoRootCmd.Command().SetArgs(strings.Split(cmd, " "))
	kogitoRootCmd.Command().SetOut(testOut)
	kogitoRootCmd.Command().SetErr(testErr)
	rootCommand = kogitoRootCmd.Command()
	factory.BuildCommands(ctx, rootCommand)
	return &cliTestContext{CommandContext: ctx, client: kubeCli}
}
// ExecuteCli executes the CLI setup before executing the test.
// It returns the captured stdout, stderr and the execution error.
func (c *cliTestContext) ExecuteCli() (string, string, error) {
	err := rootCommand.Execute()
	return testOut.String(), testErr.String(), err
}

// ExecuteCliCmd executes the given command in the actual context.
// Output accumulates in the shared buffers across calls.
func (c *cliTestContext) ExecuteCliCmd(cmd string) (string, string, error) {
	rootCommand.SetArgs(strings.Split(cmd, " "))
	err := rootCommand.Execute()
	return testOut.String(), testErr.String(), err
}

// GetClient returns the Kubernetes client bound at setup time.
func (c *cliTestContext) GetClient() *client.Client {
	return c.client
}
// OverrideKubeConfig overrides the default KUBECONFIG location to a temporary one.
// The returned teardown restores the previous configuration.
func OverrideKubeConfig() (teardown func()) {
	_, teardown = clitest.OverrideDefaultKubeConfig()
	return
}

// OverrideKubeConfigAndCreateContextInNamespace overrides the default KUBECONFIG location to a temporary one and creates a mock context in the given namespace.
func OverrideKubeConfigAndCreateContextInNamespace(namespace string) (teardown func()) {
	_, teardown = clitest.OverrideDefaultKubeConfigWithNamespace(namespace)
	return
}

// OverrideKubeConfigAndCreateDefaultContext initializes the default KUBECONFIG location to a temporary one and creates a mock context in the "default" namespace.
func OverrideKubeConfigAndCreateDefaultContext() (teardown func()) {
	_, teardown = clitest.OverrideDefaultKubeConfigEmptyContext()
	return
}
|
package main
import (
"cloud.google.com/go/compute/metadata"
"cloud.google.com/go/profiler"
"context"
"contrib.go.opencensus.io/exporter/stackdriver"
"contrib.go.opencensus.io/exporter/stackdriver/propagation"
"fmt"
"github.com/gin-gonic/gin"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/trace"
"io/ioutil"
"net/http"
"os"
"os/signal"
"sync"
"syscall"
"time"
)
// main starts the Stackdriver profiler and trace exporter, serves the gin
// router behind an OpenCensus HTTP handler, and shuts down gracefully (10 s
// drain) on SIGTERM.
func main() {
	// Profiler.
	if err := profiler.Start(profiler.Config{
		//DebugLogging: true,
	}); err != nil {
		panic("プロファイラの起動に失敗 : " + err.Error())
	}
	// Trace exporter.
	exporter, err := stackdriver.NewExporter(stackdriver.Options{
		//ProjectID: os.Getenv("GOOGLE_CLOUD_PROJECT"),
	})
	if err != nil {
		fmt.Println("Stackdriver exporter initialize NG.")
		panic(err)
	}
	fmt.Println("Stackdriver exporter initialize OK.")
	trace.RegisterExporter(exporter)
	defer exporter.Flush()
	trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) // sample every request
	route := gin.Default()
	// NOTE(review): this registers the router on http.DefaultServeMux, but the
	// server below uses its own ochttp handler — this line looks redundant;
	// confirm before removing.
	http.Handle("/", route)
	route.GET("/", handle)
	port := os.Getenv("PORT")
	if port == "" {
		// Default port for local runs when PORT is not provided.
		port = "8080"
	}
	server := &http.Server{
		Addr: fmt.Sprintf(":%s", port),
		Handler: &ochttp.Handler{
			Handler:     route,
			Propagation: &propagation.HTTPFormat{},
		},
	}
	go func() {
		if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			panic(err)
		}
	}()
	// Block until SIGTERM, then drain in-flight requests for up to 10 seconds.
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, syscall.SIGTERM)
	<-sigCh
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	if err := server.Shutdown(ctx); err != nil {
		panic(err)
	}
}
// handle responds to GET / by calling the backend App Engine service with an
// identity token and relaying the backend's response body. Each step is
// wrapped in its own trace span.
func handle(context *gin.Context) {
	// Project ID.
	_, spanPrjID := trace.StartSpan(context.Request.Context(), "pid")
	projectId := getProjectID()
	fmt.Println(projectId) // TODO
	spanPrjID.Annotate([]trace.Attribute{trace.StringAttribute("key", "value")}, "something happened")
	spanPrjID.AddAttributes(trace.StringAttribute("hello", "world"))
	spanPrjID.End()
	// Audience for the ID token.
	_, spanAud := trace.StartSpan(context.Request.Context(), "aud")
	audience := os.Getenv("ID_TOKEN_AUDIENCE")
	fmt.Printf("Audience: %s\n", audience)
	spanAud.End()
	// ID token.
	_, spanToken := trace.StartSpan(context.Request.Context(), "token")
	idToken := generateToken(audience)
	fmt.Println(idToken) // TODO
	spanToken.End()
	// Call backend service.
	ctx2, spanBackend := trace.StartSpan(context.Request.Context(), "backend")
	client := &http.Client{Transport: &ochttp.Transport{}}
	path := fmt.Sprintf("https://server-dot-%s.appspot.com", projectId)
	req, err := http.NewRequest("GET", path, nil)
	// BUG FIX: check the error before touching req. The original called
	// req.WithContext and req.Header.Add ahead of the nil check, which would
	// panic with a nil pointer dereference if NewRequest failed.
	if err != nil {
		context.AbortWithError(http.StatusInternalServerError, err)
		return
	}
	req = req.WithContext(ctx2)
	//req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
	req.Header.Add("Authorization", "Bearer "+idToken)
	resp, err := client.Do(req)
	if err != nil {
		context.AbortWithError(http.StatusInternalServerError, err)
		return
	}
	defer resp.Body.Close()
	b, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		context.AbortWithError(http.StatusInternalServerError, err)
		return
	}
	spanBackend.End()
	context.String(http.StatusOK, "Response from backend:\n %s", string(b))
}
// KeyCache is a cached identity token together with its expiry time.
type KeyCache struct {
	Key string
	TTL time.Time
}

// keyCache maps an audience string to its KeyCache entry.
var keyCache = sync.Map{}
// generateToken returns an identity token for the given audience, fetching it
// from the metadata server and caching it per audience.
//
// BUG FIX: the original only ever read from keyCache and never stored into it,
// and never consulted KeyCache.TTL — so the cache was permanently empty and
// every request hit the metadata server. Entries are now stored after a fetch
// and honored until their TTL passes.
func generateToken(audience string) string {
	if v, ok := keyCache.Load(audience); ok {
		entry := v.(KeyCache)
		if time.Now().Before(entry.TTL) {
			return entry.Key
		}
		// Expired entry: drop it and fetch a fresh token below.
		keyCache.Delete(audience)
	}
	idToken, err := metadata.Get("instance/service-accounts/default/identity?audience=" + audience)
	if err != nil {
		panic(err) // TODO: proper error handling
	}
	// Cache for 30 minutes.
	// TODO(review): confirm the actual token lifetime; metadata identity
	// tokens are typically valid for about one hour.
	keyCache.Store(audience, KeyCache{Key: idToken, TTL: time.Now().Add(30 * time.Minute)})
	fmt.Printf("ID Token: %s\n", idToken)
	return idToken
}
// getProjectID returns the GCP project id, preferring the PROJECT_ID
// environment variable (cheaper than the metadata server, available on GAE)
// and falling back to the metadata server otherwise.
func getProjectID() string {
	envProjID, ok := os.LookupEnv("PROJECT_ID")
	if ok {
		fmt.Printf("Project ID: %s (env)\n", envProjID)
		return envProjID
	}
	projectId, err := metadata.ProjectID()
	if err != nil {
		panic(err) // TODO: proper error handling
	}
	// BUG FIX: the original logged envProjID here, which is always empty on
	// this path; log the id actually fetched from the metadata server.
	fmt.Printf("Project ID: %s (meta)\n", projectId)
	return projectId
}
|
package user
import (
"context"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"go.mongodb.org/mongo-driver/mongo"
"log"
)
// UserRepository abstracts CRUD operations on the "users" collection.
type UserRepository interface {
	Create(*User, *mongo.Database) error
	MultiInsert([]interface{}, *mongo.Database) error
	MultiDelete(interface{}, *mongo.Database) error
	Delete(string, *mongo.Database) error
	Get(string, *mongo.Database) (*User, error)
	List(interface{}, *mongo.Database) ([]*User, error)
}

// userRepository is the MongoDB-backed UserRepository implementation.
type userRepository struct {
}

// NewUserRepository returns a new MongoDB-backed user repository.
func NewUserRepository() *userRepository {
	return &userRepository{}
}
// Create inserts a single user document into the "users" collection.
func (u *userRepository) Create(user *User, db *mongo.Database) error {
	_, err := db.Collection("users").InsertOne(context.TODO(), user)
	return err
}
// MultiInsert inserts a batch of user documents in one InsertMany call.
//
// BUG FIX: the original returned nil in its error branch, silently swallowing
// InsertMany failures; the error is now propagated to the caller.
func (u *userRepository) MultiInsert(users []interface{}, db *mongo.Database) error {
	collection := db.Collection("users")
	_, err := collection.InsertMany(context.TODO(), users)
	return err
}
// MultiDelete removes every user document matching the given filter.
func (u *userRepository) MultiDelete(filter interface{}, db *mongo.Database) error {
	_, err := db.Collection("users").DeleteMany(context.TODO(), filter)
	return err
}
// Delete removes the user document with the given hex object id.
//
// BUG FIX: the original called log.Fatal on errors, which terminates the whole
// process from inside a repository method; errors are now returned instead
// (the UserRepository interface already declares an error result).
func (u *userRepository) Delete(id string, db *mongo.Database) error {
	collection := db.Collection("users")
	hex, err := primitive.ObjectIDFromHex(id)
	if err != nil {
		return err
	}
	_, err = collection.DeleteOne(context.TODO(), bson.M{"_id": hex})
	return err
}
// Get looks up a single user by its hex object id.
func (u *userRepository) Get(id string, db *mongo.Database) (*User, error) {
	oid, err := primitive.ObjectIDFromHex(id)
	if err != nil {
		return nil, err
	}
	result := db.Collection("users").FindOne(context.TODO(), bson.M{"_id": oid})
	user := new(User)
	if err := result.Decode(user); err != nil {
		return nil, err
	}
	return user, nil
}
// List returns all user documents matching the given filter (all documents
// when the filter is nil).
//
// BUG FIXES relative to the original:
//   - the filter parameter was ignored in favor of a hardcoded debug filter
//     (bson.M{"username": "username2"}); the caller's filter is now used;
//   - the cursor was never closed (resource leak) and cursor.Err() was never
//     checked after iteration;
//   - decode errors called log.Fatal, killing the process, instead of being
//     returned.
func (u *userRepository) List(filter interface{}, db *mongo.Database) ([]*User, error) {
	if filter == nil {
		// Match every document when no filter is supplied.
		filter = bson.M{}
	}
	collection := db.Collection("users")
	cursor, err := collection.Find(context.TODO(), filter)
	if err != nil {
		return nil, err
	}
	defer cursor.Close(context.TODO())
	var users []*User
	for cursor.Next(context.TODO()) {
		var model User
		if err := cursor.Decode(&model); err != nil {
			return nil, err
		}
		users = append(users, &model)
	}
	if err := cursor.Err(); err != nil {
		return nil, err
	}
	return users, nil
}
|
package air
import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
"github.com/BurntSushi/toml"
"github.com/integrii/flaggy"
)
// AppName is the name of the current web application.
//
// It is called "app_name" in the configuration file.
var AppName = "air"

// MaintainerEmail is the e-mail address of the one who is responsible for
// maintaining the current web application.
//
// It is called "maintainer_email" in the configuration file.
var MaintainerEmail = ""

// AutoCert sets whether or not to use Let's Encrypt for TLS.
var AutoCert = false

// DebugMode indicates whether the current web application is in debug mode.
//
// It is called "debug_mode" in the configuration file.
var DebugMode = false

// DevAutoCert don't run autocert if DebugMode/DevMode is on.
var DevAutoCert = false

// LoggerLowestLevel is the lowest level of the logger.
//
// It will be forced to `LoggerLevelDebug` when the `DebugMode` is true.
//
// It is called "logger_lowest_level" in the configuration file.
var LoggerLowestLevel = LoggerLevelDebug

// LoggerOutput is the output destination of the logger.
var LoggerOutput = io.Writer(os.Stdout)

// Address is the TCP address that the server listens on.
//
// It is called "address" in the configuration file.
var Address = "localhost:2333"

// HostWhitelist is the hosts allowed by the server.
//
// It is called "host_whitelist" in the configuration file.
var HostWhitelist = []string{}

// ReadTimeout is the maximum duration the server reads the request.
//
// It is called "read_timeout" in the configuration file.
var ReadTimeout = time.Duration(0)

// ReadHeaderTimeout is the amount of time allowed the server reads the request
// headers.
//
// It is called "read_header_timeout" in the configuration file.
var ReadHeaderTimeout = time.Duration(0)

// WriteTimeout is the maximum duration the server writes a response.
//
// It is called "write_timeout" in the configuration file.
var WriteTimeout = time.Duration(0)

// IdleTimeout is the maximum amount of time the server waits for the next
// request. If it is zero, the value of `ReadTimeout` is used. If both are zero,
// `ReadHeaderTimeout` is used.
//
// It is called "idle_timeout" in the configuration file.
var IdleTimeout = time.Duration(0)

// MaxHeaderBytes is the maximum number of bytes the server will read parsing
// the request header's names and values, including the request line.
//
// It is called "max_header_bytes" in the configuration file.
var MaxHeaderBytes = 1 << 20

// TLSCertFile is the path to the TLS certificate file used when starting the
// server.
//
// It is called "tls_cert_file" in the configuration file.
var TLSCertFile = ""

// TLSKeyFile is the path to the TLS key file used when starting the server.
//
// It is called "tls_key_file" in the configuration file.
var TLSKeyFile = ""

// HTTPSEnforced indicates whether the HTTPS is enforced.
//
// It is called "https_enforced" in the configuration file.
var HTTPSEnforced = true

// ACMECertRoot is the root of the ACME certificates.
//
// It is called "acme_cert_root" in the configuration file.
var ACMECertRoot = "acme-certs"

// WebSocketHandshakeTimeout is the maximum amount of time the server waits for
// the WebSocket handshake to complete.
//
// It is called "websocket_handshake_timeout" in the configuration file.
var WebSocketHandshakeTimeout = time.Duration(0)

// WebSocketSubprotocols is the server's supported WebSocket subprotocols.
//
// It is called "websocket_subprotocols" in the configuration file.
var WebSocketSubprotocols = []string{}
// ErrorHandler is the centralized error handler for the server.
//
// It writes the error message to the client unless the response has already
// been written. 500-level messages are masked outside of DebugMode so that
// internal details never reach clients.
var ErrorHandler = func(err error, req *Request, res *Response) {
	// A response that has already been written cannot be replaced.
	if res.Written {
		return
	}
	if res.Status < 400 {
		res.Status = 500
	}
	m := err.Error()
	if res.Status == 500 && !DebugMode {
		// Hide internal error details from clients unless debugging.
		m = "internal server error"
	}
	if req.Method == "GET" || req.Method == "HEAD" {
		// Drop cache validators from the outgoing response so clients do
		// not cache the error body. BUG FIX: this previously deleted from
		// req.Headers — request headers are inbound and deleting them has
		// no effect on what the client receives; the validators must be
		// removed from the response.
		delete(res.Headers, "etag")
		delete(res.Headers, "last-modified")
	}
	res.WriteString(m)
}
// Pregases is the `Gas` chain that performs before routing.
var Pregases = []Gas{}

// Gases is the `Gas` chain that performs after routing.
var Gases = []Gas{}

// AutoPushEnabled indicates whether the auto push is enabled.
//
// It is called "auto_push_enabled" in the configuration file.
var AutoPushEnabled = false

// MinifierEnabled indicates whether the minifier is enabled.
//
// It is called "minifier_enabled" in the configuration file.
var MinifierEnabled = false

// TemplateRoot is the root of the HTML templates. All the HTML templates inside
// it will be recursively parsed into the renderer.
//
// It is called "template_root" in the configuration file.
var TemplateRoot = "templates"

// TemplateExts is the filename extensions of the HTML templates used to
// distinguish the HTML template files in the `TemplateRoot` when parsing them
// into the renderer.
//
// It is called "template_exts" in the configuration file.
var TemplateExts = []string{".html"}

// TemplateLeftDelim is the left side of the HTML template delimiter the
// renderer renders the HTML templates.
//
// It is called "template_left_delim" in the configuration file.
var TemplateLeftDelim = "{{"

// TemplateRightDelim is the right side of the HTML template delimiter the
// renderer renders the HTML templates.
//
// It is called "template_right_delim" in the configuration file.
var TemplateRightDelim = "}}"

// TemplateFuncMap is the HTML template function map the renderer renders the
// HTML templates.
var TemplateFuncMap = obj{
	"strlen":  strlen,
	"substr":  substr,
	"timefmt": timefmt,
}

// CofferEnabled indicates whether the coffer is enabled.
//
// It is called "coffer_enabled" in the configuration file.
var CofferEnabled = false

// AssetRoot is the root of the asset files. All the asset files inside it will
// be recursively parsed into the coffer.
//
// It is called "asset_root" in the configuration file.
var AssetRoot = "assets"

// AssetExts is the filename extensions of the asset files used to distinguish
// the asset files in the `AssetRoot` when loading them into the coffer.
//
// It is called "asset_exts" in the configuration file.
var AssetExts = []string{
	".html",
	".css",
	".js",
	".json",
	".xml",
	".svg",
	".jpg",
	".jpeg",
	".png",
	".gif",
}

// Config is a set of key-value pairs parsed from the TOML configuration file.
// The file path comes from the command-line flag "c"/"config"; when the flag
// is empty it defaults to "./private/config.toml" (see init).
var Config = obj{}
// init parses the command-line flags, loads the optional TOML configuration
// file, and copies recognized keys into the matching package-level variables.
// A missing configuration file is not an error; any other read or parse
// failure panics.
func init() {
	var cf string
	flaggy.String(&cf, "c", "config", "configuration file")
	flaggy.Bool(&DebugMode, "dev", "devmode", "put the server into dev mode for extra logging and checks")
	flaggy.Parse()
	// Default configuration file path when the flag is not given.
	if cf == "" {
		cf = "./private/config.toml"
	}
	if b, err := ioutil.ReadFile(cf); err != nil {
		// Only a genuinely unreadable file is fatal; absence is fine.
		if !os.IsNotExist(err) {
			panic(fmt.Errorf(
				"air: failed to read configuration file: %v",
				err,
			))
		}
	} else if err := toml.Unmarshal(b, &Config); err != nil {
		panic(fmt.Errorf(
			"air: failed to unmarshal configuration file: %v",
			err,
		))
	}
	if v, ok := Config["app_name"].(string); ok {
		AppName = v
	}
	if v, ok := Config["autocert"].(bool); ok {
		AutoCert = v
		// NOTE(review): "dev_autocert" is only consulted when "autocert"
		// is present in the configuration — confirm this nesting is
		// intentional.
		if v, ok := Config["dev_autocert"].(bool); ok {
			DevAutoCert = v
		}
	}
	// "debug_mode" takes precedence over the alternative "dev_mode" key.
	if v, ok := Config["debug_mode"].(bool); ok {
		DebugMode = v
	} else if v, ok := Config["dev_mode"].(bool); ok {
		DebugMode = v
	}
	if v, ok := Config["maintainer_email"].(string); ok {
		MaintainerEmail = v
	}
	// Map the level name from the file onto the typed logger level;
	// unrecognized names leave the default untouched.
	if v, ok := Config["logger_lowest_level"].(string); ok {
		switch v {
		case LoggerLevelDebug.String():
			LoggerLowestLevel = LoggerLevelDebug
		case LoggerLevelInfo.String():
			LoggerLowestLevel = LoggerLevelInfo
		case LoggerLevelWarn.String():
			LoggerLowestLevel = LoggerLevelWarn
		case LoggerLevelError.String():
			LoggerLowestLevel = LoggerLevelError
		case LoggerLevelFatal.String():
			LoggerLowestLevel = LoggerLevelFatal
		case LoggerLevelPanic.String():
			LoggerLowestLevel = LoggerLevelPanic
		case LoggerLevelOff.String():
			LoggerLowestLevel = LoggerLevelOff
		}
	}
	if v, ok := Config["address"].(string); ok {
		Address = v
	}
	// Hosts are normalized to lowercase; non-string entries are skipped.
	if v, ok := Config["host_whitelist"].([]interface{}); ok {
		HostWhitelist = make([]string, 0, len(v))
		for _, v := range v {
			if v, ok := v.(string); ok {
				HostWhitelist = append(HostWhitelist, strings.ToLower(v))
			}
		}
	}
	// NOTE(review): the integer timeout values below are passed straight to
	// time.Duration, i.e. interpreted as nanoseconds — confirm the
	// configuration file is really meant to use nanoseconds rather than,
	// say, milliseconds or seconds.
	if v, ok := Config["read_timeout"].(int64); ok {
		ReadTimeout = time.Duration(v)
	}
	if v, ok := Config["read_header_timeout"].(int64); ok {
		ReadHeaderTimeout = time.Duration(v)
	}
	if v, ok := Config["write_timeout"].(int64); ok {
		WriteTimeout = time.Duration(v)
	}
	if v, ok := Config["idle_timeout"].(int64); ok {
		IdleTimeout = time.Duration(v)
	}
	if v, ok := Config["max_header_bytes"].(int64); ok {
		MaxHeaderBytes = int(v)
	}
	if v, ok := Config["tls_cert_file"].(string); ok {
		TLSCertFile = v
	}
	if v, ok := Config["tls_key_file"].(string); ok {
		TLSKeyFile = v
	}
	if v, ok := Config["https_enforced"].(bool); ok {
		HTTPSEnforced = v
	}
	if v, ok := Config["acme_cert_root"].(string); ok {
		ACMECertRoot = v
	}
	if v, ok := Config["websocket_handshake_timeout"].(int64); ok {
		WebSocketHandshakeTimeout = time.Duration(v)
	}
	if v, ok := Config["websocket_subprotocols"].([]interface{}); ok {
		WebSocketSubprotocols = make([]string, 0, len(v))
		for _, v := range v {
			if v, ok := v.(string); ok {
				WebSocketSubprotocols = append(
					WebSocketSubprotocols,
					v,
				)
			}
		}
	}
	if v, ok := Config["auto_push_enabled"].(bool); ok {
		AutoPushEnabled = v
	}
	if v, ok := Config["minifier_enabled"].(bool); ok {
		MinifierEnabled = v
	}
	if v, ok := Config["template_root"].(string); ok {
		TemplateRoot = v
	}
	if v, ok := Config["template_exts"].([]interface{}); ok {
		TemplateExts = make([]string, 0, len(v))
		for _, v := range v {
			if v, ok := v.(string); ok {
				TemplateExts = append(TemplateExts, v)
			}
		}
	}
	if v, ok := Config["template_left_delim"].(string); ok {
		TemplateLeftDelim = v
	}
	if v, ok := Config["template_right_delim"].(string); ok {
		TemplateRightDelim = v
	}
	if v, ok := Config["coffer_enabled"].(bool); ok {
		CofferEnabled = v
	}
	if v, ok := Config["asset_root"].(string); ok {
		AssetRoot = v
	}
	if v, ok := Config["asset_exts"].([]interface{}); ok {
		AssetExts = make([]string, 0, len(v))
		for _, v := range v {
			if v, ok := v.(string); ok {
				AssetExts = append(AssetExts, v)
			}
		}
	}
}
// DEBUG logs the msg at the `LoggerLevelDebug` with the optional extras.
func DEBUG(msg string, extras ...obj) {
	theLogger.log(LoggerLevelDebug, msg, extras...)
}

// INFO logs the msg at the `LoggerLevelInfo` with the optional extras.
func INFO(msg string, extras ...obj) {
	theLogger.log(LoggerLevelInfo, msg, extras...)
}

// WARN logs the msg at the `LoggerLevelWarn` with the optional extras.
func WARN(msg string, extras ...obj) {
	theLogger.log(LoggerLevelWarn, msg, extras...)
}

// ERROR logs the msg at the `LoggerLevelError` with the optional extras.
func ERROR(msg string, extras ...obj) {
	theLogger.log(LoggerLevelError, msg, extras...)
}

// FATAL logs the msg at the `LoggerLevelFatal` with the optional extras
// followed by a call to `os.Exit(1)`. Note that `os.Exit` terminates the
// process immediately, so deferred functions do not run.
func FATAL(msg string, extras ...obj) {
	theLogger.log(LoggerLevelFatal, msg, extras...)
	os.Exit(1)
}

// PANIC logs the msg at the `LoggerLevelPanic` with the optional extras
// followed by a call to `panic()` carrying the msg.
func PANIC(msg string, extras ...obj) {
	theLogger.log(LoggerLevelPanic, msg, extras...)
	panic(msg)
}
// GET registers a new GET route for the path with the matching h in the router
// with the optional route-level gases.
func GET(path string, h Handler, gases ...Gas) {
	theRouter.register("GET", path, h, gases...)
}

// HEAD registers a new HEAD route for the path with the matching h in the
// router with the optional route-level gases.
func HEAD(path string, h Handler, gases ...Gas) {
	theRouter.register("HEAD", path, h, gases...)
}

// POST registers a new POST route for the path with the matching h in the
// router with the optional route-level gases.
func POST(path string, h Handler, gases ...Gas) {
	theRouter.register("POST", path, h, gases...)
}

// PUT registers a new PUT route for the path with the matching h in the router
// with the optional route-level gases.
func PUT(path string, h Handler, gases ...Gas) {
	theRouter.register("PUT", path, h, gases...)
}

// PATCH registers a new PATCH route for the path with the matching h in the
// router with the optional route-level gases.
func PATCH(path string, h Handler, gases ...Gas) {
	theRouter.register("PATCH", path, h, gases...)
}

// DELETE registers a new DELETE route for the path with the matching h in the
// router with the optional route-level gases.
func DELETE(path string, h Handler, gases ...Gas) {
	theRouter.register("DELETE", path, h, gases...)
}

// CONNECT registers a new CONNECT route for the path with the matching h in the
// router with the optional route-level gases.
func CONNECT(path string, h Handler, gases ...Gas) {
	theRouter.register("CONNECT", path, h, gases...)
}

// OPTIONS registers a new OPTIONS route for the path with the matching h in the
// router with the optional route-level gases.
func OPTIONS(path string, h Handler, gases ...Gas) {
	theRouter.register("OPTIONS", path, h, gases...)
}

// TRACE registers a new TRACE route for the path with the matching h in the
// router with the optional route-level gases.
func TRACE(path string, h Handler, gases ...Gas) {
	theRouter.register("TRACE", path, h, gases...)
}
// STATIC registers GET and HEAD routes under the path prefix that serve the
// static files found in the root directory, with the optional route-level
// gases. Requests for files that do not exist fall through to
// NotFoundHandler.
func STATIC(prefix, root string, gases ...Gas) {
	serveFile := func(req *Request, res *Response) error {
		// The trailing "*" parameter carries the remainder of the path.
		name := filepath.Join(
			root,
			req.Params["*"].Value().String(),
		)
		if err := res.WriteFile(name); err != nil {
			if os.IsNotExist(err) {
				return NotFoundHandler(req, res)
			}
			return err
		}
		return nil
	}
	GET(prefix+"*", serveFile, gases...)
	HEAD(prefix+"*", serveFile, gases...)
}
// FILE registers GET and HEAD routes for the path that serve the single
// static file named by filename, with the optional route-level gases. A
// missing file falls through to NotFoundHandler.
func FILE(path, filename string, gases ...Gas) {
	serve := func(req *Request, res *Response) error {
		if err := res.WriteFile(filename); err != nil {
			if os.IsNotExist(err) {
				return NotFoundHandler(req, res)
			}
			return err
		}
		return nil
	}
	GET(path, serve, gases...)
	HEAD(path, serve, gases...)
}
// Serve starts the server.
func Serve() error {
	return TheServer.serve()
}

// Close closes the server immediately.
func Close() error {
	return TheServer.close()
}

// Shutdown gracefully shuts down the server without interrupting any active
// connections until timeout. It waits indefinitely for connections to return to
// idle and then shut down when the timeout is less than or equal to zero.
func Shutdown(timeout time.Duration) error {
	return TheServer.shutdown(timeout)
}

// Handler defines a function to serve requests.
type Handler func(*Request, *Response) error

// NotFoundHandler is a `Handler` that sets a 404 status and returns a
// not-found error.
var NotFoundHandler = func(req *Request, res *Response) error {
	res.Status = 404
	return errors.New("not found")
}

// MethodNotAllowedHandler is a `Handler` that sets a 405 status and returns a
// method-not-allowed error.
var MethodNotAllowedHandler = func(req *Request, res *Response) error {
	res.Status = 405
	return errors.New("method not allowed")
}

// Gas defines a function to process gases (middleware wrapping a Handler).
type Gas func(Handler) Handler

// obj is package-local shorthand for a generic string-keyed map.
type obj = map[string]interface{}
|
package main
// X is a demo struct holding two ints and a nested Y value.
type X struct {
	Xa int
	Xb int
	Xy Y
}

// Y is a demo struct embedded by value inside X.
type Y struct {
	Ya int
	Yb int
}
func main() {
x := X{
Xy: Y{
}
|
// GOG
package main
// a is a package-level variable; f2 below resolves `a` lexically to this
// global, not to any local of its caller.
var a string

// main sets the global a to "G", prints it, then calls f1. The program's
// complete output is "GOG".
func main() {
	a = "G"
	print(a)
	f1()
}

// f1 declares a local a that shadows the global, prints it ("O"), and calls
// f2. The global a is untouched here.
func f1() {
	a := "O"
	print(a)
	f2()
}

// f2 prints the package-level a ("G").
func f2() {
	// Note: the `a` visible at f2's definition site is the global one,
	// not f1's local `a` at the call site. Go is lexically scoped, and
	// the binding is resolved at compile time.
	print(a)
}
|
package main
import "fmt"
// main builds a deck, shuffles it, deals two five-card hands, and prints them.
func main() {
	cards := newDeck()
	cards.shuffle()
	// NOTE(review): both hands are dealt from the same, unmodified `cards`
	// value, and deal's second return value is discarded. If that second
	// value is the remaining deck, these two hands will contain the same
	// cards — the second call should likely deal from the remainder of
	// the first. Confirm deal's semantics.
	hand1, _ := deal(cards, 5)
	hand2, _ := deal(cards, 5)
	fmt.Println(hand1, hand2)
}
|
package tests
import (
"testing"
ravendb "github.com/ravendb/ravendb-go-client"
"github.com/stretchr/testify/assert"
)
// NewUsersInvalidIndex builds an index whose map expression divides by
// u.Age, so indexing a user whose Age is 0 produces an index error.
func NewUsersInvalidIndex() *ravendb.IndexCreationTask {
	task := ravendb.NewIndexCreationTask("UsersInvalidIndex")
	task.Map = "from u in docs.Users select new { a = 5 / u.Age }"
	return task
}
// NewUsers_Index builds a simple index over the Users collection's name
// field.
func NewUsers_Index() *ravendb.IndexCreationTask {
	task := ravendb.NewIndexCreationTask("Users_Index")
	task.Map = "from u in docs.Users select new { u.name }"
	return task
}
// testIndexCanDeleteIndex creates UsersIndex, verifies it appears in the
// index name list, deletes it, and verifies the list is then empty.
//
// NOTE(review): throughout this file assert.Equal is called as
// (t, actual, expected); testify documents (t, expected, actual). This only
// affects failure-message wording, not correctness.
func testIndexCanDeleteIndex(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	index := NewUsersIndex()
	err = index.Execute(store, nil, "")
	assert.NoError(t, err)
	op := ravendb.NewGetIndexNamesOperation(0, 10)
	err = store.Maintenance().Send(op)
	assert.NoError(t, err)
	indexNames := op.Command.Result
	assert.True(t, stringArrayContains(indexNames, "UsersIndex"))
	op2 := ravendb.NewDeleteIndexOperation("UsersIndex")
	err = store.Maintenance().Send(op2)
	assert.NoError(t, err)
	op3 := ravendb.NewGetIndexNamesOperation(0, 10)
	err = store.Maintenance().Send(op3)
	assert.NoError(t, err)
	indexNames = op3.Command.Result
	assert.Equal(t, len(indexNames), 0)
}
// testIndexCanDisableAndEnableIndex disables UsersIndex, confirms the server
// reports it disabled, re-enables it, and confirms it is running again.
func testIndexCanDisableAndEnableIndex(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	index := NewUsersIndex()
	err = index.Execute(store, nil, "")
	assert.NoError(t, err)
	{
		op, err := ravendb.NewDisableIndexOperation("UsersIndex")
		assert.NoError(t, err)
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
	}
	{
		// Only one index exists, so Indexes[0] is UsersIndex.
		op := ravendb.NewGetIndexingStatusOperation()
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
		indexingStatus := op.Command.Result
		indexStatus := indexingStatus.Indexes[0]
		assert.Equal(t, indexStatus.Status, ravendb.IndexRunningStatusDisabled)
	}
	{
		op, err := ravendb.NewEnableIndexOperation("UsersIndex")
		assert.NoError(t, err)
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
	}
	{
		op := ravendb.NewGetIndexingStatusOperation()
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
		indexingStatus := op.Command.Result
		indexStatus := indexingStatus.Indexes[0]
		assert.Equal(t, indexStatus.Status, ravendb.IndexRunningStatusRunning)
	}
}
// testIndexGetCanIndexes creates UsersIndex and verifies that fetching index
// definitions returns exactly one entry.
func testIndexGetCanIndexes(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	index := NewUsersIndex()
	err = index.Execute(store, nil, "")
	assert.NoError(t, err)
	{
		op := ravendb.NewGetIndexesOperation(0, 10)
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
		indexDefinitions := op.Command.Result
		assert.Equal(t, len(indexDefinitions), 1)
	}
}
// testIndexGetCanIndexesStats creates UsersIndex and verifies that fetching
// index statistics returns exactly one entry.
func testIndexGetCanIndexesStats(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	// FIX: this deferred Close was missing — every sibling test in this
	// file closes the store it acquires, and without it the store leaks.
	defer store.Close()
	index := NewUsersIndex()
	err = index.Execute(store, nil, "")
	assert.NoError(t, err)
	{
		op := ravendb.NewGetIndexesStatisticsOperation()
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
		indexStats := op.Command.Result
		assert.Equal(t, len(indexStats), 1)
	}
}
// testIndexGetTerms stores one user named "Marcin", waits for indexing, and
// verifies the index terms for the "name" field contain the lowercased value.
func testIndexGetTerms(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	index := NewUsersIndex()
	err = index.Execute(store, nil, "")
	assert.NoError(t, err)
	{
		session := openSessionMust(t, store)
		user := &User{}
		user.setName("Marcin")
		err = session.Store(user)
		assert.NoError(t, err)
		err = session.SaveChanges()
		assert.NoError(t, err)
		session.Close()
	}
	err = driver.waitForIndexing(store, store.GetDatabase(), 0)
	assert.NoError(t, err)
	{
		op, err := ravendb.NewGetTermsOperation("UsersIndex", "name", "", 0)
		assert.NoError(t, err)
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
		terms := op.Command.Result
		assert.Equal(t, len(terms), 1)
		// Terms are stored lowercased by the server's default analyzer.
		assert.Equal(t, terms[0], "marcin")
	}
}

// testIndexHasIndexChanged puts an index definition, checks that an identical
// definition reports "not changed", then alters the map and checks that the
// server reports "changed".
func testIndexHasIndexChanged(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	index := NewUsersIndex()
	indexDef := index.CreateIndexDefinition()
	op := ravendb.NewPutIndexesOperation(indexDef)
	err = store.Maintenance().Send(op)
	assert.NoError(t, err)
	op2 := ravendb.NewIndexHasChangedOperation(indexDef)
	err = store.Maintenance().Send(op2)
	assert.NoError(t, err)
	{
		cmd := op2.Command
		assert.False(t, cmd.Result)
	}
	// Changing the map expression must flip the "has changed" answer.
	indexDef.Maps = []string{"from users"}
	op3 := ravendb.NewIndexHasChangedOperation(indexDef)
	err = store.Maintenance().Send(op3)
	assert.NoError(t, err)
	{
		cmd := op3.Command
		assert.True(t, cmd.Result)
	}
}
// testIndexCanStopStartIndexing stops indexing database-wide, verifies the
// index reports paused, restarts indexing, and verifies it is running again.
func testIndexCanStopStartIndexing(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	index := NewUsersIndex()
	indexDef := index.CreateIndexDefinition()
	{
		op := ravendb.NewPutIndexesOperation(indexDef)
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
	}
	{
		op := ravendb.NewStopIndexingOperation()
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
	}
	{
		op := ravendb.NewGetIndexingStatusOperation()
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
		indexingStatus := op.Command.Result
		indexStatus := indexingStatus.Indexes[0]
		assert.Equal(t, indexStatus.Status, ravendb.IndexRunningStatusPaused)
	}
	{
		op := ravendb.NewStartIndexingOperation()
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
	}
	{
		op := ravendb.NewGetIndexingStatusOperation()
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
		indexingStatus := op.Command.Result
		indexStatus := indexingStatus.Indexes[0]
		assert.Equal(t, indexStatus.Status, ravendb.IndexRunningStatusRunning)
	}
}

// testIndexCanStopStartIndex stops a single index (the database keeps
// running), verifies the per-index status is paused while the overall status
// stays running, then restarts the index and verifies both are running.
func testIndexCanStopStartIndex(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	index := NewUsersIndex()
	indexDef := index.CreateIndexDefinition()
	{
		op := ravendb.NewPutIndexesOperation(indexDef)
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
	}
	{
		op, err := ravendb.NewStopIndexOperation(indexDef.Name)
		assert.NoError(t, err)
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
	}
	{
		op := ravendb.NewGetIndexingStatusOperation()
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
		indexingStatus := op.Command.Result
		assert.Equal(t, indexingStatus.Status, ravendb.IndexRunningStatusRunning)
		indexStatus := indexingStatus.Indexes[0]
		assert.Equal(t, indexStatus.Status, ravendb.IndexRunningStatusPaused)
	}
	{
		op, err := ravendb.NewStartIndexOperation(indexDef.Name)
		assert.NoError(t, err)
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
	}
	{
		op := ravendb.NewGetIndexingStatusOperation()
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
		indexingStatus := op.Command.Result
		assert.Equal(t, indexingStatus.Status, ravendb.IndexRunningStatusRunning)
		indexStatus := indexingStatus.Indexes[0]
		assert.Equal(t, indexStatus.Status, ravendb.IndexRunningStatusRunning)
	}
}
// testIndexCanSetIndexLockMode sets the lock mode of a fresh index to
// LockedError and verifies the server-side definition reflects it.
func testIndexCanSetIndexLockMode(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	index := NewUsersIndex()
	indexDef := index.CreateIndexDefinition()
	{
		op := ravendb.NewPutIndexesOperation(indexDef)
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
	}
	{
		op, err := ravendb.NewSetIndexesLockOperation(indexDef.Name, ravendb.IndexLockModeLockedError)
		assert.NoError(t, err)
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
	}
	{
		op := ravendb.NewGetIndexOperation(indexDef.Name)
		err = store.Maintenance().Send(op)
		assert.NoError(t, err)
		newIndexDef := op.Command.Result
		assert.Equal(t, newIndexDef.LockMode, ravendb.IndexLockModeLockedError)
	}
}

// testIndexCanSetIndexPriority sets the priority of a fresh index to High and
// verifies the server-side definition reflects it.
func testIndexCanSetIndexPriority(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	index := NewUsersIndex()
	indexDef := index.CreateIndexDefinition()
	op := ravendb.NewPutIndexesOperation(indexDef)
	err = store.Maintenance().Send(op)
	assert.NoError(t, err)
	op2, err := ravendb.NewSetIndexesPriorityOperation(indexDef.Name, ravendb.IndexPriorityHigh)
	assert.NoError(t, err)
	err = store.Maintenance().Send(op2)
	assert.NoError(t, err)
	op3 := ravendb.NewGetIndexOperation(indexDef.Name)
	err = store.Maintenance().Send(op3)
	assert.NoError(t, err)
	newIndexDef := op3.Command.Result
	assert.Equal(t, newIndexDef.Priority, ravendb.IndexPriorityHigh)
}
// testIndexCanListErrors deploys the deliberately broken UsersInvalidIndex
// (its map divides by u.Age), stores a user with Age 0 to trigger a
// divide-by-zero index error, and verifies the error is listed both globally
// and per index.
func testIndexCanListErrors(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	index := NewUsersInvalidIndex()
	indexDef := index.CreateIndexDefinition()
	op := ravendb.NewPutIndexesOperation(indexDef)
	err = store.Maintenance().Send(op)
	assert.NoError(t, err)
	{
		session := openSessionMust(t, store)
		user := &User{}
		//user.setName(null)
		// Age 0 makes the index's "5 / u.Age" expression fail.
		user.Age = 0
		err = session.Store(user)
		assert.NoError(t, err)
		err = session.SaveChanges()
		assert.NoError(t, err)
		session.Close()
	}
	err = driver.waitForIndexing(store, store.GetDatabase(), 0)
	assert.NoError(t, err)
	// nil index-name filter returns errors for all indexes.
	op2 := ravendb.NewGetIndexErrorsOperation(nil)
	err = store.Maintenance().Send(op2)
	assert.NoError(t, err)
	indexErrors := op2.Command.Result
	assert.Equal(t, len(indexErrors), 1)
	op3 := ravendb.NewGetIndexErrorsOperation([]string{indexDef.Name})
	err = store.Maintenance().Send(op3)
	assert.NoError(t, err)
	perIndexErrors := op3.Command.Result
	assert.Equal(t, len(perIndexErrors), 1)
}

// testIndexCanGetIndexStatistics deploys Users_Index and verifies that the
// statistics listing contains exactly one index.
func testIndexCanGetIndexStatistics(t *testing.T, driver *RavenTestDriver) {
	var err error
	store := driver.getDocumentStoreMust(t)
	defer store.Close()
	userIndex := NewUsers_Index()
	err = userIndex.Execute(store, nil, "")
	assert.NoError(t, err)
	op := ravendb.NewGetIndexesStatisticsOperation()
	err = store.Maintenance().Send(op)
	assert.NoError(t, err)
	indexStats := op.Command.Result
	assert.Equal(t, len(indexStats), 1)
}
// TestIndexOperations is the test-suite entry point; it runs the individual
// index tests in sequence against one shared driver.
func TestIndexOperations(t *testing.T) {
	driver := createTestDriver(t)
	destroy := func() { destroyDriver(t, driver) }
	defer recoverTest(t, destroy)
	// order matches Java tests
	// NOTE(review): testIndexCanGetIndexStatistics is invoked twice below;
	// presumably mirroring the Java suite's ordering — confirm intentional.
	testIndexHasIndexChanged(t, driver)
	testIndexCanListErrors(t, driver)
	testIndexCanGetIndexStatistics(t, driver)
	testIndexCanSetIndexPriority(t, driver)
	testIndexCanDisableAndEnableIndex(t, driver)
	testIndexGetCanIndexes(t, driver)
	testIndexCanDeleteIndex(t, driver)
	testIndexCanStopStartIndexing(t, driver)
	testIndexCanGetIndexStatistics(t, driver)
	testIndexCanStopStartIndex(t, driver)
	testIndexCanSetIndexLockMode(t, driver)
	testIndexGetTerms(t, driver)
}
|
// Copyright 2015 Apcera Inc. All rights reserved.
// Package aws provides a standard way to create a virtual machine on AWS.
package aws
import (
"errors"
"fmt"
"net"
"time"
"github.com/apcera/libretto/ssh"
"github.com/apcera/libretto/util"
"github.com/apcera/libretto/virtualmachine"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
)
const (
	// PublicIP is the index of the public IP address that GetIPs returns.
	PublicIP = 0
	// PrivateIP is the index of the private IP address that GetIPs returns.
	PrivateIP = 1

	// StateStarted is the state AWS reports when the VM is started.
	StateStarted = "running"
	// StateHalted is the state AWS reports when the VM is halted.
	StateHalted = "stopped"
	// StateDestroyed is the state AWS reports when the VM is destroyed.
	StateDestroyed = "terminated"
	// StatePending is the state AWS reports when the VM is pending.
	StatePending = "pending"
)

// SSHTimeout is the maximum time to wait before failing to GetSSH. This is not
// thread-safe.
var SSHTimeout = 5 * time.Minute

var (
	// This ensures that aws.VM implements the virtualmachine.VirtualMachine
	// interface at compile time.
	_ virtualmachine.VirtualMachine = (*VM)(nil)

	// limiter rate limits AWS API calls (one receive per 500ms) to prevent
	// saturating AWS API limits.
	limiter = time.Tick(time.Millisecond * 500)
)

var (
	// ErrNoCreds is returned when no credentials are found in environment or
	// home directory.
	ErrNoCreds = errors.New("Missing AWS credentials")
	// ErrNoRegion is returned when a request was sent without a region.
	ErrNoRegion = errors.New("Missing AWS region")
	// ErrNoInstance is returned when querying an instance finds none.
	ErrNoInstance = errors.New("Missing VM instance")
	// ErrNoInstanceID is returned when attempting to perform an operation on
	// an instance, but the ID is missing.
	ErrNoInstanceID = errors.New("Missing instance ID")
	// ErrProvisionTimeout is returned when the EC2 instance takes too long to
	// enter "running" state.
	ErrProvisionTimeout = errors.New("AWS provision timeout")
	// ErrNoIPs is returned when no IP addresses are found for an instance.
	ErrNoIPs = errors.New("Missing IPs for instance")
	// ErrNoSupportSuspend is returned when vm.Suspend() is called.
	ErrNoSupportSuspend = errors.New("Suspend action not supported by AWS")
	// ErrNoSupportResume is returned when vm.Resume() is called.
	ErrNoSupportResume = errors.New("Resume action not supported by AWS")
)
// VM represents an AWS EC2 virtual machine.
type VM struct {
	Name                   string
	Region                 string // required
	AMI                    string
	InstanceType           string
	InstanceID             string // required when adding volume
	KeyPair                string // required
	IamInstanceProfileName string
	PrivateIPAddress       string

	// required when adding or deleting volume
	Volumes                      []EbsBlockVolume
	KeepRootVolumeOnDestroy      bool
	DeleteNonRootVolumeOnDestroy bool

	VPC    string
	Subnet string

	// required when modifying security group rules; all other parameters
	// except this one and Region are ignored during security group
	// modification
	SecurityGroups []SecurityGroup

	SSHCreds            ssh.Credentials // required
	DeleteKeysOnDestroy bool

	// only relevant in GetSubnetList, GetSecurityGroupList & GetImageList;
	// filters result with given key-values
	Filters map[string][]*string
}
// Region represents an AWS Region.
type Region struct {
	Name           string `json:"name,omitempty"`
	RegionEndpoint string `json:"region_endpoint,omitempty"`
}

// Zone represents an AWS availability zone.
type Zone struct {
	Name   string `json:"name,omitempty"`
	State  string `json:"state,omitempty"`
	Region string `json:"region,omitempty"`
}

// VPC represents an AWS VPC.
type VPC struct {
	Id         string   `json:"id,omitempty"`
	State      string   `json:"state,omitempty"`
	IsDefault  *bool    `json:"is_default,omitempty"`
	IPv4Blocks []string `json:"ipv4_blocks,omitempty"`
	IPv6Blocks []string `json:"ipv6_blocks,omitempty"`
	// ID of DHCP options associated with VPC
	DhcpOptionsId string `json:"dhcp_options_id,omitempty"`
	// Allowed tenancy of instances launched into the VPC
	InstanceTenancy string `json:"instance_tenancy,omitempty"`
}

// Subnet represents an AWS Subnet.
type Subnet struct {
	Id                    string   `json:"id,omitempty"`
	State                 string   `json:"state,omitempty"`
	VpcId                 string   `json:"vpc_id,omitempty"`
	IPv4Block             string   `json:"ipv4block,omitempty"`
	IPv6Blocks            []string `json:"ipv6blocks,omitempty"`
	AvailableAddressCount *int64   `json:"available_address_count,omitempty"`
	// Availability Zone of the subnet
	AvailabilityZone string `json:"availability_zone,omitempty"`
	// Indicates if this is default for Availability Zone
	DefaultForAz        bool `json:"default_for_az,omitempty"`
	MapPublicIpOnLaunch bool `json:"map_public_ip_on_launch,omitempty"`
}

// IpPermission in AWS is used to represent inbound or outbound rules
// associated with SecurityGroup.
type IpPermission struct {
	FromPort   *int64   `json:"from_port,omitempty"`
	ToPort     *int64   `json:"to_port,omitempty"`
	IpProtocol string   `json:"ip_protocol,omitempty"`
	Ipv4Ranges []string `json:"ipv4_ranges,omitempty"`
	Ipv6Ranges []string `json:"ipv6_ranges,omitempty"`
}

// SecurityGroup represents an AWS SecurityGroup.
type SecurityGroup struct {
	Id                  string         `json:"id,omitempty"`
	Name                string         `json:"name,omitempty"`
	Description         string         `json:"description,omitempty"`
	OwnerId             string         `json:"owner_id,omitempty"`
	VpcId               string         `json:"vpc_id,omitempty"`
	IpPermissionsEgress []IpPermission `json:"ip_permissions_egress,omitempty"`
	IpPermissions       []IpPermission `json:"ip_permissions,omitempty"`
}

// InstanceStatus represents an AWS InstanceStatus.
type InstanceStatus struct {
	AvailabilityZone string `json:"availability_zone,omitempty"`
	InstanceId       string `json:"instance_id,omitempty"`
	State            string `json:"state,omitempty"`
}

// EbsBlockVolume represents an AWS EbsBlockDevice.
type EbsBlockVolume struct {
	DeviceName       string `json:"device_name,omitempty"`
	VolumeSize       *int64 `json:"volume_size,omitempty"`
	VolumeType       string `json:"volume_type,omitempty"`
	AvailabilityZone string `json:"availability_zone,omitempty"`
	VolumeId         string `json:"volume_id,omitempty"`
	SnapshotId       string `json:"snapshot_id,omitempty"`
}
// Image represents an AWS Image (AMI).
type Image struct {
	Id                 *string `json:"id,omitempty"`
	Name               *string `json:"name,omitempty"`
	Description        *string `json:"description,omitempty"`
	State              *string `json:"state,omitempty"`
	OwnerId            *string `json:"owner_id,omitempty"`
	OwnerAlias         *string `json:"owner_alias,omitempty"`
	CreationDate       *string `json:"creation_date,omitempty"`
	Architecture       *string `json:"architecture,omitempty"`
	Platform           *string `json:"platform,omitempty"`
	Hypervisor         *string `json:"hypervisor,omitempty"`
	VirtualizationType *string `json:"virtualization_type,omitempty"`
	ImageType          *string `json:"image_type,omitempty"`
	// FIX: tag previously read "omitemtpy"; encoding/json silently ignores
	// the misspelled option, so a nil KernelId was serialized as
	// "kernel_id":null instead of being omitted like every other field.
	KernelId       *string           `json:"kernel_id,omitempty"`
	RootDeviceName *string           `json:"root_device_name,omitempty"`
	RootDeviceType *string           `json:"root_device_type,omitempty"`
	Public         *bool             `json:"public,omitempty"`
	EbsVolumes     []*EbsBlockVolume `json:"ebs_volumes,omitempty"`
}
// GetName returns the name of the virtual machine. (The Get prefix is
// required by the virtualmachine.VirtualMachine interface asserted above.)
func (vm *VM) GetName() string {
	return vm.Name
}
// SetTag adds a key=value tag to the VM and to all of its attached volumes.
// The VM must already be provisioned (InstanceID set), otherwise
// ErrNoInstanceID is returned.
func (vm *VM) SetTag(key, value string) error {
	svc, err := getService(vm.Region)
	if err != nil {
		return fmt.Errorf("failed to get AWS service: %v", err)
	}
	if vm.InstanceID == "" {
		return ErrNoInstanceID
	}
	volIDs, err := getInstanceVolumeIDs(svc, vm.InstanceID)
	if err != nil {
		return fmt.Errorf("Failed to get instance's volumes IDs: %s", err)
	}
	// Tag the instance and every attached volume in one CreateTags call.
	ids := make([]*string, 0, len(volIDs)+1)
	ids = append(ids, aws.String(vm.InstanceID))
	for _, v := range volIDs {
		ids = append(ids, aws.String(v))
	}
	_, err = svc.CreateTags(&ec2.CreateTagsInput{
		Resources: ids,
		Tags: []*ec2.Tag{
			{Key: aws.String(key),
				Value: aws.String(value)},
		},
	})
	if err != nil {
		return fmt.Errorf("Failed to create tag on VM: %v", err)
	}
	return nil
}
// SetTags applies every key-value pair in tags to the provisioned instance
// (and its attached volumes), invoking SetTag once per entry. It stops and
// returns the first error encountered.
func (vm *VM) SetTags(tags map[string]string) error {
	for key, value := range tags {
		err := vm.SetTag(key, value)
		if err != nil {
			return err
		}
	}
	return nil
}
// Provision creates a virtual machine on AWS. It returns an error if
// there was a problem during creation, if there was a problem adding a tag, or
// if the VM takes too long to enter "running" state.
func (vm *VM) Provision() error {
	// Rate limit calls so we do not saturate the account's AWS API limits.
	<-limiter
	svc, err := getService(vm.Region)
	if err != nil {
		return fmt.Errorf("failed to get AWS service: %v", err)
	}
	resp, err := svc.RunInstances(instanceInfo(vm))
	if err != nil {
		return fmt.Errorf("Failed to create instance: %v", err)
	}
	// FIX: guard the slice before indexing — the previous code accessed
	// resp.Instances[0] unconditionally, which panics if AWS returns an
	// empty instance list.
	if len(resp.Instances) == 0 || !hasInstanceID(resp.Instances[0]) {
		return ErrNoInstanceID
	}
	vm.InstanceID = *resp.Instances[0].InstanceId
	if err := waitUntilReady(svc, vm.InstanceID); err != nil {
		return err
	}
	if vm.DeleteNonRootVolumeOnDestroy {
		// FIX: this branch previously returned immediately, skipping the
		// Name tagging below even on success — contradicting the doc
		// comment's promise to report tagging problems.
		if err := setNonRootDeleteOnDestroy(svc, vm.InstanceID, true); err != nil {
			return err
		}
	}
	if vm.Name != "" {
		if err := vm.SetTag("Name", vm.GetName()); err != nil {
			return err
		}
	}
	return nil
}
// GetIPs returns a slice of IP addresses assigned to the VM. The PublicIP or
// PrivateIP consts can be used to retrieve respective IP address type. It
// returns nil if there was an error obtaining the IPs.
func (vm *VM) GetIPs() ([]net.IP, error) {
	svc, err := getService(vm.Region)
	if err != nil {
		return nil, fmt.Errorf("failed to get AWS service: %v", err)
	}
	if vm.InstanceID == "" {
		// Probably need to call Provision first.
		return nil, ErrNoInstanceID
	}
	out, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
		InstanceIds: []*string{aws.String(vm.InstanceID)},
	})
	if err != nil {
		return nil, fmt.Errorf("Failed to describe instance: %s", err)
	}
	if len(out.Reservations) == 0 {
		return nil, errors.New("Missing instance reservation")
	}
	instances := out.Reservations[0].Instances
	if len(instances) == 0 {
		return nil, ErrNoInstance
	}
	// Slots 0/1 correspond to the PublicIP/PrivateIP constants.
	ips := make([]net.IP, 2)
	if pub := instances[0].PublicIpAddress; pub != nil {
		ips[PublicIP] = net.ParseIP(*pub)
	}
	if priv := instances[0].PrivateIpAddress; priv != nil {
		ips[PrivateIP] = net.ParseIP(*priv)
	}
	return ips, nil
}
// Destroy terminates the VM on AWS. It returns an error if AWS credentials are
// missing or if there is no instance ID.
func (vm *VM) Destroy() error {
	svc, err := getService(vm.Region)
	if err != nil {
		return fmt.Errorf("failed to get AWS service: %v", err)
	}
	if vm.InstanceID == "" {
		// Probably need to call Provision first.
		return ErrNoInstanceID
	}
	if _, err = svc.TerminateInstances(&ec2.TerminateInstancesInput{
		InstanceIds: []*string{aws.String(vm.InstanceID)},
	}); err != nil {
		return err
	}
	if err = waitUntilTerminated(svc, vm.InstanceID); err != nil {
		return err
	}
	// Optionally wipe the stored key pair once the instance is gone.
	if vm.DeleteKeysOnDestroy {
		vm.ResetKeyPair()
	}
	return nil
}
// GetSSH returns an SSH client that can be used to connect to a VM. An error
// is returned if the VM has no IPs.
func (vm *VM) GetSSH(options ssh.Options) (ssh.Client, error) {
	ips, err := util.GetVMIPs(vm, options)
	if err != nil {
		return nil, err
	}
	// Connect to the public address on the standard SSH port.
	c := &ssh.SSHClient{
		Creds:   &vm.SSHCreds,
		IP:      ips[PublicIP],
		Options: options,
		Port:    22,
	}
	// Block until the SSH daemon accepts connections or SSHTimeout elapses.
	if err = c.WaitForSSH(SSHTimeout); err != nil {
		return nil, err
	}
	return c, nil
}
// GetState returns the state of the VM, such as "running". An error is
// returned if the instance ID is missing, if there was a problem querying AWS,
// or if there are no instances.
func (vm *VM) GetState() (string, error) {
	svc, err := getService(vm.Region)
	if err != nil {
		return "", fmt.Errorf("failed to get AWS service: %v", err)
	}
	if vm.InstanceID == "" {
		// Probably need to call Provision first.
		return "", ErrNoInstanceID
	}
	out, err := svc.DescribeInstances(&ec2.DescribeInstancesInput{
		InstanceIds: []*string{aws.String(vm.InstanceID)},
	})
	if err != nil {
		return "", fmt.Errorf("Failed to describe instance: %s", err)
	}
	if len(out.Reservations) == 0 || len(out.Reservations[0].Instances) == 0 {
		return "", ErrNoInstance
	}
	return *out.Reservations[0].Instances[0].State.Name, nil
}
// Halt shuts down the VM on AWS and waits until it reports "stopped".
func (vm *VM) Halt() error {
	svc, err := getService(vm.Region)
	if err != nil {
		return fmt.Errorf("failed to get AWS service: %v", err)
	}
	if vm.InstanceID == "" {
		// Probably need to call Provision first.
		return ErrNoInstanceID
	}
	// Force-stop the instance; this is not a dry run.
	input := &ec2.StopInstancesInput{
		InstanceIds: []*string{aws.String(vm.InstanceID)},
		DryRun:      aws.Bool(false),
		Force:       aws.Bool(true),
	}
	if _, err = svc.StopInstances(input); err != nil {
		return fmt.Errorf("Failed to stop instance: %v", err)
	}
	return waitUntilStopped(svc, vm.InstanceID)
}
// Start boots a stopped VM and waits until it reports "running".
func (vm *VM) Start() error {
	svc, err := getService(vm.Region)
	if err != nil {
		return fmt.Errorf("failed to get AWS service: %v", err)
	}
	if vm.InstanceID == "" {
		// Probably need to call Provision first.
		return ErrNoInstanceID
	}
	input := &ec2.StartInstancesInput{
		InstanceIds: []*string{aws.String(vm.InstanceID)},
		DryRun:      aws.Bool(false),
	}
	if _, err = svc.StartInstances(input); err != nil {
		return fmt.Errorf("Failed to start instance: %v", err)
	}
	return waitUntilRunning(svc, vm.InstanceID)
}
// GetRegionList: returns list of regions visible to the configured account.
func (vm *VM) GetRegionList() ([]Region, error) {
	svc, err := getService(vm.Region)
	if err != nil {
		return nil, fmt.Errorf("Failed to get AWS service: %v", err)
	}
	out, err := svc.DescribeRegions(&ec2.DescribeRegionsInput{})
	if err != nil {
		return nil, fmt.Errorf("Failed to get region list: %v", err)
	}
	regions := make([]Region, 0, len(out.Regions))
	for _, r := range out.Regions {
		regions = append(regions, Region{
			Name:           *r.RegionName,
			RegionEndpoint: *r.Endpoint,
		})
	}
	return regions, nil
}
// GetAvailabilityZoneList: returns list of availability zones for a region.
func (vm *VM) GetAvailabilityZoneList() ([]Zone, error) {
	svc, err := getService(vm.Region)
	if err != nil {
		return nil, fmt.Errorf("Failed to get AWS service: %v", err)
	}
	out, err := svc.DescribeAvailabilityZones(
		&ec2.DescribeAvailabilityZonesInput{})
	if err != nil {
		return nil, fmt.Errorf("Failed to get availabilityZone list: %v", err)
	}
	zones := make([]Zone, 0, len(out.AvailabilityZones))
	for _, z := range out.AvailabilityZones {
		zones = append(zones, Zone{
			Name:   *z.ZoneName,
			State:  *z.State,
			Region: *z.RegionName,
		})
	}
	return zones, nil
}
// GetVPCList: returns list of VPCs for given region, including their
// associated IPv4 and IPv6 CIDR blocks.
func (vm *VM) GetVPCList() ([]VPC, error) {
	svc, err := getService(vm.Region)
	if err != nil {
		return nil, fmt.Errorf("Failed to get AWS service: %v", err)
	}
	out, err := svc.DescribeVpcs(&ec2.DescribeVpcsInput{})
	if err != nil {
		return nil, fmt.Errorf("Failed to get VPC list: %v", err)
	}
	vpcs := make([]VPC, 0, len(out.Vpcs))
	for _, v := range out.Vpcs {
		v4 := make([]string, 0, len(v.CidrBlockAssociationSet))
		for _, assoc := range v.CidrBlockAssociationSet {
			v4 = append(v4, *assoc.CidrBlock)
		}
		v6 := make([]string, 0, len(v.Ipv6CidrBlockAssociationSet))
		for _, assoc := range v.Ipv6CidrBlockAssociationSet {
			v6 = append(v6, *assoc.Ipv6CidrBlock)
		}
		vpcs = append(vpcs, VPC{
			Id:              *v.VpcId,
			State:           *v.State,
			IsDefault:       v.IsDefault,
			DhcpOptionsId:   *v.DhcpOptionsId,
			InstanceTenancy: *v.InstanceTenancy,
			IPv4Blocks:      v4,
			IPv6Blocks:      v6})
	}
	return vpcs, nil
}
// GetSubnetList: returns list of all subnet for given region
// most relevant filter(s) (map-keys): "vpc-id", "subnet-id", "availabilityZone"
// See all available filters at below link
// http://docs.aws.amazon.com/sdk-for-go/api/service/ec2/#DescribeSubnetsInput
func (vm *VM) GetSubnetList() ([]Subnet, error) {
	svc, err := getService(vm.Region)
	if err != nil {
		return nil, fmt.Errorf("Failed to get AWS service: %v", err)
	}
	input := &ec2.DescribeSubnetsInput{}
	// staticcheck S1009: len() of a nil slice is 0, so the nil check was redundant.
	if filters := getFilters(vm.Filters); len(filters) > 0 {
		input.Filters = filters
	}
	out, err := svc.DescribeSubnets(input)
	if err != nil {
		return nil, fmt.Errorf("Failed to get Subnet list: %v", err)
	}
	response := make([]Subnet, 0, len(out.Subnets))
	for _, subnet := range out.Subnets {
		ipv6Blocks := make([]string, 0, len(subnet.Ipv6CidrBlockAssociationSet))
		for _, ipv6Block := range subnet.Ipv6CidrBlockAssociationSet {
			ipv6Blocks = append(ipv6Blocks, *ipv6Block.Ipv6CidrBlock)
		}
		response = append(response, Subnet{
			Id:                    *subnet.SubnetId,
			State:                 *subnet.State,
			VpcId:                 *subnet.VpcId,
			IPv4Block:             *subnet.CidrBlock,
			AvailableAddressCount: subnet.AvailableIpAddressCount,
			AvailabilityZone:      *subnet.AvailabilityZone,
			DefaultForAz:          *subnet.DefaultForAz,
			MapPublicIpOnLaunch:   *subnet.MapPublicIpOnLaunch,
			IPv6Blocks:            ipv6Blocks})
	}
	return response, nil
}
// GetSecurityGroupList : returns list of all securityGroup for given region
// most relevant filter(s) (map-keys): "vpc-id", "group-id"
// See all available filters at below link
// http://docs.aws.amazon.com/sdk-for-go/api/service/ec2/#DescribeSecurityGroupsInput
func (vm *VM) GetSecurityGroupList() ([]SecurityGroup, error) {
	svc, err := getService(vm.Region)
	if err != nil {
		return nil, fmt.Errorf("Failed to get AWS service: %v", err)
	}
	input := &ec2.DescribeSecurityGroupsInput{}
	// staticcheck S1009: len() of a nil slice is 0, so the nil check was redundant.
	if filters := getFilters(vm.Filters); len(filters) > 0 {
		input.Filters = filters
	}
	out, err := svc.DescribeSecurityGroups(input)
	if err != nil {
		return nil, fmt.Errorf("Failed to get SecurityGroup list: %v", err)
	}
	response := make([]SecurityGroup, 0, len(out.SecurityGroups))
	for _, sg := range out.SecurityGroups {
		response = append(response, SecurityGroup{
			Id:                  *sg.GroupId,
			Name:                *sg.GroupName,
			Description:         *sg.Description,
			OwnerId:             *sg.OwnerId,
			VpcId:               *sg.VpcId,
			IpPermissionsEgress: toVMAWSIpPermissions(sg.IpPermissionsEgress),
			IpPermissions:       toVMAWSIpPermissions(sg.IpPermissions)})
	}
	return response, nil
}
// GetImageList: returns list of images available for given account
// Includes public,owned private images & private images with explicit permission
func (vm *VM) GetImageList() ([]Image, error) {
	svc, err := getService(vm.Region)
	if err != nil {
		return nil, fmt.Errorf("Failed to get AWS service: %v", err)
	}
	input := &ec2.DescribeImagesInput{}
	// staticcheck S1009: len() of a nil slice is 0, so the nil check was redundant.
	if filters := getFilters(vm.Filters); len(filters) > 0 {
		input.Filters = filters
	}
	out, err := svc.DescribeImages(input)
	if err != nil {
		return nil, fmt.Errorf("Failed to get Image list: %v", err)
	}
	response := make([]Image, 0, len(out.Images))
	for _, image := range out.Images {
		response = append(response, getVMAWSImage(image))
	}
	return response, nil
}
// AuthorizeSecurityGroup: Adds one or more rules to a security group.
// Both the ingress and egress rule sets of the first configured security
// group are authorized on AWS.
func (vm *VM) AuthorizeSecurityGroup() error {
	svc, err := getService(vm.Region)
	if err != nil {
		return fmt.Errorf("Failed to get AWS service: %v", err)
	}
	// FIX: guard against an index-out-of-range panic when no group is set.
	if len(vm.SecurityGroups) == 0 {
		return errors.New("no security group configured on this VM")
	}
	secGrp := vm.SecurityGroups[0]
	input := &ec2.AuthorizeSecurityGroupIngressInput{
		GroupId:       &secGrp.Id,
		IpPermissions: toEc2IpPermissions(secGrp.IpPermissions)}
	if _, err = svc.AuthorizeSecurityGroupIngress(input); err != nil {
		return fmt.Errorf("Failed to authorize security group ingress rules: %v", err)
	}
	egressInput := &ec2.AuthorizeSecurityGroupEgressInput{
		GroupId:       &secGrp.Id,
		IpPermissions: toEc2IpPermissions(secGrp.IpPermissionsEgress)}
	if _, err = svc.AuthorizeSecurityGroupEgress(egressInput); err != nil {
		return fmt.Errorf("Failed to authorize security group egress rules: %v", err)
	}
	return nil
}
// RevokeSecurityGroup: Removes one or more rules from a security group.
// Both the ingress and egress rule sets of the first configured security
// group are revoked on AWS.
func (vm *VM) RevokeSecurityGroup() error {
	svc, err := getService(vm.Region)
	if err != nil {
		return fmt.Errorf("Failed to get AWS service: %v", err)
	}
	// FIX: guard against an index-out-of-range panic when no group is set.
	if len(vm.SecurityGroups) == 0 {
		return errors.New("no security group configured on this VM")
	}
	secGrp := vm.SecurityGroups[0]
	input := &ec2.RevokeSecurityGroupIngressInput{
		GroupId:       &secGrp.Id,
		IpPermissions: toEc2IpPermissions(secGrp.IpPermissions)}
	if _, err = svc.RevokeSecurityGroupIngress(input); err != nil {
		return fmt.Errorf("Failed to revoke security group ingress rules: %v", err)
	}
	egressInput := &ec2.RevokeSecurityGroupEgressInput{
		GroupId:       &secGrp.Id,
		IpPermissions: toEc2IpPermissions(secGrp.IpPermissionsEgress)}
	if _, err = svc.RevokeSecurityGroupEgress(egressInput); err != nil {
		return fmt.Errorf("Failed to revoke security group egress rules: %v", err)
	}
	return nil
}
// CreateVolume: Creates a volume with given parameter. The first configured
// volume is created in the instance's availability zone and its AWS-assigned
// ID is written back to vm.Volumes[0].
func (vm *VM) CreateVolume() error {
	svc, err := getService(vm.Region)
	if err != nil {
		return fmt.Errorf("Failed to get AWS service: %v", err)
	}
	// FIX: guard against an index-out-of-range panic when no volume is set.
	if len(vm.Volumes) == 0 {
		return errors.New("no volume configured on this VM")
	}
	volume := vm.Volumes[0]
	// The volume must live in the same AZ as the instance it will attach to.
	instanceStatus, err := GetInstanceStatus(svc, vm.InstanceID)
	if err != nil {
		return fmt.Errorf("Failed to get availability zone of instance: %v", err)
	}
	volume.AvailabilityZone = instanceStatus.AvailabilityZone
	response, err := svc.CreateVolume(getVolumeInput(&volume))
	if err != nil {
		return fmt.Errorf("Failed to create volume: %v", err)
	}
	if err := waitForCreate(svc, *response.VolumeId); err != nil {
		return err
	}
	volume.VolumeId = *response.VolumeId
	vm.Volumes[0] = volume
	return nil
}
// AttachVolume: Attaches given volume to given instance. The first configured
// volume is attached at its configured device name and the call blocks until
// the attachment completes.
func (vm *VM) AttachVolume() error {
	svc, err := getService(vm.Region)
	if err != nil {
		return fmt.Errorf("Failed to get AWS service: %v", err)
	}
	// FIX: guard against an index-out-of-range panic when no volume is set.
	if len(vm.Volumes) == 0 {
		return errors.New("no volume configured on this VM")
	}
	volume := vm.Volumes[0]
	input := &ec2.AttachVolumeInput{
		Device:     &volume.DeviceName,
		InstanceId: &vm.InstanceID,
		VolumeId:   &volume.VolumeId}
	_, err = svc.AttachVolume(input)
	if err != nil {
		// FIX: the format string has three verbs but only two arguments were
		// supplied (go vet printf); the instance ID was missing.
		return fmt.Errorf("Failed to attach volume (volumeId %s) to "+
			"instance (instanceId %s): %v", volume.VolumeId, vm.InstanceID, err)
	}
	if err := waitForAttach(svc, volume.VolumeId); err != nil {
		return err
	}
	return nil
}
// DetachVolume: Detaches volume with given Id from instance. The call blocks
// until the detachment completes.
func (vm *VM) DetachVolume() error {
	svc, err := getService(vm.Region)
	if err != nil {
		return fmt.Errorf("Failed to get AWS service: %v", err)
	}
	// FIX: guard against an index-out-of-range panic when no volume is set.
	if len(vm.Volumes) == 0 {
		return errors.New("no volume configured on this VM")
	}
	volume := vm.Volumes[0]
	input := &ec2.DetachVolumeInput{
		VolumeId: &volume.VolumeId}
	_, err = svc.DetachVolume(input)
	if err != nil {
		// FIX: the format string has three verbs but only two arguments were
		// supplied (go vet printf); the instance ID was missing.
		return fmt.Errorf("Failed to detach volume (volumeId %s) from "+
			"instance (instanceId %s): %v", volume.VolumeId, vm.InstanceID, err)
	}
	if err := waitForDetach(svc, volume.VolumeId); err != nil {
		return err
	}
	return nil
}
// DeleteVolume: Deletes volume with given Id
// Disk must not be in-use by any instance
func (vm *VM) DeleteVolume() error {
	svc, err := getService(vm.Region)
	if err != nil {
		return fmt.Errorf("Failed to get AWS service: %v", err)
	}
	// FIX: guard against an index-out-of-range panic when no volume is set.
	if len(vm.Volumes) == 0 {
		return errors.New("no volume configured on this VM")
	}
	volume := vm.Volumes[0]
	input := &ec2.DeleteVolumeInput{
		VolumeId: &volume.VolumeId}
	_, err = svc.DeleteVolume(input)
	if err != nil {
		return fmt.Errorf("Failed to delete volume (volumeId %s): %v",
			volume.VolumeId, err)
	}
	return nil
}
// Suspend always returns an error because this isn't supported by AWS.
func (vm *VM) Suspend() error {
	return ErrNoSupportSuspend
}
// Resume always returns an error because this isn't supported by AWS.
func (vm *VM) Resume() error {
	return ErrNoSupportResume
}
// SetKeyPair sets the given private key and AWS key name for this vm.
// The private key is stored on the VM's SSH credentials; the name is the
// AWS key-pair identifier.
func (vm *VM) SetKeyPair(privateKey string, name string) {
	vm.SSHCreds.SSHPrivateKey = privateKey
	vm.KeyPair = name
}
// ResetKeyPair resets the key pair for this VM by clearing both the stored
// private key and the AWS key-pair name.
func (vm *VM) ResetKeyPair() {
	vm.SSHCreds.SSHPrivateKey = ""
	vm.KeyPair = ""
}
// ValidateAuth: returns error if credentials are incorrect.
// For this provider the action is not implemented, so it always returns an
// error.
func (vm *VM) ValidateAuth() error {
	return errors.New("Action : validate auth not supported")
}
|
package main
import (
"encoding/csv"
"fmt"
"io"
"os"
"regexp"
"strings"
)
// readCsvCn scans the GeoLite2 ASN CSV in fp and appends the prefix
// (column 0) of each row whose organization name (column 2) matches the
// case-insensitive pattern srcStr to "<outfile>.txt". Matching rows are
// also echoed to stdout in green.
func readCsvCn(srcStr string, fp *os.File, outfile string) {
	defer fp.Close()
	f, err := os.Create(outfile + ".txt")
	checkErr(err)
	defer f.Close() // FIX: the output file was never closed
	// FIX: compile the pattern once instead of recompiling it per row.
	re, err := regexp.Compile(strings.ToLower(srcStr))
	checkErr(err)
	r := csv.NewReader(fp)
	for {
		rec, err := r.Read()
		if err == io.EOF {
			break
		}
		// FIX: non-EOF read errors were ignored, which could panic below
		// when indexing a nil record.
		checkErr(err)
		if re.MatchString(strings.ToLower(rec[2])) {
			fmt.Println(string(ColorGreen), rec[0], "AS"+rec[1], rec[2])
			_, err := f.WriteString(rec[0] + "\n")
			checkErr(err)
		}
	}
}
// readCsvAsn scans the GeoLite2 ASN CSV in fp and appends the prefix
// (column 0) of each row whose ASN field (column 1) matches the
// case-insensitive pattern srcStr to "<outfile>.txt". Matching rows are
// also echoed to stdout in green.
func readCsvAsn(srcStr string, fp *os.File, outfile string) {
	defer fp.Close()
	f, err := os.Create(outfile + ".txt")
	checkErr(err)
	defer f.Close() // FIX: the output file was never closed
	// FIX: compile the pattern once instead of recompiling it per row.
	re, err := regexp.Compile(strings.ToLower(srcStr))
	checkErr(err)
	r := csv.NewReader(fp)
	for {
		rec, err := r.Read()
		if err == io.EOF {
			break
		}
		// FIX: non-EOF read errors were ignored, which could panic below
		// when indexing a nil record.
		checkErr(err)
		if re.MatchString(strings.ToLower(rec[1])) {
			fmt.Println(string(ColorGreen), rec[0], "AS"+rec[1], rec[2])
			_, err := f.WriteString(rec[0] + "\n")
			checkErr(err)
		}
	}
}
// searchCn searches the IPv4 or IPv6 GeoLite2 ASN database (selected by
// mode) for organization names matching srcStr, writing results to
// "<outfile>.txt". Unknown modes are ignored, matching the original
// behavior.
func searchCn(srcStr string, mode string, outfile string) {
	// The two branches differed only in the CSV filename; fold them.
	var name string
	switch mode {
	case "ipv4":
		name = "GeoLite2-ASN-Blocks-IPv4.csv"
	case "ipv6":
		name = "GeoLite2-ASN-Blocks-IPv6.csv"
	default:
		return
	}
	file, err := os.Open(name)
	checkErr(err)
	readCsvCn(srcStr, file, outfile)
}
// searchAsn searches the IPv4 or IPv6 GeoLite2 ASN database (selected by
// mode) for ASN fields matching srcStr, writing results to "<outfile>.txt".
// Unknown modes are ignored, matching the original behavior.
func searchAsn(srcStr string, mode string, outfile string) {
	// The two branches differed only in the CSV filename; fold them.
	var name string
	switch mode {
	case "ipv4":
		name = "GeoLite2-ASN-Blocks-IPv4.csv"
	case "ipv6":
		name = "GeoLite2-ASN-Blocks-IPv6.csv"
	default:
		return
	}
	file, err := os.Open(name)
	checkErr(err)
	readCsvAsn(srcStr, file, outfile)
}
|
package main
import (
"bufio"
"bytes"
"fmt"
"github.com/Yprolic/TomlConfiguration"
"io"
"io/ioutil"
"log"
"math"
"net/http"
"os"
"sort"
"strings"
"sync"
"time"
)
// Config is the global benchmark configuration, loaded from conf.toml in init.
type Config struct {
	Input string // path to the input file: one request payload per line
	ErrorLog string // path of the error-log file created at startup
	User int // number of concurrent worker goroutines
	Timeout int // per-request timeout in milliseconds (see worker)
	Url string // target URL for the POST requests
}
// Conf is the global configuration; logger/logFile receive request errors.
var (
	Conf *Config
	logger *log.Logger
	logFile *os.File
)
// init loads conf.toml into Conf and opens the error-log file that all
// workers log request failures to.
func init() {
	m := TomlConfiguration.TOMLLoader{Path: "conf.toml"}
	Conf = &Config{}
	if err := m.Load(Conf); err != nil {
		panic("config load error")
	}
	// FIX: the os.Create error was silently discarded, leaving logFile nil
	// and causing a later panic on the first logger write.
	var err error
	logFile, err = os.Create(Conf.ErrorLog)
	if err != nil {
		panic("cannot create error log: " + err.Error())
	}
	logger = log.New(logFile, "", 0)
}
// ch carries request payloads to the workers; done carries per-request
// latencies in nanoseconds; d carries one-second latency buckets to p.
var ch = make(chan string, 10000)
var done = make(chan int, 1E6)
var d = make(chan []int, 1E6)
// counter aggregates request latencies from done into one-second buckets
// and publishes each completed bucket on d.
func counter() {
	t := time.NewTicker(time.Second)
	for {
	LOOP:
		// A fresh bucket of latency samples for the current second.
		var v []int
		for {
			select {
			case latency := <-done:
				v = append(v, latency)
			case <-t.C:
				// Second elapsed: ship this bucket, then jump back to
				// start the next one (goto restarts the outer iteration).
				d <- v
				goto LOOP
			}
		}
	}
}
// p consumes the per-second latency buckets from d and prints QPS, P95/P99
// latency in milliseconds, and the timeout rate. It stops at the first
// empty bucket (a full second with no samples) and releases wg.
func p(wg *sync.WaitGroup) {
	// FIX: release the WaitGroup via defer so it also fires on panic.
	defer wg.Done()
	for t := range d {
		qps := len(t)
		if qps == 0 {
			break
		}
		sort.Ints(t)
		// Indices of the 95th and 99th percentile samples.
		pos1 := int(math.Round(float64(qps)*0.95) - 1)
		pos2 := int(math.Round(float64(qps)*0.99) - 1)
		errCount := 0
		for i := 0; i < qps; i++ {
			// Samples are nanoseconds; Conf.Timeout is milliseconds (see
			// worker). FIX: removed redundant int() conversion and gofmt'd
			// the `errCount ++` spacing.
			if Conf.Timeout*1000000 <= t[i] {
				errCount++
			}
		}
		fmt.Printf("QPS %d P95 %.2f ms P99 %.2f ms Timeout %.2f %% \n", qps, float32(t[pos1])/1000000, float32(t[pos2])/1000000, (float32(errCount)/float32(qps))*100)
	}
}
// worker consumes payloads from ch, POSTs each to Conf.Url, and reports the
// observed latency in nanoseconds on done. Failures are logged and recorded
// as a full Conf.Timeout latency.
func worker(wg *sync.WaitGroup) {
	defer wg.Done()
	t := &http.Transport{DisableKeepAlives: false}
	client := http.Client{
		Transport: t,
		Timeout:   time.Duration(Conf.Timeout) * time.Millisecond,
	}
	for q := range ch {
		n := time.Now()
		req, err := http.NewRequest("POST", Conf.Url, bytes.NewBufferString(q))
		if err != nil {
			// FIX: the NewRequest error was ignored; a nil request would
			// have panicked inside client.Do.
			logger.Print(err)
			done <- Conf.Timeout * 1000000
			continue
		}
		resp, err := client.Do(req)
		if err != nil {
			logger.Print(q)
			logger.Print(err)
			done <- Conf.Timeout * 1000000
		} else {
			// Drain and close the body so the connection can be reused.
			io.Copy(ioutil.Discard, resp.Body)
			resp.Body.Close()
			done <- int(time.Since(n).Nanoseconds())
		}
	}
}
// main reads request payloads (one per line) from Conf.Input, then replays
// them against Conf.Url with Conf.User concurrent workers while counter and
// p report per-second statistics.
func main() {
	f, err := os.Open(Conf.Input)
	if err != nil {
		// FIX: the Open error was ignored; a missing input file previously
		// caused a nil-pointer panic in the reader below.
		logger.Print(err)
		fmt.Println("cannot open input file:", err)
		logFile.Close()
		return
	}
	r := bufio.NewReader(f)
	var lines []string
	for {
		l, err := r.ReadBytes('\n')
		if err == io.EOF {
			// NOTE: a final unterminated line is dropped, matching the
			// original behavior.
			break
		}
		if err != nil {
			// FIX: a persistent non-EOF read error previously spun forever.
			logger.Print(err)
			break
		}
		lines = append(lines, strings.TrimSpace(string(l)))
	}
	f.Close()
	go counter()
	var wg sync.WaitGroup
	wg.Add(1)
	go p(&wg)
	for i := 0; i < Conf.User; i++ {
		wg.Add(1)
		go worker(&wg)
	}
	for _, q := range lines {
		// FIX: removed redundant string(q) conversion (q is already string).
		ch <- q
	}
	close(ch)
	wg.Wait()
	logFile.Close()
}
|
package packer
import (
"bytes"
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"log"
rand2 "math/rand"
"os"
"path/filepath"
"reflect"
"sync"
"testing"
"time"
)
// TestMarshalUnMarshal round-trips a fileHeader through its binary encoding:
// first bytes -> header -> bytes with the name length zeroed, then
// header -> bytes -> header with a path set.
func TestMarshalUnMarshal(t *testing.T) {
	var fromBin = func(data []byte) (*fileHeader, error) {
		r := bytes.NewReader(data)
		return unMarshallBinary(r)
	}
	var toBin = func(hdr *fileHeader) ([]byte, error) {
		outb := bytes.NewBuffer(nil)
		err := hdr.marshallBinary(outb)
		return outb.Bytes(), err
	}
	var hdr fileHeader
	{
		in := make([]byte, 32)
		rand.Read(in)
		// set name length explicitly to zero
		copy(in[0:], []byte{0, 0, 0, 0})
		// NOTE(review): this := shadows the outer hdr, so the second
		// sub-test below starts from a zero-value header — confirm intended.
		hdr, err := fromBin(in)
		if err != nil {
			t.Fatal(err)
		}
		out, err := toBin(hdr)
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(out, in) {
			t.Fatalf("input: \n%x\n != output:\n%x\n", in, out)
		}
	}
	{
		hdr.path = "abcde"
		hdr.Data.NameLen = uint32(len(hdr.path) + 1)
		out, err := toBin(&hdr)
		if err != nil {
			t.Fatal(err)
		}
		// FIX: removed a duplicated `if err != nil` check that re-tested the
		// same error immediately after this one.
		hdr2, err := fromBin(out)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(&hdr, hdr2) {
			t.Fatalf("err: %v != %v", hdr, hdr2)
		}
	}
}
func swapDirs(a, b string) error {
c := fmt.Sprintf("%v.tmp", a)
if err := os.Rename(a, c); err != nil {
return err
}
if err := os.Rename(b, a); err != nil {
return err
}
if err := os.Rename(c, b); err != nil {
return err
}
return nil
}
// TestEntireDirectory syncs both testdata trees, then swaps them on disk and
// syncs again so that paths that were files become dirs and vice versa.
func TestEntireDirectory(t *testing.T) {
	// Shoot over the /foobar dirs
	testEntireDirectory(t, "./testdata/foobar")
	testEntireDirectory(t, "./testdata/foobar2")
	// These now become
	// - /tmp/packtest/foobar and
	// - /tmp/packtest/foobar2
	// respectively
	// Now we swap them, and sync again. This should cause some headache, since
	// some files are now dirs and vice versa.
	if err := swapDirs("./testdata/foobar", "./testdata/foobar2"); err != nil {
		t.Fatal(err)
	}
	// Restore the original layout when the test finishes.
	defer swapDirs("./testdata/foobar", "./testdata/foobar2")
	testEntireDirectory(t, "./testdata/foobar")
	testEntireDirectory(t, "./testdata/foobar2")
}
// testEntireDirectory syncs path from a Sender to a Receiver over two
// in-memory pipes, with the receiver rooted in /tmp/packtest. The receiver
// runs on the test goroutine; the sender runs concurrently.
func testEntireDirectory(t *testing.T, path string) {
	pipeOneIn, pipeOneOut := io.Pipe()
	pipeTwoIn, pipeTwoOut := io.Pipe()
	// Resolve the syncsource before we chdir
	syncSource, err := filepath.Abs(path)
	if err != nil {
		t.Fatal(err)
	}
	cwd, err := os.Getwd()
	if err != nil {
		t.Fatal(err)
	}
	os.MkdirAll("/tmp/packtest", 0755)
	if err := os.Chdir("/tmp/packtest/"); err != nil {
		t.Fatal(err)
	}
	defer os.Chdir(cwd)
	opts := &Options{
		Compression: CompressionSnappy,
		//Compression: CompressionOff,
		CrcUsage:       FileCrcAtimeNsecMetadata,
		Verbosity:      4,
		IgnoreSymlinks: false,
	}
	var wg sync.WaitGroup
	wg.Add(1)
	var send = func() {
		defer wg.Done()
		defer pipeOneOut.Close()
		sender, err := NewSender(pipeOneOut, pipeTwoIn, opts)
		if err != nil {
			// FIX: t.Fatal must only be called from the goroutine running
			// the test (testing package docs); use t.Error + return here.
			t.Error(err)
			return
		}
		if err := sender.Sync(syncSource); err != nil {
			t.Error(err)
			return
		}
		// wait for response
		log.Print("Sender all done")
	}
	var recv = func() {
		defer pipeTwoOut.Close()
		r, err := NewReceiver(pipeOneIn, pipeTwoOut)
		if err != nil {
			t.Fatal(err)
		}
		// Receive directories + metadata
		if err := r.Sync(); err != nil {
			t.Fatalf("Error during sync: %v", err)
		}
		log.Printf("Receiver all done")
	}
	go send()
	recv()
	wg.Wait()
}
// testOsWalk resolves dirname to an absolute path, verifies it is a
// directory, and CRC-walks it recursively via testOsWalkInternal.
func testOsWalk(dirname string) error {
	absPath, _ := filepath.Abs(filepath.Clean(dirname))
	root, path := filepath.Split(absPath)
	stat, err := os.Lstat(absPath)
	if err != nil {
		return err
	}
	// Check that it actually is a directory
	if !stat.IsDir() {
		return fmt.Errorf("%v is not a directory", dirname)
	}
	return testOsWalkInternal(root, path, stat)
}
// With crc:
//BenchmarkCrcFilesBuf/test-32-6 1 1033402134 ns/op
//BenchmarkCrcFilesBuf/test-64-6 2 884616798 ns/op // 884 ms-- sane choice
//BenchmarkCrcFilesBuf/test-128-6 2 869347812 ns/op
//BenchmarkCrcFilesBuf/test-1M-6 2 873511816 ns/op
// Without crc:
//BenchmarkCrcFilesBuf/test-32-6 10 161151702 ns/op // 161 ms
//BenchmarkCrcFilesBuf/test-64-6 10 161965409 ns/op
//BenchmarkCrcFilesBuf/test-128-6 10 170212225 ns/op
//BenchmarkCrcFilesBuf/test-1M-6 10 166505231 ns/op
// testOsWalkInternal CRCs the entry at root/path and, when it is a
// directory, recurses into each of its children.
func testOsWalkInternal(root, path string, stat os.FileInfo) error {
	cur := filepath.Join(root, path)
	if _, err := CrcFile(cur, stat); err != nil {
		return err
	}
	if !stat.IsDir() {
		return nil
	}
	entries, err := ioutil.ReadDir(cur)
	if err != nil {
		return fmt.Errorf("read dir err on %v: %v", cur, err)
	}
	for _, entry := range entries {
		child := filepath.Join(path, entry.Name())
		if err := testOsWalkInternal(root, child, entry); err != nil {
			return err
		}
	}
	return nil
}
// TestCrcFiles CRC-walks a large local source tree as a smoke test.
func TestCrcFiles(t *testing.T) {
	// FIX: the walk target is a developer-machine path; skip instead of
	// failing on machines where it does not exist.
	const dir = "/home/user/go/src/github.com/ethereum/go-ethereum"
	if _, err := os.Stat(dir); err != nil {
		t.Skipf("test data dir not available: %v", err)
	}
	if err := testOsWalk(dir); err != nil {
		t.Fatal(err)
	}
}
// BenchmarkCrcFiles benchmarks a full CRC walk over a large local tree.
func BenchmarkCrcFiles(b *testing.B) {
	// FIX: skip when the developer-machine path is absent instead of
	// benchmarking an immediate error.
	const dir = "/home/user/go/src/github.com/ethereum/go-ethereum"
	if _, err := os.Stat(dir); err != nil {
		b.Skipf("bench data dir not available: %v", err)
	}
	for i := 0; i < b.N; i++ {
		testOsWalk(dir)
	}
}
// BenchmarkCrcFilesBuf runs the CRC walk under several named sub-benchmarks.
// The names refer to buffer-size variants (see the result table above);
// the walk itself is identical in each.
func BenchmarkCrcFilesBuf(b *testing.B) {
	// FIX: skip when the developer-machine path is absent, and fold the four
	// identical sub-benchmark bodies into one loop.
	const dir = "/home/user/go/src/github.com/ethereum/go-ethereum"
	if _, err := os.Stat(dir); err != nil {
		b.Skipf("bench data dir not available: %v", err)
	}
	for _, name := range []string{"test-32", "test-64", "test-128", "test-1M"} {
		b.Run(name, func(b *testing.B) {
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				testOsWalk(dir)
			}
		})
	}
}
// TestSymlinkOutsideOfJailRemoval tests that if the root-jailing is not active,
// that we still do not remove files outside of the sync directory.
//
// Scenario: the receiver has a symlink on the local filesystem:
// /tmp/linktest/thelink.baz -> /tmp/linktest.target
// If we sync over a directory where 'thelink.baz' is not present, the receiver
// should remove the symlink, _not_ the symlink target
func TestSymlinkOutsideOfJailRemoval(t *testing.T) {
	// Set up the canaries
	// One for a direct symlink
	// (Write/Close errors are ignored: best-effort fixture setup.)
	if f, err := os.Create("/tmp/linktest.target"); err != nil {
		t.Fatal(err)
	} else {
		f.Write([]byte("bazonk"))
		f.Close()
	}
	// One for a symlink within a dir that gets nuked
	if f, err := os.Create("/tmp/linktest.target2"); err != nil {
		t.Fatal(err)
	} else {
		f.Write([]byte("bazonk"))
		f.Close()
	}
	// Now sync 'linktest' directory
	testEntireDirectory(t, "./testdata/linktest")
	/*
		At this point, the symlinks are 'live', and resolve to actual existing files:
		[user@work linktest]$ ls -laR
		.:
		total 0
		drwxrwxr-x 3 user user 80 Nov 28 09:35 .
		drwxr-xr-x 3 user user 60 Nov 28 09:35 ..
		drwxrwxr-x 2 user user 60 Nov 28 09:35 directory
		lrwxrwxrwx 1 user user 20 Nov 28 09:35 link1 -> /tmp/linktest.target
		./directory:
		total 0
		drwxrwxr-x 2 user user 60 Nov 28 09:35 .
		drwxrwxr-x 3 user user 80 Nov 28 09:35 ..
		lrwxrwxrwx 1 user user 21 Nov 28 09:35 link2 -> /tmp/linktest.target2
	*/
	// So now we sync over 'emptydir', which will trigger the removal of these
	// But we must first swap the names
	if err := swapDirs("./testdata/linktest", "./testdata/emptydir"); err != nil {
		t.Fatal(err)
	}
	defer swapDirs("./testdata/linktest", "./testdata/emptydir")
	// Now sync 'linktest' directory (which is now empty)
	testEntireDirectory(t, "./testdata/linktest")
	// verify that none of the targets have been removed
	if _, err := os.Stat("/tmp/linktest.target"); err != nil {
		t.Fatalf("File missing: %v", err)
	}
	if _, err := os.Stat("/tmp/linktest.target2"); err != nil {
		t.Fatalf("File missing: %v", err)
	}
}
// TestOverwriteROnlyFiles verifies that a read-only file inside a read-only
// directory can still be synced over to a receiver.
func TestOverwriteROnlyFiles(t *testing.T) {
	// Randomize the directory name so reruns don't collide with leftovers.
	rand2.Seed(time.Now().Unix())
	dir := fmt.Sprintf("/tmp/rdonlytest-%d/readonlydir", rand2.Uint32())
	p := filepath.Join(dir, "readonlyfile")
	// create dir with permissive perms first, so we can create the file
	if err := os.MkdirAll(dir, 0777); err != nil {
		t.Fatal(err)
	}
	f, err := os.Create(p)
	if err != nil {
		t.Fatal(err)
	}
	// (Write error ignored: best-effort fixture content.)
	f.Write([]byte("This file is generated to check if we can delete files which " +
		"have perms 'r--r--r--'"))
	f.Close()
	if err := os.Chmod(p, 0444); err != nil {
		t.Fatal(err)
	}
	// If the dir doesn't have x, we can't open in
	if err := os.Chmod(dir, 0555); err != nil {
		t.Fatal(err)
	}
	// Now, we have a rdonly directory, with an rdonly file in it. Shoot it
	// over to a receiver
	testEntireDirectory(t, dir)
	// NOTE(review): cleanup of a 0555 directory may fail depending on how
	// RemoveIfExist handles permissions — confirm.
	RemoveIfExist(dir)
}
|
package rigger
import (
"github.com/AsynkronIT/protoactor-go/actor"
"github.com/sirupsen/logrus"
"time"
)
// allApplicationTopSupName is the registration key under which the shared
// top-level supervisor behaviour is registered.
const allApplicationTopSupName = "@rg$0"
// init registers a producer for the top-level supervisor behaviour under
// allApplicationTopSupName.
func init() {
	Register(allApplicationTopSupName, SupervisorBehaviourProducer(func() SupervisorBehaviour {
		return &applicationTopSup{}
	}))
}
// applicationTopSup is the stateless top-level supervisor behaviour.
type applicationTopSup struct {
}
// OnRestarting is a no-op: the supervisor holds no state to reset.
func (a *applicationTopSup) OnRestarting(ctx actor.Context) {
}
// OnStarted traces the supervisor's PID at startup and always succeeds.
func (a *applicationTopSup) OnStarted(ctx actor.Context, args interface{}) error {
	//registeredProcess[allApplicationTopSupName] = ctx.Self()
	logrus.Tracef("started: %v", ctx.Self())
	//if pid, exists := GetPid(allApplicationTopSupName); exists {
	//	logrus.Tracef("top sup: %v", pid)
	//} else {
	//	logrus.Error("faild to get top sup pid\r\n")
	//}
	return nil
}
// OnPostStarted is a no-op lifecycle hook.
func (a *applicationTopSup) OnPostStarted(ctx actor.Context, args interface{}) {
}
// OnStopping is a no-op lifecycle hook.
func (a *applicationTopSup) OnStopping(ctx actor.Context) {
}
// OnStopped is a no-op lifecycle hook.
func (a *applicationTopSup) OnStopped(ctx actor.Context) {
}
// OnGetSupFlag returns the supervision strategy: restart a failing child
// (one-for-one), allowing at most 10 restarts within a 3-second window.
// No static child specs are declared.
func (a *applicationTopSup) OnGetSupFlag(ctx actor.Context) (supFlag SupervisorFlag, childSpecs []*SpawnSpec) {
	supFlag = SupervisorFlag{
		WithinDuration: 3 * time.Second,
		MaxRetries:     10,
		StrategyFlag:   OneForOne,
		Decider: func(reason interface{}) actor.Directive {
			return actor.RestartDirective
		},
	}
	return
}
|
package main
// main is empty; this file only hosts the bonus helper.
func main() {}
// bonus returns the manager bonus for the first sale that reaches the
// 10,000 threshold: 5% of that sale, computed with integer arithmetic
// (sale/100*5). It returns 0 when no sale qualifies.
func bonus(sales []int) int {
	const (
		threshold    = 10_000
		percent      = 100
		bonusPercent = 5
	)
	for _, s := range sales {
		if s >= threshold {
			return s / percent * bonusPercent
		}
	}
	return 0
}
|
package sort
// Merge sorts list in place using a top-down merge sort. Slices of length
// 0 or 1 are already sorted and are left untouched.
func Merge(list []int) {
	if len(list) > 1 {
		msort(list, 0, len(list)-1)
	}
}
// msort recursively sorts list[start..end] (inclusive bounds) by sorting
// each half and merging them.
func msort(list []int, start, end int) {
	if start >= end {
		return
	}
	mid := start + (end-start)/2
	msort(list, start, mid)
	msort(list, mid+1, end)
	merge(list, start, mid, end)
}
// merge merges the adjacent sorted runs list[start..min] and
// list[min+1..end] (inclusive bounds) back into list[start..end].
func merge(list []int, start, min, end int) {
	buf := make([]int, 0, end-start+1)
	li, ri := start, min+1
	// Take the smaller head element until one run is exhausted.
	for li <= min && ri <= end {
		if list[li] < list[ri] {
			buf = append(buf, list[li])
			li++
		} else {
			buf = append(buf, list[ri])
			ri++
		}
	}
	// Copy whichever run has elements remaining.
	buf = append(buf, list[li:min+1]...)
	buf = append(buf, list[ri:end+1]...)
	copy(list[start:], buf)
}
|
package ravendb
// Note: Java's IAttachmentsSessionOperations is DocumentSessionAttachments
// TODO: make a unique wrapper type
type AttachmentsSessionOperations = DocumentSessionAttachments
// DocumentSessionAttachments exposes attachment operations bound to an
// in-memory document session.
type DocumentSessionAttachments struct {
	*DocumentSessionAttachmentsBase
}
// NewDocumentSessionAttachments creates attachment operations bound to the
// given in-memory session.
func NewDocumentSessionAttachments(session *InMemoryDocumentSessionOperations) *DocumentSessionAttachments {
	return &DocumentSessionAttachments{
		DocumentSessionAttachmentsBase: NewDocumentSessionAttachmentsBase(session),
	}
}
// Exists reports whether the document documentID has an attachment with the
// given name, using a HEAD request against the server.
func (s *DocumentSessionAttachments) Exists(documentID string, name string) (bool, error) {
	command, err := NewHeadAttachmentCommand(documentID, name, nil)
	if err != nil {
		return false, err
	}
	if err = s.requestExecutor.ExecuteCommand(command, s.sessionInfo); err != nil {
		return false, err
	}
	// A non-empty result means the attachment is present.
	return command.Result != "", nil
}
// GetByID fetches the named attachment of the document with the given ID.
func (s *DocumentSessionAttachments) GetByID(documentID string, name string) (*AttachmentResult, error) {
	op := NewGetAttachmentOperation(documentID, name, AttachmentDocument, "", nil)
	if err := s.session.GetOperations().Send(op, s.sessionInfo); err != nil {
		return nil, err
	}
	return op.Command.Result, nil
}
// Get fetches the named attachment of an entity that is tracked by the
// session; the entity must already be in the session.
func (s *DocumentSessionAttachments) Get(entity interface{}, name string) (*AttachmentResult, error) {
	document := getDocumentInfoByEntity(s.documents, entity)
	if document == nil {
		return nil, throwEntityNotInSession(entity)
	}
	return s.GetByID(document.id, name)
}
// GetRevision fetches the named attachment from the document revision
// identified by changeVector.
func (s *DocumentSessionAttachments) GetRevision(documentID string, name string, changeVector *string) (*AttachmentResult, error) {
	op := NewGetAttachmentOperation(documentID, name, AttachmentRevision, "", changeVector)
	if err := s.session.GetOperations().Send(op, s.sessionInfo); err != nil {
		return nil, err
	}
	return op.Command.Result, nil
}
|
package main
import (
"fmt"
"gopkg.in/ldap.v2"
)
// getBaseDN constructs the base DN by substituting username into
// dnTemplate, which is expected to contain a single %s placeholder.
func getBaseDN(dnTemplate, username string) string {
	return fmt.Sprintf(dnTemplate, username)
}
// ldapChangePassword binds to the directory as userdn using the current
// password and issues a password-modify request to set newpassword.
func ldapChangePassword(cfg *config, username, userdn, password, newpassword string) error {
	conn, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", cfg.ldapHost, cfg.ldapPort))
	if err != nil {
		return err
	}
	defer conn.Close()
	if err = conn.Bind(userdn, password); err != nil {
		return err
	}
	req := ldap.NewPasswordModifyRequest(userdn, password, newpassword)
	if _, err = conn.PasswordModify(req); err != nil {
		return err
	}
	return nil
}
// ldapAddMailalias binds to the directory as userdn and adds newmailalias
// to the entry's "mailalias" attribute.
func ldapAddMailalias(cfg *config, username, userdn, password, newmailalias string) error {
	conn, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", cfg.ldapHost, cfg.ldapPort))
	if err != nil {
		return err
	}
	defer conn.Close()
	if err = conn.Bind(userdn, password); err != nil {
		return err
	}
	mod := ldap.NewModifyRequest(userdn)
	mod.Add("mailalias", []string{newmailalias})
	if err = conn.Modify(mod); err != nil {
		return err
	}
	return nil
}
// ldapSearch binds as userdn and runs the configured user filter for
// username over the whole subtree below cfg.basedn, returning all
// attributes of the matching entries.
func ldapSearch(cfg *config, username, userdn, password string) (*ldap.SearchResult, error) {
	conn, err := ldap.Dial("tcp", fmt.Sprintf("%s:%d", cfg.ldapHost, cfg.ldapPort))
	if err != nil {
		return nil, err
	}
	defer conn.Close()
	if err = conn.Bind(userdn, password); err != nil {
		return nil, err
	}
	req := ldap.NewSearchRequest(cfg.basedn,
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
		fmt.Sprintf(cfg.userFilterTemplate, username),
		[]string{},
		nil,
	)
	return conn.Search(req)
}
|
package controllers
import (
"github.com/gin-gonic/gin"
)
//Register register API routes
// Register wires the API routes onto the given router group: a health-check
// endpoint plus the /planets resource. Note that POST /, GET /:id and
// DELETE /:id all dispatch to the same Planet handler.
func Register(group gin.IRouter) {
	group.GET("/health-check", HealthCheck)

	p := group.Group("/planets")
	p.GET("/", Planets)
	p.POST("/", Planet)
	p.GET("/:id", Planet)
	p.DELETE("/:id", Planet)
}
|
// See License for license information.
// Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
package main
import (
"fmt"
"net/http"
"net/url"
"github.com/pkg/errors"
"github.com/mattermost/mattermost-server/v6/model"
"github.com/mattermost/mattermost-plugin-jira/server/utils/types"
)
const (
	// Nobody is a placeholder value for an absent user — presumably used
	// when an issue has no assignee; confirm against the payload parsers.
	Nobody = "_nobody_"

	// Jira webhook event-type identifiers matched against incoming events.
	commentDeleted = "comment_deleted"
	commentUpdated = "comment_updated"
	commentCreated = "comment_created"
	worklogUpdated = "jira:worklog_updated"
)

// Webhook is a parsed Jira webhook event that can be rendered either as a
// channel post (subscriptions) or as per-user DM notifications.
type Webhook interface {
	Events() StringSet
	PostToChannel(p *Plugin, instanceID types.ID, channelID, fromUserID, subscriptionName string) (*model.Post, int, error)
	PostNotifications(p *Plugin, instanceID types.ID) ([]*model.Post, int, error)
}

// webhookField records an issue field change carried by a webhook event.
type webhookField struct {
	name string
	id   string
	from string
	to   string
}

// webhook is the standard Webhook implementation constructed by newWebhook.
type webhook struct {
	*JiraWebhook
	eventTypes    StringSet
	headline      string
	text          string
	fields        []*model.SlackAttachmentField
	notifications []webhookUserNotification
	fieldInfo     webhookField
}

// webhookUserNotification describes one direct message to be delivered to a
// connected Mattermost user, identified by Jira username or account ID.
type webhookUserNotification struct {
	jiraUsername  string
	jiraAccountID string
	message       string
	postType      string
	commentSelf   string
}
// Events returns the set of Jira event types this webhook represents.
func (wh *webhook) Events() StringSet {
	return wh.eventTypes
}
// PostToChannel renders the webhook as a post in channelID authored by
// fromUserID and creates it, returning the post and an HTTP-style status.
// Note the value receiver: the headline mutation below affects only this
// call's copy of wh.
func (wh webhook) PostToChannel(p *Plugin, instanceID types.ID, channelID, fromUserID, subscriptionName string) (*model.Post, int, error) {
	if wh.headline == "" {
		return nil, http.StatusBadRequest, errors.Errorf("unsupported webhook")
	} else if p.getConfig().DisplaySubscriptionNameInNotifications && subscriptionName != "" {
		wh.headline = fmt.Sprintf("%s\nSubscription: **%s**", wh.headline, subscriptionName)
	}
	post := &model.Post{
		ChannelId: channelID,
		UserId:    fromUserID,
	}
	// Body text is omitted entirely when the admin hides descriptions and
	// comments; otherwise Jira account IDs are rewritten to mentions.
	text := ""
	if wh.text != "" && !p.getConfig().HideDecriptionComment {
		text = p.replaceJiraAccountIds(instanceID, wh.text)
	}
	// With body text or fields, render a Slack-style attachment; otherwise
	// fall back to a plain message carrying just the headline.
	if text != "" || len(wh.fields) != 0 {
		model.ParseSlackAttachment(post, []*model.SlackAttachment{
			{
				// TODO is this supposed to be themed?
				Color: "#95b7d0",
				Fallback: wh.headline,
				Pretext: wh.headline,
				Text: text,
				Fields: wh.fields,
			},
		})
	} else {
		post.Message = wh.headline
	}
	err := p.client.Post.CreatePost(post)
	if err != nil {
		return nil, http.StatusInternalServerError, err
	}
	return post, http.StatusOK, nil
}
// PostNotifications delivers the webhook's queued user notifications as bot
// DMs. A user is only notified if they are mapped to a Mattermost account,
// connected to the Jira instance, and permitted to view the relevant
// comment or issue. Per-user failures are skipped, never fatal.
func (wh *webhook) PostNotifications(p *Plugin, instanceID types.ID) ([]*model.Post, int, error) {
	if len(wh.notifications) == 0 {
		return nil, http.StatusOK, nil
	}
	// We will only send webhook events if we have a connected instance.
	instance, err := p.instanceStore.LoadInstance(instanceID)
	if err != nil {
		// This isn't an internal server error. There's just no instance installed.
		return nil, http.StatusOK, nil
	}
	posts := []*model.Post{}
	for _, notification := range wh.notifications {
		var mattermostUserID types.ID
		var err error
		// prefer accountId to username when looking up UserIds
		if notification.jiraAccountID != "" {
			mattermostUserID, err = p.userStore.LoadMattermostUserID(instance.GetID(), notification.jiraAccountID)
		} else {
			mattermostUserID, err = p.userStore.LoadMattermostUserID(instance.GetID(), notification.jiraUsername)
		}
		if err != nil {
			continue
		}
		// Check if the user has permissions.
		c, err2 := p.userStore.LoadConnection(instance.GetID(), mattermostUserID)
		if err2 != nil {
			// Not connected to Jira, so can't check permissions
			continue
		}
		client, err2 := instance.GetClient(c)
		if err2 != nil {
			p.errorf("PostNotifications: error while getting jiraClient, err: %v", err2)
			continue
		}
		// If this is a comment-related webhook, we need to check if they have permissions to read that.
		// Otherwise, check if they can view the issue.
		isCommentEvent := wh.Events().Intersection(commentEvents).Len() > 0
		if isCommentEvent {
			if instance.Common().IsCloudInstance() {
				err = client.RESTGet(fmt.Sprintf("/2/issue/%s/comment/%s", wh.Issue.ID, wh.Comment.ID), nil, &struct{}{})
			} else {
				err = client.RESTGet(notification.commentSelf, nil, &struct{}{})
			}
		} else {
			_, err = client.GetIssue(wh.Issue.ID, nil)
		}
		if err != nil {
			p.errorf("PostNotifications: failed to get self: %v", err)
			continue
		}
		notification.message = p.replaceJiraAccountIds(instance.GetID(), notification.message)
		post, err := p.CreateBotDMPost(instance.GetID(), mattermostUserID, notification.message, notification.postType)
		if err != nil {
			p.errorf("PostNotifications: failed to create notification post, err: %v", err)
			continue
		}
		posts = append(posts, post)
	}
	return posts, http.StatusOK, nil
}
// newWebhook builds a webhook for a single event type with a headline of
// the form "<user> <formatted action> <issue key/summary link>".
func newWebhook(jwh *JiraWebhook, eventType string, format string, args ...interface{}) *webhook {
	headline := fmt.Sprintf("%s %s %s", jwh.mdUser(), fmt.Sprintf(format, args...), jwh.mdKeySummaryLink())
	return &webhook{
		JiraWebhook: jwh,
		eventTypes:  NewStringSet(eventType),
		headline:    headline,
	}
}
// GetWebhookURL returns the subscription webhook URL and the legacy
// incoming-webhook URL for the given Jira URL, team, and channel. Both URLs
// carry the shared secret; the legacy URL additionally encodes the team and
// channel names.
func (p *Plugin) GetWebhookURL(jiraURL string, teamID, channelID string) (subURL, legacyURL string, err error) {
	cf := p.getConfig()
	instanceID, err := p.ResolveWebhookInstanceURL(jiraURL)
	if err != nil {
		return "", "", err
	}
	team, err := p.client.Team.Get(teamID)
	if err != nil {
		return "", "", err
	}
	channel, err := p.client.Channel.Get(channelID)
	if err != nil {
		return "", "", err
	}
	v := url.Values{}
	v.Add("secret", cf.Secret)
	subURL = p.GetPluginURL() + instancePath(makeAPIRoute(routeAPISubscribeWebhook), instanceID) + "?" + v.Encode()
	// For the legacy URL, add team and channel. Secret is already in the map.
	v.Add("team", team.Name)
	v.Add("channel", channel.Name)
	legacyURL = p.GetPluginURL() + instancePath(routeIncomingWebhook, instanceID) + "?" + v.Encode()
	return subURL, legacyURL, nil
}
// getSubscriptionsWebhookURL returns the subscription webhook URL for the
// given instance, with the shared secret in the query string.
func (p *Plugin) getSubscriptionsWebhookURL(instanceID types.ID) string {
	params := url.Values{}
	params.Add("secret", p.getConfig().Secret)
	return p.GetPluginURL() + instancePath(makeAPIRoute(routeAPISubscribeWebhook), instanceID) + "?" + params.Encode()
}
|
package server
import (
"2C_vehicle_ms/pkg"
"log"
"net/http"
"os"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
)
// Server wraps the gorilla/mux router serving the vehicle API.
type Server struct {
	router *mux.Router
}

// NewServer builds a Server with both the vehicle and favroute routes
// mounted under the root path.
func NewServer(v root.VehicleService, f root.FavrouteService) *Server {
	s := Server{router: mux.NewRouter()}
	NewVehicleRouter(v, s.newSubrouter("/"))
	NewFavrouteRouter(f, s.newSubrouter("/"))
	return &s
}
/*func NewServerVehicle(v root.VehicleService) *Server {
s := Server{router: mux.NewRouter()}
NewVehicleRouter(v, s.newSubrouter("/"))
return &s
}
func NewServerFavroute(v root.FavrouteService) *Server {
s := Server{router: mux.NewRouter()}
NewFavrouteRouter(v, s.newSubrouter("/"))
return &s
}*/
// Start serves the router on port 6005 with request logging to stdout.
// It blocks, and terminates the process if the listener fails.
func (s *Server) Start() {
	log.Println("Listening on port 6005")
	err := http.ListenAndServe(":6005", handlers.LoggingHandler(os.Stdout, s.router))
	if err != nil {
		log.Fatal("http.ListenAndServe: ", err)
	}
}
// newSubrouter returns a subrouter of the server's router rooted at path.
func (s *Server) newSubrouter(path string) *mux.Router {
	return s.router.PathPrefix(path).Subrouter()
}
|
package version
import "fmt"
var (
APPNAME = "Unknown"
BRANCH = "Unknown"
TAG = "Unknown"
REVISION = "Unknown"
BUILDTIME = "Unknown"
GOVERSION = "Unknown"
BINDVERSION = "Unknown"
)
func String() string {
return fmt.Sprintf(`-----------------------------------------
-----------------------------------------
AppName: %v
Branch: %v
Tag: %v
BindVersion: %v
Revision: %v
Go: %v
BuildTime: %v
-----------------------------------------
-----------------------------------------
`, APPNAME, BRANCH, TAG, BINDVERSION, REVISION, GOVERSION, BUILDTIME)
}
|
package ru_test
import (
"testing"
"time"
"github.com/olebedev/when"
"github.com/olebedev/when/rules"
"github.com/olebedev/when/rules/ru"
)
// TestHourMinute exercises the Russian HH:MM parsing rule, alone and
// combined with the Hour rule in both orders. Fixture, ApplyFixtures and
// ApplyFixturesNil are defined elsewhere in this test package.
func TestHourMinute(t *testing.T) {
	w := when.New(nil)
	w.Add(ru.HourMinute(rules.Override))
	// Valid inputs: expected match offset, matched text, and time-of-day.
	fixtok := []Fixture{
		{"5:30вечера", 0, "5:30вечера", (17 * time.Hour) + (30 * time.Minute)},
		{"в 5:30 вечера", 3, "5:30 вечера", (17 * time.Hour) + (30 * time.Minute)},
		{"в 5:59 вечера", 3, "5:59 вечера", (17 * time.Hour) + (59 * time.Minute)},
		{"в 5-59 вечера", 3, "5-59 вечера", (17 * time.Hour) + (59 * time.Minute)},
		{"в 17-59 вечерело", 3, "17-59", (17 * time.Hour) + (59 * time.Minute)},
		{"до 11.10 вечера", 5, "11.10 вечера", (23 * time.Hour) + (10 * time.Minute)},
	}
	// Out-of-range hours/minutes must not match at all.
	fixtnil := []Fixture{
		{"28:30вечера", 0, "", 0},
		{"12:61вечера", 0, "", 0},
		{"24:10", 0, "", 0},
	}
	// ApplyFixtures(t, "ru.HourMinute", w, fixtok)
	// ApplyFixturesNil(t, "on.HourMinute nil", w, fixtnil)
	w.Add(ru.Hour(rules.Skip))
	// ApplyFixtures(t, "ru.HourMinute|ru.Hour", w, fixtok)
	ApplyFixturesNil(t, "ru.HourMinute|ru.Hour nil", w, fixtnil)
	// Fresh parser with the rules added in the opposite order.
	w = when.New(nil)
	w.Add(
		ru.Hour(rules.Override),
		ru.HourMinute(rules.Override),
	)
	ApplyFixtures(t, "ru.Hour|ru.HourMinute", w, fixtok)
	ApplyFixturesNil(t, "ru.Hour|ru.HourMinute nil", w, fixtnil)
}
|
package sorts
// QuickSort sorts a[p..r] in place, partitioning with partitionHoare and
// recursing on both sides of the returned pivot index.
func QuickSort(a []int, p, r int) {
	if p >= r {
		return
	}
	pivot := partitionHoare(a, p, r)
	QuickSort(a, p, pivot-1)
	QuickSort(a, pivot+1, r)
}
// SortK get kth ordered element, kth will be the final index,
// the array will be partitioned for kth element.
// k is interpreted as a 0-based rank (panics otherwise). The result
// depends on selection4K placing the k-th smallest element at the
// returned index — NOTE(review): verify selection4K's rank arithmetic,
// which looks off-by-one for some inputs.
func SortK(a []int, k int) int {
	n := len(a)
	if k < 0 || k >= n {
		panic("k < 0 || k >= n")
	}
	index := selection4K(a, 0, n-1, k)
	return a[index]
}
// selection4K returns the index of the k-th smallest element (0-based k
// relative to the subarray a[left..right]), partially partitioning a in the
// process (quickselect).
//
// The previous version compared the left-partition size with `diff >= k`
// (off by one, since k is a 0-based rank) and returned early whenever the
// pivot landed at `right`, both of which produced wrong answers, e.g.
// selecting k=2 in [3,1,2] returned the index of 2 instead of 3.
func selection4K(a []int, left, right, k int) int {
	if left == right {
		return left
	}
	mid := partitionS(a, left, right)
	// rank is the pivot's 0-based position within [left, right].
	rank := mid - left
	if k == rank {
		return mid
	}
	if k < rank {
		// Target lies strictly left of the pivot.
		return selection4K(a, left, mid-1, k)
	}
	// Target lies right of the pivot; skip the pivot and everything left of it.
	return selection4K(a, mid+1, right, k-rank-1)
}

// partitionS is a Lomuto partition of a[p..r] around pivot a[r]; it returns
// the pivot's final index, with everything <= pivot to its left.
func partitionS(a []int, p, r int) int {
	i := p
	x := a[r]
	for j := p; j < r; j++ { // j must begin with p
		if a[j] <= x {
			if i != j {
				a[j], a[i] = a[i], a[j]
			}
			i++
		}
	}
	a[i], a[r] = a[r], a[i]
	return i
}
// partitionQ is a Lomuto partition of a[p..r] around pivot a[r]: elements
// no greater than the pivot are swapped to the front, then the pivot is
// placed between the two regions. Returns the pivot's final index.
func partitionQ(a []int, p, r int) int {
	pivot := a[r]
	next := p // next slot for an element <= pivot
	for cur := p; cur < r; cur++ {
		if a[cur] <= pivot {
			if next != cur {
				a[next], a[cur] = a[cur], a[next]
			}
			next++
		}
	}
	a[next], a[r] = a[r], a[next]
	return next
}
// partitionHoare partitions a[l..r] around pivot a[l] by scanning inward
// from both ends, then moves the pivot into its final slot and returns that
// index.
func partitionHoare(a []int, l, r int) int {
	pivot := a[l]
	lo := l
	hi := r + 1
	for lo < hi {
		lo++
		for lo < r && a[lo] < pivot {
			lo++
		}
		hi--
		for hi > l && a[hi] > pivot {
			hi--
		}
		a[lo], a[hi] = a[hi], a[lo]
	}
	// The loop's last swap fires even after the scans cross; undo it, then
	// put the pivot in place at hi.
	a[lo], a[hi] = a[hi], a[lo]
	a[l], a[hi] = a[hi], a[l]
	return hi
}
|
// Showcase the usage of strings in Go.
package main
import (
"fmt"
"strings"
)
// hasSuffix demonstrates strings.HasSuffix with a matching and a
// non-matching suffix (the "!" makes the difference).
func hasSuffix() {
	s := "Hello, World!"
	fmt.Printf("s: %v\n", s)
	for _, suffix := range []string{"World", "World!"} {
		fmt.Printf("Has suffix \"%v\": %v\n", suffix, strings.HasSuffix(s, suffix))
	}
}
// convertByteSlicesToAndFromString demonstrates the string -> []byte ->
// string round trip, printing each representation.
func convertByteSlicesToAndFromString() {
	original := `Ima string`
	asBytes := []byte(original)
	roundTrip := string(asBytes)
	fmt.Printf("String s: %s\n", original)
	fmt.Printf("[]byte: %+v\n", asBytes)
	fmt.Printf("String t: %s\n", roundTrip)
}
// main runs the two string showcases in sequence.
func main() {
	fmt.Println("--- 1 HasSuffix ---")
	hasSuffix()
	fmt.Println("\n--- 2 byte[] and string conversion is trivial ---")
	convertByteSlicesToAndFromString()
}
|
package main
import (
"testing"
"github.com/monax/peptide/cmd/protoc-gen-go-peptide/testdata/gogo"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
)
// TestGogo verifies that a message carrying gogoproto-style custom tags
// survives a proto.Marshal/Unmarshal round trip unchanged.
func TestGogo(t *testing.T) {
	t.Run("Test gogogo", func(t *testing.T) {
		msg := &gogo.TestMessage{
			WithMoreTags: "tagly",
			WithJsonTag:  false,
			Fruit:        gogo.Apples,
		}
		bs, err := proto.Marshal(msg)
		// require.NoError aborts via t.FailNow on error, so the old
		// `if err != nil { t.Fatal(err) }` that followed was dead code.
		require.NoError(t, err)
		msgOut := new(gogo.TestMessage)
		err = proto.Unmarshal(bs, msgOut)
		require.NoError(t, err)
		require.True(t, proto.Equal(msg, msgOut))
	})
}
|
// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package bluetooth
import (
"context"
"time"
"chromiumos/tast/remote/bluetooth"
"chromiumos/tast/testing"
)
// init registers TestConnectToBTPeers with the tast framework, pinned to
// the fixture that logs into Chrome with two btpeers available.
func init() {
	testing.AddTest(&testing.Test{
		Func:         TestConnectToBTPeers,
		LacrosStatus: testing.LacrosVariantUnneeded,
		Desc:         "Checks that a remote test can connect to btpeers and call a chameleond method",
		Contacts: []string{
			"jaredbennett@google.com",
			"cros-connectivity@google.com",
		},
		// TODO(b/245584709): Need to make new btpeer test attributes.
		Attr:         []string{},
		SoftwareDeps: []string{"chrome"},
		Fixture:      "chromeLoggedInWith2BTPeers",
		Timeout:      time.Second * 15,
	})
}
// TestConnectToBTPeers tests that a remote test can connect to btpeers and call
// a chameleond method on each of the two fixture-provided peers.
func TestConnectToBTPeers(ctx context.Context, s *testing.State) {
	fv := s.FixtValue().(*bluetooth.FixtValue)
	// Fixed typo in both failure messages: "chamleleond" -> "chameleond".
	if _, err := fv.BTPeers[0].GetMacAddress(ctx); err != nil {
		s.Fatal("Failed to call chameleond method 'GetMacAddress' on btpeer1: ", err)
	}
	if err := fv.BTPeers[1].BluetoothAudioDevice().Reboot(ctx); err != nil {
		s.Fatal("Failed to call chameleond method 'Reboot' on btpeer2.BluetoothAudioDevice: ", err)
	}
}
|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package etcd
import (
"github.com/apache/servicecomb-kie/server/datasource"
"github.com/apache/servicecomb-kie/server/datasource/etcd/counter"
"github.com/apache/servicecomb-kie/server/datasource/etcd/history"
"github.com/apache/servicecomb-kie/server/datasource/etcd/kv"
"github.com/apache/servicecomb-kie/server/datasource/etcd/rbac"
"github.com/apache/servicecomb-kie/server/datasource/etcd/track"
rbacdao "github.com/apache/servicecomb-kie/server/datasource/rbac"
)
// Broker is the etcd-backed datasource.Broker. It is stateless; every
// Get*Dao call returns a fresh DAO value.
type Broker struct {
}

// NewFrom initializes the kv layer and returns an etcd Broker. The config
// argument is currently unused and the returned error is always nil.
func NewFrom(c *datasource.Config) (datasource.Broker, error) {
	kv.Init()
	return &Broker{}, nil
}

// GetRevisionDao returns the counter-based revision DAO.
func (*Broker) GetRevisionDao() datasource.RevisionDao {
	return &counter.Dao{}
}

// GetKVDao returns the key-value DAO.
func (*Broker) GetKVDao() datasource.KVDao {
	return &kv.Dao{}
}

// GetHistoryDao returns the KV history DAO.
func (*Broker) GetHistoryDao() datasource.HistoryDao {
	return &history.Dao{}
}

// GetTrackDao returns the polling-track DAO.
func (*Broker) GetTrackDao() datasource.TrackDao {
	return &track.Dao{}
}

// GetRbacDao returns the RBAC DAO.
func (*Broker) GetRbacDao() rbacdao.Dao {
	return &rbac.Dao{}
}

// init registers this broker under both the external and the embedded etcd
// plugin names.
func init() {
	datasource.RegisterPlugin("etcd", NewFrom)
	datasource.RegisterPlugin("embedded_etcd", NewFrom)
}
|
package main
import (
"fmt"
"os"
"golang.org/x/net/html"
)
// forEachNode walks the tree rooted at n, calling pre before visiting a
// node's children and post after. If either callback returns false the
// traversal stops and the node where it stopped is returned; a completed
// traversal returns nil.
func forEachNode(n *html.Node, pre, post func(n *html.Node) bool) *html.Node {
	if pre != nil {
		if !pre(n) {
			return n
		}
	}
	for c := n.FirstChild; c != nil; c = c.NextSibling {
		// Propagate a hit found anywhere in the subtree.
		if res := forEachNode(c, pre, post); res != nil {
			return res
		}
	}
	if post != nil {
		if !post(n) {
			return n
		}
	}
	return nil
}
// ElementById returns the first element under node whose "id" attribute
// equals id, or nil if no such element exists.
func ElementById(node *html.Node, id string) *html.Node {
	match := func(n *html.Node) bool {
		if n.Type != html.ElementNode {
			return true // keep walking
		}
		for _, attr := range n.Attr {
			if attr.Key == "id" {
				// Returning false stops forEachNode at this node.
				return attr.Val != id
			}
		}
		return true
	}
	return forEachNode(node, match, nil)
}
// main parses an HTML document from stdin and prints the element whose id
// matches the first command-line argument.
func main() {
	// Validate arguments up front: the original indexed os.Args[1]
	// unconditionally and panicked when no id was supplied.
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "outline2: usage: outline2 <id>")
		os.Exit(1)
	}
	doc, err := html.Parse(os.Stdin)
	if err != nil {
		fmt.Fprintf(os.Stderr, "outline2: parse: %v", err)
		os.Exit(1)
	}
	id := os.Args[1]
	res := ElementById(doc, id)
	fmt.Println(res)
}
|
package rest_grab
// A Puller is a type that performs its own logic related to
// receiving data from a request. This is useful for password types
// (to automatically hash the value) or anything that needs to perform
// some type of validation.
type Puller interface {
	// Pull takes a value (as would be sent in a request) and
	// attempts to set the Puller's value to match the passed-in
	// value. It returns an error if the value isn't valid or
	// can't be parsed to the Puller.
	Pull(interface{}) error
}
|
package goinstagrab
import (
"encoding/json"
"io/ioutil"
"log"
"os"
"github.com/iveronanomi/goinstagrab/crawler"
)
// Dump is the process-wide crawl state, populated by ReadDump and
// persisted by SaveDump.
var Dump *dump

// dump is the serialized crawl state written to dump.json.
type dump struct {
	// Users — key/value semantics not evident from this file; confirm
	// against the crawler before relying on either side of the map.
	Users map[string]string `json:"users"`
	// Media indexes already-scanned files as user -> media -> filename.
	Media map[string]map[string]map[string]bool `json:"scanned"`
	// Conversations stores dumped messages keyed by username.
	Conversations map[string][]crawler.Message `json:"conversations"`
}
// IsScanned reports whether filename under user/media has already been
// recorded in the scanned-media index.
func (d *dump) IsScanned(user, media, filename string) bool {
	// Indexing a nil or missing map level yields the zero value, so a
	// chained lookup returns false for any absent level without panicking.
	return d.Media[user][media][filename]
}
// MarkMediaScanned records filename under user/media in the scanned-media
// index, lazily allocating each map level on first use.
func (d *dump) MarkMediaScanned(user, media, filename string) {
	log.SetPrefix("MarkMediaScanned ")
	if d.Media == nil {
		d.Media = map[string]map[string]map[string]bool{}
	}
	if _, ok := d.Media[user]; !ok {
		d.Media[user] = map[string]map[string]bool{}
	}
	if _, ok := d.Media[user][media]; !ok {
		d.Media[user][media] = map[string]bool{}
	}
	d.Media[user][media][filename] = true
}
// DumpMessage appends message to username's conversation history, lazily
// allocating the Conversations map on first use. (The previous comment
// named this "MarkConversationAsRead", which was a copy-paste error.)
func (d *dump) DumpMessage(username string, message crawler.Message) {
	if d.Conversations == nil {
		d.Conversations = make(map[string][]crawler.Message)
	}
	d.Conversations[username] = append(d.Conversations[username], message)
}
// ReadDump loads the crawl state from ./dump.json into the package-level
// Dump variable.
//
// The previous version only logged an os.Open failure and kept going with a
// nil *os.File, so both the deferred Close and the JSON decode would panic
// with a nil-pointer dereference whenever dump.json was missing.
func ReadDump() error {
	log.SetPrefix("ReadDump ")
	f, err := os.Open("./dump.json")
	if err != nil {
		log.Print("couldn't read dump.json")
		return err
	}
	defer f.Close()
	Dump = &dump{}
	return json.NewDecoder(f).Decode(Dump)
}
// SaveDump serializes the package-level Dump state to ./dump.json,
// returning any marshalling or write error.
func SaveDump() error {
	log.SetPrefix("SaveDump ")
	b, err := json.Marshal(Dump)
	if err != nil {
		return err
	}
	// The original ended with `return err` after a shadowed error check,
	// which always returned nil; returning WriteFile's error directly is
	// equivalent and clearer.
	return ioutil.WriteFile("./dump.json", b, os.ModePerm)
}
|
package main
import (
"github.com/gorilla/context"
"log"
"net/http"
"strconv"
)
// main serves myHander on port 9999. Without ClearHandler, per-request
// context values rely on being deleted explicitly.
func main() {
	//http.Handle("/",context.ClearHandler(http.HandlerFunc(myHander)))
	// ClearHandler would clear request-scoped values automatically.
	http.HandleFunc("/", myHander)
	log.Fatal(http.ListenAndServe(":9999", nil))
}
// myHander stores demo values in the gorilla/context request store, removes
// one of them again, and delegates the response to doHander.
func myHander(rw http.ResponseWriter, r *http.Request) {
	context.Set(r, "user", "Gai")
	context.Set(r, "age", 22)
	context.Set(r, "sex", 1)
	// "sex" is deleted here, so doHander sees only "user" and "age".
	context.Delete(r, "sex")
	doHander(rw, r)
}
// doHander reads the request-scoped values set by myHander and writes a
// plain-text response containing the user and age.
func doHander(rw http.ResponseWriter, r *http.Request) {
	allParams := context.GetAll(r)
	log.Println(allParams)
	// GetAll builds its result by iterating the internal map. Because maps
	// are reference types, returning the internal map directly would let
	// callers mutate shared state, so it must return a copy.
	user := context.Get(r, "user").(string)
	age := context.Get(r, "age").(int)
	rw.WriteHeader(http.StatusOK)
	rw.Write([]byte("the user is " + user + ",age is " + strconv.Itoa(age)))
}
|
package wasmvm
import (
"bytes"
"github.com/zhaohaijun/matrixchain/common"
"github.com/zhaohaijun/matrixchain/core/states"
scommon "github.com/zhaohaijun/matrixchain/core/store/common"
"github.com/zhaohaijun/matrixchain/errors"
"github.com/zhaohaijun/matrixchain/vm/wasmvm/exec"
"github.com/zhaohaijun/matrixchain/vm/wasmvm/memory"
"github.com/zhaohaijun/matrixchain/vm/wasmvm/util"
)
//======================store apis here============================================
// putstore implements the contract storage-put API: params[0] is a pointer
// to the key (max 1024 bytes), params[1] a pointer to the value. The pair
// is written to the clone cache under the calling contract's address, and
// the VM context is restored before returning.
func (this *WasmVmService) putstore(engine *exec.ExecutionEngine) (bool, error) {
	vm := engine.GetVM()
	envCall := vm.GetEnvCall()
	params := envCall.GetParams()
	if len(params) != 2 {
		return false, errors.NewErr("[putstore] parameter count error")
	}
	key, err := vm.GetPointerMemory(params[0])
	if err != nil {
		return false, err
	}
	if len(key) > 1024 {
		// Fixed garbled message (was "[putstore] Get Storage key to long").
		return false, errors.NewErr("[putstore] storage key too long")
	}
	value, err := vm.GetPointerMemory(params[1])
	if err != nil {
		return false, err
	}
	k, err := serializeStorageKey(vm.ContractAddress, []byte(util.TrimBuffToString(key)))
	if err != nil {
		return false, err
	}
	this.CloneCache.Add(scommon.ST_STORAGE, k, &states.StorageItem{Value: value})
	vm.RestoreCtx()
	return true, nil
}
// getstore implements the contract storage-get API: params[0] points at the
// key. On a hit, the value is copied into VM memory and its pointer pushed
// as the result; on a miss, VM_NIL_POINTER is pushed instead. The VM
// context is restored before pushing any result.
func (this *WasmVmService) getstore(engine *exec.ExecutionEngine) (bool, error) {
	vm := engine.GetVM()
	envCall := vm.GetEnvCall()
	params := envCall.GetParams()
	if len(params) != 1 {
		return false, errors.NewErr("[getstore] parameter count error ")
	}
	key, err := vm.GetPointerMemory(params[0])
	if err != nil {
		return false, err
	}
	k, err := serializeStorageKey(vm.ContractAddress, []byte(util.TrimBuffToString(key)))
	if err != nil {
		return false, err
	}
	item, err := this.CloneCache.Get(scommon.ST_STORAGE, k)
	if err != nil {
		return false, err
	}
	if item == nil {
		// Miss: restore the context first, then signal "no value".
		vm.RestoreCtx()
		if envCall.GetReturns() {
			vm.PushResult(uint64(memory.VM_NIL_POINTER))
		}
		return true, nil
	}
	idx, err := vm.SetPointerMemory(item.(*states.StorageItem).Value)
	if err != nil {
		return false, err
	}
	vm.RestoreCtx()
	if envCall.GetReturns() {
		vm.PushResult(uint64(idx))
	}
	return true, nil
}
// deletestore implements the contract storage-delete API: params[0] points
// at the key, whose entry is removed from the clone cache under the calling
// contract's address. The VM context is restored before returning.
func (this *WasmVmService) deletestore(engine *exec.ExecutionEngine) (bool, error) {
	vm := engine.GetVM()
	envCall := vm.GetEnvCall()
	params := envCall.GetParams()
	if len(params) != 1 {
		return false, errors.NewErr("[deletestore] parameter count error")
	}
	key, err := vm.GetPointerMemory(params[0])
	if err != nil {
		return false, err
	}
	k, err := serializeStorageKey(vm.ContractAddress, []byte(util.TrimBuffToString(key)))
	if err != nil {
		return false, err
	}
	this.CloneCache.Delete(scommon.ST_STORAGE, k)
	vm.RestoreCtx()
	return true, nil
}
// serializeStorageKey serializes (contractAddress, key) into the byte form
// used as the storage lookup key, returning an empty slice on failure.
func serializeStorageKey(contractAddress common.Address, key []byte) ([]byte, error) {
	bf := new(bytes.Buffer)
	storageKey := &states.StorageKey{ContractAddress: contractAddress, Key: key}
	if _, err := storageKey.Serialize(bf); err != nil {
		return []byte{}, errors.NewErr("[serializeStorageKey] StorageKey serialize error!")
	}
	return bf.Bytes(), nil
}
|
package main
import (
"log"
"net/http"
"net/http/httputil"
"net/url"
"flag"
"strings"
"path/filepath"
"os"
)
// handle is an http.Handler that forwards requests to reverseProxy, except
// for URLs containing specUrlHead, which are served from the local
// directory (see ServeHTTP).
type handle struct {
	reverseProxy string
}
// substring returns the rune-indexed slice source[start:end). Out-of-range
// or inverted indices yield the empty string; the full range returns
// source unchanged without re-encoding.
func substring(source string, start int, end int) string {
	runes := []rune(source)
	if start < 0 || end > len(runes) || start > end {
		return ""
	}
	if start == 0 && end == len(runes) {
		return source
	}
	return string(runes[start:end])
}
// ServeHTTP proxies the request to the configured backend, except for URLs
// containing specUrlHead, whose trailing path is served from the local
// localDir directory instead.
//
// Changes from the original: idiomatic receiver name (was "this"); the
// local file path is sliced by byte offsets — strings.Index returns byte
// positions, so feeding them to the rune-indexed substring() mis-sliced
// multi-byte paths; paths are joined with filepath.Join instead of string
// concatenation; the MkdirAll error is no longer silently discarded.
func (h *handle) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	remote, err := url.Parse(h.reverseProxy)
	if err != nil {
		log.Fatalln(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(remote)
	r.Host = remote.Host
	log.Println(r.RemoteAddr + " " + r.Method + " " + r.URL.String() + " " + r.URL.Path + " " + r.Proto + " " + r.UserAgent())
	path := r.URL.Path
	pos := strings.Index(path, specUrlHead)
	if pos == -1 {
		proxy.ServeHTTP(w, r)
		return
	}
	realFilePath := path[pos+len(specUrlHead):]
	if len(realFilePath) > 0 {
		log.Println("start serve local file " + realFilePath)
		localPath := filepath.Join(".", localDir)
		if err := os.MkdirAll(localPath, os.ModePerm); err != nil {
			log.Println("MkdirAll: ", err)
		}
		http.ServeFile(w, r, filepath.Join(localPath, realFilePath))
	}
}
// remoteHttpAddr is the default backend address for the -r flag.
var remoteHttpAddr = "http://114.55.5.207:82"

// localDir is the local directory backing special file requests.
var localDir = "spa"

// specUrlHead marks URLs to serve from localDir instead of proxying.
var specUrlHead = "/spa/"
func main() {
bind := flag.String("l", "0.0.0.0:8888", "listen on ip:port")
remote := flag.String("r", remoteHttpAddr, "reverse proxy addr")
flag.Parse()
log.Printf("Listening on %s, forwarding to %s", *bind, *remote)
h := &handle{reverseProxy: *remote}
err := http.ListenAndServe(*bind, h)
if err != nil {
log.Fatalln("ListenAndServe: ", err)
}
} |
package main
import (
"io"
)
// ByteCounter is an io.Writer that discards its input and accumulates the
// total number of bytes written.
type ByteCounter int

// Write adds len(p) to the counter and reports the full length as written.
func (b *ByteCounter) Write(p []byte) (int, error) {
	n := len(p)
	*b += ByteCounter(n) // convert to the counter's type before adding
	return n, nil
}
// Writer mirrors io.Writer for demonstration purposes.
type Writer interface {
	Write([]byte) (int, error)
}

// Reader mirrors io.Reader.
type Reader interface {
	Read([]byte) (int, error)
}

// Closer mirrors io.Closer.
type Closer interface {
	Close() error
}

// type ReadWriter interface {
// 	Reader
// 	Writer
// }

// ReadWriter demonstrates mixing an embedded interface with an explicitly
// declared method — equivalent to embedding both (see the commented form).
type ReadWriter interface {
	Reader
	Write([]byte) (int, error)
}

// ReadWriteCloser composes ReadWriter and Closer by embedding.
type ReadWriteCloser interface {
	ReadWriter
	Closer
}
type StringReader struct {
str []byte
p int
}
func (s *StringReader) Read(b []byte) (int, error) {
start := s.p
if start >= len(s.str) {
return 0, io.EOF
}
end := start + len(b)
if end > len(s.str) {
end = len(s.str)
}
copy(b, s.str[start:end])
s.p += len(b)
return len(b), nil
}
// NewStringReader returns a StringReader positioned at the start of s.
func NewStringReader(s string) *StringReader {
	r := StringReader{str: []byte(s)}
	return &r
}
|
// Tomato static website generator
// Copyright Quentin Ribac, 2018
// Free software license can be found in the LICENSE file.
package main
import (
	"fmt"
	"io"
	"os"
)
// FileExists returns whether a given name exists and is a regular file.
func FileExists(name string) bool {
if fi, err := os.Stat(name); err == nil && fi.Mode().IsRegular() {
return true
}
return false
}
// DirectoryExists returns whether name exists and is a directory.
// (The original comment said "FileExists" — a copy-paste slip.)
func DirectoryExists(name string) bool {
	fi, err := os.Stat(name)
	return err == nil && fi.Mode().IsDir()
}
// ReadFile reads all the content of a file and returns it as a slice of bytes.
func ReadFile(name string) ([]byte, error) {
f, err := os.Open(name)
if err != nil {
return nil, err
}
var content []byte
buff := make([]byte, 1024)
offset := int64(0)
for {
n, err := f.ReadAt(buff, offset)
offset += int64(n)
content = append(content, buff[:n]...)
if err != nil {
break
}
}
return content, nil
}
// WalkDir walks the directory tree rooted at root, breadth-first. In every
// directory it first invokes callback on each regular file, then queues all
// subdirectories. The first error (open, listing, or callback) is logged to
// stderr and returned.
//
// Fix over the original: each opened directory handle is now closed; before,
// one descriptor leaked per directory for the lifetime of the walk.
func WalkDir(root string, callback func(fname string) error) error {
	for dirQueue := []string{root}; len(dirQueue) > 0; dirQueue = dirQueue[1:] {
		current := dirQueue[0]
		dir, err := os.Open(current)
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return err
		}
		names, err := dir.Readdirnames(0)
		// Close immediately — a defer inside the loop would pile up until
		// WalkDir returns.
		dir.Close()
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return err
		}
		for _, name := range names {
			path := current + "/" + name
			if FileExists(path) {
				if err := callback(path); err != nil {
					fmt.Fprintln(os.Stderr, err)
					return err
				}
			} else if DirectoryExists(path) {
				dirQueue = append(dirQueue, path)
			}
		}
	}
	return nil
}
|
package main
// Leetcode 5891. (medium)
// missingRolls reconstructs n missing die rolls so that all len(rolls)+n
// rolls average exactly mean. It returns nil when no valid assignment
// exists (the required sum falls outside [n, 6n]). The quotient is
// distributed evenly, with the remainder spread as +1 over the first rolls.
func missingRolls(rolls []int, mean int, n int) []int {
	observed := 0
	for _, v := range rolls {
		observed += v
	}
	missing := mean*(len(rolls)+n) - observed
	if missing < n || missing > 6*n {
		return nil
	}
	quotient, remainder := missing/n, missing%n
	res := make([]int, n)
	for i := range res {
		res[i] = quotient
		if i < remainder {
			res[i]++
		}
	}
	return res
}
|
package mssql
import "time"
type DBAttachment struct {
AttachmentId int32 `gorm:"column:AttachmentId;primaryKey"`
FileName string `gorm:"column:FileName"`
FileStoreId int32 `gorm:"column:FileStoreId"`
DocumentId int32 `gorm:"column:DocumentId"`
FileTypeId int32 `gorm:"column:FileTypeId"`
JournalDocument DBJournalDocument `gorm:"foreignKey:DocumentId;references:DocumentId"`
FileType DBFileType `gorm:"foreignKey:FileTypeId;references:FileTypeId"`
FileStore DBFileStore `gorm:"foreignKey:FileStoreId;references:FileStoreId"`
}
func (DBAttachment) TableName() string {
return "Attachment"
}
type DBFileStore struct {
FileStoreId int32 `gorm:"column:FileStoreId;primaryKey"`
Path string `gorm:"column:Path"`
}
type DBFileType struct {
FileTypeId int32 `gorm:"column:FileTypeId;primaryKey"`
Name string `gorm:"column:Name"`
}
func (DBFileType) TableName() string {
return "FileType"
}
func (DBFileStore) TableName() string {
return "FileStore"
}
type DBJournalDocument struct {
DocumentId int32 `gorm:"column:DocumentId;primaryKey"`
Content string `gorm:"column:Content"`
JournalId int32 `gorm:"column:Journalid"`
CreatedBy string `gorm:"column:CreatedBy"`
Title string `gorm:"column:Title"`
Summary string `gorm:"column:Summary"`
CreationTime time.Time `gorm:"column:CreationTime"`
Attachments []DBAttachment `gorm:"foreignKey:DocumentId"`
}
func (DBJournalDocument) TableName() string {
return "Document"
}
func (DBJournalDocument) GetPatientIDQuery() string {
return "SELECT j.PatientId FROM Document AS d INNER JOIN Journal AS j on j.JournalId = d.JournalId WHERE d.DocumentId=?"
}
type DBJournal struct {
JournalId int32 `gorm:"column:JournalId;primaryKey"`
CreationTime time.Time `gorm:"column:CreationTime"`
Intro string `gorm:"column:Intro"`
Patient string `gorm:"column:Patient"`
CreatedBy string `gorm:"column:CreatedBy"`
}
func (DBJournal) TableName() string {
return "Journal"
}
type DBToken struct {
Token string `gorm:"column:Token;primaryKey"`
IssuedAt time.Time `gorm:"column:IssuedAt"`
ValidUntil time.Time `gorm:"column:ValidUntil"`
}
func (DBToken) TableName() string {
return "Tokens"
}
type DBPatientDiagnoseSymptom struct {
PatientDiagnoseId int32 `gorm:"column:PatientDiagnoseId"`
SymptomId int32 `gorm:"column:SymptomId"`
PatientDiagnose DBPatientDiagnose `gorm:"foreignKey:PatientDiagnoseId;references:PatientDiagnoseId"`
Symptom DBSymptom `gorm:"foreignKey:SymptomId;references:SymptomId"`
}
func (DBPatientDiagnoseSymptom) TableName() string {
return "PatientDiagnoseSymptom"
}
type DBPatientDiagnose struct {
PatientDiagnoseId int32 `gorm:"column:PatientDiagnoseId;primaryKey"`
Patient string `gorm:"column:Patient"`
DiagnoseId int32 `gorm:"column:DiagnoseId"`
CreationTime time.Time `gorm:"column:CreationTime"`
Diagnose DBDiagnose `gorm:"foreignKey:DiagnoseId;references:DiagnoseId"`
Symptoms []DBSymptom `gorm:"many2many:PatientDiagnoseSymptom;foreignKey:PatientDiagnoseId;joinForeignKey:PatientDiagnoseId;"`
}
func (DBPatientDiagnose) TableName() string {
return "PatientDiagnose"
}
type DBDiagnose struct {
DiagnoseId int32 `gorm:"column:DiagnoseId;primaryKey"`
Description string `gorm:"column:Description"`
}
func (DBDiagnose) TableName() string {
return "Diagnose"
}
type DBSymptom struct {
SymptomId int32 `gorm:"column:SymptomId;primaryKey"`
Description string `gorm:"column:Description"`
PatientDiagnoses []DBPatientDiagnose `gorm:"many2many:PatientDiagnoseSymptom;foreignKey:SymptomId;joinForeignKey:SymptomId;"`
}
func (DBSymptom) TableName() string {
return "Symptom"
}
type DBHospital struct {
HospitalId int32 `gorm:"column:HospitalId"`
Name string `gorm:"column:Name"`
Address string `gorm:"column:Address"`
City string `gorm:"column:City"`
PostalCode string `gorm:"column:PostalCode"`
Country string `gorm:"column:Country"`
}
func (DBHospital) TableName() string {
return "Hospital"
}
type DBBooking struct {
BookingId int32 `gorm:"column:BookingId;primaryKey"`
Bookedtime time.Time `gorm:"column:BookedTime"`
BookedEnd time.Time `gorm:"column:BookedEnd"`
Patient string `gorm:"column:Patient"`
Employee string `gorm:"column:Employee"`
Approved bool `gorm:"column:Approved"`
HospitalId int32 `gorm:"column:HospitalId"`
Hospital DBHospital `gorm:"foreignKey:HospitalId;references:HospitalId"`
}
func (DBBooking) TableName() string {
return "Booking"
}
// type DBEmployee struct {
// EmployeeId int32 `gorm:"column:EmployeeId;primaryKey"`
// Name string `gorm:"column:Name"`
// WorktitleId int32 `gorm:"column:WorktitleId"`
// DepartmentId int32 `gorm:"column:DepartmentId"`
// Username string `gorm:"column:Username"`
// }
// func (DBEmployee) TableName() string {
// return "Employee"
// }
type DBBed struct {
BedId int32 `gorm:"column:BedId;primaryKey"`
Name string `gorm:"column:Name"`
DepartmentId int32 `gorm:"column:DepartmentId"`
IsAvailable bool `gorm:"column:IsAvailable"`
Department DBDepartment `gorm:"foreignKey:DepartmentId;references:DepartmentId"`
}
func (DBBed) TableName() string {
return "Bed"
}
type DBHospitilization struct {
HospitilizationId int32 `gorm:"column:HospitilizationId;primaryKey"`
BookingId int32 `gorm:"column:BookingId"`
Description string `gorm:"column:Description"`
StartedTime time.Time `gorm:"column:StartedTime"`
EndedTime time.Time `gorm:"column:EndedTime"`
BedId int32 `gorm:"column:BedId"`
Bed DBBed `gorm:"foreignKey:BedId;references:BedId"`
Booking DBBooking `gorm:"foreignKey:BookingId;references:BookingId"`
}
func (DBHospitilization) TableName() string {
return "Hospitilization"
}
type DBExamination struct {
ExaminationId int32 `gorm:"column:ExaminationId;primaryKey"`
Description string `gorm:"column:Description"`
StartedTime time.Time `gorm:"column:StartedTime"`
EndedTime time.Time `gorm:"column:EndedTime"`
BookingId int32 `gorm:"column:BookingId"`
Booking DBBooking `gorm:"foreignKey:BookingId;references:BookingId"`
}
func (DBExamination) TableName() string {
return "Examination"
}
// DBDepartment is the GORM model for one row of the "Department" table.
// A department belongs to a hospital and owns a collection of beds.
type DBDepartment struct {
	DepartmentId int32 `gorm:"column:DepartmentId;primaryKey"` // surrogate primary key
	Name string `gorm:"column:Name"`
	Description string `gorm:"column:Description"`
	HospitalId int32 `gorm:"column:HospitalId"` // FK column backing the Hospital association
	// Hospital is the belongs-to association resolved through HospitalId.
	Hospital DBHospital `gorm:"foreignKey:HospitalId;references:HospitalId"`
	// Beds is the has-many association: all DBBed rows whose DepartmentId
	// references this department.
	Beds []DBBed `gorm:"foreignKey:DepartmentId"`
}
// TableName returns the explicit database table name GORM uses for
// DBDepartment, overriding GORM's default naming convention.
func (DBDepartment) TableName() string {
	const table = "Department"
	return table
}
|
// Copyright 2018 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package linux
import "encoding/binary"
// AIORingSize is sizeof(struct aio_ring), the header of the Linux AIO
// completion ring. The value is ABI-defined and must not change.
const AIORingSize = 32
// I/O commands, i.e. the IOCB_CMD_* opcode values carried in
// IOCallback.OpCode. Values are ABI-defined and must not change.
const (
	IOCB_CMD_PREAD = 0
	IOCB_CMD_PWRITE = 1
	IOCB_CMD_FSYNC = 2
	IOCB_CMD_FDSYNC = 3
	// 4 was the experimental IOCB_CMD_PREADX.
	IOCB_CMD_POLL = 5
	IOCB_CMD_NOOP = 6
	IOCB_CMD_PREADV = 7
	IOCB_CMD_PWRITEV = 8
)
// I/O flags, i.e. the bit values carried in IOCallback.Flags.
const (
	// IOCB_FLAG_RESFD marks IOCallback.ResFD as an eventfd to signal on
	// completion (unsupported here; see the IOCallback doc comment).
	IOCB_FLAG_RESFD = 1
	// IOCB_FLAG_IOPRIO presumably marks IOCallback.ReqPrio as valid —
	// confirm against the Linux aio_abi definition.
	IOCB_FLAG_IOPRIO = 2
)
// IOCallback describes an I/O request.
//
// NOTE(review): the field layout appears to mirror Linux's struct iocb
// (the IOCB_CMD_* and IOCB_FLAG_* constants above populate OpCode and
// Flags) — confirm against include/uapi/linux/aio_abi.h before changing
// any field, since +marshal makes the layout part of the wire format.
//
// The priority field is currently ignored in the implementation below. Also
// note that the IOCB_FLAG_RESFD feature is not supported.
//
// +marshal
type IOCallback struct {
	Data uint64 // opaque caller data; presumably echoed back in IOEvent.Data — confirm
	Key uint32
	_ uint32 // explicit padding to keep 64-bit alignment of the fields below

	OpCode uint16 // one of IOCB_CMD_*
	ReqPrio int16 // request priority; currently ignored (see doc comment)
	FD int32 // file descriptor the operation targets

	Buf uint64 // userspace buffer address
	Bytes uint64
	Offset int64

	Reserved2 uint64
	Flags uint32 // bitmask of IOCB_FLAG_*

	// eventfd to signal if IOCB_FLAG_RESFD is set in flags.
	ResFD int32
}
// IOEvent describes an I/O result.
//
// NOTE(review): layout appears to mirror Linux's struct io_event (four
// 64-bit fields) — confirm against include/uapi/linux/aio_abi.h before
// changing, since +marshal makes the layout part of the wire format.
//
// +marshal
// +stateify savable
type IOEvent struct {
	Data uint64 // opaque caller data; presumably copied from IOCallback.Data — confirm
	Obj uint64
	Result int64
	Result2 int64
}
// IOEventSize is the size in bytes of an encoded IOEvent, computed once at
// package initialization via binary.Size.
var IOEventSize = binary.Size(IOEvent{})
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.